]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.1-3.19.2-201503251807.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.1-3.19.2-201503251807.patch
CommitLineData
0555664f
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index a311db8..415b28c 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 176d4fe..17ceefa 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1191,6 +1191,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+
327 hashdist= [KNL,NUMA] Large hashes allocated during boot
328 are distributed across NUMA nodes. Defaults on
329 for 64-bit NUMA, off otherwise.
330@@ -2283,6 +2287,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
331 noexec=on: enable non-executable mappings (default)
332 noexec=off: disable non-executable mappings
333
334+ nopcid [X86-64]
335+ Disable PCID (Process-Context IDentifier) even if it
336+ is supported by the processor.
337+
338 nosmap [X86]
339 Disable SMAP (Supervisor Mode Access Prevention)
340 even if it is supported by processor.
341@@ -2584,6 +2592,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
342 the specified number of seconds. This is to be used if
343 your oopses keep scrolling off the screen.
344
345+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
346+ virtualization environments that don't cope well with the
347+ expand down segment used by UDEREF on X86-32 or the frequent
348+ page table updates on X86-64.
349+
350+ pax_sanitize_slab=
351+ Format: { 0 | 1 | off | fast | full }
352+ Options '0' and '1' are only provided for backward
353+ compatibility, 'off' or 'fast' should be used instead.
354+ 0|off : disable slab object sanitization
355+ 1|fast: enable slab object sanitization excluding
356+ whitelisted slabs (default)
357+ full : sanitize all slabs, even the whitelisted ones
358+
359+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
360+
361+ pax_extra_latent_entropy
362+ Enable a very simple form of latent entropy extraction
363+ from the first 4GB of memory as the bootmem allocator
364+ passes the memory pages to the buddy allocator.
365+
366+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
367+ when the processor supports PCID.
368+
369 pcbit= [HW,ISDN]
370
371 pcd. [PARIDE]
372diff --git a/Makefile b/Makefile
373index e49665a..7c65470 100644
374--- a/Makefile
375+++ b/Makefile
376@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
377 HOSTCC = gcc
378 HOSTCXX = g++
379 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
380-HOSTCXXFLAGS = -O2
381+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
382+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
383+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
384
385 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
386 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
387@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
388 # Rules shared between *config targets and build targets
389
390 # Basic helpers built in scripts/
391-PHONY += scripts_basic
392-scripts_basic:
393+PHONY += scripts_basic gcc-plugins
394+scripts_basic: gcc-plugins
395 $(Q)$(MAKE) $(build)=scripts/basic
396 $(Q)rm -f .tmp_quiet_recordmcount
397
398@@ -622,6 +624,72 @@ endif
399 # Tell gcc to never replace conditional load with a non-conditional one
400 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
401
402+ifndef DISABLE_PAX_PLUGINS
403+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
404+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
405+else
406+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
407+endif
408+ifneq ($(PLUGINCC),)
409+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
410+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
411+endif
412+ifdef CONFIG_PAX_MEMORY_STACKLEAK
413+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
414+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
415+endif
416+ifdef CONFIG_KALLOCSTAT_PLUGIN
417+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
418+endif
419+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
420+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
421+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
422+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
423+endif
424+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
425+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
426+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
427+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
428+endif
429+endif
430+ifdef CONFIG_CHECKER_PLUGIN
431+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
432+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
433+endif
434+endif
435+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
436+ifdef CONFIG_PAX_SIZE_OVERFLOW
437+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
438+endif
439+ifdef CONFIG_PAX_LATENT_ENTROPY
440+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
441+endif
442+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
443+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
444+endif
445+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
446+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
447+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
448+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
450+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
451+ifeq ($(KBUILD_EXTMOD),)
452+gcc-plugins:
453+ $(Q)$(MAKE) $(build)=tools/gcc
454+else
455+gcc-plugins: ;
456+endif
457+else
458+gcc-plugins:
459+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
460+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
461+else
462+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
463+endif
464+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
465+endif
466+endif
467+
468 ifdef CONFIG_READABLE_ASM
469 # Disable optimizations that make assembler listings hard to read.
470 # reorder blocks reorders the control in the function
471@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
472 else
473 KBUILD_CFLAGS += -g
474 endif
475-KBUILD_AFLAGS += -Wa,-gdwarf-2
476+KBUILD_AFLAGS += -Wa,--gdwarf-2
477 endif
478 ifdef CONFIG_DEBUG_INFO_DWARF4
479 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
480@@ -879,7 +947,7 @@ export mod_sign_cmd
481
482
483 ifeq ($(KBUILD_EXTMOD),)
484-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
485+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
486
487 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
488 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
489@@ -926,6 +994,8 @@ endif
490
491 # The actual objects are generated when descending,
492 # make sure no implicit rule kicks in
493+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
496
497 # Handle descending into subdirectories listed in $(vmlinux-dirs)
498@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499 # Error messages still appears in the original language
500
501 PHONY += $(vmlinux-dirs)
502-$(vmlinux-dirs): prepare scripts
503+$(vmlinux-dirs): gcc-plugins prepare scripts
504 $(Q)$(MAKE) $(build)=$@
505
506 define filechk_kernel.release
507@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
508
509 archprepare: archheaders archscripts prepare1 scripts_basic
510
511+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
512+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
513 prepare0: archprepare FORCE
514 $(Q)$(MAKE) $(build)=.
515
516 # All the preparing..
517+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
518 prepare: prepare0
519
520 # Generate some files
521@@ -1095,6 +1168,8 @@ all: modules
522 # using awk while concatenating to the final file.
523
524 PHONY += modules
525+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
526+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
527 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
528 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
529 @$(kecho) ' Building modules, stage 2.';
530@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
531
532 # Target to prepare building external modules
533 PHONY += modules_prepare
534-modules_prepare: prepare scripts
535+modules_prepare: gcc-plugins prepare scripts
536
537 # Target to install modules
538 PHONY += modules_install
539@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
540 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
541 signing_key.priv signing_key.x509 x509.genkey \
542 extra_certificates signing_key.x509.keyid \
543- signing_key.x509.signer
544+ signing_key.x509.signer \
545+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
546+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
547+ tools/gcc/randomize_layout_seed.h
548
549 # clean - Delete most, but leave enough to build external modules
550 #
551@@ -1215,7 +1293,7 @@ distclean: mrproper
552 @find $(srctree) $(RCS_FIND_IGNORE) \
553 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
554 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
555- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
556+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
557 -type f -print | xargs rm -f
558
559
560@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
561 $(module-dirs): crmodverdir $(objtree)/Module.symvers
562 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
563
564+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
565+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
566 modules: $(module-dirs)
567 @$(kecho) ' Building modules, stage 2.';
568 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
569@@ -1521,17 +1601,21 @@ else
570 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
571 endif
572
573-%.s: %.c prepare scripts FORCE
574+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
575+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
576+%.s: %.c gcc-plugins prepare scripts FORCE
577 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
578 %.i: %.c prepare scripts FORCE
579 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
580-%.o: %.c prepare scripts FORCE
581+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
582+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
583+%.o: %.c gcc-plugins prepare scripts FORCE
584 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
585 %.lst: %.c prepare scripts FORCE
586 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
587-%.s: %.S prepare scripts FORCE
588+%.s: %.S gcc-plugins prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.o: %.S prepare scripts FORCE
591+%.o: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593 %.symtypes: %.c prepare scripts FORCE
594 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
595@@ -1543,11 +1627,15 @@ endif
596 $(build)=$(build-dir)
597 # Make sure the latest headers are built for Documentation
598 Documentation/: headers_install
599-%/: prepare scripts FORCE
600+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
601+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
602+%/: gcc-plugins prepare scripts FORCE
603 $(cmd_crmodverdir)
604 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
605 $(build)=$(build-dir)
606-%.ko: prepare scripts FORCE
607+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
608+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
609+%.ko: gcc-plugins prepare scripts FORCE
610 $(cmd_crmodverdir)
611 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
612 $(build)=$(build-dir) $(@:.ko=.o)
613diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
614index 8f8eafb..3405f46 100644
615--- a/arch/alpha/include/asm/atomic.h
616+++ b/arch/alpha/include/asm/atomic.h
617@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
618 #define atomic_dec(v) atomic_sub(1,(v))
619 #define atomic64_dec(v) atomic64_sub(1,(v))
620
621+#define atomic64_read_unchecked(v) atomic64_read(v)
622+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
623+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
624+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
625+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
626+#define atomic64_inc_unchecked(v) atomic64_inc(v)
627+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
628+#define atomic64_dec_unchecked(v) atomic64_dec(v)
629+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
630+
631 #endif /* _ALPHA_ATOMIC_H */
632diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
633index ad368a9..fbe0f25 100644
634--- a/arch/alpha/include/asm/cache.h
635+++ b/arch/alpha/include/asm/cache.h
636@@ -4,19 +4,19 @@
637 #ifndef __ARCH_ALPHA_CACHE_H
638 #define __ARCH_ALPHA_CACHE_H
639
640+#include <linux/const.h>
641
642 /* Bytes per L1 (data) cache line. */
643 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
644-# define L1_CACHE_BYTES 64
645 # define L1_CACHE_SHIFT 6
646 #else
647 /* Both EV4 and EV5 are write-through, read-allocate,
648 direct-mapped, physical.
649 */
650-# define L1_CACHE_BYTES 32
651 # define L1_CACHE_SHIFT 5
652 #endif
653
654+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
655 #define SMP_CACHE_BYTES L1_CACHE_BYTES
656
657 #endif
658diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
659index 968d999..d36b2df 100644
660--- a/arch/alpha/include/asm/elf.h
661+++ b/arch/alpha/include/asm/elf.h
662@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
663
664 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
665
666+#ifdef CONFIG_PAX_ASLR
667+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
668+
669+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
670+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
671+#endif
672+
673 /* $0 is set by ld.so to a pointer to a function which might be
674 registered using atexit. This provides a mean for the dynamic
675 linker to call DT_FINI functions for shared libraries that have
676diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
677index aab14a0..b4fa3e7 100644
678--- a/arch/alpha/include/asm/pgalloc.h
679+++ b/arch/alpha/include/asm/pgalloc.h
680@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
681 pgd_set(pgd, pmd);
682 }
683
684+static inline void
685+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
686+{
687+ pgd_populate(mm, pgd, pmd);
688+}
689+
690 extern pgd_t *pgd_alloc(struct mm_struct *mm);
691
692 static inline void
693diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
694index d8f9b7e..f6222fa 100644
695--- a/arch/alpha/include/asm/pgtable.h
696+++ b/arch/alpha/include/asm/pgtable.h
697@@ -102,6 +102,17 @@ struct vm_area_struct;
698 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
699 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
700 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
704+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
705+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
706+#else
707+# define PAGE_SHARED_NOEXEC PAGE_SHARED
708+# define PAGE_COPY_NOEXEC PAGE_COPY
709+# define PAGE_READONLY_NOEXEC PAGE_READONLY
710+#endif
711+
712 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
713
714 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
715diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
716index 2fd00b7..cfd5069 100644
717--- a/arch/alpha/kernel/module.c
718+++ b/arch/alpha/kernel/module.c
719@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
720
721 /* The small sections were sorted to the end of the segment.
722 The following should definitely cover them. */
723- gp = (u64)me->module_core + me->core_size - 0x8000;
724+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
725 got = sechdrs[me->arch.gotsecindex].sh_addr;
726
727 for (i = 0; i < n; i++) {
728diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
729index e51f578..16c64a3 100644
730--- a/arch/alpha/kernel/osf_sys.c
731+++ b/arch/alpha/kernel/osf_sys.c
732@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
733 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
734
735 static unsigned long
736-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
737- unsigned long limit)
738+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
739+ unsigned long limit, unsigned long flags)
740 {
741 struct vm_unmapped_area_info info;
742+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
743
744 info.flags = 0;
745 info.length = len;
746@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
747 info.high_limit = limit;
748 info.align_mask = 0;
749 info.align_offset = 0;
750+ info.threadstack_offset = offset;
751 return vm_unmapped_area(&info);
752 }
753
754@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
755 merely specific addresses, but regions of memory -- perhaps
756 this feature should be incorporated into all ports? */
757
758+#ifdef CONFIG_PAX_RANDMMAP
759+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
760+#endif
761+
762 if (addr) {
763- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
764+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
765 if (addr != (unsigned long) -ENOMEM)
766 return addr;
767 }
768
769 /* Next, try allocating at TASK_UNMAPPED_BASE. */
770- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
771- len, limit);
772+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
773+
774 if (addr != (unsigned long) -ENOMEM)
775 return addr;
776
777 /* Finally, try allocating in low memory. */
778- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
779+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
780
781 return addr;
782 }
783diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
784index 9d0ac09..479a962 100644
785--- a/arch/alpha/mm/fault.c
786+++ b/arch/alpha/mm/fault.c
787@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
788 __reload_thread(pcb);
789 }
790
791+#ifdef CONFIG_PAX_PAGEEXEC
792+/*
793+ * PaX: decide what to do with offenders (regs->pc = fault address)
794+ *
795+ * returns 1 when task should be killed
796+ * 2 when patched PLT trampoline was detected
797+ * 3 when unpatched PLT trampoline was detected
798+ */
799+static int pax_handle_fetch_fault(struct pt_regs *regs)
800+{
801+
802+#ifdef CONFIG_PAX_EMUPLT
803+ int err;
804+
805+ do { /* PaX: patched PLT emulation #1 */
806+ unsigned int ldah, ldq, jmp;
807+
808+ err = get_user(ldah, (unsigned int *)regs->pc);
809+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
810+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
811+
812+ if (err)
813+ break;
814+
815+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
816+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
817+ jmp == 0x6BFB0000U)
818+ {
819+ unsigned long r27, addr;
820+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
821+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
822+
823+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
824+ err = get_user(r27, (unsigned long *)addr);
825+ if (err)
826+ break;
827+
828+ regs->r27 = r27;
829+ regs->pc = r27;
830+ return 2;
831+ }
832+ } while (0);
833+
834+ do { /* PaX: patched PLT emulation #2 */
835+ unsigned int ldah, lda, br;
836+
837+ err = get_user(ldah, (unsigned int *)regs->pc);
838+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
839+ err |= get_user(br, (unsigned int *)(regs->pc+8));
840+
841+ if (err)
842+ break;
843+
844+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
845+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
846+ (br & 0xFFE00000U) == 0xC3E00000U)
847+ {
848+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
849+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
850+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
851+
852+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
853+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
854+ return 2;
855+ }
856+ } while (0);
857+
858+ do { /* PaX: unpatched PLT emulation */
859+ unsigned int br;
860+
861+ err = get_user(br, (unsigned int *)regs->pc);
862+
863+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
864+ unsigned int br2, ldq, nop, jmp;
865+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
866+
867+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
868+ err = get_user(br2, (unsigned int *)addr);
869+ err |= get_user(ldq, (unsigned int *)(addr+4));
870+ err |= get_user(nop, (unsigned int *)(addr+8));
871+ err |= get_user(jmp, (unsigned int *)(addr+12));
872+ err |= get_user(resolver, (unsigned long *)(addr+16));
873+
874+ if (err)
875+ break;
876+
877+ if (br2 == 0xC3600000U &&
878+ ldq == 0xA77B000CU &&
879+ nop == 0x47FF041FU &&
880+ jmp == 0x6B7B0000U)
881+ {
882+ regs->r28 = regs->pc+4;
883+ regs->r27 = addr+16;
884+ regs->pc = resolver;
885+ return 3;
886+ }
887+ }
888+ } while (0);
889+#endif
890+
891+ return 1;
892+}
893+
894+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
895+{
896+ unsigned long i;
897+
898+ printk(KERN_ERR "PAX: bytes at PC: ");
899+ for (i = 0; i < 5; i++) {
900+ unsigned int c;
901+ if (get_user(c, (unsigned int *)pc+i))
902+ printk(KERN_CONT "???????? ");
903+ else
904+ printk(KERN_CONT "%08x ", c);
905+ }
906+ printk("\n");
907+}
908+#endif
909
910 /*
911 * This routine handles page faults. It determines the address,
912@@ -133,8 +251,29 @@ retry:
913 good_area:
914 si_code = SEGV_ACCERR;
915 if (cause < 0) {
916- if (!(vma->vm_flags & VM_EXEC))
917+ if (!(vma->vm_flags & VM_EXEC)) {
918+
919+#ifdef CONFIG_PAX_PAGEEXEC
920+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
921+ goto bad_area;
922+
923+ up_read(&mm->mmap_sem);
924+ switch (pax_handle_fetch_fault(regs)) {
925+
926+#ifdef CONFIG_PAX_EMUPLT
927+ case 2:
928+ case 3:
929+ return;
930+#endif
931+
932+ }
933+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
934+ do_group_exit(SIGKILL);
935+#else
936 goto bad_area;
937+#endif
938+
939+ }
940 } else if (!cause) {
941 /* Allow reads even for write-only mappings */
942 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
943diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
944index 97d07ed..2931f2b 100644
945--- a/arch/arm/Kconfig
946+++ b/arch/arm/Kconfig
947@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP
948
949 config UACCESS_WITH_MEMCPY
950 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
951- depends on MMU
952+ depends on MMU && !PAX_MEMORY_UDEREF
953 default y if CPU_FEROCEON
954 help
955 Implement faster copy_to_user and clear_user methods for CPU
956@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
957 config KEXEC
958 bool "Kexec system call (EXPERIMENTAL)"
959 depends on (!SMP || PM_SLEEP_SMP)
960+ depends on !GRKERNSEC_KMEM
961 help
962 kexec is a system call that implements the ability to shutdown your
963 current kernel, and to start another kernel. It is like a reboot
964diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
965index e22c119..eaa807d 100644
966--- a/arch/arm/include/asm/atomic.h
967+++ b/arch/arm/include/asm/atomic.h
968@@ -18,17 +18,41 @@
969 #include <asm/barrier.h>
970 #include <asm/cmpxchg.h>
971
972+#ifdef CONFIG_GENERIC_ATOMIC64
973+#include <asm-generic/atomic64.h>
974+#endif
975+
976 #define ATOMIC_INIT(i) { (i) }
977
978 #ifdef __KERNEL__
979
980+#ifdef CONFIG_THUMB2_KERNEL
981+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
982+#else
983+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
984+#endif
985+
986+#define _ASM_EXTABLE(from, to) \
987+" .pushsection __ex_table,\"a\"\n"\
988+" .align 3\n" \
989+" .long " #from ", " #to"\n" \
990+" .popsection"
991+
992 /*
993 * On ARM, ordinary assignment (str instruction) doesn't clear the local
994 * strex/ldrex monitor on some implementations. The reason we can use it for
995 * atomic_set() is the clrex or dummy strex done on every exception return.
996 */
997 #define atomic_read(v) ACCESS_ONCE((v)->counter)
998+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
999+{
1000+ return ACCESS_ONCE(v->counter);
1001+}
1002 #define atomic_set(v,i) (((v)->counter) = (i))
1003+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1004+{
1005+ v->counter = i;
1006+}
1007
1008 #if __LINUX_ARM_ARCH__ >= 6
1009
1010@@ -38,26 +62,50 @@
1011 * to ensure that the update happens.
1012 */
1013
1014-#define ATOMIC_OP(op, c_op, asm_op) \
1015-static inline void atomic_##op(int i, atomic_t *v) \
1016+#ifdef CONFIG_PAX_REFCOUNT
1017+#define __OVERFLOW_POST \
1018+ " bvc 3f\n" \
1019+ "2: " REFCOUNT_TRAP_INSN "\n"\
1020+ "3:\n"
1021+#define __OVERFLOW_POST_RETURN \
1022+ " bvc 3f\n" \
1023+" mov %0, %1\n" \
1024+ "2: " REFCOUNT_TRAP_INSN "\n"\
1025+ "3:\n"
1026+#define __OVERFLOW_EXTABLE \
1027+ "4:\n" \
1028+ _ASM_EXTABLE(2b, 4b)
1029+#else
1030+#define __OVERFLOW_POST
1031+#define __OVERFLOW_POST_RETURN
1032+#define __OVERFLOW_EXTABLE
1033+#endif
1034+
1035+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1036+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1037 { \
1038 unsigned long tmp; \
1039 int result; \
1040 \
1041 prefetchw(&v->counter); \
1042- __asm__ __volatile__("@ atomic_" #op "\n" \
1043+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1044 "1: ldrex %0, [%3]\n" \
1045 " " #asm_op " %0, %0, %4\n" \
1046+ post_op \
1047 " strex %1, %0, [%3]\n" \
1048 " teq %1, #0\n" \
1049-" bne 1b" \
1050+" bne 1b\n" \
1051+ extable \
1052 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1053 : "r" (&v->counter), "Ir" (i) \
1054 : "cc"); \
1055 } \
1056
1057-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1058-static inline int atomic_##op##_return(int i, atomic_t *v) \
1059+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\
1060+ __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1061+
1062+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1063+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1064 { \
1065 unsigned long tmp; \
1066 int result; \
1067@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1068 smp_mb(); \
1069 prefetchw(&v->counter); \
1070 \
1071- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1072+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1073 "1: ldrex %0, [%3]\n" \
1074 " " #asm_op " %0, %0, %4\n" \
1075+ post_op \
1076 " strex %1, %0, [%3]\n" \
1077 " teq %1, #0\n" \
1078-" bne 1b" \
1079+" bne 1b\n" \
1080+ extable \
1081 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1082 : "r" (&v->counter), "Ir" (i) \
1083 : "cc"); \
1084@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1085 return result; \
1086 }
1087
1088+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\
1089+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1090+
1091 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1092 {
1093 int oldval;
1094@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1095 __asm__ __volatile__ ("@ atomic_add_unless\n"
1096 "1: ldrex %0, [%4]\n"
1097 " teq %0, %5\n"
1098-" beq 2f\n"
1099-" add %1, %0, %6\n"
1100+" beq 4f\n"
1101+" adds %1, %0, %6\n"
1102+
1103+#ifdef CONFIG_PAX_REFCOUNT
1104+" bvc 3f\n"
1105+"2: " REFCOUNT_TRAP_INSN "\n"
1106+"3:\n"
1107+#endif
1108+
1109 " strex %2, %1, [%4]\n"
1110 " teq %2, #0\n"
1111 " bne 1b\n"
1112-"2:"
1113+"4:"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+ _ASM_EXTABLE(2b, 4b)
1117+#endif
1118+
1119 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1120 : "r" (&v->counter), "r" (u), "r" (a)
1121 : "cc");
1122@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1123 return oldval;
1124 }
1125
1126+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1127+{
1128+ unsigned long oldval, res;
1129+
1130+ smp_mb();
1131+
1132+ do {
1133+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1134+ "ldrex %1, [%3]\n"
1135+ "mov %0, #0\n"
1136+ "teq %1, %4\n"
1137+ "strexeq %0, %5, [%3]\n"
1138+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1139+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1140+ : "cc");
1141+ } while (res);
1142+
1143+ smp_mb();
1144+
1145+ return oldval;
1146+}
1147+
1148 #else /* ARM_ARCH_6 */
1149
1150 #ifdef CONFIG_SMP
1151 #error SMP not supported on pre-ARMv6 CPUs
1152 #endif
1153
1154-#define ATOMIC_OP(op, c_op, asm_op) \
1155-static inline void atomic_##op(int i, atomic_t *v) \
1156+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1157+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1158 { \
1159 unsigned long flags; \
1160 \
1161@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1162 raw_local_irq_restore(flags); \
1163 } \
1164
1165-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1166-static inline int atomic_##op##_return(int i, atomic_t *v) \
1167+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1168+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1169+
1170+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1171+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1172 { \
1173 unsigned long flags; \
1174 int val; \
1175@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1176 return val; \
1177 }
1178
1179+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1180+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1181+
1182 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1183 {
1184 int ret;
1185@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 return ret;
1187 }
1188
1189+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1190+{
1191+ return atomic_cmpxchg((atomic_t *)v, old, new);
1192+}
1193+
1194 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1195 {
1196 int c, old;
1197@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1198
1199 #undef ATOMIC_OPS
1200 #undef ATOMIC_OP_RETURN
1201+#undef __ATOMIC_OP_RETURN
1202 #undef ATOMIC_OP
1203+#undef __ATOMIC_OP
1204
1205 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1206+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1207+{
1208+ return xchg(&v->counter, new);
1209+}
1210
1211 #define atomic_inc(v) atomic_add(1, v)
1212+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1213+{
1214+ atomic_add_unchecked(1, v);
1215+}
1216 #define atomic_dec(v) atomic_sub(1, v)
1217+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1218+{
1219+ atomic_sub_unchecked(1, v);
1220+}
1221
1222 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1223+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1224+{
1225+ return atomic_add_return_unchecked(1, v) == 0;
1226+}
1227 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1228 #define atomic_inc_return(v) (atomic_add_return(1, v))
1229+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1230+{
1231+ return atomic_add_return_unchecked(1, v);
1232+}
1233 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1234 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1235
1236@@ -216,6 +336,14 @@ typedef struct {
1237 long long counter;
1238 } atomic64_t;
1239
1240+#ifdef CONFIG_PAX_REFCOUNT
1241+typedef struct {
1242+ long long counter;
1243+} atomic64_unchecked_t;
1244+#else
1245+typedef atomic64_t atomic64_unchecked_t;
1246+#endif
1247+
1248 #define ATOMIC64_INIT(i) { (i) }
1249
1250 #ifdef CONFIG_ARM_LPAE
1251@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1252 return result;
1253 }
1254
1255+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1256+{
1257+ long long result;
1258+
1259+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1260+" ldrd %0, %H0, [%1]"
1261+ : "=&r" (result)
1262+ : "r" (&v->counter), "Qo" (v->counter)
1263+ );
1264+
1265+ return result;
1266+}
1267+
1268 static inline void atomic64_set(atomic64_t *v, long long i)
1269 {
1270 __asm__ __volatile__("@ atomic64_set\n"
1271@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1272 : "r" (&v->counter), "r" (i)
1273 );
1274 }
1275+
1276+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1277+{
1278+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1279+" strd %2, %H2, [%1]"
1280+ : "=Qo" (v->counter)
1281+ : "r" (&v->counter), "r" (i)
1282+ );
1283+}
1284 #else
1285 static inline long long atomic64_read(const atomic64_t *v)
1286 {
1287@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1288 return result;
1289 }
1290
1291+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1292+{
1293+ long long result;
1294+
1295+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1296+" ldrexd %0, %H0, [%1]"
1297+ : "=&r" (result)
1298+ : "r" (&v->counter), "Qo" (v->counter)
1299+ );
1300+
1301+ return result;
1302+}
1303+
1304 static inline void atomic64_set(atomic64_t *v, long long i)
1305 {
1306 long long tmp;
1307@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1308 : "r" (&v->counter), "r" (i)
1309 : "cc");
1310 }
1311+
1312+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1313+{
1314+ long long tmp;
1315+
1316+ prefetchw(&v->counter);
1317+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1318+"1: ldrexd %0, %H0, [%2]\n"
1319+" strexd %0, %3, %H3, [%2]\n"
1320+" teq %0, #0\n"
1321+" bne 1b"
1322+ : "=&r" (tmp), "=Qo" (v->counter)
1323+ : "r" (&v->counter), "r" (i)
1324+ : "cc");
1325+}
1326 #endif
1327
1328-#define ATOMIC64_OP(op, op1, op2) \
1329-static inline void atomic64_##op(long long i, atomic64_t *v) \
1330+#undef __OVERFLOW_POST_RETURN
1331+#define __OVERFLOW_POST_RETURN \
1332+ " bvc 3f\n" \
1333+" mov %0, %1\n" \
1334+" mov %H0, %H1\n" \
1335+ "2: " REFCOUNT_TRAP_INSN "\n"\
1336+ "3:\n"
1337+
1338+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1339+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1340 { \
1341 long long result; \
1342 unsigned long tmp; \
1343 \
1344 prefetchw(&v->counter); \
1345- __asm__ __volatile__("@ atomic64_" #op "\n" \
1346+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1347 "1: ldrexd %0, %H0, [%3]\n" \
1348 " " #op1 " %Q0, %Q0, %Q4\n" \
1349 " " #op2 " %R0, %R0, %R4\n" \
1350+ post_op \
1351 " strexd %1, %0, %H0, [%3]\n" \
1352 " teq %1, #0\n" \
1353-" bne 1b" \
1354+" bne 1b\n" \
1355+ extable \
1356 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1357 : "r" (&v->counter), "r" (i) \
1358 : "cc"); \
1359 } \
1360
1361-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1362-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1363+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \
1364+ __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1365+
1366+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1367+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1368 { \
1369 long long result; \
1370 unsigned long tmp; \
1371@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1372 smp_mb(); \
1373 prefetchw(&v->counter); \
1374 \
1375- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1376+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1377 "1: ldrexd %0, %H0, [%3]\n" \
1378 " " #op1 " %Q0, %Q0, %Q4\n" \
1379 " " #op2 " %R0, %R0, %R4\n" \
1380+ post_op \
1381 " strexd %1, %0, %H0, [%3]\n" \
1382 " teq %1, #0\n" \
1383-" bne 1b" \
1384+" bne 1b\n" \
1385+ extable \
1386 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1387 : "r" (&v->counter), "r" (i) \
1388 : "cc"); \
1389@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1390 return result; \
1391 }
1392
1393+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \
1394+ __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1395+
1396 #define ATOMIC64_OPS(op, op1, op2) \
1397 ATOMIC64_OP(op, op1, op2) \
1398 ATOMIC64_OP_RETURN(op, op1, op2)
1399@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1400
1401 #undef ATOMIC64_OPS
1402 #undef ATOMIC64_OP_RETURN
1403+#undef __ATOMIC64_OP_RETURN
1404 #undef ATOMIC64_OP
1405+#undef __ATOMIC64_OP
1406+#undef __OVERFLOW_EXTABLE
1407+#undef __OVERFLOW_POST_RETURN
1408+#undef __OVERFLOW_POST
1409
1410 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1411 long long new)
1412@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1413 return oldval;
1414 }
1415
1416+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1417+ long long new)
1418+{
1419+ long long oldval;
1420+ unsigned long res;
1421+
1422+ smp_mb();
1423+
1424+ do {
1425+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1426+ "ldrexd %1, %H1, [%3]\n"
1427+ "mov %0, #0\n"
1428+ "teq %1, %4\n"
1429+ "teqeq %H1, %H4\n"
1430+ "strexdeq %0, %5, %H5, [%3]"
1431+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1432+ : "r" (&ptr->counter), "r" (old), "r" (new)
1433+ : "cc");
1434+ } while (res);
1435+
1436+ smp_mb();
1437+
1438+ return oldval;
1439+}
1440+
1441 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1442 {
1443 long long result;
1444@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1446 {
1447 long long result;
1448- unsigned long tmp;
1449+ u64 tmp;
1450
1451 smp_mb();
1452 prefetchw(&v->counter);
1453
1454 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1455-"1: ldrexd %0, %H0, [%3]\n"
1456-" subs %Q0, %Q0, #1\n"
1457-" sbc %R0, %R0, #0\n"
1458+"1: ldrexd %1, %H1, [%3]\n"
1459+" subs %Q0, %Q1, #1\n"
1460+" sbcs %R0, %R1, #0\n"
1461+
1462+#ifdef CONFIG_PAX_REFCOUNT
1463+" bvc 3f\n"
1464+" mov %Q0, %Q1\n"
1465+" mov %R0, %R1\n"
1466+"2: " REFCOUNT_TRAP_INSN "\n"
1467+"3:\n"
1468+#endif
1469+
1470 " teq %R0, #0\n"
1471-" bmi 2f\n"
1472+" bmi 4f\n"
1473 " strexd %1, %0, %H0, [%3]\n"
1474 " teq %1, #0\n"
1475 " bne 1b\n"
1476-"2:"
1477+"4:\n"
1478+
1479+#ifdef CONFIG_PAX_REFCOUNT
1480+ _ASM_EXTABLE(2b, 4b)
1481+#endif
1482+
1483 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1484 : "r" (&v->counter)
1485 : "cc");
1486@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1487 " teq %0, %5\n"
1488 " teqeq %H0, %H5\n"
1489 " moveq %1, #0\n"
1490-" beq 2f\n"
1491+" beq 4f\n"
1492 " adds %Q0, %Q0, %Q6\n"
1493-" adc %R0, %R0, %R6\n"
1494+" adcs %R0, %R0, %R6\n"
1495+
1496+#ifdef CONFIG_PAX_REFCOUNT
1497+" bvc 3f\n"
1498+"2: " REFCOUNT_TRAP_INSN "\n"
1499+"3:\n"
1500+#endif
1501+
1502 " strexd %2, %0, %H0, [%4]\n"
1503 " teq %2, #0\n"
1504 " bne 1b\n"
1505-"2:"
1506+"4:\n"
1507+
1508+#ifdef CONFIG_PAX_REFCOUNT
1509+ _ASM_EXTABLE(2b, 4b)
1510+#endif
1511+
1512 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1513 : "r" (&v->counter), "r" (u), "r" (a)
1514 : "cc");
1515@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1516
1517 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1518 #define atomic64_inc(v) atomic64_add(1LL, (v))
1519+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1520 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1521+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1522 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1523 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1524 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1525+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1526 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1527 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1528 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1529diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1530index d2f81e6..3c4dba5 100644
1531--- a/arch/arm/include/asm/barrier.h
1532+++ b/arch/arm/include/asm/barrier.h
1533@@ -67,7 +67,7 @@
1534 do { \
1535 compiletime_assert_atomic_type(*p); \
1536 smp_mb(); \
1537- ACCESS_ONCE(*p) = (v); \
1538+ ACCESS_ONCE_RW(*p) = (v); \
1539 } while (0)
1540
1541 #define smp_load_acquire(p) \
1542diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1543index 75fe66b..ba3dee4 100644
1544--- a/arch/arm/include/asm/cache.h
1545+++ b/arch/arm/include/asm/cache.h
1546@@ -4,8 +4,10 @@
1547 #ifndef __ASMARM_CACHE_H
1548 #define __ASMARM_CACHE_H
1549
1550+#include <linux/const.h>
1551+
1552 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1553-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1554+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1555
1556 /*
1557 * Memory returned by kmalloc() may be used for DMA, so we must make
1558@@ -24,5 +26,6 @@
1559 #endif
1560
1561 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1562+#define __read_only __attribute__ ((__section__(".data..read_only")))
1563
1564 #endif
1565diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1566index 2d46862..a35415b 100644
1567--- a/arch/arm/include/asm/cacheflush.h
1568+++ b/arch/arm/include/asm/cacheflush.h
1569@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1570 void (*dma_unmap_area)(const void *, size_t, int);
1571
1572 void (*dma_flush_range)(const void *, const void *);
1573-};
1574+} __no_const;
1575
1576 /*
1577 * Select the calling method
1578diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1579index 5233151..87a71fa 100644
1580--- a/arch/arm/include/asm/checksum.h
1581+++ b/arch/arm/include/asm/checksum.h
1582@@ -37,7 +37,19 @@ __wsum
1583 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1584
1585 __wsum
1586-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1587+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1588+
1589+static inline __wsum
1590+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1591+{
1592+ __wsum ret;
1593+ pax_open_userland();
1594+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1595+ pax_close_userland();
1596+ return ret;
1597+}
1598+
1599+
1600
1601 /*
1602 * Fold a partial checksum without adding pseudo headers
1603diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1604index abb2c37..96db950 100644
1605--- a/arch/arm/include/asm/cmpxchg.h
1606+++ b/arch/arm/include/asm/cmpxchg.h
1607@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1608
1609 #define xchg(ptr,x) \
1610 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1611+#define xchg_unchecked(ptr,x) \
1612+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1613
1614 #include <asm-generic/cmpxchg-local.h>
1615
1616diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1617index 6ddbe44..b5e38b1a 100644
1618--- a/arch/arm/include/asm/domain.h
1619+++ b/arch/arm/include/asm/domain.h
1620@@ -48,18 +48,37 @@
1621 * Domain types
1622 */
1623 #define DOMAIN_NOACCESS 0
1624-#define DOMAIN_CLIENT 1
1625 #ifdef CONFIG_CPU_USE_DOMAINS
1626+#define DOMAIN_USERCLIENT 1
1627+#define DOMAIN_KERNELCLIENT 1
1628 #define DOMAIN_MANAGER 3
1629+#define DOMAIN_VECTORS DOMAIN_USER
1630 #else
1631+
1632+#ifdef CONFIG_PAX_KERNEXEC
1633 #define DOMAIN_MANAGER 1
1634+#define DOMAIN_KERNEXEC 3
1635+#else
1636+#define DOMAIN_MANAGER 1
1637+#endif
1638+
1639+#ifdef CONFIG_PAX_MEMORY_UDEREF
1640+#define DOMAIN_USERCLIENT 0
1641+#define DOMAIN_UDEREF 1
1642+#define DOMAIN_VECTORS DOMAIN_KERNEL
1643+#else
1644+#define DOMAIN_USERCLIENT 1
1645+#define DOMAIN_VECTORS DOMAIN_USER
1646+#endif
1647+#define DOMAIN_KERNELCLIENT 1
1648+
1649 #endif
1650
1651 #define domain_val(dom,type) ((type) << (2*(dom)))
1652
1653 #ifndef __ASSEMBLY__
1654
1655-#ifdef CONFIG_CPU_USE_DOMAINS
1656+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1657 static inline void set_domain(unsigned val)
1658 {
1659 asm volatile(
1660@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1661 isb();
1662 }
1663
1664-#define modify_domain(dom,type) \
1665- do { \
1666- struct thread_info *thread = current_thread_info(); \
1667- unsigned int domain = thread->cpu_domain; \
1668- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1669- thread->cpu_domain = domain | domain_val(dom, type); \
1670- set_domain(thread->cpu_domain); \
1671- } while (0)
1672-
1673+extern void modify_domain(unsigned int dom, unsigned int type);
1674 #else
1675 static inline void set_domain(unsigned val) { }
1676 static inline void modify_domain(unsigned dom, unsigned type) { }
1677diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1678index afb9caf..9a0bac0 100644
1679--- a/arch/arm/include/asm/elf.h
1680+++ b/arch/arm/include/asm/elf.h
1681@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1682 the loader. We need to make sure that it is out of the way of the program
1683 that it will "exec", and that there is sufficient room for the brk. */
1684
1685-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1686+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1687+
1688+#ifdef CONFIG_PAX_ASLR
1689+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1690+
1691+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1692+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1693+#endif
1694
1695 /* When the program starts, a1 contains a pointer to a function to be
1696 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1697@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1698 extern void elf_set_personality(const struct elf32_hdr *);
1699 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1700
1701-struct mm_struct;
1702-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1703-#define arch_randomize_brk arch_randomize_brk
1704-
1705 #ifdef CONFIG_MMU
1706 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1707 struct linux_binprm;
1708diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1709index de53547..52b9a28 100644
1710--- a/arch/arm/include/asm/fncpy.h
1711+++ b/arch/arm/include/asm/fncpy.h
1712@@ -81,7 +81,9 @@
1713 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1714 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1715 \
1716+ pax_open_kernel(); \
1717 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1718+ pax_close_kernel(); \
1719 flush_icache_range((unsigned long)(dest_buf), \
1720 (unsigned long)(dest_buf) + (size)); \
1721 \
1722diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1723index 53e69da..3fdc896 100644
1724--- a/arch/arm/include/asm/futex.h
1725+++ b/arch/arm/include/asm/futex.h
1726@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1727 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1728 return -EFAULT;
1729
1730+ pax_open_userland();
1731+
1732 smp_mb();
1733 /* Prefetching cannot fault */
1734 prefetchw(uaddr);
1735@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1736 : "cc", "memory");
1737 smp_mb();
1738
1739+ pax_close_userland();
1740+
1741 *uval = val;
1742 return ret;
1743 }
1744@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1745 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1746 return -EFAULT;
1747
1748+ pax_open_userland();
1749+
1750 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1751 "1: " TUSER(ldr) " %1, [%4]\n"
1752 " teq %1, %2\n"
1753@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1754 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1755 : "cc", "memory");
1756
1757+ pax_close_userland();
1758+
1759 *uval = val;
1760 return ret;
1761 }
1762@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1763 return -EFAULT;
1764
1765 pagefault_disable(); /* implies preempt_disable() */
1766+ pax_open_userland();
1767
1768 switch (op) {
1769 case FUTEX_OP_SET:
1770@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1771 ret = -ENOSYS;
1772 }
1773
1774+ pax_close_userland();
1775 pagefault_enable(); /* subsumes preempt_enable() */
1776
1777 if (!ret) {
1778diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1779index 83eb2f7..ed77159 100644
1780--- a/arch/arm/include/asm/kmap_types.h
1781+++ b/arch/arm/include/asm/kmap_types.h
1782@@ -4,6 +4,6 @@
1783 /*
1784 * This is the "bare minimum". AIO seems to require this.
1785 */
1786-#define KM_TYPE_NR 16
1787+#define KM_TYPE_NR 17
1788
1789 #endif
1790diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1791index 9e614a1..3302cca 100644
1792--- a/arch/arm/include/asm/mach/dma.h
1793+++ b/arch/arm/include/asm/mach/dma.h
1794@@ -22,7 +22,7 @@ struct dma_ops {
1795 int (*residue)(unsigned int, dma_t *); /* optional */
1796 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1797 const char *type;
1798-};
1799+} __do_const;
1800
1801 struct dma_struct {
1802 void *addr; /* single DMA address */
1803diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1804index f98c7f3..e5c626d 100644
1805--- a/arch/arm/include/asm/mach/map.h
1806+++ b/arch/arm/include/asm/mach/map.h
1807@@ -23,17 +23,19 @@ struct map_desc {
1808
1809 /* types 0-3 are defined in asm/io.h */
1810 enum {
1811- MT_UNCACHED = 4,
1812- MT_CACHECLEAN,
1813- MT_MINICLEAN,
1814+ MT_UNCACHED_RW = 4,
1815+ MT_CACHECLEAN_RO,
1816+ MT_MINICLEAN_RO,
1817 MT_LOW_VECTORS,
1818 MT_HIGH_VECTORS,
1819- MT_MEMORY_RWX,
1820+ __MT_MEMORY_RWX,
1821 MT_MEMORY_RW,
1822- MT_ROM,
1823- MT_MEMORY_RWX_NONCACHED,
1824+ MT_MEMORY_RX,
1825+ MT_ROM_RX,
1826+ MT_MEMORY_RW_NONCACHED,
1827+ MT_MEMORY_RX_NONCACHED,
1828 MT_MEMORY_RW_DTCM,
1829- MT_MEMORY_RWX_ITCM,
1830+ MT_MEMORY_RX_ITCM,
1831 MT_MEMORY_RW_SO,
1832 MT_MEMORY_DMA_READY,
1833 };
1834diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1835index 891a56b..48f337e 100644
1836--- a/arch/arm/include/asm/outercache.h
1837+++ b/arch/arm/include/asm/outercache.h
1838@@ -36,7 +36,7 @@ struct outer_cache_fns {
1839
1840 /* This is an ARM L2C thing */
1841 void (*write_sec)(unsigned long, unsigned);
1842-};
1843+} __no_const;
1844
1845 extern struct outer_cache_fns outer_cache;
1846
1847diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1848index 4355f0e..cd9168e 100644
1849--- a/arch/arm/include/asm/page.h
1850+++ b/arch/arm/include/asm/page.h
1851@@ -23,6 +23,7 @@
1852
1853 #else
1854
1855+#include <linux/compiler.h>
1856 #include <asm/glue.h>
1857
1858 /*
1859@@ -114,7 +115,7 @@ struct cpu_user_fns {
1860 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1861 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1862 unsigned long vaddr, struct vm_area_struct *vma);
1863-};
1864+} __no_const;
1865
1866 #ifdef MULTI_USER
1867 extern struct cpu_user_fns cpu_user;
1868diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1869index 19cfab5..3f5c7e9 100644
1870--- a/arch/arm/include/asm/pgalloc.h
1871+++ b/arch/arm/include/asm/pgalloc.h
1872@@ -17,6 +17,7 @@
1873 #include <asm/processor.h>
1874 #include <asm/cacheflush.h>
1875 #include <asm/tlbflush.h>
1876+#include <asm/system_info.h>
1877
1878 #define check_pgt_cache() do { } while (0)
1879
1880@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1881 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1882 }
1883
1884+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1885+{
1886+ pud_populate(mm, pud, pmd);
1887+}
1888+
1889 #else /* !CONFIG_ARM_LPAE */
1890
1891 /*
1892@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1893 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1894 #define pmd_free(mm, pmd) do { } while (0)
1895 #define pud_populate(mm,pmd,pte) BUG()
1896+#define pud_populate_kernel(mm,pmd,pte) BUG()
1897
1898 #endif /* CONFIG_ARM_LPAE */
1899
1900@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1901 __free_page(pte);
1902 }
1903
1904+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1905+{
1906+#ifdef CONFIG_ARM_LPAE
1907+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1908+#else
1909+ if (addr & SECTION_SIZE)
1910+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1911+ else
1912+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1913+#endif
1914+ flush_pmd_entry(pmdp);
1915+}
1916+
1917 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1918 pmdval_t prot)
1919 {
1920diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1921index 5e68278..1869bae 100644
1922--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1923+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924@@ -27,7 +27,7 @@
1925 /*
1926 * - section
1927 */
1928-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1929+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1930 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1931 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1932 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1933@@ -39,6 +39,7 @@
1934 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1935 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1936 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1937+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1938
1939 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1940 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1941@@ -68,6 +69,7 @@
1942 * - extended small page/tiny page
1943 */
1944 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1945+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1946 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1947 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1948 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1949diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1950index f027941..f36ce30 100644
1951--- a/arch/arm/include/asm/pgtable-2level.h
1952+++ b/arch/arm/include/asm/pgtable-2level.h
1953@@ -126,6 +126,9 @@
1954 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1955 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1956
1957+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1958+#define L_PTE_PXN (_AT(pteval_t, 0))
1959+
1960 /*
1961 * These are the memory types, defined to be compatible with
1962 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1963diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1964index a31ecdad..95e98d4 100644
1965--- a/arch/arm/include/asm/pgtable-3level.h
1966+++ b/arch/arm/include/asm/pgtable-3level.h
1967@@ -81,6 +81,7 @@
1968 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1969 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1970 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1971+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1973 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1974 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1975@@ -92,10 +93,12 @@
1976 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1977 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1978 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1979+#define PMD_SECT_RDONLY PMD_SECT_AP2
1980
1981 /*
1982 * To be used in assembly code with the upper page attributes.
1983 */
1984+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1985 #define L_PTE_XN_HIGH (1 << (54 - 32))
1986 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1987
1988diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1989index d5cac54..906ea3e 100644
1990--- a/arch/arm/include/asm/pgtable.h
1991+++ b/arch/arm/include/asm/pgtable.h
1992@@ -33,6 +33,9 @@
1993 #include <asm/pgtable-2level.h>
1994 #endif
1995
1996+#define ktla_ktva(addr) (addr)
1997+#define ktva_ktla(addr) (addr)
1998+
1999 /*
2000 * Just any arbitrary offset to the start of the vmalloc VM area: the
2001 * current 8MB value just means that there will be a 8MB "hole" after the
2002@@ -48,6 +51,9 @@
2003 #define LIBRARY_TEXT_START 0x0c000000
2004
2005 #ifndef __ASSEMBLY__
2006+extern pteval_t __supported_pte_mask;
2007+extern pmdval_t __supported_pmd_mask;
2008+
2009 extern void __pte_error(const char *file, int line, pte_t);
2010 extern void __pmd_error(const char *file, int line, pmd_t);
2011 extern void __pgd_error(const char *file, int line, pgd_t);
2012@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2013 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2014 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2015
2016+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2017+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2018+
2019+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2020+#include <asm/domain.h>
2021+#include <linux/thread_info.h>
2022+#include <linux/preempt.h>
2023+
2024+static inline int test_domain(int domain, int domaintype)
2025+{
2026+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2027+}
2028+#endif
2029+
2030+#ifdef CONFIG_PAX_KERNEXEC
2031+static inline unsigned long pax_open_kernel(void) {
2032+#ifdef CONFIG_ARM_LPAE
2033+ /* TODO */
2034+#else
2035+ preempt_disable();
2036+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2037+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2038+#endif
2039+ return 0;
2040+}
2041+
2042+static inline unsigned long pax_close_kernel(void) {
2043+#ifdef CONFIG_ARM_LPAE
2044+ /* TODO */
2045+#else
2046+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2047+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2048+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2049+ preempt_enable_no_resched();
2050+#endif
2051+ return 0;
2052+}
2053+#else
2054+static inline unsigned long pax_open_kernel(void) { return 0; }
2055+static inline unsigned long pax_close_kernel(void) { return 0; }
2056+#endif
2057+
2058 /*
2059 * This is the lowest virtual address we can permit any user space
2060 * mapping to be mapped at. This is particularly important for
2061@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2062 /*
2063 * The pgprot_* and protection_map entries will be fixed up in runtime
2064 * to include the cachable and bufferable bits based on memory policy,
2065- * as well as any architecture dependent bits like global/ASID and SMP
2066- * shared mapping bits.
2067+ * as well as any architecture dependent bits like global/ASID, PXN,
2068+ * and SMP shared mapping bits.
2069 */
2070 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2071
2072@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2073 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2074 {
2075 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2076- L_PTE_NONE | L_PTE_VALID;
2077+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2078 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2079 return pte;
2080 }
2081diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2082index c25ef3e..735f14b 100644
2083--- a/arch/arm/include/asm/psci.h
2084+++ b/arch/arm/include/asm/psci.h
2085@@ -32,7 +32,7 @@ struct psci_operations {
2086 int (*affinity_info)(unsigned long target_affinity,
2087 unsigned long lowest_affinity_level);
2088 int (*migrate_info_type)(void);
2089-};
2090+} __no_const;
2091
2092 extern struct psci_operations psci_ops;
2093 extern struct smp_operations psci_smp_ops;
2094diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2095index 18f5a55..5072a40 100644
2096--- a/arch/arm/include/asm/smp.h
2097+++ b/arch/arm/include/asm/smp.h
2098@@ -107,7 +107,7 @@ struct smp_operations {
2099 int (*cpu_disable)(unsigned int cpu);
2100 #endif
2101 #endif
2102-};
2103+} __no_const;
2104
2105 struct of_cpu_method {
2106 const char *method;
2107diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2108index d890e41..3921292 100644
2109--- a/arch/arm/include/asm/thread_info.h
2110+++ b/arch/arm/include/asm/thread_info.h
2111@@ -78,9 +78,9 @@ struct thread_info {
2112 .flags = 0, \
2113 .preempt_count = INIT_PREEMPT_COUNT, \
2114 .addr_limit = KERNEL_DS, \
2115- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2116- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2117- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2118+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2119+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2120+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2121 .restart_block = { \
2122 .fn = do_no_restart_syscall, \
2123 }, \
2124@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2125 #define TIF_SYSCALL_AUDIT 9
2126 #define TIF_SYSCALL_TRACEPOINT 10
2127 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2128-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2129+/* within 8 bits of TIF_SYSCALL_TRACE
2130+ * to meet flexible second operand requirements
2131+ */
2132+#define TIF_GRSEC_SETXID 12
2133+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2134 #define TIF_USING_IWMMXT 17
2135 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2136 #define TIF_RESTORE_SIGMASK 20
2137@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2138 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2139 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2140 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2141+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2142
2143 /* Checks for any syscall work in entry-common.S */
2144 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2145- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2146+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2147
2148 /*
2149 * Change these and you break ASM code in entry-common.S
2150diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2151index 5f833f7..76e6644 100644
2152--- a/arch/arm/include/asm/tls.h
2153+++ b/arch/arm/include/asm/tls.h
2154@@ -3,6 +3,7 @@
2155
2156 #include <linux/compiler.h>
2157 #include <asm/thread_info.h>
2158+#include <asm/pgtable.h>
2159
2160 #ifdef __ASSEMBLY__
2161 #include <asm/asm-offsets.h>
2162@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2163 * at 0xffff0fe0 must be used instead. (see
2164 * entry-armv.S for details)
2165 */
2166+ pax_open_kernel();
2167 *((unsigned int *)0xffff0ff0) = val;
2168+ pax_close_kernel();
2169 #endif
2170 }
2171
2172diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2173index 4767eb9..bf00668 100644
2174--- a/arch/arm/include/asm/uaccess.h
2175+++ b/arch/arm/include/asm/uaccess.h
2176@@ -18,6 +18,7 @@
2177 #include <asm/domain.h>
2178 #include <asm/unified.h>
2179 #include <asm/compiler.h>
2180+#include <asm/pgtable.h>
2181
2182 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2183 #include <asm-generic/uaccess-unaligned.h>
2184@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2185 static inline void set_fs(mm_segment_t fs)
2186 {
2187 current_thread_info()->addr_limit = fs;
2188- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2189+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2190 }
2191
2192 #define segment_eq(a,b) ((a) == (b))
2193
2194+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2195+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2196+
2197+static inline void pax_open_userland(void)
2198+{
2199+
2200+#ifdef CONFIG_PAX_MEMORY_UDEREF
2201+ if (segment_eq(get_fs(), USER_DS)) {
2202+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2203+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2204+ }
2205+#endif
2206+
2207+}
2208+
2209+static inline void pax_close_userland(void)
2210+{
2211+
2212+#ifdef CONFIG_PAX_MEMORY_UDEREF
2213+ if (segment_eq(get_fs(), USER_DS)) {
2214+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2215+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2216+ }
2217+#endif
2218+
2219+}
2220+
2221 #define __addr_ok(addr) ({ \
2222 unsigned long flag; \
2223 __asm__("cmp %2, %0; movlo %0, #0" \
2224@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2225
2226 #define get_user(x,p) \
2227 ({ \
2228+ int __e; \
2229 might_fault(); \
2230- __get_user_check(x,p); \
2231+ pax_open_userland(); \
2232+ __e = __get_user_check(x,p); \
2233+ pax_close_userland(); \
2234+ __e; \
2235 })
2236
2237 extern int __put_user_1(void *, unsigned int);
2238@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2239
2240 #define put_user(x,p) \
2241 ({ \
2242+ int __e; \
2243 might_fault(); \
2244- __put_user_check(x,p); \
2245+ pax_open_userland(); \
2246+ __e = __put_user_check(x,p); \
2247+ pax_close_userland(); \
2248+ __e; \
2249 })
2250
2251 #else /* CONFIG_MMU */
2252@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2253
2254 #endif /* CONFIG_MMU */
2255
2256+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2257 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2258
2259 #define user_addr_max() \
2260@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2261 #define __get_user(x,ptr) \
2262 ({ \
2263 long __gu_err = 0; \
2264+ pax_open_userland(); \
2265 __get_user_err((x),(ptr),__gu_err); \
2266+ pax_close_userland(); \
2267 __gu_err; \
2268 })
2269
2270 #define __get_user_error(x,ptr,err) \
2271 ({ \
2272+ pax_open_userland(); \
2273 __get_user_err((x),(ptr),err); \
2274+ pax_close_userland(); \
2275 (void) 0; \
2276 })
2277
2278@@ -368,13 +409,17 @@ do { \
2279 #define __put_user(x,ptr) \
2280 ({ \
2281 long __pu_err = 0; \
2282+ pax_open_userland(); \
2283 __put_user_err((x),(ptr),__pu_err); \
2284+ pax_close_userland(); \
2285 __pu_err; \
2286 })
2287
2288 #define __put_user_error(x,ptr,err) \
2289 ({ \
2290+ pax_open_userland(); \
2291 __put_user_err((x),(ptr),err); \
2292+ pax_close_userland(); \
2293 (void) 0; \
2294 })
2295
2296@@ -474,11 +519,44 @@ do { \
2297
2298
2299 #ifdef CONFIG_MMU
2300-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2301-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2302+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2303+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2304+
2305+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2306+{
2307+ unsigned long ret;
2308+
2309+ check_object_size(to, n, false);
2310+ pax_open_userland();
2311+ ret = ___copy_from_user(to, from, n);
2312+ pax_close_userland();
2313+ return ret;
2314+}
2315+
2316+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2317+{
2318+ unsigned long ret;
2319+
2320+ check_object_size(from, n, true);
2321+ pax_open_userland();
2322+ ret = ___copy_to_user(to, from, n);
2323+ pax_close_userland();
2324+ return ret;
2325+}
2326+
2327 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2328-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2329+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2330 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2331+
2332+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2333+{
2334+ unsigned long ret;
2335+ pax_open_userland();
2336+ ret = ___clear_user(addr, n);
2337+ pax_close_userland();
2338+ return ret;
2339+}
2340+
2341 #else
2342 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2343 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2344@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2345
2346 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2347 {
2348+ if ((long)n < 0)
2349+ return n;
2350+
2351 if (access_ok(VERIFY_READ, from, n))
2352 n = __copy_from_user(to, from, n);
2353 else /* security hole - plug it */
2354@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2355
2356 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2357 {
2358+ if ((long)n < 0)
2359+ return n;
2360+
2361 if (access_ok(VERIFY_WRITE, to, n))
2362 n = __copy_to_user(to, from, n);
2363 return n;
2364diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2365index 5af0ed1..cea83883 100644
2366--- a/arch/arm/include/uapi/asm/ptrace.h
2367+++ b/arch/arm/include/uapi/asm/ptrace.h
2368@@ -92,7 +92,7 @@
2369 * ARMv7 groups of PSR bits
2370 */
2371 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2372-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2373+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2374 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2375 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2376
2377diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2378index a88671c..1cc895e 100644
2379--- a/arch/arm/kernel/armksyms.c
2380+++ b/arch/arm/kernel/armksyms.c
2381@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2382
2383 /* networking */
2384 EXPORT_SYMBOL(csum_partial);
2385-EXPORT_SYMBOL(csum_partial_copy_from_user);
2386+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2387 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2388 EXPORT_SYMBOL(__csum_ipv6_magic);
2389
2390@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2391 #ifdef CONFIG_MMU
2392 EXPORT_SYMBOL(copy_page);
2393
2394-EXPORT_SYMBOL(__copy_from_user);
2395-EXPORT_SYMBOL(__copy_to_user);
2396-EXPORT_SYMBOL(__clear_user);
2397+EXPORT_SYMBOL(___copy_from_user);
2398+EXPORT_SYMBOL(___copy_to_user);
2399+EXPORT_SYMBOL(___clear_user);
2400
2401 EXPORT_SYMBOL(__get_user_1);
2402 EXPORT_SYMBOL(__get_user_2);
2403diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2404index 2f5555d..d493c91 100644
2405--- a/arch/arm/kernel/entry-armv.S
2406+++ b/arch/arm/kernel/entry-armv.S
2407@@ -47,6 +47,87 @@
2408 9997:
2409 .endm
2410
2411+ .macro pax_enter_kernel
2412+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2413+ @ make aligned space for saved DACR
2414+ sub sp, sp, #8
2415+ @ save regs
2416+ stmdb sp!, {r1, r2}
2417+ @ read DACR from cpu_domain into r1
2418+ mov r2, sp
2419+ @ assume 8K pages, since we have to split the immediate in two
2420+ bic r2, r2, #(0x1fc0)
2421+ bic r2, r2, #(0x3f)
2422+ ldr r1, [r2, #TI_CPU_DOMAIN]
2423+ @ store old DACR on stack
2424+ str r1, [sp, #8]
2425+#ifdef CONFIG_PAX_KERNEXEC
2426+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2427+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2428+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2429+#endif
2430+#ifdef CONFIG_PAX_MEMORY_UDEREF
2431+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2432+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2433+#endif
2434+ @ write r1 to current_thread_info()->cpu_domain
2435+ str r1, [r2, #TI_CPU_DOMAIN]
2436+ @ write r1 to DACR
2437+ mcr p15, 0, r1, c3, c0, 0
2438+ @ instruction sync
2439+ instr_sync
2440+ @ restore regs
2441+ ldmia sp!, {r1, r2}
2442+#endif
2443+ .endm
2444+
2445+ .macro pax_open_userland
2446+#ifdef CONFIG_PAX_MEMORY_UDEREF
2447+ @ save regs
2448+ stmdb sp!, {r0, r1}
2449+ @ read DACR from cpu_domain into r1
2450+ mov r0, sp
2451+ @ assume 8K pages, since we have to split the immediate in two
2452+ bic r0, r0, #(0x1fc0)
2453+ bic r0, r0, #(0x3f)
2454+ ldr r1, [r0, #TI_CPU_DOMAIN]
2455+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2456+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2457+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2458+ @ write r1 to current_thread_info()->cpu_domain
2459+ str r1, [r0, #TI_CPU_DOMAIN]
2460+ @ write r1 to DACR
2461+ mcr p15, 0, r1, c3, c0, 0
2462+ @ instruction sync
2463+ instr_sync
2464+ @ restore regs
2465+ ldmia sp!, {r0, r1}
2466+#endif
2467+ .endm
2468+
2469+ .macro pax_close_userland
2470+#ifdef CONFIG_PAX_MEMORY_UDEREF
2471+ @ save regs
2472+ stmdb sp!, {r0, r1}
2473+ @ read DACR from cpu_domain into r1
2474+ mov r0, sp
2475+ @ assume 8K pages, since we have to split the immediate in two
2476+ bic r0, r0, #(0x1fc0)
2477+ bic r0, r0, #(0x3f)
2478+ ldr r1, [r0, #TI_CPU_DOMAIN]
2479+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2480+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2481+ @ write r1 to current_thread_info()->cpu_domain
2482+ str r1, [r0, #TI_CPU_DOMAIN]
2483+ @ write r1 to DACR
2484+ mcr p15, 0, r1, c3, c0, 0
2485+ @ instruction sync
2486+ instr_sync
2487+ @ restore regs
2488+ ldmia sp!, {r0, r1}
2489+#endif
2490+ .endm
2491+
2492 .macro pabt_helper
2493 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2494 #ifdef MULTI_PABORT
2495@@ -89,11 +170,15 @@
2496 * Invalid mode handlers
2497 */
2498 .macro inv_entry, reason
2499+
2500+ pax_enter_kernel
2501+
2502 sub sp, sp, #S_FRAME_SIZE
2503 ARM( stmib sp, {r1 - lr} )
2504 THUMB( stmia sp, {r0 - r12} )
2505 THUMB( str sp, [sp, #S_SP] )
2506 THUMB( str lr, [sp, #S_LR] )
2507+
2508 mov r1, #\reason
2509 .endm
2510
2511@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2512 .macro svc_entry, stack_hole=0, trace=1
2513 UNWIND(.fnstart )
2514 UNWIND(.save {r0 - pc} )
2515+
2516+ pax_enter_kernel
2517+
2518 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2519+
2520 #ifdef CONFIG_THUMB2_KERNEL
2521 SPFIX( str r0, [sp] ) @ temporarily saved
2522 SPFIX( mov r0, sp )
2523@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2524 ldmia r0, {r3 - r5}
2525 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2526 mov r6, #-1 @ "" "" "" ""
2527+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2528+ @ offset sp by 8 as done in pax_enter_kernel
2529+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2530+#else
2531 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2532+#endif
2533 SPFIX( addeq r2, r2, #4 )
2534 str r3, [sp, #-4]! @ save the "real" r0 copied
2535 @ from the exception stack
2536@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
2537 .macro usr_entry, trace=1
2538 UNWIND(.fnstart )
2539 UNWIND(.cantunwind ) @ don't unwind the user space
2540+
2541+ pax_enter_kernel_user
2542+
2543 sub sp, sp, #S_FRAME_SIZE
2544 ARM( stmib sp, {r1 - r12} )
2545 THUMB( stmia sp, {r0 - r12} )
2546@@ -478,7 +575,9 @@ __und_usr:
2547 tst r3, #PSR_T_BIT @ Thumb mode?
2548 bne __und_usr_thumb
2549 sub r4, r2, #4 @ ARM instr at LR - 4
2550+ pax_open_userland
2551 1: ldrt r0, [r4]
2552+ pax_close_userland
2553 ARM_BE8(rev r0, r0) @ little endian instruction
2554
2555 @ r0 = 32-bit ARM instruction which caused the exception
2556@@ -512,11 +611,15 @@ __und_usr_thumb:
2557 */
2558 .arch armv6t2
2559 #endif
2560+ pax_open_userland
2561 2: ldrht r5, [r4]
2562+ pax_close_userland
2563 ARM_BE8(rev16 r5, r5) @ little endian instruction
2564 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2565 blo __und_usr_fault_16 @ 16bit undefined instruction
2566+ pax_open_userland
2567 3: ldrht r0, [r2]
2568+ pax_close_userland
2569 ARM_BE8(rev16 r0, r0) @ little endian instruction
2570 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2571 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2572@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
2573 */
2574 .pushsection .fixup, "ax"
2575 .align 2
2576-4: str r4, [sp, #S_PC] @ retry current instruction
2577+4: pax_close_userland
2578+ str r4, [sp, #S_PC] @ retry current instruction
2579 ret r9
2580 .popsection
2581 .pushsection __ex_table,"a"
2582@@ -766,7 +870,7 @@ ENTRY(__switch_to)
2583 THUMB( str lr, [ip], #4 )
2584 ldr r4, [r2, #TI_TP_VALUE]
2585 ldr r5, [r2, #TI_TP_VALUE + 4]
2586-#ifdef CONFIG_CPU_USE_DOMAINS
2587+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2588 ldr r6, [r2, #TI_CPU_DOMAIN]
2589 #endif
2590 switch_tls r1, r4, r5, r3, r7
2591@@ -775,7 +879,7 @@ ENTRY(__switch_to)
2592 ldr r8, =__stack_chk_guard
2593 ldr r7, [r7, #TSK_STACK_CANARY]
2594 #endif
2595-#ifdef CONFIG_CPU_USE_DOMAINS
2596+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2598 #endif
2599 mov r5, r0
2600diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2601index f8ccc21..83d192f 100644
2602--- a/arch/arm/kernel/entry-common.S
2603+++ b/arch/arm/kernel/entry-common.S
2604@@ -11,18 +11,46 @@
2605 #include <asm/assembler.h>
2606 #include <asm/unistd.h>
2607 #include <asm/ftrace.h>
2608+#include <asm/domain.h>
2609 #include <asm/unwind.h>
2610
2611+#include "entry-header.S"
2612+
2613 #ifdef CONFIG_NEED_RET_TO_USER
2614 #include <mach/entry-macro.S>
2615 #else
2616 .macro arch_ret_to_user, tmp1, tmp2
2617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2618+ @ save regs
2619+ stmdb sp!, {r1, r2}
2620+ @ read DACR from cpu_domain into r1
2621+ mov r2, sp
2622+ @ assume 8K pages, since we have to split the immediate in two
2623+ bic r2, r2, #(0x1fc0)
2624+ bic r2, r2, #(0x3f)
2625+ ldr r1, [r2, #TI_CPU_DOMAIN]
2626+#ifdef CONFIG_PAX_KERNEXEC
2627+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2628+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2629+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2630+#endif
2631+#ifdef CONFIG_PAX_MEMORY_UDEREF
2632+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2633+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2635+#endif
2636+ @ write r1 to current_thread_info()->cpu_domain
2637+ str r1, [r2, #TI_CPU_DOMAIN]
2638+ @ write r1 to DACR
2639+ mcr p15, 0, r1, c3, c0, 0
2640+ @ instruction sync
2641+ instr_sync
2642+ @ restore regs
2643+ ldmia sp!, {r1, r2}
2644+#endif
2645 .endm
2646 #endif
2647
2648-#include "entry-header.S"
2649-
2650-
2651 .align 5
2652 /*
2653 * This is the fast syscall return path. We do as little as
2654@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2655 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2656 #endif
2657
2658+ /*
2659+ * do this here to avoid a performance hit of wrapping the code above
2660+ * that directly dereferences userland to parse the SWI instruction
2661+ */
2662+ pax_enter_kernel_user
2663+
2664 adr tbl, sys_call_table @ load syscall table pointer
2665
2666 #if defined(CONFIG_OABI_COMPAT)
2667diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2668index 1a0045a..9b4f34d 100644
2669--- a/arch/arm/kernel/entry-header.S
2670+++ b/arch/arm/kernel/entry-header.S
2671@@ -196,6 +196,60 @@
2672 msr cpsr_c, \rtemp @ switch back to the SVC mode
2673 .endm
2674
2675+ .macro pax_enter_kernel_user
2676+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2677+ @ save regs
2678+ stmdb sp!, {r0, r1}
2679+ @ read DACR from cpu_domain into r1
2680+ mov r0, sp
2681+ @ assume 8K pages, since we have to split the immediate in two
2682+ bic r0, r0, #(0x1fc0)
2683+ bic r0, r0, #(0x3f)
2684+ ldr r1, [r0, #TI_CPU_DOMAIN]
2685+#ifdef CONFIG_PAX_MEMORY_UDEREF
2686+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2687+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2688+#endif
2689+#ifdef CONFIG_PAX_KERNEXEC
2690+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2691+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2692+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2693+#endif
2694+ @ write r1 to current_thread_info()->cpu_domain
2695+ str r1, [r0, #TI_CPU_DOMAIN]
2696+ @ write r1 to DACR
2697+ mcr p15, 0, r1, c3, c0, 0
2698+ @ instruction sync
2699+ instr_sync
2700+ @ restore regs
2701+ ldmia sp!, {r0, r1}
2702+#endif
2703+ .endm
2704+
2705+ .macro pax_exit_kernel
2706+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2707+ @ save regs
2708+ stmdb sp!, {r0, r1}
2709+ @ read old DACR from stack into r1
2710+ ldr r1, [sp, #(8 + S_SP)]
2711+ sub r1, r1, #8
2712+ ldr r1, [r1]
2713+
2714+ @ write r1 to current_thread_info()->cpu_domain
2715+ mov r0, sp
2716+ @ assume 8K pages, since we have to split the immediate in two
2717+ bic r0, r0, #(0x1fc0)
2718+ bic r0, r0, #(0x3f)
2719+ str r1, [r0, #TI_CPU_DOMAIN]
2720+ @ write r1 to DACR
2721+ mcr p15, 0, r1, c3, c0, 0
2722+ @ instruction sync
2723+ instr_sync
2724+ @ restore regs
2725+ ldmia sp!, {r0, r1}
2726+#endif
2727+ .endm
2728+
2729 #ifndef CONFIG_THUMB2_KERNEL
2730 .macro svc_exit, rpsr, irq = 0
2731 .if \irq != 0
2732@@ -215,6 +269,9 @@
2733 blne trace_hardirqs_off
2734 #endif
2735 .endif
2736+
2737+ pax_exit_kernel
2738+
2739 msr spsr_cxsf, \rpsr
2740 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2741 @ We must avoid clrex due to Cortex-A15 erratum #830321
2742@@ -291,6 +348,9 @@
2743 blne trace_hardirqs_off
2744 #endif
2745 .endif
2746+
2747+ pax_exit_kernel
2748+
2749 ldr lr, [sp, #S_SP] @ top of the stack
2750 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2751
2752diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2753index 059c3da..8e45cfc 100644
2754--- a/arch/arm/kernel/fiq.c
2755+++ b/arch/arm/kernel/fiq.c
2756@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2757 void *base = vectors_page;
2758 unsigned offset = FIQ_OFFSET;
2759
2760+ pax_open_kernel();
2761 memcpy(base + offset, start, length);
2762+ pax_close_kernel();
2763+
2764 if (!cache_is_vipt_nonaliasing())
2765 flush_icache_range((unsigned long)base + offset, offset +
2766 length);
2767diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2768index 664eee8..f470938 100644
2769--- a/arch/arm/kernel/head.S
2770+++ b/arch/arm/kernel/head.S
2771@@ -437,7 +437,7 @@ __enable_mmu:
2772 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2773 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2774 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2775- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2776+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2777 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2778 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2779 #endif
2780diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2781index bea7db9..a210d10 100644
2782--- a/arch/arm/kernel/module.c
2783+++ b/arch/arm/kernel/module.c
2784@@ -38,12 +38,39 @@
2785 #endif
2786
2787 #ifdef CONFIG_MMU
2788-void *module_alloc(unsigned long size)
2789+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2790 {
2791+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2792+ return NULL;
2793 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2794- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2795+ GFP_KERNEL, prot, NUMA_NO_NODE,
2796 __builtin_return_address(0));
2797 }
2798+
2799+void *module_alloc(unsigned long size)
2800+{
2801+
2802+#ifdef CONFIG_PAX_KERNEXEC
2803+ return __module_alloc(size, PAGE_KERNEL);
2804+#else
2805+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2806+#endif
2807+
2808+}
2809+
2810+#ifdef CONFIG_PAX_KERNEXEC
2811+void module_memfree_exec(void *module_region)
2812+{
2813+ module_memfree(module_region);
2814+}
2815+EXPORT_SYMBOL(module_memfree_exec);
2816+
2817+void *module_alloc_exec(unsigned long size)
2818+{
2819+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2820+}
2821+EXPORT_SYMBOL(module_alloc_exec);
2822+#endif
2823 #endif
2824
2825 int
2826diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2827index 5038960..4aa71d8 100644
2828--- a/arch/arm/kernel/patch.c
2829+++ b/arch/arm/kernel/patch.c
2830@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2831 else
2832 __acquire(&patch_lock);
2833
2834+ pax_open_kernel();
2835 if (thumb2 && __opcode_is_thumb16(insn)) {
2836 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2837 size = sizeof(u16);
2838@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2839 *(u32 *)waddr = insn;
2840 size = sizeof(u32);
2841 }
2842+ pax_close_kernel();
2843
2844 if (waddr != addr) {
2845 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2846diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2847index fdfa3a7..5d208b8 100644
2848--- a/arch/arm/kernel/process.c
2849+++ b/arch/arm/kernel/process.c
2850@@ -207,6 +207,7 @@ void machine_power_off(void)
2851
2852 if (pm_power_off)
2853 pm_power_off();
2854+ BUG();
2855 }
2856
2857 /*
2858@@ -220,7 +221,7 @@ void machine_power_off(void)
2859 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2860 * to use. Implementing such co-ordination would be essentially impossible.
2861 */
2862-void machine_restart(char *cmd)
2863+__noreturn void machine_restart(char *cmd)
2864 {
2865 local_irq_disable();
2866 smp_send_stop();
2867@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2868
2869 show_regs_print_info(KERN_DEFAULT);
2870
2871- print_symbol("PC is at %s\n", instruction_pointer(regs));
2872- print_symbol("LR is at %s\n", regs->ARM_lr);
2873+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2874+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2875 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2876 "sp : %08lx ip : %08lx fp : %08lx\n",
2877 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2878@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2879 return 0;
2880 }
2881
2882-unsigned long arch_randomize_brk(struct mm_struct *mm)
2883-{
2884- unsigned long range_end = mm->brk + 0x02000000;
2885- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2886-}
2887-
2888 #ifdef CONFIG_MMU
2889 #ifdef CONFIG_KUSER_HELPERS
2890 /*
2891@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2892
2893 static int __init gate_vma_init(void)
2894 {
2895- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2896+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2897 return 0;
2898 }
2899 arch_initcall(gate_vma_init);
2900@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2901 return is_gate_vma(vma) ? "[vectors]" : NULL;
2902 }
2903
2904-/* If possible, provide a placement hint at a random offset from the
2905- * stack for the signal page.
2906- */
2907-static unsigned long sigpage_addr(const struct mm_struct *mm,
2908- unsigned int npages)
2909-{
2910- unsigned long offset;
2911- unsigned long first;
2912- unsigned long last;
2913- unsigned long addr;
2914- unsigned int slots;
2915-
2916- first = PAGE_ALIGN(mm->start_stack);
2917-
2918- last = TASK_SIZE - (npages << PAGE_SHIFT);
2919-
2920- /* No room after stack? */
2921- if (first > last)
2922- return 0;
2923-
2924- /* Just enough room? */
2925- if (first == last)
2926- return first;
2927-
2928- slots = ((last - first) >> PAGE_SHIFT) + 1;
2929-
2930- offset = get_random_int() % slots;
2931-
2932- addr = first + (offset << PAGE_SHIFT);
2933-
2934- return addr;
2935-}
2936-
2937-static struct page *signal_page;
2938-extern struct page *get_signal_page(void);
2939-
2940-static const struct vm_special_mapping sigpage_mapping = {
2941- .name = "[sigpage]",
2942- .pages = &signal_page,
2943-};
2944-
2945 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2946 {
2947 struct mm_struct *mm = current->mm;
2948- struct vm_area_struct *vma;
2949- unsigned long addr;
2950- unsigned long hint;
2951- int ret = 0;
2952-
2953- if (!signal_page)
2954- signal_page = get_signal_page();
2955- if (!signal_page)
2956- return -ENOMEM;
2957
2958 down_write(&mm->mmap_sem);
2959- hint = sigpage_addr(mm, 1);
2960- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2961- if (IS_ERR_VALUE(addr)) {
2962- ret = addr;
2963- goto up_fail;
2964- }
2965-
2966- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2967- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2968- &sigpage_mapping);
2969-
2970- if (IS_ERR(vma)) {
2971- ret = PTR_ERR(vma);
2972- goto up_fail;
2973- }
2974-
2975- mm->context.sigpage = addr;
2976-
2977- up_fail:
2978+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2979 up_write(&mm->mmap_sem);
2980- return ret;
2981+ return 0;
2982 }
2983 #endif
2984diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2985index f73891b..cf3004e 100644
2986--- a/arch/arm/kernel/psci.c
2987+++ b/arch/arm/kernel/psci.c
2988@@ -28,7 +28,7 @@
2989 #include <asm/psci.h>
2990 #include <asm/system_misc.h>
2991
2992-struct psci_operations psci_ops;
2993+struct psci_operations psci_ops __read_only;
2994
2995 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2996 typedef int (*psci_initcall_t)(const struct device_node *);
2997diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2998index ef9119f..31995a3 100644
2999--- a/arch/arm/kernel/ptrace.c
3000+++ b/arch/arm/kernel/ptrace.c
3001@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3002 regs->ARM_ip = ip;
3003 }
3004
3005+#ifdef CONFIG_GRKERNSEC_SETXID
3006+extern void gr_delayed_cred_worker(void);
3007+#endif
3008+
3009 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3010 {
3011 current_thread_info()->syscall = scno;
3012
3013+#ifdef CONFIG_GRKERNSEC_SETXID
3014+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3015+ gr_delayed_cred_worker();
3016+#endif
3017+
3018 /* Do the secure computing check first; failures should be fast. */
3019 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3020 if (secure_computing() == -1)
3021diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3022index e55408e..14d9998 100644
3023--- a/arch/arm/kernel/setup.c
3024+++ b/arch/arm/kernel/setup.c
3025@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3026 unsigned int elf_hwcap2 __read_mostly;
3027 EXPORT_SYMBOL(elf_hwcap2);
3028
3029+pteval_t __supported_pte_mask __read_only;
3030+pmdval_t __supported_pmd_mask __read_only;
3031
3032 #ifdef MULTI_CPU
3033-struct processor processor __read_mostly;
3034+struct processor processor __read_only;
3035 #endif
3036 #ifdef MULTI_TLB
3037-struct cpu_tlb_fns cpu_tlb __read_mostly;
3038+struct cpu_tlb_fns cpu_tlb __read_only;
3039 #endif
3040 #ifdef MULTI_USER
3041-struct cpu_user_fns cpu_user __read_mostly;
3042+struct cpu_user_fns cpu_user __read_only;
3043 #endif
3044 #ifdef MULTI_CACHE
3045-struct cpu_cache_fns cpu_cache __read_mostly;
3046+struct cpu_cache_fns cpu_cache __read_only;
3047 #endif
3048 #ifdef CONFIG_OUTER_CACHE
3049-struct outer_cache_fns outer_cache __read_mostly;
3050+struct outer_cache_fns outer_cache __read_only;
3051 EXPORT_SYMBOL(outer_cache);
3052 #endif
3053
3054@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3055 asm("mrc p15, 0, %0, c0, c1, 4"
3056 : "=r" (mmfr0));
3057 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3058- (mmfr0 & 0x000000f0) >= 0x00000030)
3059+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3060 cpu_arch = CPU_ARCH_ARMv7;
3061- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3062+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3063+ __supported_pte_mask |= L_PTE_PXN;
3064+ __supported_pmd_mask |= PMD_PXNTABLE;
3065+ }
3066+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3067 (mmfr0 & 0x000000f0) == 0x00000020)
3068 cpu_arch = CPU_ARCH_ARMv6;
3069 else
3070diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3071index 8aa6f1b..0899e08 100644
3072--- a/arch/arm/kernel/signal.c
3073+++ b/arch/arm/kernel/signal.c
3074@@ -24,8 +24,6 @@
3075
3076 extern const unsigned long sigreturn_codes[7];
3077
3078-static unsigned long signal_return_offset;
3079-
3080 #ifdef CONFIG_CRUNCH
3081 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3082 {
3083@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3084 * except when the MPU has protected the vectors
3085 * page from PL0
3086 */
3087- retcode = mm->context.sigpage + signal_return_offset +
3088- (idx << 2) + thumb;
3089+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3090 } else
3091 #endif
3092 {
3093@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3094 } while (thread_flags & _TIF_WORK_MASK);
3095 return 0;
3096 }
3097-
3098-struct page *get_signal_page(void)
3099-{
3100- unsigned long ptr;
3101- unsigned offset;
3102- struct page *page;
3103- void *addr;
3104-
3105- page = alloc_pages(GFP_KERNEL, 0);
3106-
3107- if (!page)
3108- return NULL;
3109-
3110- addr = page_address(page);
3111-
3112- /* Give the signal return code some randomness */
3113- offset = 0x200 + (get_random_int() & 0x7fc);
3114- signal_return_offset = offset;
3115-
3116- /*
3117- * Copy signal return handlers into the vector page, and
3118- * set sigreturn to be a pointer to these.
3119- */
3120- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3121-
3122- ptr = (unsigned long)addr + offset;
3123- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3124-
3125- return page;
3126-}
3127diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3128index 86ef244..c518451 100644
3129--- a/arch/arm/kernel/smp.c
3130+++ b/arch/arm/kernel/smp.c
3131@@ -76,7 +76,7 @@ enum ipi_msg_type {
3132
3133 static DECLARE_COMPLETION(cpu_running);
3134
3135-static struct smp_operations smp_ops;
3136+static struct smp_operations smp_ops __read_only;
3137
3138 void __init smp_set_ops(struct smp_operations *ops)
3139 {
3140diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3141index 7a3be1d..b00c7de 100644
3142--- a/arch/arm/kernel/tcm.c
3143+++ b/arch/arm/kernel/tcm.c
3144@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3145 .virtual = ITCM_OFFSET,
3146 .pfn = __phys_to_pfn(ITCM_OFFSET),
3147 .length = 0,
3148- .type = MT_MEMORY_RWX_ITCM,
3149+ .type = MT_MEMORY_RX_ITCM,
3150 }
3151 };
3152
3153@@ -267,7 +267,9 @@ no_dtcm:
3154 start = &__sitcm_text;
3155 end = &__eitcm_text;
3156 ram = &__itcm_start;
3157+ pax_open_kernel();
3158 memcpy(start, ram, itcm_code_sz);
3159+ pax_close_kernel();
3160 pr_debug("CPU ITCM: copied code from %p - %p\n",
3161 start, end);
3162 itcm_present = true;
3163diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3164index 788e23f..6fa06a1 100644
3165--- a/arch/arm/kernel/traps.c
3166+++ b/arch/arm/kernel/traps.c
3167@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3168 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3169 {
3170 #ifdef CONFIG_KALLSYMS
3171- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3172+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3173 #else
3174 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3175 #endif
3176@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3177 static int die_owner = -1;
3178 static unsigned int die_nest_count;
3179
3180+extern void gr_handle_kernel_exploit(void);
3181+
3182 static unsigned long oops_begin(void)
3183 {
3184 int cpu;
3185@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3186 panic("Fatal exception in interrupt");
3187 if (panic_on_oops)
3188 panic("Fatal exception");
3189+
3190+ gr_handle_kernel_exploit();
3191+
3192 if (signr)
3193 do_exit(signr);
3194 }
3195@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3196 kuser_init(vectors_base);
3197
3198 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3199- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3200+
3201+#ifndef CONFIG_PAX_MEMORY_UDEREF
3202+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3203+#endif
3204+
3205 #else /* ifndef CONFIG_CPU_V7M */
3206 /*
3207 * on V7-M there is no need to copy the vector table to a dedicated
3208diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3209index b31aa73..cc4b7a1 100644
3210--- a/arch/arm/kernel/vmlinux.lds.S
3211+++ b/arch/arm/kernel/vmlinux.lds.S
3212@@ -37,7 +37,7 @@
3213 #endif
3214
3215 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3216- defined(CONFIG_GENERIC_BUG)
3217+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3218 #define ARM_EXIT_KEEP(x) x
3219 #define ARM_EXIT_DISCARD(x)
3220 #else
3221@@ -123,6 +123,8 @@ SECTIONS
3222 #ifdef CONFIG_DEBUG_RODATA
3223 . = ALIGN(1<<SECTION_SHIFT);
3224 #endif
3225+ _etext = .; /* End of text section */
3226+
3227 RO_DATA(PAGE_SIZE)
3228
3229 . = ALIGN(4);
3230@@ -153,8 +155,6 @@ SECTIONS
3231
3232 NOTES
3233
3234- _etext = .; /* End of text and rodata section */
3235-
3236 #ifndef CONFIG_XIP_KERNEL
3237 # ifdef CONFIG_ARM_KERNMEM_PERMS
3238 . = ALIGN(1<<SECTION_SHIFT);
3239diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3240index 0b0d58a..988cb45 100644
3241--- a/arch/arm/kvm/arm.c
3242+++ b/arch/arm/kvm/arm.c
3243@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3244 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3245
3246 /* The VMID used in the VTTBR */
3247-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3248+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3249 static u8 kvm_next_vmid;
3250 static DEFINE_SPINLOCK(kvm_vmid_lock);
3251
3252@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
3253 */
3254 static bool need_new_vmid_gen(struct kvm *kvm)
3255 {
3256- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3257+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3258 }
3259
3260 /**
3261@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
3262
3263 /* First user of a new VMID generation? */
3264 if (unlikely(kvm_next_vmid == 0)) {
3265- atomic64_inc(&kvm_vmid_gen);
3266+ atomic64_inc_unchecked(&kvm_vmid_gen);
3267 kvm_next_vmid = 1;
3268
3269 /*
3270@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
3271 kvm_call_hyp(__kvm_flush_vm_context);
3272 }
3273
3274- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3275+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3276 kvm->arch.vmid = kvm_next_vmid;
3277 kvm_next_vmid++;
3278
3279@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
3280 /**
3281 * Initialize Hyp-mode and memory mappings on all CPUs.
3282 */
3283-int kvm_arch_init(void *opaque)
3284+int kvm_arch_init(const void *opaque)
3285 {
3286 int err;
3287 int ret, cpu;
3288diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3289index 14a0d98..7771a7d 100644
3290--- a/arch/arm/lib/clear_user.S
3291+++ b/arch/arm/lib/clear_user.S
3292@@ -12,14 +12,14 @@
3293
3294 .text
3295
3296-/* Prototype: int __clear_user(void *addr, size_t sz)
3297+/* Prototype: int ___clear_user(void *addr, size_t sz)
3298 * Purpose : clear some user memory
3299 * Params : addr - user memory address to clear
3300 * : sz - number of bytes to clear
3301 * Returns : number of bytes NOT cleared
3302 */
3303 ENTRY(__clear_user_std)
3304-WEAK(__clear_user)
3305+WEAK(___clear_user)
3306 stmfd sp!, {r1, lr}
3307 mov r2, #0
3308 cmp r1, #4
3309@@ -44,7 +44,7 @@ WEAK(__clear_user)
3310 USER( strnebt r2, [r0])
3311 mov r0, #0
3312 ldmfd sp!, {r1, pc}
3313-ENDPROC(__clear_user)
3314+ENDPROC(___clear_user)
3315 ENDPROC(__clear_user_std)
3316
3317 .pushsection .fixup,"ax"
3318diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3319index 7a235b9..73a0556 100644
3320--- a/arch/arm/lib/copy_from_user.S
3321+++ b/arch/arm/lib/copy_from_user.S
3322@@ -17,7 +17,7 @@
3323 /*
3324 * Prototype:
3325 *
3326- * size_t __copy_from_user(void *to, const void *from, size_t n)
3327+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3328 *
3329 * Purpose:
3330 *
3331@@ -89,11 +89,11 @@
3332
3333 .text
3334
3335-ENTRY(__copy_from_user)
3336+ENTRY(___copy_from_user)
3337
3338 #include "copy_template.S"
3339
3340-ENDPROC(__copy_from_user)
3341+ENDPROC(___copy_from_user)
3342
3343 .pushsection .fixup,"ax"
3344 .align 0
3345diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3346index 6ee2f67..d1cce76 100644
3347--- a/arch/arm/lib/copy_page.S
3348+++ b/arch/arm/lib/copy_page.S
3349@@ -10,6 +10,7 @@
3350 * ASM optimised string functions
3351 */
3352 #include <linux/linkage.h>
3353+#include <linux/const.h>
3354 #include <asm/assembler.h>
3355 #include <asm/asm-offsets.h>
3356 #include <asm/cache.h>
3357diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3358index a9d3db1..164b089 100644
3359--- a/arch/arm/lib/copy_to_user.S
3360+++ b/arch/arm/lib/copy_to_user.S
3361@@ -17,7 +17,7 @@
3362 /*
3363 * Prototype:
3364 *
3365- * size_t __copy_to_user(void *to, const void *from, size_t n)
3366+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3367 *
3368 * Purpose:
3369 *
3370@@ -93,11 +93,11 @@
3371 .text
3372
3373 ENTRY(__copy_to_user_std)
3374-WEAK(__copy_to_user)
3375+WEAK(___copy_to_user)
3376
3377 #include "copy_template.S"
3378
3379-ENDPROC(__copy_to_user)
3380+ENDPROC(___copy_to_user)
3381 ENDPROC(__copy_to_user_std)
3382
3383 .pushsection .fixup,"ax"
3384diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3385index 7d08b43..f7ca7ea 100644
3386--- a/arch/arm/lib/csumpartialcopyuser.S
3387+++ b/arch/arm/lib/csumpartialcopyuser.S
3388@@ -57,8 +57,8 @@
3389 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3390 */
3391
3392-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3393-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3394+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3395+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3396
3397 #include "csumpartialcopygeneric.S"
3398
3399diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3400index 312d43e..21d2322 100644
3401--- a/arch/arm/lib/delay.c
3402+++ b/arch/arm/lib/delay.c
3403@@ -29,7 +29,7 @@
3404 /*
3405 * Default to the loop-based delay implementation.
3406 */
3407-struct arm_delay_ops arm_delay_ops = {
3408+struct arm_delay_ops arm_delay_ops __read_only = {
3409 .delay = __loop_delay,
3410 .const_udelay = __loop_const_udelay,
3411 .udelay = __loop_udelay,
3412diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3413index 3e58d71..029817c 100644
3414--- a/arch/arm/lib/uaccess_with_memcpy.c
3415+++ b/arch/arm/lib/uaccess_with_memcpy.c
3416@@ -136,7 +136,7 @@ out:
3417 }
3418
3419 unsigned long
3420-__copy_to_user(void __user *to, const void *from, unsigned long n)
3421+___copy_to_user(void __user *to, const void *from, unsigned long n)
3422 {
3423 /*
3424 * This test is stubbed out of the main function above to keep
3425@@ -190,7 +190,7 @@ out:
3426 return n;
3427 }
3428
3429-unsigned long __clear_user(void __user *addr, unsigned long n)
3430+unsigned long ___clear_user(void __user *addr, unsigned long n)
3431 {
3432 /* See rational for this in __copy_to_user() above. */
3433 if (n < 64)
3434diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3435index ce25e85..3dd7850 100644
3436--- a/arch/arm/mach-at91/setup.c
3437+++ b/arch/arm/mach-at91/setup.c
3438@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3439
3440 desc->pfn = __phys_to_pfn(base);
3441 desc->length = length;
3442- desc->type = MT_MEMORY_RWX_NONCACHED;
3443+ desc->type = MT_MEMORY_RW_NONCACHED;
3444
3445 pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3446 base, length, desc->virtual);
3447diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3448index f8e7dcd..17ee921 100644
3449--- a/arch/arm/mach-exynos/suspend.c
3450+++ b/arch/arm/mach-exynos/suspend.c
3451@@ -18,6 +18,7 @@
3452 #include <linux/syscore_ops.h>
3453 #include <linux/cpu_pm.h>
3454 #include <linux/io.h>
3455+#include <linux/irq.h>
3456 #include <linux/irqchip/arm-gic.h>
3457 #include <linux/err.h>
3458 #include <linux/regulator/machine.h>
3459@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
3460 tmp |= pm_data->wake_disable_mask;
3461 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3462
3463- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3464- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3465+ pax_open_kernel();
3466+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3467+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3468+ pax_close_kernel();
3469
3470 register_syscore_ops(&exynos_pm_syscore_ops);
3471 suspend_set_ops(&exynos_suspend_ops);
3472diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3473index 7f352de..6dc0929 100644
3474--- a/arch/arm/mach-keystone/keystone.c
3475+++ b/arch/arm/mach-keystone/keystone.c
3476@@ -27,7 +27,7 @@
3477
3478 #include "keystone.h"
3479
3480-static struct notifier_block platform_nb;
3481+static notifier_block_no_const platform_nb;
3482 static unsigned long keystone_dma_pfn_offset __read_mostly;
3483
3484 static int keystone_platform_notifier(struct notifier_block *nb,
3485diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3486index ccef880..5dfad80 100644
3487--- a/arch/arm/mach-mvebu/coherency.c
3488+++ b/arch/arm/mach-mvebu/coherency.c
3489@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3490
3491 /*
3492 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3493- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3494+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3495 * is needed as a workaround for a deadlock issue between the PCIe
3496 * interface and the cache controller.
3497 */
3498@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3499 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3500
3501 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3502- mtype = MT_UNCACHED;
3503+ mtype = MT_UNCACHED_RW;
3504
3505 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3506 }
3507diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3508index b6443a4..20a0b74 100644
3509--- a/arch/arm/mach-omap2/board-n8x0.c
3510+++ b/arch/arm/mach-omap2/board-n8x0.c
3511@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3512 }
3513 #endif
3514
3515-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3516+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3517 .late_init = n8x0_menelaus_late_init,
3518 };
3519
3520diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3521index 79f49d9..70bf184 100644
3522--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3523+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3525 void (*resume)(void);
3526 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3527 void (*hotplug_restart)(void);
3528-};
3529+} __no_const;
3530
3531 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3532 static struct powerdomain *mpuss_pd;
3533@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3534 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3535 {}
3536
3537-struct cpu_pm_ops omap_pm_ops = {
3538+static struct cpu_pm_ops omap_pm_ops __read_only = {
3539 .finish_suspend = default_finish_suspend,
3540 .resume = dummy_cpu_resume,
3541 .scu_prepare = dummy_scu_prepare,
3542diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3543index 5305ec7..6d74045 100644
3544--- a/arch/arm/mach-omap2/omap-smp.c
3545+++ b/arch/arm/mach-omap2/omap-smp.c
3546@@ -19,6 +19,7 @@
3547 #include <linux/device.h>
3548 #include <linux/smp.h>
3549 #include <linux/io.h>
3550+#include <linux/irq.h>
3551 #include <linux/irqchip/arm-gic.h>
3552
3553 #include <asm/smp_scu.h>
3554diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3555index f961c46..4a453dc 100644
3556--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3557+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3558@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3559 return NOTIFY_OK;
3560 }
3561
3562-static struct notifier_block __refdata irq_hotplug_notifier = {
3563+static struct notifier_block irq_hotplug_notifier = {
3564 .notifier_call = irq_cpu_hotplug_notify,
3565 };
3566
3567diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3568index be9541e..821805f 100644
3569--- a/arch/arm/mach-omap2/omap_device.c
3570+++ b/arch/arm/mach-omap2/omap_device.c
3571@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3572 struct platform_device __init *omap_device_build(const char *pdev_name,
3573 int pdev_id,
3574 struct omap_hwmod *oh,
3575- void *pdata, int pdata_len)
3576+ const void *pdata, int pdata_len)
3577 {
3578 struct omap_hwmod *ohs[] = { oh };
3579
3580@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3581 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3582 int pdev_id,
3583 struct omap_hwmod **ohs,
3584- int oh_cnt, void *pdata,
3585+ int oh_cnt, const void *pdata,
3586 int pdata_len)
3587 {
3588 int ret = -ENOMEM;
3589diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3590index 78c02b3..c94109a 100644
3591--- a/arch/arm/mach-omap2/omap_device.h
3592+++ b/arch/arm/mach-omap2/omap_device.h
3593@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3594 /* Core code interface */
3595
3596 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3597- struct omap_hwmod *oh, void *pdata,
3598+ struct omap_hwmod *oh, const void *pdata,
3599 int pdata_len);
3600
3601 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3602 struct omap_hwmod **oh, int oh_cnt,
3603- void *pdata, int pdata_len);
3604+ const void *pdata, int pdata_len);
3605
3606 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3607 struct omap_hwmod **ohs, int oh_cnt);
3608diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3609index 9025fff..3555702 100644
3610--- a/arch/arm/mach-omap2/omap_hwmod.c
3611+++ b/arch/arm/mach-omap2/omap_hwmod.c
3612@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3613 int (*init_clkdm)(struct omap_hwmod *oh);
3614 void (*update_context_lost)(struct omap_hwmod *oh);
3615 int (*get_context_lost)(struct omap_hwmod *oh);
3616-};
3617+} __no_const;
3618
3619 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3620-static struct omap_hwmod_soc_ops soc_ops;
3621+static struct omap_hwmod_soc_ops soc_ops __read_only;
3622
3623 /* omap_hwmod_list contains all registered struct omap_hwmods */
3624 static LIST_HEAD(omap_hwmod_list);
3625diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3626index 95fee54..cfa9cf1 100644
3627--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3628+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3629@@ -10,6 +10,7 @@
3630
3631 #include <linux/kernel.h>
3632 #include <linux/init.h>
3633+#include <asm/pgtable.h>
3634
3635 #include "powerdomain.h"
3636
3637@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3638
3639 void __init am43xx_powerdomains_init(void)
3640 {
3641- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3642+ pax_open_kernel();
3643+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3644+ pax_close_kernel();
3645 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3646 pwrdm_register_pwrdms(powerdomains_am43xx);
3647 pwrdm_complete_init();
3648diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3649index ff0a68c..b312aa0 100644
3650--- a/arch/arm/mach-omap2/wd_timer.c
3651+++ b/arch/arm/mach-omap2/wd_timer.c
3652@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3653 struct omap_hwmod *oh;
3654 char *oh_name = "wd_timer2";
3655 char *dev_name = "omap_wdt";
3656- struct omap_wd_timer_platform_data pdata;
3657+ static struct omap_wd_timer_platform_data pdata = {
3658+ .read_reset_sources = prm_read_reset_sources
3659+ };
3660
3661 if (!cpu_class_is_omap2() || of_have_populated_dt())
3662 return 0;
3663@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3664 return -EINVAL;
3665 }
3666
3667- pdata.read_reset_sources = prm_read_reset_sources;
3668-
3669 pdev = omap_device_build(dev_name, id, oh, &pdata,
3670 sizeof(struct omap_wd_timer_platform_data));
3671 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3672diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3673index 4f25a7c..a81be85 100644
3674--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3675+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3676@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3677 bool entered_lp2 = false;
3678
3679 if (tegra_pending_sgi())
3680- ACCESS_ONCE(abort_flag) = true;
3681+ ACCESS_ONCE_RW(abort_flag) = true;
3682
3683 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3684
3685diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3686index ab95f53..4b977a7 100644
3687--- a/arch/arm/mach-tegra/irq.c
3688+++ b/arch/arm/mach-tegra/irq.c
3689@@ -20,6 +20,7 @@
3690 #include <linux/cpu_pm.h>
3691 #include <linux/interrupt.h>
3692 #include <linux/io.h>
3693+#include <linux/irq.h>
3694 #include <linux/irqchip/arm-gic.h>
3695 #include <linux/irq.h>
3696 #include <linux/kernel.h>
3697diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3698index 2cb587b..6ddfebf 100644
3699--- a/arch/arm/mach-ux500/pm.c
3700+++ b/arch/arm/mach-ux500/pm.c
3701@@ -10,6 +10,7 @@
3702 */
3703
3704 #include <linux/kernel.h>
3705+#include <linux/irq.h>
3706 #include <linux/irqchip/arm-gic.h>
3707 #include <linux/delay.h>
3708 #include <linux/io.h>
3709diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3710index 2dea8b5..6499da2 100644
3711--- a/arch/arm/mach-ux500/setup.h
3712+++ b/arch/arm/mach-ux500/setup.h
3713@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3714 .type = MT_DEVICE, \
3715 }
3716
3717-#define __MEM_DEV_DESC(x, sz) { \
3718- .virtual = IO_ADDRESS(x), \
3719- .pfn = __phys_to_pfn(x), \
3720- .length = sz, \
3721- .type = MT_MEMORY_RWX, \
3722-}
3723-
3724 extern struct smp_operations ux500_smp_ops;
3725 extern void ux500_cpu_die(unsigned int cpu);
3726
3727diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3728index 52d768f..5f93180 100644
3729--- a/arch/arm/mach-zynq/platsmp.c
3730+++ b/arch/arm/mach-zynq/platsmp.c
3731@@ -24,6 +24,7 @@
3732 #include <linux/io.h>
3733 #include <asm/cacheflush.h>
3734 #include <asm/smp_scu.h>
3735+#include <linux/irq.h>
3736 #include <linux/irqchip/arm-gic.h>
3737 #include "common.h"
3738
3739diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3740index c43c714..4f8f7b9 100644
3741--- a/arch/arm/mm/Kconfig
3742+++ b/arch/arm/mm/Kconfig
3743@@ -446,6 +446,7 @@ config CPU_32v5
3744
3745 config CPU_32v6
3746 bool
3747+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3748 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3749
3750 config CPU_32v6K
3751@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3752
3753 config CPU_USE_DOMAINS
3754 bool
3755+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3756 help
3757 This option enables or disables the use of domain switching
3758 via the set_fs() function.
3759@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3760
3761 config KUSER_HELPERS
3762 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3763- depends on MMU
3764+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3765 default y
3766 help
3767 Warning: disabling this option may break user programs.
3768@@ -812,7 +814,7 @@ config KUSER_HELPERS
3769 See Documentation/arm/kernel_user_helpers.txt for details.
3770
3771 However, the fixed address nature of these helpers can be used
3772- by ROP (return orientated programming) authors when creating
3773+ by ROP (Return Oriented Programming) authors when creating
3774 exploits.
3775
3776 If all of the binaries and libraries which run on your platform
3777diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3778index 2c0c541..4585df9 100644
3779--- a/arch/arm/mm/alignment.c
3780+++ b/arch/arm/mm/alignment.c
3781@@ -216,10 +216,12 @@ union offset_union {
3782 #define __get16_unaligned_check(ins,val,addr) \
3783 do { \
3784 unsigned int err = 0, v, a = addr; \
3785+ pax_open_userland(); \
3786 __get8_unaligned_check(ins,v,a,err); \
3787 val = v << ((BE) ? 8 : 0); \
3788 __get8_unaligned_check(ins,v,a,err); \
3789 val |= v << ((BE) ? 0 : 8); \
3790+ pax_close_userland(); \
3791 if (err) \
3792 goto fault; \
3793 } while (0)
3794@@ -233,6 +235,7 @@ union offset_union {
3795 #define __get32_unaligned_check(ins,val,addr) \
3796 do { \
3797 unsigned int err = 0, v, a = addr; \
3798+ pax_open_userland(); \
3799 __get8_unaligned_check(ins,v,a,err); \
3800 val = v << ((BE) ? 24 : 0); \
3801 __get8_unaligned_check(ins,v,a,err); \
3802@@ -241,6 +244,7 @@ union offset_union {
3803 val |= v << ((BE) ? 8 : 16); \
3804 __get8_unaligned_check(ins,v,a,err); \
3805 val |= v << ((BE) ? 0 : 24); \
3806+ pax_close_userland(); \
3807 if (err) \
3808 goto fault; \
3809 } while (0)
3810@@ -254,6 +258,7 @@ union offset_union {
3811 #define __put16_unaligned_check(ins,val,addr) \
3812 do { \
3813 unsigned int err = 0, v = val, a = addr; \
3814+ pax_open_userland(); \
3815 __asm__( FIRST_BYTE_16 \
3816 ARM( "1: "ins" %1, [%2], #1\n" ) \
3817 THUMB( "1: "ins" %1, [%2]\n" ) \
3818@@ -273,6 +278,7 @@ union offset_union {
3819 " .popsection\n" \
3820 : "=r" (err), "=&r" (v), "=&r" (a) \
3821 : "0" (err), "1" (v), "2" (a)); \
3822+ pax_close_userland(); \
3823 if (err) \
3824 goto fault; \
3825 } while (0)
3826@@ -286,6 +292,7 @@ union offset_union {
3827 #define __put32_unaligned_check(ins,val,addr) \
3828 do { \
3829 unsigned int err = 0, v = val, a = addr; \
3830+ pax_open_userland(); \
3831 __asm__( FIRST_BYTE_32 \
3832 ARM( "1: "ins" %1, [%2], #1\n" ) \
3833 THUMB( "1: "ins" %1, [%2]\n" ) \
3834@@ -315,6 +322,7 @@ union offset_union {
3835 " .popsection\n" \
3836 : "=r" (err), "=&r" (v), "=&r" (a) \
3837 : "0" (err), "1" (v), "2" (a)); \
3838+ pax_close_userland(); \
3839 if (err) \
3840 goto fault; \
3841 } while (0)
3842diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3843index 5e65ca8..879e7b3 100644
3844--- a/arch/arm/mm/cache-l2x0.c
3845+++ b/arch/arm/mm/cache-l2x0.c
3846@@ -42,7 +42,7 @@ struct l2c_init_data {
3847 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3848 void (*save)(void __iomem *);
3849 struct outer_cache_fns outer_cache;
3850-};
3851+} __do_const;
3852
3853 #define CACHE_LINE_SIZE 32
3854
3855diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3856index 845769e..4278fd7 100644
3857--- a/arch/arm/mm/context.c
3858+++ b/arch/arm/mm/context.c
3859@@ -43,7 +43,7 @@
3860 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3861
3862 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3863-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3864+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3865 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3866
3867 static DEFINE_PER_CPU(atomic64_t, active_asids);
3868@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3869 {
3870 static u32 cur_idx = 1;
3871 u64 asid = atomic64_read(&mm->context.id);
3872- u64 generation = atomic64_read(&asid_generation);
3873+ u64 generation = atomic64_read_unchecked(&asid_generation);
3874
3875 if (asid != 0) {
3876 /*
3877@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3878 */
3879 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3880 if (asid == NUM_USER_ASIDS) {
3881- generation = atomic64_add_return(ASID_FIRST_VERSION,
3882+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3883 &asid_generation);
3884 flush_context(cpu);
3885 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3886@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3887 cpu_set_reserved_ttbr0();
3888
3889 asid = atomic64_read(&mm->context.id);
3890- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3891+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3892 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3893 goto switch_mm_fastpath;
3894
3895 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3896 /* Check that our ASID belongs to the current generation. */
3897 asid = atomic64_read(&mm->context.id);
3898- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3899+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3900 asid = new_context(mm, cpu);
3901 atomic64_set(&mm->context.id, asid);
3902 }
3903diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3904index a982dc3..2d9f5f7 100644
3905--- a/arch/arm/mm/fault.c
3906+++ b/arch/arm/mm/fault.c
3907@@ -25,6 +25,7 @@
3908 #include <asm/system_misc.h>
3909 #include <asm/system_info.h>
3910 #include <asm/tlbflush.h>
3911+#include <asm/sections.h>
3912
3913 #include "fault.h"
3914
3915@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3916 if (fixup_exception(regs))
3917 return;
3918
3919+#ifdef CONFIG_PAX_MEMORY_UDEREF
3920+ if (addr < TASK_SIZE) {
3921+ if (current->signal->curr_ip)
3922+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3923+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3924+ else
3925+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3926+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3927+ }
3928+#endif
3929+
3930+#ifdef CONFIG_PAX_KERNEXEC
3931+ if ((fsr & FSR_WRITE) &&
3932+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3933+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3934+ {
3935+ if (current->signal->curr_ip)
3936+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3937+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3938+ else
3939+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3940+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3941+ }
3942+#endif
3943+
3944 /*
3945 * No handler, we'll have to terminate things with extreme prejudice.
3946 */
3947@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3948 }
3949 #endif
3950
3951+#ifdef CONFIG_PAX_PAGEEXEC
3952+ if (fsr & FSR_LNX_PF) {
3953+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3954+ do_group_exit(SIGKILL);
3955+ }
3956+#endif
3957+
3958 tsk->thread.address = addr;
3959 tsk->thread.error_code = fsr;
3960 tsk->thread.trap_no = 14;
3961@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3962 }
3963 #endif /* CONFIG_MMU */
3964
3965+#ifdef CONFIG_PAX_PAGEEXEC
3966+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3967+{
3968+ long i;
3969+
3970+ printk(KERN_ERR "PAX: bytes at PC: ");
3971+ for (i = 0; i < 20; i++) {
3972+ unsigned char c;
3973+ if (get_user(c, (__force unsigned char __user *)pc+i))
3974+ printk(KERN_CONT "?? ");
3975+ else
3976+ printk(KERN_CONT "%02x ", c);
3977+ }
3978+ printk("\n");
3979+
3980+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3981+ for (i = -1; i < 20; i++) {
3982+ unsigned long c;
3983+ if (get_user(c, (__force unsigned long __user *)sp+i))
3984+ printk(KERN_CONT "???????? ");
3985+ else
3986+ printk(KERN_CONT "%08lx ", c);
3987+ }
3988+ printk("\n");
3989+}
3990+#endif
3991+
3992 /*
3993 * First Level Translation Fault Handler
3994 *
3995@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3996 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3997 struct siginfo info;
3998
3999+#ifdef CONFIG_PAX_MEMORY_UDEREF
4000+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4001+ if (current->signal->curr_ip)
4002+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4003+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4004+ else
4005+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4006+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4007+ goto die;
4008+ }
4009+#endif
4010+
4011 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4012 return;
4013
4014+die:
4015 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4016 inf->name, fsr, addr);
4017
4018@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4019 ifsr_info[nr].name = name;
4020 }
4021
4022+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4023+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4024+
4025 asmlinkage void __exception
4026 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4027 {
4028 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4029 struct siginfo info;
4030+ unsigned long pc = instruction_pointer(regs);
4031+
4032+ if (user_mode(regs)) {
4033+ unsigned long sigpage = current->mm->context.sigpage;
4034+
4035+ if (sigpage <= pc && pc < sigpage + 7*4) {
4036+ if (pc < sigpage + 3*4)
4037+ sys_sigreturn(regs);
4038+ else
4039+ sys_rt_sigreturn(regs);
4040+ return;
4041+ }
4042+ if (pc == 0xffff0f60UL) {
4043+ /*
4044+ * PaX: __kuser_cmpxchg64 emulation
4045+ */
4046+ // TODO
4047+ //regs->ARM_pc = regs->ARM_lr;
4048+ //return;
4049+ }
4050+ if (pc == 0xffff0fa0UL) {
4051+ /*
4052+ * PaX: __kuser_memory_barrier emulation
4053+ */
4054+ // dmb(); implied by the exception
4055+ regs->ARM_pc = regs->ARM_lr;
4056+ return;
4057+ }
4058+ if (pc == 0xffff0fc0UL) {
4059+ /*
4060+ * PaX: __kuser_cmpxchg emulation
4061+ */
4062+ // TODO
4063+ //long new;
4064+ //int op;
4065+
4066+ //op = FUTEX_OP_SET << 28;
4067+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4068+ //regs->ARM_r0 = old != new;
4069+ //regs->ARM_pc = regs->ARM_lr;
4070+ //return;
4071+ }
4072+ if (pc == 0xffff0fe0UL) {
4073+ /*
4074+ * PaX: __kuser_get_tls emulation
4075+ */
4076+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4077+ regs->ARM_pc = regs->ARM_lr;
4078+ return;
4079+ }
4080+ }
4081+
4082+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4083+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4084+ if (current->signal->curr_ip)
4085+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4086+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4087+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4088+ else
4089+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4090+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4091+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4092+ goto die;
4093+ }
4094+#endif
4095+
4096+#ifdef CONFIG_PAX_REFCOUNT
4097+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4098+#ifdef CONFIG_THUMB2_KERNEL
4099+ unsigned short bkpt;
4100+
4101+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4102+#else
4103+ unsigned int bkpt;
4104+
4105+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4106+#endif
4107+ current->thread.error_code = ifsr;
4108+ current->thread.trap_no = 0;
4109+ pax_report_refcount_overflow(regs);
4110+ fixup_exception(regs);
4111+ return;
4112+ }
4113+ }
4114+#endif
4115
4116 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4117 return;
4118
4119+die:
4120 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4121 inf->name, ifsr, addr);
4122
4123diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4124index cf08bdf..772656c 100644
4125--- a/arch/arm/mm/fault.h
4126+++ b/arch/arm/mm/fault.h
4127@@ -3,6 +3,7 @@
4128
4129 /*
4130 * Fault status register encodings. We steal bit 31 for our own purposes.
4131+ * Set when the FSR value is from an instruction fault.
4132 */
4133 #define FSR_LNX_PF (1 << 31)
4134 #define FSR_WRITE (1 << 11)
4135@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4136 }
4137 #endif
4138
4139+/* valid for LPAE and !LPAE */
4140+static inline int is_xn_fault(unsigned int fsr)
4141+{
4142+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4143+}
4144+
4145+static inline int is_domain_fault(unsigned int fsr)
4146+{
4147+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4148+}
4149+
4150 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4151 unsigned long search_exception_table(unsigned long addr);
4152
4153diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4154index 2495c8c..415b7fc 100644
4155--- a/arch/arm/mm/init.c
4156+++ b/arch/arm/mm/init.c
4157@@ -758,7 +758,46 @@ void free_tcmmem(void)
4158 {
4159 #ifdef CONFIG_HAVE_TCM
4160 extern char __tcm_start, __tcm_end;
4161+#endif
4162
4163+#ifdef CONFIG_PAX_KERNEXEC
4164+ unsigned long addr;
4165+ pgd_t *pgd;
4166+ pud_t *pud;
4167+ pmd_t *pmd;
4168+ int cpu_arch = cpu_architecture();
4169+ unsigned int cr = get_cr();
4170+
4171+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4172+ /* make pages tables, etc before .text NX */
4173+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4174+ pgd = pgd_offset_k(addr);
4175+ pud = pud_offset(pgd, addr);
4176+ pmd = pmd_offset(pud, addr);
4177+ __section_update(pmd, addr, PMD_SECT_XN);
4178+ }
4179+ /* make init NX */
4180+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4181+ pgd = pgd_offset_k(addr);
4182+ pud = pud_offset(pgd, addr);
4183+ pmd = pmd_offset(pud, addr);
4184+ __section_update(pmd, addr, PMD_SECT_XN);
4185+ }
4186+ /* make kernel code/rodata RX */
4187+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4188+ pgd = pgd_offset_k(addr);
4189+ pud = pud_offset(pgd, addr);
4190+ pmd = pmd_offset(pud, addr);
4191+#ifdef CONFIG_ARM_LPAE
4192+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4193+#else
4194+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4195+#endif
4196+ }
4197+ }
4198+#endif
4199+
4200+#ifdef CONFIG_HAVE_TCM
4201 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4202 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4203 #endif
4204diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4205index d1e5ad7..84dcbf2 100644
4206--- a/arch/arm/mm/ioremap.c
4207+++ b/arch/arm/mm/ioremap.c
4208@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4209 unsigned int mtype;
4210
4211 if (cached)
4212- mtype = MT_MEMORY_RWX;
4213+ mtype = MT_MEMORY_RX;
4214 else
4215- mtype = MT_MEMORY_RWX_NONCACHED;
4216+ mtype = MT_MEMORY_RX_NONCACHED;
4217
4218 return __arm_ioremap_caller(phys_addr, size, mtype,
4219 __builtin_return_address(0));
4220diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4221index 5e85ed3..b10a7ed 100644
4222--- a/arch/arm/mm/mmap.c
4223+++ b/arch/arm/mm/mmap.c
4224@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4225 struct vm_area_struct *vma;
4226 int do_align = 0;
4227 int aliasing = cache_is_vipt_aliasing();
4228+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4229 struct vm_unmapped_area_info info;
4230
4231 /*
4232@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4233 if (len > TASK_SIZE)
4234 return -ENOMEM;
4235
4236+#ifdef CONFIG_PAX_RANDMMAP
4237+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4238+#endif
4239+
4240 if (addr) {
4241 if (do_align)
4242 addr = COLOUR_ALIGN(addr, pgoff);
4243@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 addr = PAGE_ALIGN(addr);
4245
4246 vma = find_vma(mm, addr);
4247- if (TASK_SIZE - len >= addr &&
4248- (!vma || addr + len <= vma->vm_start))
4249+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4250 return addr;
4251 }
4252
4253@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4254 info.high_limit = TASK_SIZE;
4255 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4256 info.align_offset = pgoff << PAGE_SHIFT;
4257+ info.threadstack_offset = offset;
4258 return vm_unmapped_area(&info);
4259 }
4260
4261@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4262 unsigned long addr = addr0;
4263 int do_align = 0;
4264 int aliasing = cache_is_vipt_aliasing();
4265+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4266 struct vm_unmapped_area_info info;
4267
4268 /*
4269@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4270 return addr;
4271 }
4272
4273+#ifdef CONFIG_PAX_RANDMMAP
4274+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4275+#endif
4276+
4277 /* requesting a specific address */
4278 if (addr) {
4279 if (do_align)
4280@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4281 else
4282 addr = PAGE_ALIGN(addr);
4283 vma = find_vma(mm, addr);
4284- if (TASK_SIZE - len >= addr &&
4285- (!vma || addr + len <= vma->vm_start))
4286+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4287 return addr;
4288 }
4289
4290@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4291 info.high_limit = mm->mmap_base;
4292 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4293 info.align_offset = pgoff << PAGE_SHIFT;
4294+ info.threadstack_offset = offset;
4295 addr = vm_unmapped_area(&info);
4296
4297 /*
4298@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4299 {
4300 unsigned long random_factor = 0UL;
4301
4302+#ifdef CONFIG_PAX_RANDMMAP
4303+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4304+#endif
4305+
4306 /* 8 bits of randomness in 20 address space bits */
4307 if ((current->flags & PF_RANDOMIZE) &&
4308 !(current->personality & ADDR_NO_RANDOMIZE))
4309@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4310
4311 if (mmap_is_legacy()) {
4312 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4313+
4314+#ifdef CONFIG_PAX_RANDMMAP
4315+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4316+ mm->mmap_base += mm->delta_mmap;
4317+#endif
4318+
4319 mm->get_unmapped_area = arch_get_unmapped_area;
4320 } else {
4321 mm->mmap_base = mmap_base(random_factor);
4322+
4323+#ifdef CONFIG_PAX_RANDMMAP
4324+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4325+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4326+#endif
4327+
4328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4329 }
4330 }
4331diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4332index 4e6ef89..21c27f2 100644
4333--- a/arch/arm/mm/mmu.c
4334+++ b/arch/arm/mm/mmu.c
4335@@ -41,6 +41,22 @@
4336 #include "mm.h"
4337 #include "tcm.h"
4338
4339+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4340+void modify_domain(unsigned int dom, unsigned int type)
4341+{
4342+ struct thread_info *thread = current_thread_info();
4343+ unsigned int domain = thread->cpu_domain;
4344+ /*
4345+ * DOMAIN_MANAGER might be defined to some other value,
4346+ * use the arch-defined constant
4347+ */
4348+ domain &= ~domain_val(dom, 3);
4349+ thread->cpu_domain = domain | domain_val(dom, type);
4350+ set_domain(thread->cpu_domain);
4351+}
4352+EXPORT_SYMBOL(modify_domain);
4353+#endif
4354+
4355 /*
4356 * empty_zero_page is a special page that is used for
4357 * zero-initialized data and COW.
4358@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4359 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4360 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4361
4362-static struct mem_type mem_types[] = {
4363+#ifdef CONFIG_PAX_KERNEXEC
4364+#define L_PTE_KERNEXEC L_PTE_RDONLY
4365+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4366+#else
4367+#define L_PTE_KERNEXEC L_PTE_DIRTY
4368+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4369+#endif
4370+
4371+static struct mem_type mem_types[] __read_only = {
4372 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4373 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4374 L_PTE_SHARED,
4375@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4376 .prot_sect = PROT_SECT_DEVICE,
4377 .domain = DOMAIN_IO,
4378 },
4379- [MT_UNCACHED] = {
4380+ [MT_UNCACHED_RW] = {
4381 .prot_pte = PROT_PTE_DEVICE,
4382 .prot_l1 = PMD_TYPE_TABLE,
4383 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4384 .domain = DOMAIN_IO,
4385 },
4386- [MT_CACHECLEAN] = {
4387- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4388+ [MT_CACHECLEAN_RO] = {
4389+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4390 .domain = DOMAIN_KERNEL,
4391 },
4392 #ifndef CONFIG_ARM_LPAE
4393- [MT_MINICLEAN] = {
4394- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4395+ [MT_MINICLEAN_RO] = {
4396+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4397 .domain = DOMAIN_KERNEL,
4398 },
4399 #endif
4400@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4401 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4402 L_PTE_RDONLY,
4403 .prot_l1 = PMD_TYPE_TABLE,
4404- .domain = DOMAIN_USER,
4405+ .domain = DOMAIN_VECTORS,
4406 },
4407 [MT_HIGH_VECTORS] = {
4408 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4409 L_PTE_USER | L_PTE_RDONLY,
4410 .prot_l1 = PMD_TYPE_TABLE,
4411- .domain = DOMAIN_USER,
4412+ .domain = DOMAIN_VECTORS,
4413 },
4414- [MT_MEMORY_RWX] = {
4415+ [__MT_MEMORY_RWX] = {
4416 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4417 .prot_l1 = PMD_TYPE_TABLE,
4418 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4419@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4420 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4421 .domain = DOMAIN_KERNEL,
4422 },
4423- [MT_ROM] = {
4424- .prot_sect = PMD_TYPE_SECT,
4425+ [MT_MEMORY_RX] = {
4426+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4427+ .prot_l1 = PMD_TYPE_TABLE,
4428+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4429+ .domain = DOMAIN_KERNEL,
4430+ },
4431+ [MT_ROM_RX] = {
4432+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4433 .domain = DOMAIN_KERNEL,
4434 },
4435- [MT_MEMORY_RWX_NONCACHED] = {
4436+ [MT_MEMORY_RW_NONCACHED] = {
4437 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4438 L_PTE_MT_BUFFERABLE,
4439 .prot_l1 = PMD_TYPE_TABLE,
4440 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4441 .domain = DOMAIN_KERNEL,
4442 },
4443+ [MT_MEMORY_RX_NONCACHED] = {
4444+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4445+ L_PTE_MT_BUFFERABLE,
4446+ .prot_l1 = PMD_TYPE_TABLE,
4447+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4448+ .domain = DOMAIN_KERNEL,
4449+ },
4450 [MT_MEMORY_RW_DTCM] = {
4451 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4452 L_PTE_XN,
4453@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4454 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4455 .domain = DOMAIN_KERNEL,
4456 },
4457- [MT_MEMORY_RWX_ITCM] = {
4458- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4459+ [MT_MEMORY_RX_ITCM] = {
4460+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4461 .prot_l1 = PMD_TYPE_TABLE,
4462+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4463 .domain = DOMAIN_KERNEL,
4464 },
4465 [MT_MEMORY_RW_SO] = {
4466@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4467 * Mark cache clean areas and XIP ROM read only
4468 * from SVC mode and no access from userspace.
4469 */
4470- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4472- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4473+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4474+#ifdef CONFIG_PAX_KERNEXEC
4475+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4477+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4478+#endif
4479+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4480+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481 #endif
4482
4483 /*
4484@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4485 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4486 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4487 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4488- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4489- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4490+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4491+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4492 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4493 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4494+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4495+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4496 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4497- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4498- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4499+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4500+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4501+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4502+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4503 }
4504 }
4505
4506@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4507 if (cpu_arch >= CPU_ARCH_ARMv6) {
4508 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4509 /* Non-cacheable Normal is XCB = 001 */
4510- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4511+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4512+ PMD_SECT_BUFFERED;
4513+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4514 PMD_SECT_BUFFERED;
4515 } else {
4516 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4517- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4518+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4519+ PMD_SECT_TEX(1);
4520+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4521 PMD_SECT_TEX(1);
4522 }
4523 } else {
4524- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4525+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4526+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4527 }
4528
4529 #ifdef CONFIG_ARM_LPAE
4530@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4531 user_pgprot |= PTE_EXT_PXN;
4532 #endif
4533
4534+ user_pgprot |= __supported_pte_mask;
4535+
4536 for (i = 0; i < 16; i++) {
4537 pteval_t v = pgprot_val(protection_map[i]);
4538 protection_map[i] = __pgprot(v | user_pgprot);
4539@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4540
4541 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4542 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4543- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4544- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4545+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4546+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4547 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4548 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4549+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4550+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4551 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4552- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4553- mem_types[MT_ROM].prot_sect |= cp->pmd;
4554+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4555+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4556+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4557
4558 switch (cp->pmd) {
4559 case PMD_SECT_WT:
4560- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4561+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4562 break;
4563 case PMD_SECT_WB:
4564 case PMD_SECT_WBWA:
4565- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4566+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4567 break;
4568 }
4569 pr_info("Memory policy: %sData cache %s\n",
4570@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4571 return;
4572 }
4573
4574- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4575+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4576 md->virtual >= PAGE_OFFSET &&
4577 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4578 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4579@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4580 * called function. This means you can't use any function or debugging
4581 * method which may touch any device, otherwise the kernel _will_ crash.
4582 */
4583+
4584+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4585+
4586 static void __init devicemaps_init(const struct machine_desc *mdesc)
4587 {
4588 struct map_desc map;
4589 unsigned long addr;
4590- void *vectors;
4591
4592- /*
4593- * Allocate the vector page early.
4594- */
4595- vectors = early_alloc(PAGE_SIZE * 2);
4596-
4597- early_trap_init(vectors);
4598+ early_trap_init(&vectors);
4599
4600 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4601 pmd_clear(pmd_off_k(addr));
4602@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4603 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4604 map.virtual = MODULES_VADDR;
4605 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4606- map.type = MT_ROM;
4607+ map.type = MT_ROM_RX;
4608 create_mapping(&map);
4609 #endif
4610
4611@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4612 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4613 map.virtual = FLUSH_BASE;
4614 map.length = SZ_1M;
4615- map.type = MT_CACHECLEAN;
4616+ map.type = MT_CACHECLEAN_RO;
4617 create_mapping(&map);
4618 #endif
4619 #ifdef FLUSH_BASE_MINICACHE
4620 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4621 map.virtual = FLUSH_BASE_MINICACHE;
4622 map.length = SZ_1M;
4623- map.type = MT_MINICLEAN;
4624+ map.type = MT_MINICLEAN_RO;
4625 create_mapping(&map);
4626 #endif
4627
4628@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4629 * location (0xffff0000). If we aren't using high-vectors, also
4630 * create a mapping at the low-vectors virtual address.
4631 */
4632- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4633+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4634 map.virtual = 0xffff0000;
4635 map.length = PAGE_SIZE;
4636 #ifdef CONFIG_KUSER_HELPERS
4637@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4638 static void __init map_lowmem(void)
4639 {
4640 struct memblock_region *reg;
4641+#ifndef CONFIG_PAX_KERNEXEC
4642 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4643 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4644+#endif
4645
4646 /* Map all the lowmem memory banks. */
4647 for_each_memblock(memory, reg) {
4648@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4649 if (start >= end)
4650 break;
4651
4652+#ifdef CONFIG_PAX_KERNEXEC
4653+ map.pfn = __phys_to_pfn(start);
4654+ map.virtual = __phys_to_virt(start);
4655+ map.length = end - start;
4656+
4657+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4658+ struct map_desc kernel;
4659+ struct map_desc initmap;
4660+
4661+ /* when freeing initmem we will make this RW */
4662+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4663+ initmap.virtual = (unsigned long)__init_begin;
4664+ initmap.length = _sdata - __init_begin;
4665+ initmap.type = __MT_MEMORY_RWX;
4666+ create_mapping(&initmap);
4667+
4668+ /* when freeing initmem we will make this RX */
4669+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4670+ kernel.virtual = (unsigned long)_stext;
4671+ kernel.length = __init_begin - _stext;
4672+ kernel.type = __MT_MEMORY_RWX;
4673+ create_mapping(&kernel);
4674+
4675+ if (map.virtual < (unsigned long)_stext) {
4676+ map.length = (unsigned long)_stext - map.virtual;
4677+ map.type = __MT_MEMORY_RWX;
4678+ create_mapping(&map);
4679+ }
4680+
4681+ map.pfn = __phys_to_pfn(__pa(_sdata));
4682+ map.virtual = (unsigned long)_sdata;
4683+ map.length = end - __pa(_sdata);
4684+ }
4685+
4686+ map.type = MT_MEMORY_RW;
4687+ create_mapping(&map);
4688+#else
4689 if (end < kernel_x_start) {
4690 map.pfn = __phys_to_pfn(start);
4691 map.virtual = __phys_to_virt(start);
4692 map.length = end - start;
4693- map.type = MT_MEMORY_RWX;
4694+ map.type = __MT_MEMORY_RWX;
4695
4696 create_mapping(&map);
4697 } else if (start >= kernel_x_end) {
4698@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4699 map.pfn = __phys_to_pfn(kernel_x_start);
4700 map.virtual = __phys_to_virt(kernel_x_start);
4701 map.length = kernel_x_end - kernel_x_start;
4702- map.type = MT_MEMORY_RWX;
4703+ map.type = __MT_MEMORY_RWX;
4704
4705 create_mapping(&map);
4706
4707@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4708 create_mapping(&map);
4709 }
4710 }
4711+#endif
4712 }
4713 }
4714
4715diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4716index e1268f9..a9755a7 100644
4717--- a/arch/arm/net/bpf_jit_32.c
4718+++ b/arch/arm/net/bpf_jit_32.c
4719@@ -20,6 +20,7 @@
4720 #include <asm/cacheflush.h>
4721 #include <asm/hwcap.h>
4722 #include <asm/opcodes.h>
4723+#include <asm/pgtable.h>
4724
4725 #include "bpf_jit_32.h"
4726
4727@@ -71,7 +72,11 @@ struct jit_ctx {
4728 #endif
4729 };
4730
4731+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4732+int bpf_jit_enable __read_only;
4733+#else
4734 int bpf_jit_enable __read_mostly;
4735+#endif
4736
4737 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4738 {
4739@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4740 {
4741 u32 *ptr;
4742 /* We are guaranteed to have aligned memory. */
4743+ pax_open_kernel();
4744 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4745 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4746+ pax_close_kernel();
4747 }
4748
4749 static void build_prologue(struct jit_ctx *ctx)
4750diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4751index 5b217f4..c23f40e 100644
4752--- a/arch/arm/plat-iop/setup.c
4753+++ b/arch/arm/plat-iop/setup.c
4754@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4755 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4756 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4757 .length = IOP3XX_PERIPHERAL_SIZE,
4758- .type = MT_UNCACHED,
4759+ .type = MT_UNCACHED_RW,
4760 },
4761 };
4762
4763diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4764index a5bc92d..0bb4730 100644
4765--- a/arch/arm/plat-omap/sram.c
4766+++ b/arch/arm/plat-omap/sram.c
4767@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4768 * Looks like we need to preserve some bootloader code at the
4769 * beginning of SRAM for jumping to flash for reboot to work...
4770 */
4771+ pax_open_kernel();
4772 memset_io(omap_sram_base + omap_sram_skip, 0,
4773 omap_sram_size - omap_sram_skip);
4774+ pax_close_kernel();
4775 }
4776diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4777index ce6d763..cfea917 100644
4778--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4779+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4780@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4781 int (*started)(unsigned ch);
4782 int (*flush)(unsigned ch);
4783 int (*stop)(unsigned ch);
4784-};
4785+} __no_const;
4786
4787 extern void *samsung_dmadev_get_ops(void);
4788 extern void *s3c_dma_get_ops(void);
4789diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4790index a5abb00..9cbca9a 100644
4791--- a/arch/arm64/include/asm/barrier.h
4792+++ b/arch/arm64/include/asm/barrier.h
4793@@ -44,7 +44,7 @@
4794 do { \
4795 compiletime_assert_atomic_type(*p); \
4796 barrier(); \
4797- ACCESS_ONCE(*p) = (v); \
4798+ ACCESS_ONCE_RW(*p) = (v); \
4799 } while (0)
4800
4801 #define smp_load_acquire(p) \
4802diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4803index 09da25b..3ea0d64 100644
4804--- a/arch/arm64/include/asm/percpu.h
4805+++ b/arch/arm64/include/asm/percpu.h
4806@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4807 {
4808 switch (size) {
4809 case 1:
4810- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4811+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4812 break;
4813 case 2:
4814- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4815+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4816 break;
4817 case 4:
4818- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4819+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4820 break;
4821 case 8:
4822- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4823+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4824 break;
4825 default:
4826 BUILD_BUG();
4827diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4828index 3bf8f4e..5dd5491 100644
4829--- a/arch/arm64/include/asm/uaccess.h
4830+++ b/arch/arm64/include/asm/uaccess.h
4831@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4832 flag; \
4833 })
4834
4835+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4836 #define access_ok(type, addr, size) __range_ok(addr, size)
4837 #define user_addr_max get_fs
4838
4839diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4840index c3a58a1..78fbf54 100644
4841--- a/arch/avr32/include/asm/cache.h
4842+++ b/arch/avr32/include/asm/cache.h
4843@@ -1,8 +1,10 @@
4844 #ifndef __ASM_AVR32_CACHE_H
4845 #define __ASM_AVR32_CACHE_H
4846
4847+#include <linux/const.h>
4848+
4849 #define L1_CACHE_SHIFT 5
4850-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4851+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4852
4853 /*
4854 * Memory returned by kmalloc() may be used for DMA, so we must make
4855diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4856index d232888..87c8df1 100644
4857--- a/arch/avr32/include/asm/elf.h
4858+++ b/arch/avr32/include/asm/elf.h
4859@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4860 the loader. We need to make sure that it is out of the way of the program
4861 that it will "exec", and that there is sufficient room for the brk. */
4862
4863-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4864+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4865
4866+#ifdef CONFIG_PAX_ASLR
4867+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4868+
4869+#define PAX_DELTA_MMAP_LEN 15
4870+#define PAX_DELTA_STACK_LEN 15
4871+#endif
4872
4873 /* This yields a mask that user programs can use to figure out what
4874 instruction set this CPU supports. This could be done in user space,
4875diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4876index 479330b..53717a8 100644
4877--- a/arch/avr32/include/asm/kmap_types.h
4878+++ b/arch/avr32/include/asm/kmap_types.h
4879@@ -2,9 +2,9 @@
4880 #define __ASM_AVR32_KMAP_TYPES_H
4881
4882 #ifdef CONFIG_DEBUG_HIGHMEM
4883-# define KM_TYPE_NR 29
4884+# define KM_TYPE_NR 30
4885 #else
4886-# define KM_TYPE_NR 14
4887+# define KM_TYPE_NR 15
4888 #endif
4889
4890 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4891diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4892index d223a8b..69c5210 100644
4893--- a/arch/avr32/mm/fault.c
4894+++ b/arch/avr32/mm/fault.c
4895@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4896
4897 int exception_trace = 1;
4898
4899+#ifdef CONFIG_PAX_PAGEEXEC
4900+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4901+{
4902+ unsigned long i;
4903+
4904+ printk(KERN_ERR "PAX: bytes at PC: ");
4905+ for (i = 0; i < 20; i++) {
4906+ unsigned char c;
4907+ if (get_user(c, (unsigned char *)pc+i))
4908+ printk(KERN_CONT "???????? ");
4909+ else
4910+ printk(KERN_CONT "%02x ", c);
4911+ }
4912+ printk("\n");
4913+}
4914+#endif
4915+
4916 /*
4917 * This routine handles page faults. It determines the address and the
4918 * problem, and then passes it off to one of the appropriate routines.
4919@@ -178,6 +195,16 @@ bad_area:
4920 up_read(&mm->mmap_sem);
4921
4922 if (user_mode(regs)) {
4923+
4924+#ifdef CONFIG_PAX_PAGEEXEC
4925+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4926+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4927+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4928+ do_group_exit(SIGKILL);
4929+ }
4930+ }
4931+#endif
4932+
4933 if (exception_trace && printk_ratelimit())
4934 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4935 "sp %08lx ecr %lu\n",
4936diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4937index 568885a..f8008df 100644
4938--- a/arch/blackfin/include/asm/cache.h
4939+++ b/arch/blackfin/include/asm/cache.h
4940@@ -7,6 +7,7 @@
4941 #ifndef __ARCH_BLACKFIN_CACHE_H
4942 #define __ARCH_BLACKFIN_CACHE_H
4943
4944+#include <linux/const.h>
4945 #include <linux/linkage.h> /* for asmlinkage */
4946
4947 /*
4948@@ -14,7 +15,7 @@
4949 * Blackfin loads 32 bytes for cache
4950 */
4951 #define L1_CACHE_SHIFT 5
4952-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4953+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4954 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4955
4956 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4957diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4958index aea2718..3639a60 100644
4959--- a/arch/cris/include/arch-v10/arch/cache.h
4960+++ b/arch/cris/include/arch-v10/arch/cache.h
4961@@ -1,8 +1,9 @@
4962 #ifndef _ASM_ARCH_CACHE_H
4963 #define _ASM_ARCH_CACHE_H
4964
4965+#include <linux/const.h>
4966 /* Etrax 100LX have 32-byte cache-lines. */
4967-#define L1_CACHE_BYTES 32
4968 #define L1_CACHE_SHIFT 5
4969+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4970
4971 #endif /* _ASM_ARCH_CACHE_H */
4972diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4973index 7caf25d..ee65ac5 100644
4974--- a/arch/cris/include/arch-v32/arch/cache.h
4975+++ b/arch/cris/include/arch-v32/arch/cache.h
4976@@ -1,11 +1,12 @@
4977 #ifndef _ASM_CRIS_ARCH_CACHE_H
4978 #define _ASM_CRIS_ARCH_CACHE_H
4979
4980+#include <linux/const.h>
4981 #include <arch/hwregs/dma.h>
4982
4983 /* A cache-line is 32 bytes. */
4984-#define L1_CACHE_BYTES 32
4985 #define L1_CACHE_SHIFT 5
4986+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4987
4988 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4989
4990diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4991index 102190a..5334cea 100644
4992--- a/arch/frv/include/asm/atomic.h
4993+++ b/arch/frv/include/asm/atomic.h
4994@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
4995 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4996 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4997
4998+#define atomic64_read_unchecked(v) atomic64_read(v)
4999+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5000+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5001+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5002+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5003+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5004+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5005+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5006+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5007+
5008 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5009 {
5010 int c, old;
5011diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5012index 2797163..c2a401df9 100644
5013--- a/arch/frv/include/asm/cache.h
5014+++ b/arch/frv/include/asm/cache.h
5015@@ -12,10 +12,11 @@
5016 #ifndef __ASM_CACHE_H
5017 #define __ASM_CACHE_H
5018
5019+#include <linux/const.h>
5020
5021 /* bytes per L1 cache line */
5022 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5023-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5024+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5025
5026 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5027 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5028diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5029index 43901f2..0d8b865 100644
5030--- a/arch/frv/include/asm/kmap_types.h
5031+++ b/arch/frv/include/asm/kmap_types.h
5032@@ -2,6 +2,6 @@
5033 #ifndef _ASM_KMAP_TYPES_H
5034 #define _ASM_KMAP_TYPES_H
5035
5036-#define KM_TYPE_NR 17
5037+#define KM_TYPE_NR 18
5038
5039 #endif
5040diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5041index 836f147..4cf23f5 100644
5042--- a/arch/frv/mm/elf-fdpic.c
5043+++ b/arch/frv/mm/elf-fdpic.c
5044@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 {
5046 struct vm_area_struct *vma;
5047 struct vm_unmapped_area_info info;
5048+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5049
5050 if (len > TASK_SIZE)
5051 return -ENOMEM;
5052@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5053 if (addr) {
5054 addr = PAGE_ALIGN(addr);
5055 vma = find_vma(current->mm, addr);
5056- if (TASK_SIZE - len >= addr &&
5057- (!vma || addr + len <= vma->vm_start))
5058+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5059 goto success;
5060 }
5061
5062@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5063 info.high_limit = (current->mm->start_stack - 0x00200000);
5064 info.align_mask = 0;
5065 info.align_offset = 0;
5066+ info.threadstack_offset = offset;
5067 addr = vm_unmapped_area(&info);
5068 if (!(addr & ~PAGE_MASK))
5069 goto success;
5070diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5071index 69952c1..4fa2908 100644
5072--- a/arch/hexagon/include/asm/cache.h
5073+++ b/arch/hexagon/include/asm/cache.h
5074@@ -21,9 +21,11 @@
5075 #ifndef __ASM_CACHE_H
5076 #define __ASM_CACHE_H
5077
5078+#include <linux/const.h>
5079+
5080 /* Bytes per L1 cache line */
5081-#define L1_CACHE_SHIFT (5)
5082-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5083+#define L1_CACHE_SHIFT 5
5084+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5085
5086 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5087
5088diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5089index 074e52b..76afdac 100644
5090--- a/arch/ia64/Kconfig
5091+++ b/arch/ia64/Kconfig
5092@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5093 config KEXEC
5094 bool "kexec system call"
5095 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5096+ depends on !GRKERNSEC_KMEM
5097 help
5098 kexec is a system call that implements the ability to shutdown your
5099 current kernel, and to start another kernel. It is like a reboot
5100diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5101index 970d0bd..e750b9b 100644
5102--- a/arch/ia64/Makefile
5103+++ b/arch/ia64/Makefile
5104@@ -98,5 +98,6 @@ endef
5105 archprepare: make_nr_irqs_h FORCE
5106 PHONY += make_nr_irqs_h FORCE
5107
5108+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5109 make_nr_irqs_h: FORCE
5110 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5111diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5112index 0bf0350..2ad1957 100644
5113--- a/arch/ia64/include/asm/atomic.h
5114+++ b/arch/ia64/include/asm/atomic.h
5115@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5116 #define atomic64_inc(v) atomic64_add(1, (v))
5117 #define atomic64_dec(v) atomic64_sub(1, (v))
5118
5119+#define atomic64_read_unchecked(v) atomic64_read(v)
5120+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5121+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5122+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5123+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5124+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5125+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5126+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5127+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5128+
5129 #endif /* _ASM_IA64_ATOMIC_H */
5130diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5131index f6769eb..1cdb590 100644
5132--- a/arch/ia64/include/asm/barrier.h
5133+++ b/arch/ia64/include/asm/barrier.h
5134@@ -66,7 +66,7 @@
5135 do { \
5136 compiletime_assert_atomic_type(*p); \
5137 barrier(); \
5138- ACCESS_ONCE(*p) = (v); \
5139+ ACCESS_ONCE_RW(*p) = (v); \
5140 } while (0)
5141
5142 #define smp_load_acquire(p) \
5143diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5144index 988254a..e1ee885 100644
5145--- a/arch/ia64/include/asm/cache.h
5146+++ b/arch/ia64/include/asm/cache.h
5147@@ -1,6 +1,7 @@
5148 #ifndef _ASM_IA64_CACHE_H
5149 #define _ASM_IA64_CACHE_H
5150
5151+#include <linux/const.h>
5152
5153 /*
5154 * Copyright (C) 1998-2000 Hewlett-Packard Co
5155@@ -9,7 +10,7 @@
5156
5157 /* Bytes per L1 (data) cache line. */
5158 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5159-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5160+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5161
5162 #ifdef CONFIG_SMP
5163 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5164diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5165index 5a83c5c..4d7f553 100644
5166--- a/arch/ia64/include/asm/elf.h
5167+++ b/arch/ia64/include/asm/elf.h
5168@@ -42,6 +42,13 @@
5169 */
5170 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5171
5172+#ifdef CONFIG_PAX_ASLR
5173+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5174+
5175+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5176+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5177+#endif
5178+
5179 #define PT_IA_64_UNWIND 0x70000001
5180
5181 /* IA-64 relocations: */
5182diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5183index 5767cdf..7462574 100644
5184--- a/arch/ia64/include/asm/pgalloc.h
5185+++ b/arch/ia64/include/asm/pgalloc.h
5186@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5187 pgd_val(*pgd_entry) = __pa(pud);
5188 }
5189
5190+static inline void
5191+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5192+{
5193+ pgd_populate(mm, pgd_entry, pud);
5194+}
5195+
5196 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5197 {
5198 return quicklist_alloc(0, GFP_KERNEL, NULL);
5199@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5200 pud_val(*pud_entry) = __pa(pmd);
5201 }
5202
5203+static inline void
5204+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5205+{
5206+ pud_populate(mm, pud_entry, pmd);
5207+}
5208+
5209 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5210 {
5211 return quicklist_alloc(0, GFP_KERNEL, NULL);
5212diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5213index 7935115..c0eca6a 100644
5214--- a/arch/ia64/include/asm/pgtable.h
5215+++ b/arch/ia64/include/asm/pgtable.h
5216@@ -12,7 +12,7 @@
5217 * David Mosberger-Tang <davidm@hpl.hp.com>
5218 */
5219
5220-
5221+#include <linux/const.h>
5222 #include <asm/mman.h>
5223 #include <asm/page.h>
5224 #include <asm/processor.h>
5225@@ -142,6 +142,17 @@
5226 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5227 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5228 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5229+
5230+#ifdef CONFIG_PAX_PAGEEXEC
5231+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5232+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5233+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5234+#else
5235+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5236+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5237+# define PAGE_COPY_NOEXEC PAGE_COPY
5238+#endif
5239+
5240 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5241 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5242 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5243diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5244index 45698cd..e8e2dbc 100644
5245--- a/arch/ia64/include/asm/spinlock.h
5246+++ b/arch/ia64/include/asm/spinlock.h
5247@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5248 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5249
5250 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5251- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5252+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5253 }
5254
5255 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5256diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5257index 103bedc..0210597 100644
5258--- a/arch/ia64/include/asm/uaccess.h
5259+++ b/arch/ia64/include/asm/uaccess.h
5260@@ -70,6 +70,7 @@
5261 && ((segment).seg == KERNEL_DS.seg \
5262 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5263 })
5264+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5265 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5266
5267 /*
5268@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5269 static inline unsigned long
5270 __copy_to_user (void __user *to, const void *from, unsigned long count)
5271 {
5272+ if (count > INT_MAX)
5273+ return count;
5274+
5275+ if (!__builtin_constant_p(count))
5276+ check_object_size(from, count, true);
5277+
5278 return __copy_user(to, (__force void __user *) from, count);
5279 }
5280
5281 static inline unsigned long
5282 __copy_from_user (void *to, const void __user *from, unsigned long count)
5283 {
5284+ if (count > INT_MAX)
5285+ return count;
5286+
5287+ if (!__builtin_constant_p(count))
5288+ check_object_size(to, count, false);
5289+
5290 return __copy_user((__force void __user *) to, from, count);
5291 }
5292
5293@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5294 ({ \
5295 void __user *__cu_to = (to); \
5296 const void *__cu_from = (from); \
5297- long __cu_len = (n); \
5298+ unsigned long __cu_len = (n); \
5299 \
5300- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5301+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5302+ if (!__builtin_constant_p(n)) \
5303+ check_object_size(__cu_from, __cu_len, true); \
5304 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5305+ } \
5306 __cu_len; \
5307 })
5308
5309@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5310 ({ \
5311 void *__cu_to = (to); \
5312 const void __user *__cu_from = (from); \
5313- long __cu_len = (n); \
5314+ unsigned long __cu_len = (n); \
5315 \
5316 __chk_user_ptr(__cu_from); \
5317- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5318+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5319+ if (!__builtin_constant_p(n)) \
5320+ check_object_size(__cu_to, __cu_len, false); \
5321 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5322+ } \
5323 __cu_len; \
5324 })
5325
5326diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5327index 29754aa..06d2838 100644
5328--- a/arch/ia64/kernel/module.c
5329+++ b/arch/ia64/kernel/module.c
5330@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5331 }
5332
5333 static inline int
5334+in_init_rx (const struct module *mod, uint64_t addr)
5335+{
5336+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5337+}
5338+
5339+static inline int
5340+in_init_rw (const struct module *mod, uint64_t addr)
5341+{
5342+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5343+}
5344+
5345+static inline int
5346 in_init (const struct module *mod, uint64_t addr)
5347 {
5348- return addr - (uint64_t) mod->module_init < mod->init_size;
5349+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5350+}
5351+
5352+static inline int
5353+in_core_rx (const struct module *mod, uint64_t addr)
5354+{
5355+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5356+}
5357+
5358+static inline int
5359+in_core_rw (const struct module *mod, uint64_t addr)
5360+{
5361+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5362 }
5363
5364 static inline int
5365 in_core (const struct module *mod, uint64_t addr)
5366 {
5367- return addr - (uint64_t) mod->module_core < mod->core_size;
5368+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5369 }
5370
5371 static inline int
5372@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5373 break;
5374
5375 case RV_BDREL:
5376- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5377+ if (in_init_rx(mod, val))
5378+ val -= (uint64_t) mod->module_init_rx;
5379+ else if (in_init_rw(mod, val))
5380+ val -= (uint64_t) mod->module_init_rw;
5381+ else if (in_core_rx(mod, val))
5382+ val -= (uint64_t) mod->module_core_rx;
5383+ else if (in_core_rw(mod, val))
5384+ val -= (uint64_t) mod->module_core_rw;
5385 break;
5386
5387 case RV_LTV:
5388@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5389 * addresses have been selected...
5390 */
5391 uint64_t gp;
5392- if (mod->core_size > MAX_LTOFF)
5393+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5394 /*
5395 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5396 * at the end of the module.
5397 */
5398- gp = mod->core_size - MAX_LTOFF / 2;
5399+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5400 else
5401- gp = mod->core_size / 2;
5402- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5403+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5404+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5405 mod->arch.gp = gp;
5406 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5407 }
5408diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5409index c39c3cd..3c77738 100644
5410--- a/arch/ia64/kernel/palinfo.c
5411+++ b/arch/ia64/kernel/palinfo.c
5412@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5413 return NOTIFY_OK;
5414 }
5415
5416-static struct notifier_block __refdata palinfo_cpu_notifier =
5417+static struct notifier_block palinfo_cpu_notifier =
5418 {
5419 .notifier_call = palinfo_cpu_callback,
5420 .priority = 0,
5421diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5422index 41e33f8..65180b2a 100644
5423--- a/arch/ia64/kernel/sys_ia64.c
5424+++ b/arch/ia64/kernel/sys_ia64.c
5425@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5426 unsigned long align_mask = 0;
5427 struct mm_struct *mm = current->mm;
5428 struct vm_unmapped_area_info info;
5429+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5430
5431 if (len > RGN_MAP_LIMIT)
5432 return -ENOMEM;
5433@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5434 if (REGION_NUMBER(addr) == RGN_HPAGE)
5435 addr = 0;
5436 #endif
5437+
5438+#ifdef CONFIG_PAX_RANDMMAP
5439+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5440+ addr = mm->free_area_cache;
5441+ else
5442+#endif
5443+
5444 if (!addr)
5445 addr = TASK_UNMAPPED_BASE;
5446
5447@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5448 info.high_limit = TASK_SIZE;
5449 info.align_mask = align_mask;
5450 info.align_offset = 0;
5451+ info.threadstack_offset = offset;
5452 return vm_unmapped_area(&info);
5453 }
5454
5455diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5456index 84f8a52..7c76178 100644
5457--- a/arch/ia64/kernel/vmlinux.lds.S
5458+++ b/arch/ia64/kernel/vmlinux.lds.S
5459@@ -192,7 +192,7 @@ SECTIONS {
5460 /* Per-cpu data: */
5461 . = ALIGN(PERCPU_PAGE_SIZE);
5462 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5463- __phys_per_cpu_start = __per_cpu_load;
5464+ __phys_per_cpu_start = per_cpu_load;
5465 /*
5466 * ensure percpu data fits
5467 * into percpu page size
5468diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5469index ba5ba7a..36e9d3a 100644
5470--- a/arch/ia64/mm/fault.c
5471+++ b/arch/ia64/mm/fault.c
5472@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5473 return pte_present(pte);
5474 }
5475
5476+#ifdef CONFIG_PAX_PAGEEXEC
5477+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5478+{
5479+ unsigned long i;
5480+
5481+ printk(KERN_ERR "PAX: bytes at PC: ");
5482+ for (i = 0; i < 8; i++) {
5483+ unsigned int c;
5484+ if (get_user(c, (unsigned int *)pc+i))
5485+ printk(KERN_CONT "???????? ");
5486+ else
5487+ printk(KERN_CONT "%08x ", c);
5488+ }
5489+ printk("\n");
5490+}
5491+#endif
5492+
5493 # define VM_READ_BIT 0
5494 # define VM_WRITE_BIT 1
5495 # define VM_EXEC_BIT 2
5496@@ -151,8 +168,21 @@ retry:
5497 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5498 goto bad_area;
5499
5500- if ((vma->vm_flags & mask) != mask)
5501+ if ((vma->vm_flags & mask) != mask) {
5502+
5503+#ifdef CONFIG_PAX_PAGEEXEC
5504+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5505+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5506+ goto bad_area;
5507+
5508+ up_read(&mm->mmap_sem);
5509+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5510+ do_group_exit(SIGKILL);
5511+ }
5512+#endif
5513+
5514 goto bad_area;
5515+ }
5516
5517 /*
5518 * If for any reason at all we couldn't handle the fault, make
5519diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5520index 76069c1..c2aa816 100644
5521--- a/arch/ia64/mm/hugetlbpage.c
5522+++ b/arch/ia64/mm/hugetlbpage.c
5523@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5524 unsigned long pgoff, unsigned long flags)
5525 {
5526 struct vm_unmapped_area_info info;
5527+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5528
5529 if (len > RGN_MAP_LIMIT)
5530 return -ENOMEM;
5531@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5532 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5533 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5534 info.align_offset = 0;
5535+ info.threadstack_offset = offset;
5536 return vm_unmapped_area(&info);
5537 }
5538
5539diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5540index 6b33457..88b5124 100644
5541--- a/arch/ia64/mm/init.c
5542+++ b/arch/ia64/mm/init.c
5543@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5544 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5545 vma->vm_end = vma->vm_start + PAGE_SIZE;
5546 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5547+
5548+#ifdef CONFIG_PAX_PAGEEXEC
5549+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5550+ vma->vm_flags &= ~VM_EXEC;
5551+
5552+#ifdef CONFIG_PAX_MPROTECT
5553+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5554+ vma->vm_flags &= ~VM_MAYEXEC;
5555+#endif
5556+
5557+ }
5558+#endif
5559+
5560 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5561 down_write(&current->mm->mmap_sem);
5562 if (insert_vm_struct(current->mm, vma)) {
5563@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5564 gate_vma.vm_start = FIXADDR_USER_START;
5565 gate_vma.vm_end = FIXADDR_USER_END;
5566 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5567- gate_vma.vm_page_prot = __P101;
5568+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5569
5570 return 0;
5571 }
5572diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5573index 40b3ee98..8c2c112 100644
5574--- a/arch/m32r/include/asm/cache.h
5575+++ b/arch/m32r/include/asm/cache.h
5576@@ -1,8 +1,10 @@
5577 #ifndef _ASM_M32R_CACHE_H
5578 #define _ASM_M32R_CACHE_H
5579
5580+#include <linux/const.h>
5581+
5582 /* L1 cache line size */
5583 #define L1_CACHE_SHIFT 4
5584-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5585+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5586
5587 #endif /* _ASM_M32R_CACHE_H */
5588diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5589index 82abd15..d95ae5d 100644
5590--- a/arch/m32r/lib/usercopy.c
5591+++ b/arch/m32r/lib/usercopy.c
5592@@ -14,6 +14,9 @@
5593 unsigned long
5594 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5595 {
5596+ if ((long)n < 0)
5597+ return n;
5598+
5599 prefetch(from);
5600 if (access_ok(VERIFY_WRITE, to, n))
5601 __copy_user(to,from,n);
5602@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5603 unsigned long
5604 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5605 {
5606+ if ((long)n < 0)
5607+ return n;
5608+
5609 prefetchw(to);
5610 if (access_ok(VERIFY_READ, from, n))
5611 __copy_user_zeroing(to,from,n);
5612diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5613index 0395c51..5f26031 100644
5614--- a/arch/m68k/include/asm/cache.h
5615+++ b/arch/m68k/include/asm/cache.h
5616@@ -4,9 +4,11 @@
5617 #ifndef __ARCH_M68K_CACHE_H
5618 #define __ARCH_M68K_CACHE_H
5619
5620+#include <linux/const.h>
5621+
5622 /* bytes per L1 cache line */
5623 #define L1_CACHE_SHIFT 4
5624-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5625+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5626
5627 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5628
5629diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5630index d703d8e..a8e2d70 100644
5631--- a/arch/metag/include/asm/barrier.h
5632+++ b/arch/metag/include/asm/barrier.h
5633@@ -90,7 +90,7 @@ static inline void fence(void)
5634 do { \
5635 compiletime_assert_atomic_type(*p); \
5636 smp_mb(); \
5637- ACCESS_ONCE(*p) = (v); \
5638+ ACCESS_ONCE_RW(*p) = (v); \
5639 } while (0)
5640
5641 #define smp_load_acquire(p) \
5642diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5643index 3c32075..ae0ae75 100644
5644--- a/arch/metag/mm/hugetlbpage.c
5645+++ b/arch/metag/mm/hugetlbpage.c
5646@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5647 info.high_limit = TASK_SIZE;
5648 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5649 info.align_offset = 0;
5650+ info.threadstack_offset = 0;
5651 return vm_unmapped_area(&info);
5652 }
5653
5654diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5655index 4efe96a..60e8699 100644
5656--- a/arch/microblaze/include/asm/cache.h
5657+++ b/arch/microblaze/include/asm/cache.h
5658@@ -13,11 +13,12 @@
5659 #ifndef _ASM_MICROBLAZE_CACHE_H
5660 #define _ASM_MICROBLAZE_CACHE_H
5661
5662+#include <linux/const.h>
5663 #include <asm/registers.h>
5664
5665 #define L1_CACHE_SHIFT 5
5666 /* word-granular cache in microblaze */
5667-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5668+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5669
5670 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5671
5672diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5673index 843713c..b6a87b9 100644
5674--- a/arch/mips/Kconfig
5675+++ b/arch/mips/Kconfig
5676@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
5677
5678 config KEXEC
5679 bool "Kexec system call"
5680+ depends on !GRKERNSEC_KMEM
5681 help
5682 kexec is a system call that implements the ability to shutdown your
5683 current kernel, and to start another kernel. It is like a reboot
5684diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5685index 3778655..1dff0a9 100644
5686--- a/arch/mips/cavium-octeon/dma-octeon.c
5687+++ b/arch/mips/cavium-octeon/dma-octeon.c
5688@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5689 if (dma_release_from_coherent(dev, order, vaddr))
5690 return;
5691
5692- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5693+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5694 }
5695
5696 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5697diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5698index 857da84..3f4458b 100644
5699--- a/arch/mips/include/asm/atomic.h
5700+++ b/arch/mips/include/asm/atomic.h
5701@@ -22,15 +22,39 @@
5702 #include <asm/cmpxchg.h>
5703 #include <asm/war.h>
5704
5705+#ifdef CONFIG_GENERIC_ATOMIC64
5706+#include <asm-generic/atomic64.h>
5707+#endif
5708+
5709 #define ATOMIC_INIT(i) { (i) }
5710
5711+#ifdef CONFIG_64BIT
5712+#define _ASM_EXTABLE(from, to) \
5713+" .section __ex_table,\"a\"\n" \
5714+" .dword " #from ", " #to"\n" \
5715+" .previous\n"
5716+#else
5717+#define _ASM_EXTABLE(from, to) \
5718+" .section __ex_table,\"a\"\n" \
5719+" .word " #from ", " #to"\n" \
5720+" .previous\n"
5721+#endif
5722+
5723 /*
5724 * atomic_read - read atomic variable
5725 * @v: pointer of type atomic_t
5726 *
5727 * Atomically reads the value of @v.
5728 */
5729-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5730+static inline int atomic_read(const atomic_t *v)
5731+{
5732+ return ACCESS_ONCE(v->counter);
5733+}
5734+
5735+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5736+{
5737+ return ACCESS_ONCE(v->counter);
5738+}
5739
5740 /*
5741 * atomic_set - set atomic variable
5742@@ -39,47 +63,77 @@
5743 *
5744 * Atomically sets the value of @v to @i.
5745 */
5746-#define atomic_set(v, i) ((v)->counter = (i))
5747+static inline void atomic_set(atomic_t *v, int i)
5748+{
5749+ v->counter = i;
5750+}
5751
5752-#define ATOMIC_OP(op, c_op, asm_op) \
5753-static __inline__ void atomic_##op(int i, atomic_t * v) \
5754+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5755+{
5756+ v->counter = i;
5757+}
5758+
5759+#ifdef CONFIG_PAX_REFCOUNT
5760+#define __OVERFLOW_POST \
5761+ " b 4f \n" \
5762+ " .set noreorder \n" \
5763+ "3: b 5f \n" \
5764+ " move %0, %1 \n" \
5765+ " .set reorder \n"
5766+#define __OVERFLOW_EXTABLE \
5767+ "3:\n" \
5768+ _ASM_EXTABLE(2b, 3b)
5769+#else
5770+#define __OVERFLOW_POST
5771+#define __OVERFLOW_EXTABLE
5772+#endif
5773+
5774+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5775+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5776 { \
5777 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5778 int temp; \
5779 \
5780 __asm__ __volatile__( \
5781- " .set arch=r4000 \n" \
5782- "1: ll %0, %1 # atomic_" #op " \n" \
5783- " " #asm_op " %0, %2 \n" \
5784+ " .set mips3 \n" \
5785+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5786+ "2: " #asm_op " %0, %2 \n" \
5787 " sc %0, %1 \n" \
5788 " beqzl %0, 1b \n" \
5789+ extable \
5790 " .set mips0 \n" \
5791 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5792 : "Ir" (i)); \
5793 } else if (kernel_uses_llsc) { \
5794 int temp; \
5795 \
5796- do { \
5797- __asm__ __volatile__( \
5798- " .set arch=r4000 \n" \
5799- " ll %0, %1 # atomic_" #op "\n" \
5800- " " #asm_op " %0, %2 \n" \
5801- " sc %0, %1 \n" \
5802- " .set mips0 \n" \
5803- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5804- : "Ir" (i)); \
5805- } while (unlikely(!temp)); \
5806+ __asm__ __volatile__( \
5807+ " .set mips3 \n" \
5808+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5809+ "2: " #asm_op " %0, %2 \n" \
5810+ " sc %0, %1 \n" \
5811+ " beqz %0, 1b \n" \
5812+ extable \
5813+ " .set mips0 \n" \
5814+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5815+ : "Ir" (i)); \
5816 } else { \
5817 unsigned long flags; \
5818 \
5819 raw_local_irq_save(flags); \
5820- v->counter c_op i; \
5821+ __asm__ __volatile__( \
5822+ "2: " #asm_op " %0, %1 \n" \
5823+ extable \
5824+ : "+r" (v->counter) : "Ir" (i)); \
5825 raw_local_irq_restore(flags); \
5826 } \
5827 }
5828
5829-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5830-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5831+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , asm_op##u) \
5832+ __ATOMIC_OP(op, _unchecked, asm_op)
5833+
5834+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5835+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5836 { \
5837 int result; \
5838 \
5839@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5840 int temp; \
5841 \
5842 __asm__ __volatile__( \
5843- " .set arch=r4000 \n" \
5844- "1: ll %1, %2 # atomic_" #op "_return \n" \
5845- " " #asm_op " %0, %1, %3 \n" \
5846+ " .set mips3 \n" \
5847+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5848+ "2: " #asm_op " %0, %1, %3 \n" \
5849 " sc %0, %2 \n" \
5850 " beqzl %0, 1b \n" \
5851- " " #asm_op " %0, %1, %3 \n" \
5852+ post_op \
5853+ extable \
5854+ "4: " #asm_op " %0, %1, %3 \n" \
5855+ "5: \n" \
5856 " .set mips0 \n" \
5857 : "=&r" (result), "=&r" (temp), \
5858 "+" GCC_OFF12_ASM() (v->counter) \
5859@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5860 } else if (kernel_uses_llsc) { \
5861 int temp; \
5862 \
5863- do { \
5864- __asm__ __volatile__( \
5865- " .set arch=r4000 \n" \
5866- " ll %1, %2 # atomic_" #op "_return \n" \
5867- " " #asm_op " %0, %1, %3 \n" \
5868- " sc %0, %2 \n" \
5869- " .set mips0 \n" \
5870- : "=&r" (result), "=&r" (temp), \
5871- "+" GCC_OFF12_ASM() (v->counter) \
5872- : "Ir" (i)); \
5873- } while (unlikely(!result)); \
5874+ __asm__ __volatile__( \
5875+ " .set mips3 \n" \
5876+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5877+ "2: " #asm_op " %0, %1, %3 \n" \
5878+ " sc %0, %2 \n" \
5879+ post_op \
5880+ extable \
5881+ "4: " #asm_op " %0, %1, %3 \n" \
5882+ "5: \n" \
5883+ " .set mips0 \n" \
5884+ : "=&r" (result), "=&r" (temp), \
5885+ "+" GCC_OFF12_ASM() (v->counter) \
5886+ : "Ir" (i)); \
5887 \
5888 result = temp; result c_op i; \
5889 } else { \
5890 unsigned long flags; \
5891 \
5892 raw_local_irq_save(flags); \
5893- result = v->counter; \
5894- result c_op i; \
5895- v->counter = result; \
5896+ __asm__ __volatile__( \
5897+ " lw %0, %1 \n" \
5898+ "2: " #asm_op " %0, %1, %2 \n" \
5899+ " sw %0, %1 \n" \
5900+ "3: \n" \
5901+ extable \
5902+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
5903+ : "Ir" (i)); \
5904 raw_local_irq_restore(flags); \
5905 } \
5906 \
5907@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5908 return result; \
5909 }
5910
5911-#define ATOMIC_OPS(op, c_op, asm_op) \
5912- ATOMIC_OP(op, c_op, asm_op) \
5913- ATOMIC_OP_RETURN(op, c_op, asm_op)
5914+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , asm_op##u, , __OVERFLOW_EXTABLE) \
5915+ __ATOMIC_OP_RETURN(op, _unchecked, asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5916
5917-ATOMIC_OPS(add, +=, addu)
5918-ATOMIC_OPS(sub, -=, subu)
5919+#define ATOMIC_OPS(op, asm_op) \
5920+ ATOMIC_OP(op, asm_op) \
5921+ ATOMIC_OP_RETURN(op, asm_op)
5922+
5923+ATOMIC_OPS(add, add)
5924+ATOMIC_OPS(sub, sub)
5925
5926 #undef ATOMIC_OPS
5927 #undef ATOMIC_OP_RETURN
5928+#undef __ATOMIC_OP_RETURN
5929 #undef ATOMIC_OP
5930+#undef __ATOMIC_OP
5931
5932 /*
5933 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5934@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5935 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5936 * The function returns the old value of @v minus @i.
5937 */
5938-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5939+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5940 {
5941 int result;
5942
5943@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5944 return result;
5945 }
5946
5947-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5948-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5949+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5950+{
5951+ return cmpxchg(&v->counter, old, new);
5952+}
5953+
5954+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5955+ int new)
5956+{
5957+ return cmpxchg(&(v->counter), old, new);
5958+}
5959+
5960+static inline int atomic_xchg(atomic_t *v, int new)
5961+{
5962+ return xchg(&v->counter, new);
5963+}
5964+
5965+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5966+{
5967+ return xchg(&(v->counter), new);
5968+}
5969
5970 /**
5971 * __atomic_add_unless - add unless the number is a given value
5972@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5973
5974 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5975 #define atomic_inc_return(v) atomic_add_return(1, (v))
5976+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5977+{
5978+ return atomic_add_return_unchecked(1, v);
5979+}
5980
5981 /*
5982 * atomic_sub_and_test - subtract value from variable and test result
5983@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5984 * other cases.
5985 */
5986 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5987+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5988+{
5989+ return atomic_add_return_unchecked(1, v) == 0;
5990+}
5991
5992 /*
5993 * atomic_dec_and_test - decrement by 1 and test
5994@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5995 * Atomically increments @v by 1.
5996 */
5997 #define atomic_inc(v) atomic_add(1, (v))
5998+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5999+{
6000+ atomic_add_unchecked(1, v);
6001+}
6002
6003 /*
6004 * atomic_dec - decrement and test
6005@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6006 * Atomically decrements @v by 1.
6007 */
6008 #define atomic_dec(v) atomic_sub(1, (v))
6009+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6010+{
6011+ atomic_sub_unchecked(1, v);
6012+}
6013
6014 /*
6015 * atomic_add_negative - add and test if negative
6016@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6017 * @v: pointer of type atomic64_t
6018 *
6019 */
6020-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6021+static inline long atomic64_read(const atomic64_t *v)
6022+{
6023+ return ACCESS_ONCE(v->counter);
6024+}
6025+
6026+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6027+{
6028+ return ACCESS_ONCE(v->counter);
6029+}
6030
6031 /*
6032 * atomic64_set - set atomic variable
6033 * @v: pointer of type atomic64_t
6034 * @i: required value
6035 */
6036-#define atomic64_set(v, i) ((v)->counter = (i))
6037+static inline void atomic64_set(atomic64_t *v, long i)
6038+{
6039+ v->counter = i;
6040+}
6041
6042-#define ATOMIC64_OP(op, c_op, asm_op) \
6043-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6044+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6045+{
6046+ v->counter = i;
6047+}
6048+
6049+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6050+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6051 { \
6052 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6053 long temp; \
6054 \
6055 __asm__ __volatile__( \
6056- " .set arch=r4000 \n" \
6057- "1: lld %0, %1 # atomic64_" #op " \n" \
6058- " " #asm_op " %0, %2 \n" \
6059+ " .set mips3 \n" \
6060+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6061+ "2: " #asm_op " %0, %2 \n" \
6062 " scd %0, %1 \n" \
6063 " beqzl %0, 1b \n" \
6064+ extable \
6065 " .set mips0 \n" \
6066 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6067 : "Ir" (i)); \
6068 } else if (kernel_uses_llsc) { \
6069 long temp; \
6070 \
6071- do { \
6072- __asm__ __volatile__( \
6073- " .set arch=r4000 \n" \
6074- " lld %0, %1 # atomic64_" #op "\n" \
6075- " " #asm_op " %0, %2 \n" \
6076- " scd %0, %1 \n" \
6077- " .set mips0 \n" \
6078- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6079- : "Ir" (i)); \
6080- } while (unlikely(!temp)); \
6081+ __asm__ __volatile__( \
6082+ " .set mips3 \n" \
6083+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6084+ "2: " #asm_op " %0, %2 \n" \
6085+ " scd %0, %1 \n" \
6086+ " beqz %0, 1b \n" \
6087+ extable \
6088+ " .set mips0 \n" \
6089+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6090+ : "Ir" (i)); \
6091 } else { \
6092 unsigned long flags; \
6093 \
6094 raw_local_irq_save(flags); \
6095- v->counter c_op i; \
6096+ __asm__ __volatile__( \
6097+ "2: " #asm_op " %0, %1 \n" \
6098+ extable \
6099+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
6100 raw_local_irq_restore(flags); \
6101 } \
6102 }
6103
6104-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6105-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6106+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , asm_op##u) \
6107+ __ATOMIC64_OP(op, _unchecked, asm_op)
6108+
6109+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6110+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6111 { \
6112 long result; \
6113 \
6114@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6115 long temp; \
6116 \
6117 __asm__ __volatile__( \
6118- " .set arch=r4000 \n" \
6119+ " .set mips3 \n" \
6120 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6121- " " #asm_op " %0, %1, %3 \n" \
6122+ "2: " #asm_op " %0, %1, %3 \n" \
6123 " scd %0, %2 \n" \
6124 " beqzl %0, 1b \n" \
6125- " " #asm_op " %0, %1, %3 \n" \
6126+ post_op \
6127+ extable \
6128+ "4: " #asm_op " %0, %1, %3 \n" \
6129+ "5: \n" \
6130 " .set mips0 \n" \
6131 : "=&r" (result), "=&r" (temp), \
6132 "+" GCC_OFF12_ASM() (v->counter) \
6133@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6134 } else if (kernel_uses_llsc) { \
6135 long temp; \
6136 \
6137- do { \
6138- __asm__ __volatile__( \
6139- " .set arch=r4000 \n" \
6140- " lld %1, %2 # atomic64_" #op "_return\n" \
6141- " " #asm_op " %0, %1, %3 \n" \
6142- " scd %0, %2 \n" \
6143- " .set mips0 \n" \
6144- : "=&r" (result), "=&r" (temp), \
6145- "=" GCC_OFF12_ASM() (v->counter) \
6146- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6147- : "memory"); \
6148- } while (unlikely(!result)); \
6149+ __asm__ __volatile__( \
6150+ " .set mips3 \n" \
6151+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6152+ "2: " #asm_op " %0, %1, %3 \n" \
6153+ " scd %0, %2 \n" \
6154+ " beqz %0, 1b \n" \
6155+ post_op \
6156+ extable \
6157+ "4: " #asm_op " %0, %1, %3 \n" \
6158+ "5: \n" \
6159+ " .set mips0 \n" \
6160+ : "=&r" (result), "=&r" (temp), \
6161+ "=" GCC_OFF12_ASM() (v->counter) \
6162+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6163+ : "memory"); \
6164 \
6165 result = temp; result c_op i; \
6166 } else { \
6167 unsigned long flags; \
6168 \
6169 raw_local_irq_save(flags); \
6170- result = v->counter; \
6171- result c_op i; \
6172- v->counter = result; \
6173+ __asm__ __volatile__( \
6174+ " ld %0, %1 \n" \
6175+ "2: " #asm_op " %0, %1, %2 \n" \
6176+ " sd %0, %1 \n" \
6177+ "3: \n" \
6178+ extable \
6179+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
6180+ : "Ir" (i)); \
6181 raw_local_irq_restore(flags); \
6182 } \
6183 \
6184@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6185 return result; \
6186 }
6187
6188-#define ATOMIC64_OPS(op, c_op, asm_op) \
6189- ATOMIC64_OP(op, c_op, asm_op) \
6190- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6191+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , asm_op##u, , __OVERFLOW_EXTABLE) \
6192+ __ATOMIC64_OP_RETURN(op, _unchecked, asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6193
6194-ATOMIC64_OPS(add, +=, daddu)
6195-ATOMIC64_OPS(sub, -=, dsubu)
6196+#define ATOMIC64_OPS(op, asm_op) \
6197+ ATOMIC64_OP(op, asm_op) \
6198+ ATOMIC64_OP_RETURN(op, asm_op)
6199+
6200+ATOMIC64_OPS(add, dadd)
6201+ATOMIC64_OPS(sub, dsub)
6202
6203 #undef ATOMIC64_OPS
6204 #undef ATOMIC64_OP_RETURN
6205+#undef __ATOMIC64_OP_RETURN
6206 #undef ATOMIC64_OP
6207+#undef __ATOMIC64_OP
6208+#undef __OVERFLOW_EXTABLE
6209+#undef __OVERFLOW_POST
6210
6211 /*
6212 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6213@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6214 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6215 * The function returns the old value of @v minus @i.
6216 */
6217-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6218+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6219 {
6220 long result;
6221
6222@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6223 return result;
6224 }
6225
6226-#define atomic64_cmpxchg(v, o, n) \
6227- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6228-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6229+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6230+{
6231+ return cmpxchg(&v->counter, old, new);
6232+}
6233+
6234+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6235+ long new)
6236+{
6237+ return cmpxchg(&(v->counter), old, new);
6238+}
6239+
6240+static inline long atomic64_xchg(atomic64_t *v, long new)
6241+{
6242+ return xchg(&v->counter, new);
6243+}
6244+
6245+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6246+{
6247+ return xchg(&(v->counter), new);
6248+}
6249
6250 /**
6251 * atomic64_add_unless - add unless the number is a given value
6252@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6253
6254 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6255 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6256+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6257
6258 /*
6259 * atomic64_sub_and_test - subtract value from variable and test result
6260@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6261 * other cases.
6262 */
6263 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 6264+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6265
6266 /*
6267 * atomic64_dec_and_test - decrement by 1 and test
6268@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6269 * Atomically increments @v by 1.
6270 */
6271 #define atomic64_inc(v) atomic64_add(1, (v))
6272+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6273
6274 /*
6275 * atomic64_dec - decrement and test
6276@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6277 * Atomically decrements @v by 1.
6278 */
6279 #define atomic64_dec(v) atomic64_sub(1, (v))
6280+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6281
6282 /*
6283 * atomic64_add_negative - add and test if negative
6284diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6285index 2b8bbbc..4556df6 100644
6286--- a/arch/mips/include/asm/barrier.h
6287+++ b/arch/mips/include/asm/barrier.h
6288@@ -133,7 +133,7 @@
6289 do { \
6290 compiletime_assert_atomic_type(*p); \
6291 smp_mb(); \
6292- ACCESS_ONCE(*p) = (v); \
6293+ ACCESS_ONCE_RW(*p) = (v); \
6294 } while (0)
6295
6296 #define smp_load_acquire(p) \
6297diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6298index b4db69f..8f3b093 100644
6299--- a/arch/mips/include/asm/cache.h
6300+++ b/arch/mips/include/asm/cache.h
6301@@ -9,10 +9,11 @@
6302 #ifndef _ASM_CACHE_H
6303 #define _ASM_CACHE_H
6304
6305+#include <linux/const.h>
6306 #include <kmalloc.h>
6307
6308 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6309-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6310+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6311
6312 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6313 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6314diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6315index eb4d95d..f2f7f93 100644
6316--- a/arch/mips/include/asm/elf.h
6317+++ b/arch/mips/include/asm/elf.h
6318@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6319 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6320 #endif
6321
6322+#ifdef CONFIG_PAX_ASLR
6323+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6324+
6325+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6326+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6327+#endif
6328+
6329 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6330 struct linux_binprm;
6331 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6332 int uses_interp);
6333
6334-struct mm_struct;
6335-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6336-#define arch_randomize_brk arch_randomize_brk
6337-
6338 struct arch_elf_state {
6339 int fp_abi;
6340 int interp_fp_abi;
6341diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6342index c1f6afa..38cc6e9 100644
6343--- a/arch/mips/include/asm/exec.h
6344+++ b/arch/mips/include/asm/exec.h
6345@@ -12,6 +12,6 @@
6346 #ifndef _ASM_EXEC_H
6347 #define _ASM_EXEC_H
6348
6349-extern unsigned long arch_align_stack(unsigned long sp);
6350+#define arch_align_stack(x) ((x) & ~0xfUL)
6351
6352 #endif /* _ASM_EXEC_H */
6353diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6354index 9e8ef59..1139d6b 100644
6355--- a/arch/mips/include/asm/hw_irq.h
6356+++ b/arch/mips/include/asm/hw_irq.h
6357@@ -10,7 +10,7 @@
6358
6359 #include <linux/atomic.h>
6360
6361-extern atomic_t irq_err_count;
6362+extern atomic_unchecked_t irq_err_count;
6363
6364 /*
6365 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6366diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6367index 46dfc3c..a16b13a 100644
6368--- a/arch/mips/include/asm/local.h
6369+++ b/arch/mips/include/asm/local.h
6370@@ -12,15 +12,25 @@ typedef struct
6371 atomic_long_t a;
6372 } local_t;
6373
6374+typedef struct {
6375+ atomic_long_unchecked_t a;
6376+} local_unchecked_t;
6377+
6378 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6379
6380 #define local_read(l) atomic_long_read(&(l)->a)
6381+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6382 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6383+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6384
6385 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6386+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6387 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6388+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6389 #define local_inc(l) atomic_long_inc(&(l)->a)
6390+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6391 #define local_dec(l) atomic_long_dec(&(l)->a)
6392+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6393
6394 /*
6395 * Same as above, but return the result value
6396@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6397 return result;
6398 }
6399
6400+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6401+{
6402+ unsigned long result;
6403+
6404+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6405+ unsigned long temp;
6406+
6407+ __asm__ __volatile__(
6408+ " .set mips3 \n"
6409+ "1:" __LL "%1, %2 # local_add_return \n"
6410+ " addu %0, %1, %3 \n"
6411+ __SC "%0, %2 \n"
6412+ " beqzl %0, 1b \n"
6413+ " addu %0, %1, %3 \n"
6414+ " .set mips0 \n"
6415+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6416+ : "Ir" (i), "m" (l->a.counter)
6417+ : "memory");
6418+ } else if (kernel_uses_llsc) {
6419+ unsigned long temp;
6420+
6421+ __asm__ __volatile__(
6422+ " .set mips3 \n"
6423+ "1:" __LL "%1, %2 # local_add_return \n"
6424+ " addu %0, %1, %3 \n"
6425+ __SC "%0, %2 \n"
6426+ " beqz %0, 1b \n"
6427+ " addu %0, %1, %3 \n"
6428+ " .set mips0 \n"
6429+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6430+ : "Ir" (i), "m" (l->a.counter)
6431+ : "memory");
6432+ } else {
6433+ unsigned long flags;
6434+
6435+ local_irq_save(flags);
6436+ result = l->a.counter;
6437+ result += i;
6438+ l->a.counter = result;
6439+ local_irq_restore(flags);
6440+ }
6441+
6442+ return result;
6443+}
6444+
6445 static __inline__ long local_sub_return(long i, local_t * l)
6446 {
6447 unsigned long result;
6448@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6449
6450 #define local_cmpxchg(l, o, n) \
6451 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6452+#define local_cmpxchg_unchecked(l, o, n) \
6453+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6454 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6455
6456 /**
6457diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6458index 154b70a..426ae3d 100644
6459--- a/arch/mips/include/asm/page.h
6460+++ b/arch/mips/include/asm/page.h
6461@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6462 #ifdef CONFIG_CPU_MIPS32
6463 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6464 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6465- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6466+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6467 #else
6468 typedef struct { unsigned long long pte; } pte_t;
6469 #define pte_val(x) ((x).pte)
6470diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6471index b336037..5b874cc 100644
6472--- a/arch/mips/include/asm/pgalloc.h
6473+++ b/arch/mips/include/asm/pgalloc.h
6474@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6475 {
6476 set_pud(pud, __pud((unsigned long)pmd));
6477 }
6478+
6479+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6480+{
6481+ pud_populate(mm, pud, pmd);
6482+}
6483 #endif
6484
6485 /*
6486diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6487index 845016d..3303268 100644
6488--- a/arch/mips/include/asm/pgtable.h
6489+++ b/arch/mips/include/asm/pgtable.h
6490@@ -20,6 +20,9 @@
6491 #include <asm/io.h>
6492 #include <asm/pgtable-bits.h>
6493
6494+#define ktla_ktva(addr) (addr)
6495+#define ktva_ktla(addr) (addr)
6496+
6497 struct mm_struct;
6498 struct vm_area_struct;
6499
6500diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6501index e4440f9..8fb0005 100644
6502--- a/arch/mips/include/asm/thread_info.h
6503+++ b/arch/mips/include/asm/thread_info.h
6504@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6505 #define TIF_SECCOMP 4 /* secure computing */
6506 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6507 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6508+/* li takes a 32bit immediate */
6509+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6510+
6511 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6512 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6513 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6514@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
6515 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6516 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6517 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6518+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6519
6520 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6521 _TIF_SYSCALL_AUDIT | \
6522- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6523+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6524+ _TIF_GRSEC_SETXID)
6525
6526 /* work to do in syscall_trace_leave() */
6527 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6528- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6529+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6530
6531 /* work to do on interrupt/exception return */
6532 #define _TIF_WORK_MASK \
6533@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
6534 /* work to do on any return to u-space */
6535 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6536 _TIF_WORK_SYSCALL_EXIT | \
6537- _TIF_SYSCALL_TRACEPOINT)
6538+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6539
6540 /*
6541 * We stash processor id into a COP0 register to retrieve it fast
6542diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6543index bf8b324..cec5705 100644
6544--- a/arch/mips/include/asm/uaccess.h
6545+++ b/arch/mips/include/asm/uaccess.h
6546@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6547 __ok == 0; \
6548 })
6549
6550+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6551 #define access_ok(type, addr, size) \
6552 likely(__access_ok((addr), (size), __access_mask))
6553
6554diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6555index 1188e00..41cf144 100644
6556--- a/arch/mips/kernel/binfmt_elfn32.c
6557+++ b/arch/mips/kernel/binfmt_elfn32.c
6558@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6559 #undef ELF_ET_DYN_BASE
6560 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6561
6562+#ifdef CONFIG_PAX_ASLR
6563+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6564+
6565+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6566+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6567+#endif
6568+
6569 #include <asm/processor.h>
6570 #include <linux/module.h>
6571 #include <linux/elfcore.h>
6572diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6573index 9287678..f870e47 100644
6574--- a/arch/mips/kernel/binfmt_elfo32.c
6575+++ b/arch/mips/kernel/binfmt_elfo32.c
6576@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6577 #undef ELF_ET_DYN_BASE
6578 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6579
6580+#ifdef CONFIG_PAX_ASLR
6581+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6582+
6583+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6584+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6585+#endif
6586+
6587 #include <asm/processor.h>
6588
6589 #include <linux/module.h>
6590diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6591index a74ec3a..4f06f18 100644
6592--- a/arch/mips/kernel/i8259.c
6593+++ b/arch/mips/kernel/i8259.c
6594@@ -202,7 +202,7 @@ spurious_8259A_irq:
6595 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6596 spurious_irq_mask |= irqmask;
6597 }
6598- atomic_inc(&irq_err_count);
6599+ atomic_inc_unchecked(&irq_err_count);
6600 /*
6601 * Theoretically we do not have to handle this IRQ,
6602 * but in Linux this does not cause problems and is
6603diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6604index 44a1f79..2bd6aa3 100644
6605--- a/arch/mips/kernel/irq-gt641xx.c
6606+++ b/arch/mips/kernel/irq-gt641xx.c
6607@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6608 }
6609 }
6610
6611- atomic_inc(&irq_err_count);
6612+ atomic_inc_unchecked(&irq_err_count);
6613 }
6614
6615 void __init gt641xx_irq_init(void)
6616diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6617index d2bfbc2..a8eacd2 100644
6618--- a/arch/mips/kernel/irq.c
6619+++ b/arch/mips/kernel/irq.c
6620@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6621 printk("unexpected IRQ # %d\n", irq);
6622 }
6623
6624-atomic_t irq_err_count;
6625+atomic_unchecked_t irq_err_count;
6626
6627 int arch_show_interrupts(struct seq_file *p, int prec)
6628 {
6629- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6630+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6631 return 0;
6632 }
6633
6634 asmlinkage void spurious_interrupt(void)
6635 {
6636- atomic_inc(&irq_err_count);
6637+ atomic_inc_unchecked(&irq_err_count);
6638 }
6639
6640 void __init init_IRQ(void)
6641@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6642 #endif
6643 }
6644
6645+
6646 #ifdef DEBUG_STACKOVERFLOW
6647+extern void gr_handle_kernel_exploit(void);
6648+
6649 static inline void check_stack_overflow(void)
6650 {
6651 unsigned long sp;
6652@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6653 printk("do_IRQ: stack overflow: %ld\n",
6654 sp - sizeof(struct thread_info));
6655 dump_stack();
6656+ gr_handle_kernel_exploit();
6657 }
6658 }
6659 #else
6660diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6661index 0614717..002fa43 100644
6662--- a/arch/mips/kernel/pm-cps.c
6663+++ b/arch/mips/kernel/pm-cps.c
6664@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6665 nc_core_ready_count = nc_addr;
6666
6667 /* Ensure ready_count is zero-initialised before the assembly runs */
6668- ACCESS_ONCE(*nc_core_ready_count) = 0;
6669+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6670 coupled_barrier(&per_cpu(pm_barrier, core), online);
6671
6672 /* Run the generated entry code */
6673diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6674index 85bff5d..39bc202 100644
6675--- a/arch/mips/kernel/process.c
6676+++ b/arch/mips/kernel/process.c
6677@@ -534,18 +534,6 @@ out:
6678 return pc;
6679 }
6680
6681-/*
6682- * Don't forget that the stack pointer must be aligned on a 8 bytes
6683- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6684- */
6685-unsigned long arch_align_stack(unsigned long sp)
6686-{
6687- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6688- sp -= get_random_int() & ~PAGE_MASK;
6689-
6690- return sp & ALMASK;
6691-}
6692-
6693 static void arch_dump_stack(void *info)
6694 {
6695 struct pt_regs *regs;
6696diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6697index 5104528..950bbdc 100644
6698--- a/arch/mips/kernel/ptrace.c
6699+++ b/arch/mips/kernel/ptrace.c
6700@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6701 return ret;
6702 }
6703
6704+#ifdef CONFIG_GRKERNSEC_SETXID
6705+extern void gr_delayed_cred_worker(void);
6706+#endif
6707+
6708 /*
6709 * Notification of system call entry/exit
6710 * - triggered by current->work.syscall_trace
6711@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6712 tracehook_report_syscall_entry(regs))
6713 ret = -1;
6714
6715+#ifdef CONFIG_GRKERNSEC_SETXID
6716+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6717+ gr_delayed_cred_worker();
6718+#endif
6719+
6720 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6721 trace_sys_enter(regs, regs->regs[2]);
6722
6723diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6724index 07fc524..b9d7f28 100644
6725--- a/arch/mips/kernel/reset.c
6726+++ b/arch/mips/kernel/reset.c
6727@@ -13,6 +13,7 @@
6728 #include <linux/reboot.h>
6729
6730 #include <asm/reboot.h>
6731+#include <asm/bug.h>
6732
6733 /*
6734 * Urgs ... Too many MIPS machines to handle this in a generic way.
6735@@ -29,16 +30,19 @@ void machine_restart(char *command)
6736 {
6737 if (_machine_restart)
6738 _machine_restart(command);
6739+ BUG();
6740 }
6741
6742 void machine_halt(void)
6743 {
6744 if (_machine_halt)
6745 _machine_halt();
6746+ BUG();
6747 }
6748
6749 void machine_power_off(void)
6750 {
6751 if (pm_power_off)
6752 pm_power_off();
6753+ BUG();
6754 }
6755diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6756index 2242bdd..b284048 100644
6757--- a/arch/mips/kernel/sync-r4k.c
6758+++ b/arch/mips/kernel/sync-r4k.c
6759@@ -18,8 +18,8 @@
6760 #include <asm/mipsregs.h>
6761
6762 static atomic_t count_start_flag = ATOMIC_INIT(0);
6763-static atomic_t count_count_start = ATOMIC_INIT(0);
6764-static atomic_t count_count_stop = ATOMIC_INIT(0);
6765+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6766+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6767 static atomic_t count_reference = ATOMIC_INIT(0);
6768
6769 #define COUNTON 100
6770@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6771
6772 for (i = 0; i < NR_LOOPS; i++) {
6773 /* slaves loop on '!= 2' */
6774- while (atomic_read(&count_count_start) != 1)
6775+ while (atomic_read_unchecked(&count_count_start) != 1)
6776 mb();
6777- atomic_set(&count_count_stop, 0);
6778+ atomic_set_unchecked(&count_count_stop, 0);
6779 smp_wmb();
6780
6781 /* this lets the slaves write their count register */
6782- atomic_inc(&count_count_start);
6783+ atomic_inc_unchecked(&count_count_start);
6784
6785 /*
6786 * Everyone initialises count in the last loop:
6787@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6788 /*
6789 * Wait for all slaves to leave the synchronization point:
6790 */
6791- while (atomic_read(&count_count_stop) != 1)
6792+ while (atomic_read_unchecked(&count_count_stop) != 1)
6793 mb();
6794- atomic_set(&count_count_start, 0);
6795+ atomic_set_unchecked(&count_count_start, 0);
6796 smp_wmb();
6797- atomic_inc(&count_count_stop);
6798+ atomic_inc_unchecked(&count_count_stop);
6799 }
6800 /* Arrange for an interrupt in a short while */
6801 write_c0_compare(read_c0_count() + COUNTON);
6802@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6803 initcount = atomic_read(&count_reference);
6804
6805 for (i = 0; i < NR_LOOPS; i++) {
6806- atomic_inc(&count_count_start);
6807- while (atomic_read(&count_count_start) != 2)
6808+ atomic_inc_unchecked(&count_count_start);
6809+ while (atomic_read_unchecked(&count_count_start) != 2)
6810 mb();
6811
6812 /*
6813@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6814 if (i == NR_LOOPS-1)
6815 write_c0_count(initcount);
6816
6817- atomic_inc(&count_count_stop);
6818- while (atomic_read(&count_count_stop) != 2)
6819+ atomic_inc_unchecked(&count_count_stop);
6820+ while (atomic_read_unchecked(&count_count_stop) != 2)
6821 mb();
6822 }
6823 /* Arrange for an interrupt in a short while */
6824diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6825index c3b41e2..46c32e9 100644
6826--- a/arch/mips/kernel/traps.c
6827+++ b/arch/mips/kernel/traps.c
6828@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6829 siginfo_t info;
6830
6831 prev_state = exception_enter();
6832- die_if_kernel("Integer overflow", regs);
6833+ if (unlikely(!user_mode(regs))) {
6834+
6835+#ifdef CONFIG_PAX_REFCOUNT
6836+ if (fixup_exception(regs)) {
6837+ pax_report_refcount_overflow(regs);
6838+ exception_exit(prev_state);
6839+ return;
6840+ }
6841+#endif
6842+
6843+ die("Integer overflow", regs);
6844+ }
6845
6846 info.si_code = FPE_INTOVF;
6847 info.si_signo = SIGFPE;
6848diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6849index 270bbd4..c01932a 100644
6850--- a/arch/mips/kvm/mips.c
6851+++ b/arch/mips/kvm/mips.c
6852@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6853 return r;
6854 }
6855
6856-int kvm_arch_init(void *opaque)
6857+int kvm_arch_init(const void *opaque)
6858 {
6859 if (kvm_mips_callbacks) {
6860 kvm_err("kvm: module already exists\n");
6861diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6862index 70ab5d6..62940fe 100644
6863--- a/arch/mips/mm/fault.c
6864+++ b/arch/mips/mm/fault.c
6865@@ -28,6 +28,23 @@
6866 #include <asm/highmem.h> /* For VMALLOC_END */
6867 #include <linux/kdebug.h>
6868
6869+#ifdef CONFIG_PAX_PAGEEXEC
6870+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6871+{
6872+ unsigned long i;
6873+
6874+ printk(KERN_ERR "PAX: bytes at PC: ");
6875+ for (i = 0; i < 5; i++) {
6876+ unsigned int c;
6877+ if (get_user(c, (unsigned int *)pc+i))
6878+ printk(KERN_CONT "???????? ");
6879+ else
6880+ printk(KERN_CONT "%08x ", c);
6881+ }
6882+ printk("\n");
6883+}
6884+#endif
6885+
6886 /*
6887 * This routine handles page faults. It determines the address,
6888 * and the problem, and then passes it off to one of the appropriate
6889@@ -201,6 +218,14 @@ bad_area:
6890 bad_area_nosemaphore:
6891 /* User mode accesses just cause a SIGSEGV */
6892 if (user_mode(regs)) {
6893+
6894+#ifdef CONFIG_PAX_PAGEEXEC
6895+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6896+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6897+ do_group_exit(SIGKILL);
6898+ }
6899+#endif
6900+
6901 tsk->thread.cp0_badvaddr = address;
6902 tsk->thread.error_code = write;
6903 #if 0
6904diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6905index f1baadd..5472dca 100644
6906--- a/arch/mips/mm/mmap.c
6907+++ b/arch/mips/mm/mmap.c
6908@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6909 struct vm_area_struct *vma;
6910 unsigned long addr = addr0;
6911 int do_color_align;
6912+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6913 struct vm_unmapped_area_info info;
6914
6915 if (unlikely(len > TASK_SIZE))
6916@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6917 do_color_align = 1;
6918
6919 /* requesting a specific address */
6920+
6921+#ifdef CONFIG_PAX_RANDMMAP
6922+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6923+#endif
6924+
6925 if (addr) {
6926 if (do_color_align)
6927 addr = COLOUR_ALIGN(addr, pgoff);
6928@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6929 addr = PAGE_ALIGN(addr);
6930
6931 vma = find_vma(mm, addr);
6932- if (TASK_SIZE - len >= addr &&
6933- (!vma || addr + len <= vma->vm_start))
6934+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6935 return addr;
6936 }
6937
6938 info.length = len;
6939 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6940 info.align_offset = pgoff << PAGE_SHIFT;
6941+ info.threadstack_offset = offset;
6942
6943 if (dir == DOWN) {
6944 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6945@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6946 {
6947 unsigned long random_factor = 0UL;
6948
6949+#ifdef CONFIG_PAX_RANDMMAP
6950+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6951+#endif
6952+
6953 if (current->flags & PF_RANDOMIZE) {
6954 random_factor = get_random_int();
6955 random_factor = random_factor << PAGE_SHIFT;
6956@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6957
6958 if (mmap_is_legacy()) {
6959 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6960+
6961+#ifdef CONFIG_PAX_RANDMMAP
6962+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6963+ mm->mmap_base += mm->delta_mmap;
6964+#endif
6965+
6966 mm->get_unmapped_area = arch_get_unmapped_area;
6967 } else {
6968 mm->mmap_base = mmap_base(random_factor);
6969+
6970+#ifdef CONFIG_PAX_RANDMMAP
6971+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6972+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6973+#endif
6974+
6975 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6976 }
6977 }
6978
6979-static inline unsigned long brk_rnd(void)
6980-{
6981- unsigned long rnd = get_random_int();
6982-
6983- rnd = rnd << PAGE_SHIFT;
6984- /* 8MB for 32bit, 256MB for 64bit */
6985- if (TASK_IS_32BIT_ADDR)
6986- rnd = rnd & 0x7ffffful;
6987- else
6988- rnd = rnd & 0xffffffful;
6989-
6990- return rnd;
6991-}
6992-
6993-unsigned long arch_randomize_brk(struct mm_struct *mm)
6994-{
6995- unsigned long base = mm->brk;
6996- unsigned long ret;
6997-
6998- ret = PAGE_ALIGN(base + brk_rnd());
6999-
7000- if (ret < mm->brk)
7001- return mm->brk;
7002-
7003- return ret;
7004-}
7005-
7006 int __virt_addr_valid(const volatile void *kaddr)
7007 {
7008 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7009diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7010index d07e041..bedb72b 100644
7011--- a/arch/mips/pci/pci-octeon.c
7012+++ b/arch/mips/pci/pci-octeon.c
7013@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7014
7015
7016 static struct pci_ops octeon_pci_ops = {
7017- octeon_read_config,
7018- octeon_write_config,
7019+ .read = octeon_read_config,
7020+ .write = octeon_write_config,
7021 };
7022
7023 static struct resource octeon_pci_mem_resource = {
7024diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7025index 5e36c33..eb4a17b 100644
7026--- a/arch/mips/pci/pcie-octeon.c
7027+++ b/arch/mips/pci/pcie-octeon.c
7028@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7029 }
7030
7031 static struct pci_ops octeon_pcie0_ops = {
7032- octeon_pcie0_read_config,
7033- octeon_pcie0_write_config,
7034+ .read = octeon_pcie0_read_config,
7035+ .write = octeon_pcie0_write_config,
7036 };
7037
7038 static struct resource octeon_pcie0_mem_resource = {
7039@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7040 };
7041
7042 static struct pci_ops octeon_pcie1_ops = {
7043- octeon_pcie1_read_config,
7044- octeon_pcie1_write_config,
7045+ .read = octeon_pcie1_read_config,
7046+ .write = octeon_pcie1_write_config,
7047 };
7048
7049 static struct resource octeon_pcie1_mem_resource = {
7050@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7051 };
7052
7053 static struct pci_ops octeon_dummy_ops = {
7054- octeon_dummy_read_config,
7055- octeon_dummy_write_config,
7056+ .read = octeon_dummy_read_config,
7057+ .write = octeon_dummy_write_config,
7058 };
7059
7060 static struct resource octeon_dummy_mem_resource = {
7061diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7062index a2358b4..7cead4f 100644
7063--- a/arch/mips/sgi-ip27/ip27-nmi.c
7064+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7065@@ -187,9 +187,9 @@ void
7066 cont_nmi_dump(void)
7067 {
7068 #ifndef REAL_NMI_SIGNAL
7069- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7070+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7071
7072- atomic_inc(&nmied_cpus);
7073+ atomic_inc_unchecked(&nmied_cpus);
7074 #endif
7075 /*
7076 * Only allow 1 cpu to proceed
7077@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7078 udelay(10000);
7079 }
7080 #else
7081- while (atomic_read(&nmied_cpus) != num_online_cpus());
7082+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7083 #endif
7084
7085 /*
7086diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7087index a046b30..6799527 100644
7088--- a/arch/mips/sni/rm200.c
7089+++ b/arch/mips/sni/rm200.c
7090@@ -270,7 +270,7 @@ spurious_8259A_irq:
7091 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7092 spurious_irq_mask |= irqmask;
7093 }
7094- atomic_inc(&irq_err_count);
7095+ atomic_inc_unchecked(&irq_err_count);
7096 /*
7097 * Theoretically we do not have to handle this IRQ,
7098 * but in Linux this does not cause problems and is
7099diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7100index 41e873b..34d33a7 100644
7101--- a/arch/mips/vr41xx/common/icu.c
7102+++ b/arch/mips/vr41xx/common/icu.c
7103@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7104
7105 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7106
7107- atomic_inc(&irq_err_count);
7108+ atomic_inc_unchecked(&irq_err_count);
7109
7110 return -1;
7111 }
7112diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7113index ae0e4ee..e8f0692 100644
7114--- a/arch/mips/vr41xx/common/irq.c
7115+++ b/arch/mips/vr41xx/common/irq.c
7116@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7117 irq_cascade_t *cascade;
7118
7119 if (irq >= NR_IRQS) {
7120- atomic_inc(&irq_err_count);
7121+ atomic_inc_unchecked(&irq_err_count);
7122 return;
7123 }
7124
7125@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7126 ret = cascade->get_irq(irq);
7127 irq = ret;
7128 if (ret < 0)
7129- atomic_inc(&irq_err_count);
7130+ atomic_inc_unchecked(&irq_err_count);
7131 else
7132 irq_dispatch(irq);
7133 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7134diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7135index 967d144..db12197 100644
7136--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7137+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7138@@ -11,12 +11,14 @@
7139 #ifndef _ASM_PROC_CACHE_H
7140 #define _ASM_PROC_CACHE_H
7141
7142+#include <linux/const.h>
7143+
7144 /* L1 cache */
7145
7146 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7147 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7148-#define L1_CACHE_BYTES 16 /* bytes per entry */
7149 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7150+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7151 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7152
7153 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7154diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7155index bcb5df2..84fabd2 100644
7156--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7157+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7158@@ -16,13 +16,15 @@
7159 #ifndef _ASM_PROC_CACHE_H
7160 #define _ASM_PROC_CACHE_H
7161
7162+#include <linux/const.h>
7163+
7164 /*
7165 * L1 cache
7166 */
7167 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7168 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7169-#define L1_CACHE_BYTES 32 /* bytes per entry */
7170 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7171+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7172 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7173
7174 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7175diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7176index 4ce7a01..449202a 100644
7177--- a/arch/openrisc/include/asm/cache.h
7178+++ b/arch/openrisc/include/asm/cache.h
7179@@ -19,11 +19,13 @@
7180 #ifndef __ASM_OPENRISC_CACHE_H
7181 #define __ASM_OPENRISC_CACHE_H
7182
7183+#include <linux/const.h>
7184+
7185 /* FIXME: How can we replace these with values from the CPU...
7186 * they shouldn't be hard-coded!
7187 */
7188
7189-#define L1_CACHE_BYTES 16
7190 #define L1_CACHE_SHIFT 4
7191+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7192
7193 #endif /* __ASM_OPENRISC_CACHE_H */
7194diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7195index 226f8ca..9d9b87d 100644
7196--- a/arch/parisc/include/asm/atomic.h
7197+++ b/arch/parisc/include/asm/atomic.h
7198@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7199 return dec;
7200 }
7201
7202+#define atomic64_read_unchecked(v) atomic64_read(v)
7203+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7204+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7205+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7206+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7207+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7208+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7209+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7210+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7211+
7212 #endif /* !CONFIG_64BIT */
7213
7214
7215diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7216index 47f11c7..3420df2 100644
7217--- a/arch/parisc/include/asm/cache.h
7218+++ b/arch/parisc/include/asm/cache.h
7219@@ -5,6 +5,7 @@
7220 #ifndef __ARCH_PARISC_CACHE_H
7221 #define __ARCH_PARISC_CACHE_H
7222
7223+#include <linux/const.h>
7224
7225 /*
7226 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7227@@ -15,13 +16,13 @@
7228 * just ruin performance.
7229 */
7230 #ifdef CONFIG_PA20
7231-#define L1_CACHE_BYTES 64
7232 #define L1_CACHE_SHIFT 6
7233 #else
7234-#define L1_CACHE_BYTES 32
7235 #define L1_CACHE_SHIFT 5
7236 #endif
7237
7238+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7239+
7240 #ifndef __ASSEMBLY__
7241
7242 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7243diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7244index 3391d06..c23a2cc 100644
7245--- a/arch/parisc/include/asm/elf.h
7246+++ b/arch/parisc/include/asm/elf.h
7247@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7248
7249 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7250
7251+#ifdef CONFIG_PAX_ASLR
7252+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7253+
7254+#define PAX_DELTA_MMAP_LEN 16
7255+#define PAX_DELTA_STACK_LEN 16
7256+#endif
7257+
7258 /* This yields a mask that user programs can use to figure out what
7259 instruction set this CPU supports. This could be done in user space,
7260 but it's not easy, and we've already done it here. */
7261diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7262index f213f5b..0af3e8e 100644
7263--- a/arch/parisc/include/asm/pgalloc.h
7264+++ b/arch/parisc/include/asm/pgalloc.h
7265@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7266 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7267 }
7268
7269+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7270+{
7271+ pgd_populate(mm, pgd, pmd);
7272+}
7273+
7274 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7275 {
7276 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7277@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7278 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7279 #define pmd_free(mm, x) do { } while (0)
7280 #define pgd_populate(mm, pmd, pte) BUG()
7281+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7282
7283 #endif
7284
7285diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7286index 22b89d1..ce34230 100644
7287--- a/arch/parisc/include/asm/pgtable.h
7288+++ b/arch/parisc/include/asm/pgtable.h
7289@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7290 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7291 #define PAGE_COPY PAGE_EXECREAD
7292 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7293+
7294+#ifdef CONFIG_PAX_PAGEEXEC
7295+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7296+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7297+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7298+#else
7299+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7300+# define PAGE_COPY_NOEXEC PAGE_COPY
7301+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7302+#endif
7303+
7304 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7305 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7306 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7307diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7308index a5cb070..8604ddc 100644
7309--- a/arch/parisc/include/asm/uaccess.h
7310+++ b/arch/parisc/include/asm/uaccess.h
7311@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7312 const void __user *from,
7313 unsigned long n)
7314 {
7315- int sz = __compiletime_object_size(to);
7316+ size_t sz = __compiletime_object_size(to);
7317 int ret = -EFAULT;
7318
7319- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7320+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7321 ret = __copy_from_user(to, from, n);
7322 else
7323 copy_from_user_overflow();
7324diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7325index 5822e8e..bc5e638 100644
7326--- a/arch/parisc/kernel/module.c
7327+++ b/arch/parisc/kernel/module.c
7328@@ -98,16 +98,38 @@
7329
7330 /* three functions to determine where in the module core
7331 * or init pieces the location is */
7332+static inline int in_init_rx(struct module *me, void *loc)
7333+{
7334+ return (loc >= me->module_init_rx &&
7335+ loc < (me->module_init_rx + me->init_size_rx));
7336+}
7337+
7338+static inline int in_init_rw(struct module *me, void *loc)
7339+{
7340+ return (loc >= me->module_init_rw &&
7341+ loc < (me->module_init_rw + me->init_size_rw));
7342+}
7343+
7344 static inline int in_init(struct module *me, void *loc)
7345 {
7346- return (loc >= me->module_init &&
7347- loc <= (me->module_init + me->init_size));
7348+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7349+}
7350+
7351+static inline int in_core_rx(struct module *me, void *loc)
7352+{
7353+ return (loc >= me->module_core_rx &&
7354+ loc < (me->module_core_rx + me->core_size_rx));
7355+}
7356+
7357+static inline int in_core_rw(struct module *me, void *loc)
7358+{
7359+ return (loc >= me->module_core_rw &&
7360+ loc < (me->module_core_rw + me->core_size_rw));
7361 }
7362
7363 static inline int in_core(struct module *me, void *loc)
7364 {
7365- return (loc >= me->module_core &&
7366- loc <= (me->module_core + me->core_size));
7367+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7368 }
7369
7370 static inline int in_local(struct module *me, void *loc)
7371@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7372 }
7373
7374 /* align things a bit */
7375- me->core_size = ALIGN(me->core_size, 16);
7376- me->arch.got_offset = me->core_size;
7377- me->core_size += gots * sizeof(struct got_entry);
7378+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7379+ me->arch.got_offset = me->core_size_rw;
7380+ me->core_size_rw += gots * sizeof(struct got_entry);
7381
7382- me->core_size = ALIGN(me->core_size, 16);
7383- me->arch.fdesc_offset = me->core_size;
7384- me->core_size += fdescs * sizeof(Elf_Fdesc);
7385+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7386+ me->arch.fdesc_offset = me->core_size_rw;
7387+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7388
7389 me->arch.got_max = gots;
7390 me->arch.fdesc_max = fdescs;
7391@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7392
7393 BUG_ON(value == 0);
7394
7395- got = me->module_core + me->arch.got_offset;
7396+ got = me->module_core_rw + me->arch.got_offset;
7397 for (i = 0; got[i].addr; i++)
7398 if (got[i].addr == value)
7399 goto out;
7400@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7401 #ifdef CONFIG_64BIT
7402 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7403 {
7404- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7405+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7406
7407 if (!value) {
7408 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7409@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7410
7411 /* Create new one */
7412 fdesc->addr = value;
7413- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7414+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7415 return (Elf_Addr)fdesc;
7416 }
7417 #endif /* CONFIG_64BIT */
7418@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7419
7420 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7421 end = table + sechdrs[me->arch.unwind_section].sh_size;
7422- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7423+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7424
7425 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7426 me->arch.unwind_section, table, end, gp);
7427diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7428index e1ffea2..46ed66e 100644
7429--- a/arch/parisc/kernel/sys_parisc.c
7430+++ b/arch/parisc/kernel/sys_parisc.c
7431@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 unsigned long task_size = TASK_SIZE;
7433 int do_color_align, last_mmap;
7434 struct vm_unmapped_area_info info;
7435+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7436
7437 if (len > task_size)
7438 return -ENOMEM;
7439@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7440 goto found_addr;
7441 }
7442
7443+#ifdef CONFIG_PAX_RANDMMAP
7444+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7445+#endif
7446+
7447 if (addr) {
7448 if (do_color_align && last_mmap)
7449 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7450@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7451 info.high_limit = mmap_upper_limit();
7452 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7453 info.align_offset = shared_align_offset(last_mmap, pgoff);
7454+ info.threadstack_offset = offset;
7455 addr = vm_unmapped_area(&info);
7456
7457 found_addr:
7458@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 unsigned long addr = addr0;
7460 int do_color_align, last_mmap;
7461 struct vm_unmapped_area_info info;
7462+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7463
7464 #ifdef CONFIG_64BIT
7465 /* This should only ever run for 32-bit processes. */
7466@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7467 }
7468
7469 /* requesting a specific address */
7470+#ifdef CONFIG_PAX_RANDMMAP
7471+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7472+#endif
7473+
7474 if (addr) {
7475 if (do_color_align && last_mmap)
7476 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7477@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7478 info.high_limit = mm->mmap_base;
7479 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7480 info.align_offset = shared_align_offset(last_mmap, pgoff);
7481+ info.threadstack_offset = offset;
7482 addr = vm_unmapped_area(&info);
7483 if (!(addr & ~PAGE_MASK))
7484 goto found_addr;
7485@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7486 mm->mmap_legacy_base = mmap_legacy_base();
7487 mm->mmap_base = mmap_upper_limit();
7488
7489+#ifdef CONFIG_PAX_RANDMMAP
7490+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7491+ mm->mmap_legacy_base += mm->delta_mmap;
7492+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7493+ }
7494+#endif
7495+
7496 if (mmap_is_legacy()) {
7497 mm->mmap_base = mm->mmap_legacy_base;
7498 mm->get_unmapped_area = arch_get_unmapped_area;
7499diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7500index 47ee620..1107387 100644
7501--- a/arch/parisc/kernel/traps.c
7502+++ b/arch/parisc/kernel/traps.c
7503@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7504
7505 down_read(&current->mm->mmap_sem);
7506 vma = find_vma(current->mm,regs->iaoq[0]);
7507- if (vma && (regs->iaoq[0] >= vma->vm_start)
7508- && (vma->vm_flags & VM_EXEC)) {
7509-
7510+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7511 fault_address = regs->iaoq[0];
7512 fault_space = regs->iasq[0];
7513
7514diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7515index e5120e6..8ddb5cc 100644
7516--- a/arch/parisc/mm/fault.c
7517+++ b/arch/parisc/mm/fault.c
7518@@ -15,6 +15,7 @@
7519 #include <linux/sched.h>
7520 #include <linux/interrupt.h>
7521 #include <linux/module.h>
7522+#include <linux/unistd.h>
7523
7524 #include <asm/uaccess.h>
7525 #include <asm/traps.h>
7526@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7527 static unsigned long
7528 parisc_acctyp(unsigned long code, unsigned int inst)
7529 {
7530- if (code == 6 || code == 16)
7531+ if (code == 6 || code == 7 || code == 16)
7532 return VM_EXEC;
7533
7534 switch (inst & 0xf0000000) {
7535@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7536 }
7537 #endif
7538
7539+#ifdef CONFIG_PAX_PAGEEXEC
7540+/*
7541+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7542+ *
7543+ * returns 1 when task should be killed
7544+ * 2 when rt_sigreturn trampoline was detected
7545+ * 3 when unpatched PLT trampoline was detected
7546+ */
7547+static int pax_handle_fetch_fault(struct pt_regs *regs)
7548+{
7549+
7550+#ifdef CONFIG_PAX_EMUPLT
7551+ int err;
7552+
7553+ do { /* PaX: unpatched PLT emulation */
7554+ unsigned int bl, depwi;
7555+
7556+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7557+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7558+
7559+ if (err)
7560+ break;
7561+
7562+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7563+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7564+
7565+ err = get_user(ldw, (unsigned int *)addr);
7566+ err |= get_user(bv, (unsigned int *)(addr+4));
7567+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7568+
7569+ if (err)
7570+ break;
7571+
7572+ if (ldw == 0x0E801096U &&
7573+ bv == 0xEAC0C000U &&
7574+ ldw2 == 0x0E881095U)
7575+ {
7576+ unsigned int resolver, map;
7577+
7578+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7579+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7580+ if (err)
7581+ break;
7582+
7583+ regs->gr[20] = instruction_pointer(regs)+8;
7584+ regs->gr[21] = map;
7585+ regs->gr[22] = resolver;
7586+ regs->iaoq[0] = resolver | 3UL;
7587+ regs->iaoq[1] = regs->iaoq[0] + 4;
7588+ return 3;
7589+ }
7590+ }
7591+ } while (0);
7592+#endif
7593+
7594+#ifdef CONFIG_PAX_EMUTRAMP
7595+
7596+#ifndef CONFIG_PAX_EMUSIGRT
7597+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7598+ return 1;
7599+#endif
7600+
7601+ do { /* PaX: rt_sigreturn emulation */
7602+ unsigned int ldi1, ldi2, bel, nop;
7603+
7604+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7605+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7606+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7607+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7608+
7609+ if (err)
7610+ break;
7611+
7612+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7613+ ldi2 == 0x3414015AU &&
7614+ bel == 0xE4008200U &&
7615+ nop == 0x08000240U)
7616+ {
7617+ regs->gr[25] = (ldi1 & 2) >> 1;
7618+ regs->gr[20] = __NR_rt_sigreturn;
7619+ regs->gr[31] = regs->iaoq[1] + 16;
7620+ regs->sr[0] = regs->iasq[1];
7621+ regs->iaoq[0] = 0x100UL;
7622+ regs->iaoq[1] = regs->iaoq[0] + 4;
7623+ regs->iasq[0] = regs->sr[2];
7624+ regs->iasq[1] = regs->sr[2];
7625+ return 2;
7626+ }
7627+ } while (0);
7628+#endif
7629+
7630+ return 1;
7631+}
7632+
7633+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7634+{
7635+ unsigned long i;
7636+
7637+ printk(KERN_ERR "PAX: bytes at PC: ");
7638+ for (i = 0; i < 5; i++) {
7639+ unsigned int c;
7640+ if (get_user(c, (unsigned int *)pc+i))
7641+ printk(KERN_CONT "???????? ");
7642+ else
7643+ printk(KERN_CONT "%08x ", c);
7644+ }
7645+ printk("\n");
7646+}
7647+#endif
7648+
7649 int fixup_exception(struct pt_regs *regs)
7650 {
7651 const struct exception_table_entry *fix;
7652@@ -234,8 +345,33 @@ retry:
7653
7654 good_area:
7655
7656- if ((vma->vm_flags & acc_type) != acc_type)
7657+ if ((vma->vm_flags & acc_type) != acc_type) {
7658+
7659+#ifdef CONFIG_PAX_PAGEEXEC
7660+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7661+ (address & ~3UL) == instruction_pointer(regs))
7662+ {
7663+ up_read(&mm->mmap_sem);
7664+ switch (pax_handle_fetch_fault(regs)) {
7665+
7666+#ifdef CONFIG_PAX_EMUPLT
7667+ case 3:
7668+ return;
7669+#endif
7670+
7671+#ifdef CONFIG_PAX_EMUTRAMP
7672+ case 2:
7673+ return;
7674+#endif
7675+
7676+ }
7677+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7678+ do_group_exit(SIGKILL);
7679+ }
7680+#endif
7681+
7682 goto bad_area;
7683+ }
7684
7685 /*
7686 * If for any reason at all we couldn't handle the fault, make
7687diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7688index a2a168e..e484682 100644
7689--- a/arch/powerpc/Kconfig
7690+++ b/arch/powerpc/Kconfig
7691@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7692 config KEXEC
7693 bool "kexec system call"
7694 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7695+ depends on !GRKERNSEC_KMEM
7696 help
7697 kexec is a system call that implements the ability to shutdown your
7698 current kernel, and to start another kernel. It is like a reboot
7699diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7700index 512d278..d31fadd 100644
7701--- a/arch/powerpc/include/asm/atomic.h
7702+++ b/arch/powerpc/include/asm/atomic.h
7703@@ -12,6 +12,11 @@
7704
7705 #define ATOMIC_INIT(i) { (i) }
7706
7707+#define _ASM_EXTABLE(from, to) \
7708+" .section __ex_table,\"a\"\n" \
7709+ PPC_LONG" " #from ", " #to"\n" \
7710+" .previous\n"
7711+
7712 static __inline__ int atomic_read(const atomic_t *v)
7713 {
7714 int t;
7715@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7716 return t;
7717 }
7718
7719+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7720+{
7721+ int t;
7722+
7723+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7724+
7725+ return t;
7726+}
7727+
7728 static __inline__ void atomic_set(atomic_t *v, int i)
7729 {
7730 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7731 }
7732
7733-#define ATOMIC_OP(op, asm_op) \
7734-static __inline__ void atomic_##op(int a, atomic_t *v) \
7735+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7736+{
7737+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7738+}
7739+
7740+#ifdef CONFIG_PAX_REFCOUNT
7741+#define __REFCOUNT_OP(op) op##o.
7742+#define __OVERFLOW_PRE \
7743+ " mcrxr cr0\n"
7744+#define __OVERFLOW_POST \
7745+ " bf 4*cr0+so, 3f\n" \
7746+ "2: .long 0x00c00b00\n" \
7747+ "3:\n"
7748+#define __OVERFLOW_EXTABLE \
7749+ "\n4:\n"
7750+ _ASM_EXTABLE(2b, 4b)
7751+#else
7752+#define __REFCOUNT_OP(op) op
7753+#define __OVERFLOW_PRE
7754+#define __OVERFLOW_POST
7755+#define __OVERFLOW_EXTABLE
7756+#endif
7757+
7758+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7759+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7760 { \
7761 int t; \
7762 \
7763 __asm__ __volatile__( \
7764-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7765+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7766+ pre_op \
7767 #asm_op " %0,%2,%0\n" \
7768+ post_op \
7769 PPC405_ERR77(0,%3) \
7770 " stwcx. %0,0,%3 \n" \
7771 " bne- 1b\n" \
7772+ extable \
7773 : "=&r" (t), "+m" (v->counter) \
7774 : "r" (a), "r" (&v->counter) \
7775 : "cc"); \
7776 } \
7777
7778-#define ATOMIC_OP_RETURN(op, asm_op) \
7779-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7780+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7781+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7782+
7783+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7784+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7785 { \
7786 int t; \
7787 \
7788 __asm__ __volatile__( \
7789 PPC_ATOMIC_ENTRY_BARRIER \
7790-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7791+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7792+ pre_op \
7793 #asm_op " %0,%1,%0\n" \
7794+ post_op \
7795 PPC405_ERR77(0,%2) \
7796 " stwcx. %0,0,%2 \n" \
7797 " bne- 1b\n" \
7798+ extable \
7799 PPC_ATOMIC_EXIT_BARRIER \
7800 : "=&r" (t) \
7801 : "r" (a), "r" (&v->counter) \
7802@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7803 return t; \
7804 }
7805
7806+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7807+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7808+
7809 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7810
7811 ATOMIC_OPS(add, add)
7812@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7813
7814 #undef ATOMIC_OPS
7815 #undef ATOMIC_OP_RETURN
7816+#undef __ATOMIC_OP_RETURN
7817 #undef ATOMIC_OP
7818+#undef __ATOMIC_OP
7819
7820 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7821
7822-static __inline__ void atomic_inc(atomic_t *v)
7823-{
7824- int t;
7825+/*
7826+ * atomic_inc - increment atomic variable
7827+ * @v: pointer of type atomic_t
7828+ *
7829+ * Automatically increments @v by 1
7830+ */
7831+#define atomic_inc(v) atomic_add(1, (v))
7832+#define atomic_inc_return(v) atomic_add_return(1, (v))
7833
7834- __asm__ __volatile__(
7835-"1: lwarx %0,0,%2 # atomic_inc\n\
7836- addic %0,%0,1\n"
7837- PPC405_ERR77(0,%2)
7838-" stwcx. %0,0,%2 \n\
7839- bne- 1b"
7840- : "=&r" (t), "+m" (v->counter)
7841- : "r" (&v->counter)
7842- : "cc", "xer");
7843+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7844+{
7845+ atomic_add_unchecked(1, v);
7846 }
7847
7848-static __inline__ int atomic_inc_return(atomic_t *v)
7849+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7850 {
7851- int t;
7852-
7853- __asm__ __volatile__(
7854- PPC_ATOMIC_ENTRY_BARRIER
7855-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7856- addic %0,%0,1\n"
7857- PPC405_ERR77(0,%1)
7858-" stwcx. %0,0,%1 \n\
7859- bne- 1b"
7860- PPC_ATOMIC_EXIT_BARRIER
7861- : "=&r" (t)
7862- : "r" (&v->counter)
7863- : "cc", "xer", "memory");
7864-
7865- return t;
7866+ return atomic_add_return_unchecked(1, v);
7867 }
7868
7869 /*
7870@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7871 */
7872 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7873
7874-static __inline__ void atomic_dec(atomic_t *v)
7875+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7876 {
7877- int t;
7878-
7879- __asm__ __volatile__(
7880-"1: lwarx %0,0,%2 # atomic_dec\n\
7881- addic %0,%0,-1\n"
7882- PPC405_ERR77(0,%2)\
7883-" stwcx. %0,0,%2\n\
7884- bne- 1b"
7885- : "=&r" (t), "+m" (v->counter)
7886- : "r" (&v->counter)
7887- : "cc", "xer");
7888+ return atomic_add_return_unchecked(1, v) == 0;
7889 }
7890
7891-static __inline__ int atomic_dec_return(atomic_t *v)
7892+/*
7893+ * atomic_dec - decrement atomic variable
7894+ * @v: pointer of type atomic_t
7895+ *
7896+ * Atomically decrements @v by 1
7897+ */
7898+#define atomic_dec(v) atomic_sub(1, (v))
7899+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7900+
7901+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7902 {
7903- int t;
7904-
7905- __asm__ __volatile__(
7906- PPC_ATOMIC_ENTRY_BARRIER
7907-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7908- addic %0,%0,-1\n"
7909- PPC405_ERR77(0,%1)
7910-" stwcx. %0,0,%1\n\
7911- bne- 1b"
7912- PPC_ATOMIC_EXIT_BARRIER
7913- : "=&r" (t)
7914- : "r" (&v->counter)
7915- : "cc", "xer", "memory");
7916-
7917- return t;
7918+ atomic_sub_unchecked(1, v);
7919 }
7920
7921 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7922 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7923
7924+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7925+{
7926+ return cmpxchg(&(v->counter), old, new);
7927+}
7928+
7929+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7930+{
7931+ return xchg(&(v->counter), new);
7932+}
7933+
7934 /**
7935 * __atomic_add_unless - add unless the number is a given value
7936 * @v: pointer of type atomic_t
7937@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7938 PPC_ATOMIC_ENTRY_BARRIER
7939 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7940 cmpw 0,%0,%3 \n\
7941- beq- 2f \n\
7942- add %0,%2,%0 \n"
7943+ beq- 2f \n"
7944+
7945+#ifdef CONFIG_PAX_REFCOUNT
7946+" mcrxr cr0\n"
7947+" addo. %0,%2,%0\n"
7948+" bf 4*cr0+so, 4f\n"
7949+"3:.long " "0x00c00b00""\n"
7950+"4:\n"
7951+#else
7952+ "add %0,%2,%0 \n"
7953+#endif
7954+
7955 PPC405_ERR77(0,%2)
7956 " stwcx. %0,0,%1 \n\
7957 bne- 1b \n"
7958+"5:"
7959+
7960+#ifdef CONFIG_PAX_REFCOUNT
7961+ _ASM_EXTABLE(3b, 5b)
7962+#endif
7963+
7964 PPC_ATOMIC_EXIT_BARRIER
7965 " subf %0,%2,%0 \n\
7966 2:"
7967@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7968 }
7969 #define atomic_dec_if_positive atomic_dec_if_positive
7970
7971+#define smp_mb__before_atomic_dec() smp_mb()
7972+#define smp_mb__after_atomic_dec() smp_mb()
7973+#define smp_mb__before_atomic_inc() smp_mb()
7974+#define smp_mb__after_atomic_inc() smp_mb()
7975+
7976 #ifdef __powerpc64__
7977
7978 #define ATOMIC64_INIT(i) { (i) }
7979@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7980 return t;
7981 }
7982
7983+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7984+{
7985+ long t;
7986+
7987+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7988+
7989+ return t;
7990+}
7991+
7992 static __inline__ void atomic64_set(atomic64_t *v, long i)
7993 {
7994 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7995 }
7996
7997-#define ATOMIC64_OP(op, asm_op) \
7998-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7999+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8000+{
8001+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8002+}
8003+
8004+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
8005+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8006 { \
8007 long t; \
8008 \
8009 __asm__ __volatile__( \
8010 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8011+ pre_op \
8012 #asm_op " %0,%2,%0\n" \
8013+ post_op \
8014 " stdcx. %0,0,%3 \n" \
8015 " bne- 1b\n" \
8016+ extable \
8017 : "=&r" (t), "+m" (v->counter) \
8018 : "r" (a), "r" (&v->counter) \
8019 : "cc"); \
8020 }
8021
8022-#define ATOMIC64_OP_RETURN(op, asm_op) \
8023-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8024+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8025+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8026+
8027+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8028+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8029 { \
8030 long t; \
8031 \
8032 __asm__ __volatile__( \
8033 PPC_ATOMIC_ENTRY_BARRIER \
8034 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8035+ pre_op \
8036 #asm_op " %0,%1,%0\n" \
8037+ post_op \
8038 " stdcx. %0,0,%2 \n" \
8039 " bne- 1b\n" \
8040+ extable \
8041 PPC_ATOMIC_EXIT_BARRIER \
8042 : "=&r" (t) \
8043 : "r" (a), "r" (&v->counter) \
8044@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8045 return t; \
8046 }
8047
8048+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8049+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8050+
8051 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8052
8053 ATOMIC64_OPS(add, add)
8054@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8055
8056 #undef ATOMIC64_OPS
8057 #undef ATOMIC64_OP_RETURN
8058+#undef __ATOMIC64_OP_RETURN
8059 #undef ATOMIC64_OP
8060+#undef __ATOMIC64_OP
8061+#undef __OVERFLOW_EXTABLE
8062+#undef __OVERFLOW_POST
8063+#undef __OVERFLOW_PRE
8064+#undef __REFCOUNT_OP
8065
8066 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8067
8068-static __inline__ void atomic64_inc(atomic64_t *v)
8069-{
8070- long t;
8071+/*
8072+ * atomic64_inc - increment atomic variable
8073+ * @v: pointer of type atomic64_t
8074+ *
8075+ * Automatically increments @v by 1
8076+ */
8077+#define atomic64_inc(v) atomic64_add(1, (v))
8078+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8079
8080- __asm__ __volatile__(
8081-"1: ldarx %0,0,%2 # atomic64_inc\n\
8082- addic %0,%0,1\n\
8083- stdcx. %0,0,%2 \n\
8084- bne- 1b"
8085- : "=&r" (t), "+m" (v->counter)
8086- : "r" (&v->counter)
8087- : "cc", "xer");
8088+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8089+{
8090+ atomic64_add_unchecked(1, v);
8091 }
8092
8093-static __inline__ long atomic64_inc_return(atomic64_t *v)
8094+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8095 {
8096- long t;
8097-
8098- __asm__ __volatile__(
8099- PPC_ATOMIC_ENTRY_BARRIER
8100-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8101- addic %0,%0,1\n\
8102- stdcx. %0,0,%1 \n\
8103- bne- 1b"
8104- PPC_ATOMIC_EXIT_BARRIER
8105- : "=&r" (t)
8106- : "r" (&v->counter)
8107- : "cc", "xer", "memory");
8108-
8109- return t;
8110+ return atomic64_add_return_unchecked(1, v);
8111 }
8112
8113 /*
8114@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8115 */
8116 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8117
8118-static __inline__ void atomic64_dec(atomic64_t *v)
8119+/*
8120+ * atomic64_dec - decrement atomic variable
8121+ * @v: pointer of type atomic64_t
8122+ *
8123+ * Atomically decrements @v by 1
8124+ */
8125+#define atomic64_dec(v) atomic64_sub(1, (v))
8126+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8127+
8128+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8129 {
8130- long t;
8131-
8132- __asm__ __volatile__(
8133-"1: ldarx %0,0,%2 # atomic64_dec\n\
8134- addic %0,%0,-1\n\
8135- stdcx. %0,0,%2\n\
8136- bne- 1b"
8137- : "=&r" (t), "+m" (v->counter)
8138- : "r" (&v->counter)
8139- : "cc", "xer");
8140-}
8141-
8142-static __inline__ long atomic64_dec_return(atomic64_t *v)
8143-{
8144- long t;
8145-
8146- __asm__ __volatile__(
8147- PPC_ATOMIC_ENTRY_BARRIER
8148-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8149- addic %0,%0,-1\n\
8150- stdcx. %0,0,%1\n\
8151- bne- 1b"
8152- PPC_ATOMIC_EXIT_BARRIER
8153- : "=&r" (t)
8154- : "r" (&v->counter)
8155- : "cc", "xer", "memory");
8156-
8157- return t;
8158+ atomic64_sub_unchecked(1, v);
8159 }
8160
8161 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8162@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8163 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8164 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8165
8166+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8167+{
8168+ return cmpxchg(&(v->counter), old, new);
8169+}
8170+
8171+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8172+{
8173+ return xchg(&(v->counter), new);
8174+}
8175+
8176 /**
8177 * atomic64_add_unless - add unless the number is a given value
8178 * @v: pointer of type atomic64_t
8179@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8180
8181 __asm__ __volatile__ (
8182 PPC_ATOMIC_ENTRY_BARRIER
8183-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8184+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8185 cmpd 0,%0,%3 \n\
8186- beq- 2f \n\
8187- add %0,%2,%0 \n"
8188+ beq- 2f \n"
8189+
8190+#ifdef CONFIG_PAX_REFCOUNT
8191+" mcrxr cr0\n"
8192+" addo. %0,%2,%0\n"
8193+" bf 4*cr0+so, 4f\n"
8194+"3:.long " "0x00c00b00""\n"
8195+"4:\n"
8196+#else
8197+ "add %0,%2,%0 \n"
8198+#endif
8199+
8200 " stdcx. %0,0,%1 \n\
8201 bne- 1b \n"
8202 PPC_ATOMIC_EXIT_BARRIER
8203+"5:"
8204+
8205+#ifdef CONFIG_PAX_REFCOUNT
8206+ _ASM_EXTABLE(3b, 5b)
8207+#endif
8208+
8209 " subf %0,%2,%0 \n\
8210 2:"
8211 : "=&r" (t)
8212diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8213index a3bf5be..e03ba81 100644
8214--- a/arch/powerpc/include/asm/barrier.h
8215+++ b/arch/powerpc/include/asm/barrier.h
8216@@ -76,7 +76,7 @@
8217 do { \
8218 compiletime_assert_atomic_type(*p); \
8219 smp_lwsync(); \
8220- ACCESS_ONCE(*p) = (v); \
8221+ ACCESS_ONCE_RW(*p) = (v); \
8222 } while (0)
8223
8224 #define smp_load_acquire(p) \
8225diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8226index 34a05a1..a1f2c67 100644
8227--- a/arch/powerpc/include/asm/cache.h
8228+++ b/arch/powerpc/include/asm/cache.h
8229@@ -4,6 +4,7 @@
8230 #ifdef __KERNEL__
8231
8232 #include <asm/reg.h>
8233+#include <linux/const.h>
8234
8235 /* bytes per L1 cache line */
8236 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8237@@ -23,7 +24,7 @@
8238 #define L1_CACHE_SHIFT 7
8239 #endif
8240
8241-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8242+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8243
8244 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8245
8246diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8247index 57d289a..b36c98c 100644
8248--- a/arch/powerpc/include/asm/elf.h
8249+++ b/arch/powerpc/include/asm/elf.h
8250@@ -30,6 +30,18 @@
8251
8252 #define ELF_ET_DYN_BASE 0x20000000
8253
8254+#ifdef CONFIG_PAX_ASLR
8255+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8256+
8257+#ifdef __powerpc64__
8258+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8259+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8260+#else
8261+#define PAX_DELTA_MMAP_LEN 15
8262+#define PAX_DELTA_STACK_LEN 15
8263+#endif
8264+#endif
8265+
8266 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8267
8268 /*
8269@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8270 (0x7ff >> (PAGE_SHIFT - 12)) : \
8271 (0x3ffff >> (PAGE_SHIFT - 12)))
8272
8273-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8274-#define arch_randomize_brk arch_randomize_brk
8275-
8276-
8277 #ifdef CONFIG_SPU_BASE
8278 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8279 #define NT_SPU 1
8280diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8281index 8196e9c..d83a9f3 100644
8282--- a/arch/powerpc/include/asm/exec.h
8283+++ b/arch/powerpc/include/asm/exec.h
8284@@ -4,6 +4,6 @@
8285 #ifndef _ASM_POWERPC_EXEC_H
8286 #define _ASM_POWERPC_EXEC_H
8287
8288-extern unsigned long arch_align_stack(unsigned long sp);
8289+#define arch_align_stack(x) ((x) & ~0xfUL)
8290
8291 #endif /* _ASM_POWERPC_EXEC_H */
8292diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8293index 5acabbd..7ea14fa 100644
8294--- a/arch/powerpc/include/asm/kmap_types.h
8295+++ b/arch/powerpc/include/asm/kmap_types.h
8296@@ -10,7 +10,7 @@
8297 * 2 of the License, or (at your option) any later version.
8298 */
8299
8300-#define KM_TYPE_NR 16
8301+#define KM_TYPE_NR 17
8302
8303 #endif /* __KERNEL__ */
8304 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8305diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8306index b8da913..c02b593 100644
8307--- a/arch/powerpc/include/asm/local.h
8308+++ b/arch/powerpc/include/asm/local.h
8309@@ -9,21 +9,65 @@ typedef struct
8310 atomic_long_t a;
8311 } local_t;
8312
8313+typedef struct
8314+{
8315+ atomic_long_unchecked_t a;
8316+} local_unchecked_t;
8317+
8318 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8319
8320 #define local_read(l) atomic_long_read(&(l)->a)
8321+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8322 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8323+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8324
8325 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8326+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8327 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8328+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8329 #define local_inc(l) atomic_long_inc(&(l)->a)
8330+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8331 #define local_dec(l) atomic_long_dec(&(l)->a)
8332+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8333
8334 static __inline__ long local_add_return(long a, local_t *l)
8335 {
8336 long t;
8337
8338 __asm__ __volatile__(
8339+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8340+
8341+#ifdef CONFIG_PAX_REFCOUNT
8342+" mcrxr cr0\n"
8343+" addo. %0,%1,%0\n"
8344+" bf 4*cr0+so, 3f\n"
8345+"2:.long " "0x00c00b00""\n"
8346+#else
8347+" add %0,%1,%0\n"
8348+#endif
8349+
8350+"3:\n"
8351+ PPC405_ERR77(0,%2)
8352+ PPC_STLCX "%0,0,%2 \n\
8353+ bne- 1b"
8354+
8355+#ifdef CONFIG_PAX_REFCOUNT
8356+"\n4:\n"
8357+ _ASM_EXTABLE(2b, 4b)
8358+#endif
8359+
8360+ : "=&r" (t)
8361+ : "r" (a), "r" (&(l->a.counter))
8362+ : "cc", "memory");
8363+
8364+ return t;
8365+}
8366+
8367+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8368+{
8369+ long t;
8370+
8371+ __asm__ __volatile__(
8372 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8373 add %0,%1,%0\n"
8374 PPC405_ERR77(0,%2)
8375@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8376
8377 #define local_cmpxchg(l, o, n) \
8378 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8379+#define local_cmpxchg_unchecked(l, o, n) \
8380+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8381 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8382
8383 /**
8384diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8385index 8565c25..2865190 100644
8386--- a/arch/powerpc/include/asm/mman.h
8387+++ b/arch/powerpc/include/asm/mman.h
8388@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8389 }
8390 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8391
8392-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8393+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8394 {
8395 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8396 }
8397diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8398index 69c0598..2c56964 100644
8399--- a/arch/powerpc/include/asm/page.h
8400+++ b/arch/powerpc/include/asm/page.h
8401@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8402 * and needs to be executable. This means the whole heap ends
8403 * up being executable.
8404 */
8405-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8406- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8407+#define VM_DATA_DEFAULT_FLAGS32 \
8408+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8409+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8410
8411 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8412 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8413@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8414 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8415 #endif
8416
8417+#define ktla_ktva(addr) (addr)
8418+#define ktva_ktla(addr) (addr)
8419+
8420 #ifndef CONFIG_PPC_BOOK3S_64
8421 /*
8422 * Use the top bit of the higher-level page table entries to indicate whether
8423diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8424index d908a46..3753f71 100644
8425--- a/arch/powerpc/include/asm/page_64.h
8426+++ b/arch/powerpc/include/asm/page_64.h
8427@@ -172,15 +172,18 @@ do { \
8428 * stack by default, so in the absence of a PT_GNU_STACK program header
8429 * we turn execute permission off.
8430 */
8431-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8432- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8433+#define VM_STACK_DEFAULT_FLAGS32 \
8434+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8435+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8436
8437 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8438 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8439
8440+#ifndef CONFIG_PAX_PAGEEXEC
8441 #define VM_STACK_DEFAULT_FLAGS \
8442 (is_32bit_task() ? \
8443 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8444+#endif
8445
8446 #include <asm-generic/getorder.h>
8447
8448diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8449index 4b0be20..c15a27d 100644
8450--- a/arch/powerpc/include/asm/pgalloc-64.h
8451+++ b/arch/powerpc/include/asm/pgalloc-64.h
8452@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8453 #ifndef CONFIG_PPC_64K_PAGES
8454
8455 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8456+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8457
8458 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8459 {
8460@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8461 pud_set(pud, (unsigned long)pmd);
8462 }
8463
8464+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8465+{
8466+ pud_populate(mm, pud, pmd);
8467+}
8468+
8469 #define pmd_populate(mm, pmd, pte_page) \
8470 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8471 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8472@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8473 #endif
8474
8475 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8476+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8477
8478 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8479 pte_t *pte)
8480diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8481index a8805fe..6d69617 100644
8482--- a/arch/powerpc/include/asm/pgtable.h
8483+++ b/arch/powerpc/include/asm/pgtable.h
8484@@ -2,6 +2,7 @@
8485 #define _ASM_POWERPC_PGTABLE_H
8486 #ifdef __KERNEL__
8487
8488+#include <linux/const.h>
8489 #ifndef __ASSEMBLY__
8490 #include <linux/mmdebug.h>
8491 #include <linux/mmzone.h>
8492diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8493index 4aad413..85d86bf 100644
8494--- a/arch/powerpc/include/asm/pte-hash32.h
8495+++ b/arch/powerpc/include/asm/pte-hash32.h
8496@@ -21,6 +21,7 @@
8497 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8498 #define _PAGE_USER 0x004 /* usermode access allowed */
8499 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8500+#define _PAGE_EXEC _PAGE_GUARDED
8501 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8502 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8503 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8504diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8505index 1c874fb..e8480a4 100644
8506--- a/arch/powerpc/include/asm/reg.h
8507+++ b/arch/powerpc/include/asm/reg.h
8508@@ -253,6 +253,7 @@
8509 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8510 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8511 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8512+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8513 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8514 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8515 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8516diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8517index 5a6614a..d89995d1 100644
8518--- a/arch/powerpc/include/asm/smp.h
8519+++ b/arch/powerpc/include/asm/smp.h
8520@@ -51,7 +51,7 @@ struct smp_ops_t {
8521 int (*cpu_disable)(void);
8522 void (*cpu_die)(unsigned int nr);
8523 int (*cpu_bootable)(unsigned int nr);
8524-};
8525+} __no_const;
8526
8527 extern void smp_send_debugger_break(void);
8528 extern void start_secondary_resume(void);
8529diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8530index 4dbe072..b803275 100644
8531--- a/arch/powerpc/include/asm/spinlock.h
8532+++ b/arch/powerpc/include/asm/spinlock.h
8533@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8534 __asm__ __volatile__(
8535 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8536 __DO_SIGN_EXTEND
8537-" addic. %0,%0,1\n\
8538- ble- 2f\n"
8539+
8540+#ifdef CONFIG_PAX_REFCOUNT
8541+" mcrxr cr0\n"
8542+" addico. %0,%0,1\n"
8543+" bf 4*cr0+so, 3f\n"
8544+"2:.long " "0x00c00b00""\n"
8545+#else
8546+" addic. %0,%0,1\n"
8547+#endif
8548+
8549+"3:\n"
8550+ "ble- 4f\n"
8551 PPC405_ERR77(0,%1)
8552 " stwcx. %0,0,%1\n\
8553 bne- 1b\n"
8554 PPC_ACQUIRE_BARRIER
8555-"2:" : "=&r" (tmp)
8556+"4:"
8557+
8558+#ifdef CONFIG_PAX_REFCOUNT
8559+ _ASM_EXTABLE(2b,4b)
8560+#endif
8561+
8562+ : "=&r" (tmp)
8563 : "r" (&rw->lock)
8564 : "cr0", "xer", "memory");
8565
8566@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8567 __asm__ __volatile__(
8568 "# read_unlock\n\t"
8569 PPC_RELEASE_BARRIER
8570-"1: lwarx %0,0,%1\n\
8571- addic %0,%0,-1\n"
8572+"1: lwarx %0,0,%1\n"
8573+
8574+#ifdef CONFIG_PAX_REFCOUNT
8575+" mcrxr cr0\n"
8576+" addico. %0,%0,-1\n"
8577+" bf 4*cr0+so, 3f\n"
8578+"2:.long " "0x00c00b00""\n"
8579+#else
8580+" addic. %0,%0,-1\n"
8581+#endif
8582+
8583+"3:\n"
8584 PPC405_ERR77(0,%1)
8585 " stwcx. %0,0,%1\n\
8586 bne- 1b"
8587+
8588+#ifdef CONFIG_PAX_REFCOUNT
8589+"\n4:\n"
8590+ _ASM_EXTABLE(2b, 4b)
8591+#endif
8592+
8593 : "=&r"(tmp)
8594 : "r"(&rw->lock)
8595 : "cr0", "xer", "memory");
8596diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8597index 0be6c68..9c3c6ee 100644
8598--- a/arch/powerpc/include/asm/thread_info.h
8599+++ b/arch/powerpc/include/asm/thread_info.h
8600@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8601 #if defined(CONFIG_PPC64)
8602 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8603 #endif
8604+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
8605+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8606
8607 /* as above, but as bit values */
8608 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8609@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8610 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8611 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8612 #define _TIF_NOHZ (1<<TIF_NOHZ)
8613+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8614 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8615 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8616- _TIF_NOHZ)
8617+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8618
8619 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8620 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8621diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8622index a0c071d..49cdc7f 100644
8623--- a/arch/powerpc/include/asm/uaccess.h
8624+++ b/arch/powerpc/include/asm/uaccess.h
8625@@ -58,6 +58,7 @@
8626
8627 #endif
8628
8629+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8630 #define access_ok(type, addr, size) \
8631 (__chk_user_ptr(addr), \
8632 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8633@@ -318,52 +319,6 @@ do { \
8634 extern unsigned long __copy_tofrom_user(void __user *to,
8635 const void __user *from, unsigned long size);
8636
8637-#ifndef __powerpc64__
8638-
8639-static inline unsigned long copy_from_user(void *to,
8640- const void __user *from, unsigned long n)
8641-{
8642- unsigned long over;
8643-
8644- if (access_ok(VERIFY_READ, from, n))
8645- return __copy_tofrom_user((__force void __user *)to, from, n);
8646- if ((unsigned long)from < TASK_SIZE) {
8647- over = (unsigned long)from + n - TASK_SIZE;
8648- return __copy_tofrom_user((__force void __user *)to, from,
8649- n - over) + over;
8650- }
8651- return n;
8652-}
8653-
8654-static inline unsigned long copy_to_user(void __user *to,
8655- const void *from, unsigned long n)
8656-{
8657- unsigned long over;
8658-
8659- if (access_ok(VERIFY_WRITE, to, n))
8660- return __copy_tofrom_user(to, (__force void __user *)from, n);
8661- if ((unsigned long)to < TASK_SIZE) {
8662- over = (unsigned long)to + n - TASK_SIZE;
8663- return __copy_tofrom_user(to, (__force void __user *)from,
8664- n - over) + over;
8665- }
8666- return n;
8667-}
8668-
8669-#else /* __powerpc64__ */
8670-
8671-#define __copy_in_user(to, from, size) \
8672- __copy_tofrom_user((to), (from), (size))
8673-
8674-extern unsigned long copy_from_user(void *to, const void __user *from,
8675- unsigned long n);
8676-extern unsigned long copy_to_user(void __user *to, const void *from,
8677- unsigned long n);
8678-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8679- unsigned long n);
8680-
8681-#endif /* __powerpc64__ */
8682-
8683 static inline unsigned long __copy_from_user_inatomic(void *to,
8684 const void __user *from, unsigned long n)
8685 {
8686@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8687 if (ret == 0)
8688 return 0;
8689 }
8690+
8691+ if (!__builtin_constant_p(n))
8692+ check_object_size(to, n, false);
8693+
8694 return __copy_tofrom_user((__force void __user *)to, from, n);
8695 }
8696
8697@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8698 if (ret == 0)
8699 return 0;
8700 }
8701+
8702+ if (!__builtin_constant_p(n))
8703+ check_object_size(from, n, true);
8704+
8705 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8706 }
8707
8708@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8709 return __copy_to_user_inatomic(to, from, size);
8710 }
8711
8712+#ifndef __powerpc64__
8713+
8714+static inline unsigned long __must_check copy_from_user(void *to,
8715+ const void __user *from, unsigned long n)
8716+{
8717+ unsigned long over;
8718+
8719+ if ((long)n < 0)
8720+ return n;
8721+
8722+ if (access_ok(VERIFY_READ, from, n)) {
8723+ if (!__builtin_constant_p(n))
8724+ check_object_size(to, n, false);
8725+ return __copy_tofrom_user((__force void __user *)to, from, n);
8726+ }
8727+ if ((unsigned long)from < TASK_SIZE) {
8728+ over = (unsigned long)from + n - TASK_SIZE;
8729+ if (!__builtin_constant_p(n - over))
8730+ check_object_size(to, n - over, false);
8731+ return __copy_tofrom_user((__force void __user *)to, from,
8732+ n - over) + over;
8733+ }
8734+ return n;
8735+}
8736+
8737+static inline unsigned long __must_check copy_to_user(void __user *to,
8738+ const void *from, unsigned long n)
8739+{
8740+ unsigned long over;
8741+
8742+ if ((long)n < 0)
8743+ return n;
8744+
8745+ if (access_ok(VERIFY_WRITE, to, n)) {
8746+ if (!__builtin_constant_p(n))
8747+ check_object_size(from, n, true);
8748+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8749+ }
8750+ if ((unsigned long)to < TASK_SIZE) {
8751+ over = (unsigned long)to + n - TASK_SIZE;
8752+ if (!__builtin_constant_p(n))
8753+ check_object_size(from, n - over, true);
8754+ return __copy_tofrom_user(to, (__force void __user *)from,
8755+ n - over) + over;
8756+ }
8757+ return n;
8758+}
8759+
8760+#else /* __powerpc64__ */
8761+
8762+#define __copy_in_user(to, from, size) \
8763+ __copy_tofrom_user((to), (from), (size))
8764+
8765+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8766+{
8767+ if ((long)n < 0 || n > INT_MAX)
8768+ return n;
8769+
8770+ if (!__builtin_constant_p(n))
8771+ check_object_size(to, n, false);
8772+
8773+ if (likely(access_ok(VERIFY_READ, from, n)))
8774+ n = __copy_from_user(to, from, n);
8775+ else
8776+ memset(to, 0, n);
8777+ return n;
8778+}
8779+
8780+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8781+{
8782+ if ((long)n < 0 || n > INT_MAX)
8783+ return n;
8784+
8785+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8786+ if (!__builtin_constant_p(n))
8787+ check_object_size(from, n, true);
8788+ n = __copy_to_user(to, from, n);
8789+ }
8790+ return n;
8791+}
8792+
8793+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8794+ unsigned long n);
8795+
8796+#endif /* __powerpc64__ */
8797+
8798 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8799
8800 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8801diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8802index 502cf69..53936a1 100644
8803--- a/arch/powerpc/kernel/Makefile
8804+++ b/arch/powerpc/kernel/Makefile
8805@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8806 CFLAGS_btext.o += -fPIC
8807 endif
8808
8809+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8810+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8811+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8812+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8813+
8814 ifdef CONFIG_FUNCTION_TRACER
8815 # Do not trace early boot code
8816 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8817@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8818 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8819 endif
8820
8821+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8822+
8823 obj-y := cputable.o ptrace.o syscalls.o \
8824 irq.o align.o signal_32.o pmc.o vdso.o \
8825 process.o systbl.o idle.o \
8826diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8827index 3e68d1c..72a5ee6 100644
8828--- a/arch/powerpc/kernel/exceptions-64e.S
8829+++ b/arch/powerpc/kernel/exceptions-64e.S
8830@@ -1010,6 +1010,7 @@ storage_fault_common:
8831 std r14,_DAR(r1)
8832 std r15,_DSISR(r1)
8833 addi r3,r1,STACK_FRAME_OVERHEAD
8834+ bl save_nvgprs
8835 mr r4,r14
8836 mr r5,r15
8837 ld r14,PACA_EXGEN+EX_R14(r13)
8838@@ -1018,8 +1019,7 @@ storage_fault_common:
8839 cmpdi r3,0
8840 bne- 1f
8841 b ret_from_except_lite
8842-1: bl save_nvgprs
8843- mr r5,r3
8844+1: mr r5,r3
8845 addi r3,r1,STACK_FRAME_OVERHEAD
8846 ld r4,_DAR(r1)
8847 bl bad_page_fault
8848diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8849index c2df815..bae3d12 100644
8850--- a/arch/powerpc/kernel/exceptions-64s.S
8851+++ b/arch/powerpc/kernel/exceptions-64s.S
8852@@ -1599,10 +1599,10 @@ handle_page_fault:
8853 11: ld r4,_DAR(r1)
8854 ld r5,_DSISR(r1)
8855 addi r3,r1,STACK_FRAME_OVERHEAD
8856+ bl save_nvgprs
8857 bl do_page_fault
8858 cmpdi r3,0
8859 beq+ 12f
8860- bl save_nvgprs
8861 mr r5,r3
8862 addi r3,r1,STACK_FRAME_OVERHEAD
8863 lwz r4,_DAR(r1)
8864diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8865index 4509603..cdb491f 100644
8866--- a/arch/powerpc/kernel/irq.c
8867+++ b/arch/powerpc/kernel/irq.c
8868@@ -460,6 +460,8 @@ void migrate_irqs(void)
8869 }
8870 #endif
8871
8872+extern void gr_handle_kernel_exploit(void);
8873+
8874 static inline void check_stack_overflow(void)
8875 {
8876 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8877@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8878 pr_err("do_IRQ: stack overflow: %ld\n",
8879 sp - sizeof(struct thread_info));
8880 dump_stack();
8881+ gr_handle_kernel_exploit();
8882 }
8883 #endif
8884 }
8885diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8886index c94d2e0..992a9ce 100644
8887--- a/arch/powerpc/kernel/module_32.c
8888+++ b/arch/powerpc/kernel/module_32.c
8889@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8890 me->arch.core_plt_section = i;
8891 }
8892 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8893- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8894+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8895 return -ENOEXEC;
8896 }
8897
8898@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8899
8900 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8901 /* Init, or core PLT? */
8902- if (location >= mod->module_core
8903- && location < mod->module_core + mod->core_size)
8904+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8905+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8906 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8907- else
8908+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8909+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8910 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8911+ else {
8912+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8913+ return ~0UL;
8914+ }
8915
8916 /* Find this entry, or if that fails, the next avail. entry */
8917 while (entry->jump[0]) {
8918@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8919 }
8920 #ifdef CONFIG_DYNAMIC_FTRACE
8921 module->arch.tramp =
8922- do_plt_call(module->module_core,
8923+ do_plt_call(module->module_core_rx,
8924 (unsigned long)ftrace_caller,
8925 sechdrs, module);
8926 #endif
8927diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8928index b4cc7be..1fe8bb3 100644
8929--- a/arch/powerpc/kernel/process.c
8930+++ b/arch/powerpc/kernel/process.c
8931@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8932 * Lookup NIP late so we have the best change of getting the
8933 * above info out without failing
8934 */
8935- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8936- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8937+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8938+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8939 #endif
8940 show_stack(current, (unsigned long *) regs->gpr[1]);
8941 if (!user_mode(regs))
8942@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8943 newsp = stack[0];
8944 ip = stack[STACK_FRAME_LR_SAVE];
8945 if (!firstframe || ip != lr) {
8946- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8947+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8948 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8949 if ((ip == rth) && curr_frame >= 0) {
8950- printk(" (%pS)",
8951+ printk(" (%pA)",
8952 (void *)current->ret_stack[curr_frame].ret);
8953 curr_frame--;
8954 }
8955@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8956 struct pt_regs *regs = (struct pt_regs *)
8957 (sp + STACK_FRAME_OVERHEAD);
8958 lr = regs->link;
8959- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8960+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8961 regs->trap, (void *)regs->nip, (void *)lr);
8962 firstframe = 1;
8963 }
8964@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8965 mtspr(SPRN_CTRLT, ctrl);
8966 }
8967 #endif /* CONFIG_PPC64 */
8968-
8969-unsigned long arch_align_stack(unsigned long sp)
8970-{
8971- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8972- sp -= get_random_int() & ~PAGE_MASK;
8973- return sp & ~0xf;
8974-}
8975-
8976-static inline unsigned long brk_rnd(void)
8977-{
8978- unsigned long rnd = 0;
8979-
8980- /* 8MB for 32bit, 1GB for 64bit */
8981- if (is_32bit_task())
8982- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8983- else
8984- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8985-
8986- return rnd << PAGE_SHIFT;
8987-}
8988-
8989-unsigned long arch_randomize_brk(struct mm_struct *mm)
8990-{
8991- unsigned long base = mm->brk;
8992- unsigned long ret;
8993-
8994-#ifdef CONFIG_PPC_STD_MMU_64
8995- /*
8996- * If we are using 1TB segments and we are allowed to randomise
8997- * the heap, we can put it above 1TB so it is backed by a 1TB
8998- * segment. Otherwise the heap will be in the bottom 1TB
8999- * which always uses 256MB segments and this may result in a
9000- * performance penalty.
9001- */
9002- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9003- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9004-#endif
9005-
9006- ret = PAGE_ALIGN(base + brk_rnd());
9007-
9008- if (ret < mm->brk)
9009- return mm->brk;
9010-
9011- return ret;
9012-}
9013-
9014diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9015index f21897b..28c0428 100644
9016--- a/arch/powerpc/kernel/ptrace.c
9017+++ b/arch/powerpc/kernel/ptrace.c
9018@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9019 return ret;
9020 }
9021
9022+#ifdef CONFIG_GRKERNSEC_SETXID
9023+extern void gr_delayed_cred_worker(void);
9024+#endif
9025+
9026 /*
9027 * We must return the syscall number to actually look up in the table.
9028 * This can be -1L to skip running any syscall at all.
9029@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9030
9031 secure_computing_strict(regs->gpr[0]);
9032
9033+#ifdef CONFIG_GRKERNSEC_SETXID
9034+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9035+ gr_delayed_cred_worker();
9036+#endif
9037+
9038 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9039 tracehook_report_syscall_entry(regs))
9040 /*
9041@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9042 {
9043 int step;
9044
9045+#ifdef CONFIG_GRKERNSEC_SETXID
9046+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9047+ gr_delayed_cred_worker();
9048+#endif
9049+
9050 audit_syscall_exit(regs);
9051
9052 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9053diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9054index b171001..4ac7ac5 100644
9055--- a/arch/powerpc/kernel/signal_32.c
9056+++ b/arch/powerpc/kernel/signal_32.c
9057@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9058 /* Save user registers on the stack */
9059 frame = &rt_sf->uc.uc_mcontext;
9060 addr = frame;
9061- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9062+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9063 sigret = 0;
9064 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9065 } else {
9066diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9067index 2cb0c94..c0c0bc9 100644
9068--- a/arch/powerpc/kernel/signal_64.c
9069+++ b/arch/powerpc/kernel/signal_64.c
9070@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9071 current->thread.fp_state.fpscr = 0;
9072
9073 /* Set up to return from userspace. */
9074- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9075+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9076 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9077 } else {
9078 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9079diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9080index e6595b7..24bde6e 100644
9081--- a/arch/powerpc/kernel/traps.c
9082+++ b/arch/powerpc/kernel/traps.c
9083@@ -36,6 +36,7 @@
9084 #include <linux/debugfs.h>
9085 #include <linux/ratelimit.h>
9086 #include <linux/context_tracking.h>
9087+#include <linux/uaccess.h>
9088
9089 #include <asm/emulated_ops.h>
9090 #include <asm/pgtable.h>
9091@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9092 return flags;
9093 }
9094
9095+extern void gr_handle_kernel_exploit(void);
9096+
9097 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9098 int signr)
9099 {
9100@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9101 panic("Fatal exception in interrupt");
9102 if (panic_on_oops)
9103 panic("Fatal exception");
9104+
9105+ gr_handle_kernel_exploit();
9106+
9107 do_exit(signr);
9108 }
9109
9110@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9111 enum ctx_state prev_state = exception_enter();
9112 unsigned int reason = get_reason(regs);
9113
9114+#ifdef CONFIG_PAX_REFCOUNT
9115+ unsigned int bkpt;
9116+ const struct exception_table_entry *entry;
9117+
9118+ if (reason & REASON_ILLEGAL) {
9119+ /* Check if PaX bad instruction */
9120+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9121+ current->thread.trap_nr = 0;
9122+ pax_report_refcount_overflow(regs);
9123+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9124+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9125+ regs->nip = entry->fixup;
9126+ return;
9127+ }
9128+ /* fixup_exception() could not handle */
9129+ goto bail;
9130+ }
9131+ }
9132+#endif
9133+
9134 /* We can now get here via a FP Unavailable exception if the core
9135 * has no FPU, in that case the reason flags will be 0 */
9136
9137diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9138index 305eb0d..accc5b40 100644
9139--- a/arch/powerpc/kernel/vdso.c
9140+++ b/arch/powerpc/kernel/vdso.c
9141@@ -34,6 +34,7 @@
9142 #include <asm/vdso.h>
9143 #include <asm/vdso_datapage.h>
9144 #include <asm/setup.h>
9145+#include <asm/mman.h>
9146
9147 #undef DEBUG
9148
9149@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9150 vdso_base = VDSO32_MBASE;
9151 #endif
9152
9153- current->mm->context.vdso_base = 0;
9154+ current->mm->context.vdso_base = ~0UL;
9155
9156 /* vDSO has a problem and was disabled, just don't "enable" it for the
9157 * process
9158@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9159 vdso_base = get_unmapped_area(NULL, vdso_base,
9160 (vdso_pages << PAGE_SHIFT) +
9161 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9162- 0, 0);
9163+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9164 if (IS_ERR_VALUE(vdso_base)) {
9165 rc = vdso_base;
9166 goto fail_mmapsem;
9167diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9168index c45eaab..5f41b57 100644
9169--- a/arch/powerpc/kvm/powerpc.c
9170+++ b/arch/powerpc/kvm/powerpc.c
9171@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9172 }
9173 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9174
9175-int kvm_arch_init(void *opaque)
9176+int kvm_arch_init(const void *opaque)
9177 {
9178 return 0;
9179 }
9180diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9181index 5eea6f3..5d10396 100644
9182--- a/arch/powerpc/lib/usercopy_64.c
9183+++ b/arch/powerpc/lib/usercopy_64.c
9184@@ -9,22 +9,6 @@
9185 #include <linux/module.h>
9186 #include <asm/uaccess.h>
9187
9188-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9189-{
9190- if (likely(access_ok(VERIFY_READ, from, n)))
9191- n = __copy_from_user(to, from, n);
9192- else
9193- memset(to, 0, n);
9194- return n;
9195-}
9196-
9197-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9198-{
9199- if (likely(access_ok(VERIFY_WRITE, to, n)))
9200- n = __copy_to_user(to, from, n);
9201- return n;
9202-}
9203-
9204 unsigned long copy_in_user(void __user *to, const void __user *from,
9205 unsigned long n)
9206 {
9207@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9208 return n;
9209 }
9210
9211-EXPORT_SYMBOL(copy_from_user);
9212-EXPORT_SYMBOL(copy_to_user);
9213 EXPORT_SYMBOL(copy_in_user);
9214
9215diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9216index 6154b0a..4de2b19 100644
9217--- a/arch/powerpc/mm/fault.c
9218+++ b/arch/powerpc/mm/fault.c
9219@@ -33,6 +33,10 @@
9220 #include <linux/ratelimit.h>
9221 #include <linux/context_tracking.h>
9222 #include <linux/hugetlb.h>
9223+#include <linux/slab.h>
9224+#include <linux/pagemap.h>
9225+#include <linux/compiler.h>
9226+#include <linux/unistd.h>
9227
9228 #include <asm/firmware.h>
9229 #include <asm/page.h>
9230@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9231 }
9232 #endif
9233
9234+#ifdef CONFIG_PAX_PAGEEXEC
9235+/*
9236+ * PaX: decide what to do with offenders (regs->nip = fault address)
9237+ *
9238+ * returns 1 when task should be killed
9239+ */
9240+static int pax_handle_fetch_fault(struct pt_regs *regs)
9241+{
9242+ return 1;
9243+}
9244+
9245+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9246+{
9247+ unsigned long i;
9248+
9249+ printk(KERN_ERR "PAX: bytes at PC: ");
9250+ for (i = 0; i < 5; i++) {
9251+ unsigned int c;
9252+ if (get_user(c, (unsigned int __user *)pc+i))
9253+ printk(KERN_CONT "???????? ");
9254+ else
9255+ printk(KERN_CONT "%08x ", c);
9256+ }
9257+ printk("\n");
9258+}
9259+#endif
9260+
9261 /*
9262 * Check whether the instruction at regs->nip is a store using
9263 * an update addressing form which will update r1.
9264@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9265 * indicate errors in DSISR but can validly be set in SRR1.
9266 */
9267 if (trap == 0x400)
9268- error_code &= 0x48200000;
9269+ error_code &= 0x58200000;
9270 else
9271 is_write = error_code & DSISR_ISSTORE;
9272 #else
9273@@ -383,7 +414,7 @@ good_area:
9274 * "undefined". Of those that can be set, this is the only
9275 * one which seems bad.
9276 */
9277- if (error_code & 0x10000000)
9278+ if (error_code & DSISR_GUARDED)
9279 /* Guarded storage error. */
9280 goto bad_area;
9281 #endif /* CONFIG_8xx */
9282@@ -398,7 +429,7 @@ good_area:
9283 * processors use the same I/D cache coherency mechanism
9284 * as embedded.
9285 */
9286- if (error_code & DSISR_PROTFAULT)
9287+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9288 goto bad_area;
9289 #endif /* CONFIG_PPC_STD_MMU */
9290
9291@@ -490,6 +521,23 @@ bad_area:
9292 bad_area_nosemaphore:
9293 /* User mode accesses cause a SIGSEGV */
9294 if (user_mode(regs)) {
9295+
9296+#ifdef CONFIG_PAX_PAGEEXEC
9297+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9298+#ifdef CONFIG_PPC_STD_MMU
9299+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9300+#else
9301+ if (is_exec && regs->nip == address) {
9302+#endif
9303+ switch (pax_handle_fetch_fault(regs)) {
9304+ }
9305+
9306+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9307+ do_group_exit(SIGKILL);
9308+ }
9309+ }
9310+#endif
9311+
9312 _exception(SIGSEGV, regs, code, address);
9313 goto bail;
9314 }
9315diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9316index cb8bdbe..cde4bc7 100644
9317--- a/arch/powerpc/mm/mmap.c
9318+++ b/arch/powerpc/mm/mmap.c
9319@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9320 return sysctl_legacy_va_layout;
9321 }
9322
9323-static unsigned long mmap_rnd(void)
9324+static unsigned long mmap_rnd(struct mm_struct *mm)
9325 {
9326 unsigned long rnd = 0;
9327
9328+#ifdef CONFIG_PAX_RANDMMAP
9329+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9330+#endif
9331+
9332 if (current->flags & PF_RANDOMIZE) {
9333 /* 8MB for 32bit, 1GB for 64bit */
9334 if (is_32bit_task())
9335@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9336 return rnd << PAGE_SHIFT;
9337 }
9338
9339-static inline unsigned long mmap_base(void)
9340+static inline unsigned long mmap_base(struct mm_struct *mm)
9341 {
9342 unsigned long gap = rlimit(RLIMIT_STACK);
9343
9344@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9345 else if (gap > MAX_GAP)
9346 gap = MAX_GAP;
9347
9348- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9349+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9350 }
9351
9352 /*
9353@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9354 */
9355 if (mmap_is_legacy()) {
9356 mm->mmap_base = TASK_UNMAPPED_BASE;
9357+
9358+#ifdef CONFIG_PAX_RANDMMAP
9359+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9360+ mm->mmap_base += mm->delta_mmap;
9361+#endif
9362+
9363 mm->get_unmapped_area = arch_get_unmapped_area;
9364 } else {
9365- mm->mmap_base = mmap_base();
9366+ mm->mmap_base = mmap_base(mm);
9367+
9368+#ifdef CONFIG_PAX_RANDMMAP
9369+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9370+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9371+#endif
9372+
9373 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9374 }
9375 }
9376diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9377index ded0ea1..f213a9b 100644
9378--- a/arch/powerpc/mm/slice.c
9379+++ b/arch/powerpc/mm/slice.c
9380@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9381 if ((mm->task_size - len) < addr)
9382 return 0;
9383 vma = find_vma(mm, addr);
9384- return (!vma || (addr + len) <= vma->vm_start);
9385+ return check_heap_stack_gap(vma, addr, len, 0);
9386 }
9387
9388 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9389@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9390 info.align_offset = 0;
9391
9392 addr = TASK_UNMAPPED_BASE;
9393+
9394+#ifdef CONFIG_PAX_RANDMMAP
9395+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9396+ addr += mm->delta_mmap;
9397+#endif
9398+
9399 while (addr < TASK_SIZE) {
9400 info.low_limit = addr;
9401 if (!slice_scan_available(addr, available, 1, &addr))
9402@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9403 if (fixed && addr > (mm->task_size - len))
9404 return -ENOMEM;
9405
9406+#ifdef CONFIG_PAX_RANDMMAP
9407+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9408+ addr = 0;
9409+#endif
9410+
9411 /* If hint, make sure it matches our alignment restrictions */
9412 if (!fixed && addr) {
9413 addr = _ALIGN_UP(addr, 1ul << pshift);
9414diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9415index f223875..94170e4 100644
9416--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9417+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9418@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9419 }
9420
9421 static struct pci_ops scc_pciex_pci_ops = {
9422- scc_pciex_read_config,
9423- scc_pciex_write_config,
9424+ .read = scc_pciex_read_config,
9425+ .write = scc_pciex_write_config,
9426 };
9427
9428 static void pciex_clear_intr_all(unsigned int __iomem *base)
9429diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9430index d966bbe..372124a 100644
9431--- a/arch/powerpc/platforms/cell/spufs/file.c
9432+++ b/arch/powerpc/platforms/cell/spufs/file.c
9433@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9434 return VM_FAULT_NOPAGE;
9435 }
9436
9437-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9438+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9439 unsigned long address,
9440- void *buf, int len, int write)
9441+ void *buf, size_t len, int write)
9442 {
9443 struct spu_context *ctx = vma->vm_file->private_data;
9444 unsigned long offset = address - vma->vm_start;
9445diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9446index fa934fe..c296056 100644
9447--- a/arch/s390/include/asm/atomic.h
9448+++ b/arch/s390/include/asm/atomic.h
9449@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9450 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9451 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9452
9453+#define atomic64_read_unchecked(v) atomic64_read(v)
9454+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9455+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9456+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9457+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9458+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9459+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9460+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9461+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9462+
9463 #endif /* __ARCH_S390_ATOMIC__ */
9464diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9465index 8d72471..5322500 100644
9466--- a/arch/s390/include/asm/barrier.h
9467+++ b/arch/s390/include/asm/barrier.h
9468@@ -42,7 +42,7 @@
9469 do { \
9470 compiletime_assert_atomic_type(*p); \
9471 barrier(); \
9472- ACCESS_ONCE(*p) = (v); \
9473+ ACCESS_ONCE_RW(*p) = (v); \
9474 } while (0)
9475
9476 #define smp_load_acquire(p) \
9477diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9478index 4d7ccac..d03d0ad 100644
9479--- a/arch/s390/include/asm/cache.h
9480+++ b/arch/s390/include/asm/cache.h
9481@@ -9,8 +9,10 @@
9482 #ifndef __ARCH_S390_CACHE_H
9483 #define __ARCH_S390_CACHE_H
9484
9485-#define L1_CACHE_BYTES 256
9486+#include <linux/const.h>
9487+
9488 #define L1_CACHE_SHIFT 8
9489+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9490 #define NET_SKB_PAD 32
9491
9492 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9493diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9494index f6e43d3..5f57681 100644
9495--- a/arch/s390/include/asm/elf.h
9496+++ b/arch/s390/include/asm/elf.h
9497@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9498 the loader. We need to make sure that it is out of the way of the program
9499 that it will "exec", and that there is sufficient room for the brk. */
9500
9501-extern unsigned long randomize_et_dyn(unsigned long base);
9502-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9503+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9504+
9505+#ifdef CONFIG_PAX_ASLR
9506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9507+
9508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9510+#endif
9511
9512 /* This yields a mask that user programs can use to figure out what
9513 instruction set this CPU supports. */
9514@@ -223,9 +229,6 @@ struct linux_binprm;
9515 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9516 int arch_setup_additional_pages(struct linux_binprm *, int);
9517
9518-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9519-#define arch_randomize_brk arch_randomize_brk
9520-
9521 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9522
9523 #endif
9524diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9525index c4a93d6..4d2a9b4 100644
9526--- a/arch/s390/include/asm/exec.h
9527+++ b/arch/s390/include/asm/exec.h
9528@@ -7,6 +7,6 @@
9529 #ifndef __ASM_EXEC_H
9530 #define __ASM_EXEC_H
9531
9532-extern unsigned long arch_align_stack(unsigned long sp);
9533+#define arch_align_stack(x) ((x) & ~0xfUL)
9534
9535 #endif /* __ASM_EXEC_H */
9536diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9537index cd4c68e..6764641 100644
9538--- a/arch/s390/include/asm/uaccess.h
9539+++ b/arch/s390/include/asm/uaccess.h
9540@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9541 __range_ok((unsigned long)(addr), (size)); \
9542 })
9543
9544+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9545 #define access_ok(type, addr, size) __access_ok(addr, size)
9546
9547 /*
9548@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9549 copy_to_user(void __user *to, const void *from, unsigned long n)
9550 {
9551 might_fault();
9552+
9553+ if ((long)n < 0)
9554+ return n;
9555+
9556 return __copy_to_user(to, from, n);
9557 }
9558
9559@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9560 static inline unsigned long __must_check
9561 copy_from_user(void *to, const void __user *from, unsigned long n)
9562 {
9563- unsigned int sz = __compiletime_object_size(to);
9564+ size_t sz = __compiletime_object_size(to);
9565
9566 might_fault();
9567- if (unlikely(sz != -1 && sz < n)) {
9568+
9569+ if ((long)n < 0)
9570+ return n;
9571+
9572+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9573 copy_from_user_overflow();
9574 return n;
9575 }
9576diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9577index 409d152..d90d368 100644
9578--- a/arch/s390/kernel/module.c
9579+++ b/arch/s390/kernel/module.c
9580@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9581
9582 /* Increase core size by size of got & plt and set start
9583 offsets for got and plt. */
9584- me->core_size = ALIGN(me->core_size, 4);
9585- me->arch.got_offset = me->core_size;
9586- me->core_size += me->arch.got_size;
9587- me->arch.plt_offset = me->core_size;
9588- me->core_size += me->arch.plt_size;
9589+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9590+ me->arch.got_offset = me->core_size_rw;
9591+ me->core_size_rw += me->arch.got_size;
9592+ me->arch.plt_offset = me->core_size_rx;
9593+ me->core_size_rx += me->arch.plt_size;
9594 return 0;
9595 }
9596
9597@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9598 if (info->got_initialized == 0) {
9599 Elf_Addr *gotent;
9600
9601- gotent = me->module_core + me->arch.got_offset +
9602+ gotent = me->module_core_rw + me->arch.got_offset +
9603 info->got_offset;
9604 *gotent = val;
9605 info->got_initialized = 1;
9606@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9607 rc = apply_rela_bits(loc, val, 0, 64, 0);
9608 else if (r_type == R_390_GOTENT ||
9609 r_type == R_390_GOTPLTENT) {
9610- val += (Elf_Addr) me->module_core - loc;
9611+ val += (Elf_Addr) me->module_core_rw - loc;
9612 rc = apply_rela_bits(loc, val, 1, 32, 1);
9613 }
9614 break;
9615@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9616 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9617 if (info->plt_initialized == 0) {
9618 unsigned int *ip;
9619- ip = me->module_core + me->arch.plt_offset +
9620+ ip = me->module_core_rx + me->arch.plt_offset +
9621 info->plt_offset;
9622 #ifndef CONFIG_64BIT
9623 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9624@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9625 val - loc + 0xffffUL < 0x1ffffeUL) ||
9626 (r_type == R_390_PLT32DBL &&
9627 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9628- val = (Elf_Addr) me->module_core +
9629+ val = (Elf_Addr) me->module_core_rx +
9630 me->arch.plt_offset +
9631 info->plt_offset;
9632 val += rela->r_addend - loc;
9633@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9634 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9635 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9636 val = val + rela->r_addend -
9637- ((Elf_Addr) me->module_core + me->arch.got_offset);
9638+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9639 if (r_type == R_390_GOTOFF16)
9640 rc = apply_rela_bits(loc, val, 0, 16, 0);
9641 else if (r_type == R_390_GOTOFF32)
9642@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9643 break;
9644 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9645 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9646- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9647+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9648 rela->r_addend - loc;
9649 if (r_type == R_390_GOTPC)
9650 rc = apply_rela_bits(loc, val, 1, 32, 0);
9651diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9652index aa7a839..6c2a916 100644
9653--- a/arch/s390/kernel/process.c
9654+++ b/arch/s390/kernel/process.c
9655@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
9656 }
9657 return 0;
9658 }
9659-
9660-unsigned long arch_align_stack(unsigned long sp)
9661-{
9662- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9663- sp -= get_random_int() & ~PAGE_MASK;
9664- return sp & ~0xf;
9665-}
9666-
9667-static inline unsigned long brk_rnd(void)
9668-{
9669- /* 8MB for 32bit, 1GB for 64bit */
9670- if (is_32bit_task())
9671- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9672- else
9673- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9674-}
9675-
9676-unsigned long arch_randomize_brk(struct mm_struct *mm)
9677-{
9678- unsigned long ret;
9679-
9680- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9681- return (ret > mm->brk) ? ret : mm->brk;
9682-}
9683-
9684-unsigned long randomize_et_dyn(unsigned long base)
9685-{
9686- unsigned long ret;
9687-
9688- if (!(current->flags & PF_RANDOMIZE))
9689- return base;
9690- ret = PAGE_ALIGN(base + brk_rnd());
9691- return (ret > base) ? ret : base;
9692-}
9693diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9694index 9b436c2..54fbf0a 100644
9695--- a/arch/s390/mm/mmap.c
9696+++ b/arch/s390/mm/mmap.c
9697@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9698 */
9699 if (mmap_is_legacy()) {
9700 mm->mmap_base = mmap_base_legacy();
9701+
9702+#ifdef CONFIG_PAX_RANDMMAP
9703+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9704+ mm->mmap_base += mm->delta_mmap;
9705+#endif
9706+
9707 mm->get_unmapped_area = arch_get_unmapped_area;
9708 } else {
9709 mm->mmap_base = mmap_base();
9710+
9711+#ifdef CONFIG_PAX_RANDMMAP
9712+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9713+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9714+#endif
9715+
9716 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9717 }
9718 }
9719@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9720 */
9721 if (mmap_is_legacy()) {
9722 mm->mmap_base = mmap_base_legacy();
9723+
9724+#ifdef CONFIG_PAX_RANDMMAP
9725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9726+ mm->mmap_base += mm->delta_mmap;
9727+#endif
9728+
9729 mm->get_unmapped_area = s390_get_unmapped_area;
9730 } else {
9731 mm->mmap_base = mmap_base();
9732+
9733+#ifdef CONFIG_PAX_RANDMMAP
9734+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9735+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9736+#endif
9737+
9738 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9739 }
9740 }
9741diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9742index ae3d59f..f65f075 100644
9743--- a/arch/score/include/asm/cache.h
9744+++ b/arch/score/include/asm/cache.h
9745@@ -1,7 +1,9 @@
9746 #ifndef _ASM_SCORE_CACHE_H
9747 #define _ASM_SCORE_CACHE_H
9748
9749+#include <linux/const.h>
9750+
9751 #define L1_CACHE_SHIFT 4
9752-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9753+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9754
9755 #endif /* _ASM_SCORE_CACHE_H */
9756diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9757index f9f3cd5..58ff438 100644
9758--- a/arch/score/include/asm/exec.h
9759+++ b/arch/score/include/asm/exec.h
9760@@ -1,6 +1,6 @@
9761 #ifndef _ASM_SCORE_EXEC_H
9762 #define _ASM_SCORE_EXEC_H
9763
9764-extern unsigned long arch_align_stack(unsigned long sp);
9765+#define arch_align_stack(x) (x)
9766
9767 #endif /* _ASM_SCORE_EXEC_H */
9768diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9769index a1519ad3..e8ac1ff 100644
9770--- a/arch/score/kernel/process.c
9771+++ b/arch/score/kernel/process.c
9772@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9773
9774 return task_pt_regs(task)->cp0_epc;
9775 }
9776-
9777-unsigned long arch_align_stack(unsigned long sp)
9778-{
9779- return sp;
9780-}
9781diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9782index ef9e555..331bd29 100644
9783--- a/arch/sh/include/asm/cache.h
9784+++ b/arch/sh/include/asm/cache.h
9785@@ -9,10 +9,11 @@
9786 #define __ASM_SH_CACHE_H
9787 #ifdef __KERNEL__
9788
9789+#include <linux/const.h>
9790 #include <linux/init.h>
9791 #include <cpu/cache.h>
9792
9793-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9794+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9795
9796 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9797
9798diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9799index 6777177..cb5e44f 100644
9800--- a/arch/sh/mm/mmap.c
9801+++ b/arch/sh/mm/mmap.c
9802@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9803 struct mm_struct *mm = current->mm;
9804 struct vm_area_struct *vma;
9805 int do_colour_align;
9806+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9807 struct vm_unmapped_area_info info;
9808
9809 if (flags & MAP_FIXED) {
9810@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9811 if (filp || (flags & MAP_SHARED))
9812 do_colour_align = 1;
9813
9814+#ifdef CONFIG_PAX_RANDMMAP
9815+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9816+#endif
9817+
9818 if (addr) {
9819 if (do_colour_align)
9820 addr = COLOUR_ALIGN(addr, pgoff);
9821@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9822 addr = PAGE_ALIGN(addr);
9823
9824 vma = find_vma(mm, addr);
9825- if (TASK_SIZE - len >= addr &&
9826- (!vma || addr + len <= vma->vm_start))
9827+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9828 return addr;
9829 }
9830
9831 info.flags = 0;
9832 info.length = len;
9833- info.low_limit = TASK_UNMAPPED_BASE;
9834+ info.low_limit = mm->mmap_base;
9835 info.high_limit = TASK_SIZE;
9836 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9837 info.align_offset = pgoff << PAGE_SHIFT;
9838@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9839 struct mm_struct *mm = current->mm;
9840 unsigned long addr = addr0;
9841 int do_colour_align;
9842+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9843 struct vm_unmapped_area_info info;
9844
9845 if (flags & MAP_FIXED) {
9846@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9847 if (filp || (flags & MAP_SHARED))
9848 do_colour_align = 1;
9849
9850+#ifdef CONFIG_PAX_RANDMMAP
9851+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9852+#endif
9853+
9854 /* requesting a specific address */
9855 if (addr) {
9856 if (do_colour_align)
9857@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9858 addr = PAGE_ALIGN(addr);
9859
9860 vma = find_vma(mm, addr);
9861- if (TASK_SIZE - len >= addr &&
9862- (!vma || addr + len <= vma->vm_start))
9863+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9864 return addr;
9865 }
9866
9867@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9868 VM_BUG_ON(addr != -ENOMEM);
9869 info.flags = 0;
9870 info.low_limit = TASK_UNMAPPED_BASE;
9871+
9872+#ifdef CONFIG_PAX_RANDMMAP
9873+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9874+ info.low_limit += mm->delta_mmap;
9875+#endif
9876+
9877 info.high_limit = TASK_SIZE;
9878 addr = vm_unmapped_area(&info);
9879 }
9880diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9881index 4082749..fd97781 100644
9882--- a/arch/sparc/include/asm/atomic_64.h
9883+++ b/arch/sparc/include/asm/atomic_64.h
9884@@ -15,18 +15,38 @@
9885 #define ATOMIC64_INIT(i) { (i) }
9886
9887 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9888+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9889+{
9890+ return ACCESS_ONCE(v->counter);
9891+}
9892 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9893+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9894+{
9895+ return ACCESS_ONCE(v->counter);
9896+}
9897
9898 #define atomic_set(v, i) (((v)->counter) = i)
9899+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9900+{
9901+ v->counter = i;
9902+}
9903 #define atomic64_set(v, i) (((v)->counter) = i)
9904+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9905+{
9906+ v->counter = i;
9907+}
9908
9909-#define ATOMIC_OP(op) \
9910-void atomic_##op(int, atomic_t *); \
9911-void atomic64_##op(long, atomic64_t *);
9912+#define __ATOMIC_OP(op, suffix) \
9913+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9914+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9915
9916-#define ATOMIC_OP_RETURN(op) \
9917-int atomic_##op##_return(int, atomic_t *); \
9918-long atomic64_##op##_return(long, atomic64_t *);
9919+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9920+
9921+#define __ATOMIC_OP_RETURN(op, suffix) \
9922+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9923+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9924+
9925+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9926
9927 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9928
9929@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9930
9931 #undef ATOMIC_OPS
9932 #undef ATOMIC_OP_RETURN
9933+#undef __ATOMIC_OP_RETURN
9934 #undef ATOMIC_OP
9935+#undef __ATOMIC_OP
9936
9937 #define atomic_dec_return(v) atomic_sub_return(1, v)
9938 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9939
9940 #define atomic_inc_return(v) atomic_add_return(1, v)
9941+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9942+{
9943+ return atomic_add_return_unchecked(1, v);
9944+}
9945 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9946+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9947+{
9948+ return atomic64_add_return_unchecked(1, v);
9949+}
9950
9951 /*
9952 * atomic_inc_and_test - increment and test
9953@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9954 * other cases.
9955 */
9956 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9957+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9958+{
9959+ return atomic_inc_return_unchecked(v) == 0;
9960+}
9961 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9962
9963 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9964@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9965 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9966
9967 #define atomic_inc(v) atomic_add(1, v)
9968+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9969+{
9970+ atomic_add_unchecked(1, v);
9971+}
9972 #define atomic64_inc(v) atomic64_add(1, v)
9973+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9974+{
9975+ atomic64_add_unchecked(1, v);
9976+}
9977
9978 #define atomic_dec(v) atomic_sub(1, v)
9979+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9980+{
9981+ atomic_sub_unchecked(1, v);
9982+}
9983 #define atomic64_dec(v) atomic64_sub(1, v)
9984+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9985+{
9986+ atomic64_sub_unchecked(1, v);
9987+}
9988
9989 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9990 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9991
9992 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9993+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9994+{
9995+ return cmpxchg(&v->counter, old, new);
9996+}
9997 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9998+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9999+{
10000+ return xchg(&v->counter, new);
10001+}
10002
10003 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10004 {
10005- int c, old;
10006+ int c, old, new;
10007 c = atomic_read(v);
10008 for (;;) {
10009- if (unlikely(c == (u)))
10010+ if (unlikely(c == u))
10011 break;
10012- old = atomic_cmpxchg((v), c, c + (a));
10013+
10014+ asm volatile("addcc %2, %0, %0\n"
10015+
10016+#ifdef CONFIG_PAX_REFCOUNT
10017+ "tvs %%icc, 6\n"
10018+#endif
10019+
10020+ : "=r" (new)
10021+ : "0" (c), "ir" (a)
10022+ : "cc");
10023+
10024+ old = atomic_cmpxchg(v, c, new);
10025 if (likely(old == c))
10026 break;
10027 c = old;
10028@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10029 #define atomic64_cmpxchg(v, o, n) \
10030 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10031 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10032+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10033+{
10034+ return xchg(&v->counter, new);
10035+}
10036
10037 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10038 {
10039- long c, old;
10040+ long c, old, new;
10041 c = atomic64_read(v);
10042 for (;;) {
10043- if (unlikely(c == (u)))
10044+ if (unlikely(c == u))
10045 break;
10046- old = atomic64_cmpxchg((v), c, c + (a));
10047+
10048+ asm volatile("addcc %2, %0, %0\n"
10049+
10050+#ifdef CONFIG_PAX_REFCOUNT
10051+ "tvs %%xcc, 6\n"
10052+#endif
10053+
10054+ : "=r" (new)
10055+ : "0" (c), "ir" (a)
10056+ : "cc");
10057+
10058+ old = atomic64_cmpxchg(v, c, new);
10059 if (likely(old == c))
10060 break;
10061 c = old;
10062 }
10063- return c != (u);
10064+ return c != u;
10065 }
10066
10067 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10068diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10069index 7664894..45a974b 100644
10070--- a/arch/sparc/include/asm/barrier_64.h
10071+++ b/arch/sparc/include/asm/barrier_64.h
10072@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10073 do { \
10074 compiletime_assert_atomic_type(*p); \
10075 barrier(); \
10076- ACCESS_ONCE(*p) = (v); \
10077+ ACCESS_ONCE_RW(*p) = (v); \
10078 } while (0)
10079
10080 #define smp_load_acquire(p) \
10081diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10082index 5bb6991..5c2132e 100644
10083--- a/arch/sparc/include/asm/cache.h
10084+++ b/arch/sparc/include/asm/cache.h
10085@@ -7,10 +7,12 @@
10086 #ifndef _SPARC_CACHE_H
10087 #define _SPARC_CACHE_H
10088
10089+#include <linux/const.h>
10090+
10091 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10092
10093 #define L1_CACHE_SHIFT 5
10094-#define L1_CACHE_BYTES 32
10095+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10096
10097 #ifdef CONFIG_SPARC32
10098 #define SMP_CACHE_BYTES_SHIFT 5
10099diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10100index a24e41f..47677ff 100644
10101--- a/arch/sparc/include/asm/elf_32.h
10102+++ b/arch/sparc/include/asm/elf_32.h
10103@@ -114,6 +114,13 @@ typedef struct {
10104
10105 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10106
10107+#ifdef CONFIG_PAX_ASLR
10108+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10109+
10110+#define PAX_DELTA_MMAP_LEN 16
10111+#define PAX_DELTA_STACK_LEN 16
10112+#endif
10113+
10114 /* This yields a mask that user programs can use to figure out what
10115 instruction set this cpu supports. This can NOT be done in userspace
10116 on Sparc. */
10117diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10118index 370ca1e..d4f4a98 100644
10119--- a/arch/sparc/include/asm/elf_64.h
10120+++ b/arch/sparc/include/asm/elf_64.h
10121@@ -189,6 +189,13 @@ typedef struct {
10122 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10123 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10124
10125+#ifdef CONFIG_PAX_ASLR
10126+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10127+
10128+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10129+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10130+#endif
10131+
10132 extern unsigned long sparc64_elf_hwcap;
10133 #define ELF_HWCAP sparc64_elf_hwcap
10134
10135diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10136index a3890da..f6a408e 100644
10137--- a/arch/sparc/include/asm/pgalloc_32.h
10138+++ b/arch/sparc/include/asm/pgalloc_32.h
10139@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10140 }
10141
10142 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10143+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10144
10145 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10146 unsigned long address)
10147diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10148index 5e31871..13469c6 100644
10149--- a/arch/sparc/include/asm/pgalloc_64.h
10150+++ b/arch/sparc/include/asm/pgalloc_64.h
10151@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10152 }
10153
10154 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10155+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10156
10157 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10158 {
10159@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10160 }
10161
10162 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10163+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10164
10165 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10166 {
10167diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10168index 59ba6f6..4518128 100644
10169--- a/arch/sparc/include/asm/pgtable.h
10170+++ b/arch/sparc/include/asm/pgtable.h
10171@@ -5,4 +5,8 @@
10172 #else
10173 #include <asm/pgtable_32.h>
10174 #endif
10175+
10176+#define ktla_ktva(addr) (addr)
10177+#define ktva_ktla(addr) (addr)
10178+
10179 #endif
10180diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10181index b9b91ae..950b91e 100644
10182--- a/arch/sparc/include/asm/pgtable_32.h
10183+++ b/arch/sparc/include/asm/pgtable_32.h
10184@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10185 #define PAGE_SHARED SRMMU_PAGE_SHARED
10186 #define PAGE_COPY SRMMU_PAGE_COPY
10187 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10188+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10189+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10190+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10191 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10192
10193 /* Top-level page directory - dummy used by init-mm.
10194@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10195
10196 /* xwr */
10197 #define __P000 PAGE_NONE
10198-#define __P001 PAGE_READONLY
10199-#define __P010 PAGE_COPY
10200-#define __P011 PAGE_COPY
10201+#define __P001 PAGE_READONLY_NOEXEC
10202+#define __P010 PAGE_COPY_NOEXEC
10203+#define __P011 PAGE_COPY_NOEXEC
10204 #define __P100 PAGE_READONLY
10205 #define __P101 PAGE_READONLY
10206 #define __P110 PAGE_COPY
10207 #define __P111 PAGE_COPY
10208
10209 #define __S000 PAGE_NONE
10210-#define __S001 PAGE_READONLY
10211-#define __S010 PAGE_SHARED
10212-#define __S011 PAGE_SHARED
10213+#define __S001 PAGE_READONLY_NOEXEC
10214+#define __S010 PAGE_SHARED_NOEXEC
10215+#define __S011 PAGE_SHARED_NOEXEC
10216 #define __S100 PAGE_READONLY
10217 #define __S101 PAGE_READONLY
10218 #define __S110 PAGE_SHARED
10219diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10220index 79da178..c2eede8 100644
10221--- a/arch/sparc/include/asm/pgtsrmmu.h
10222+++ b/arch/sparc/include/asm/pgtsrmmu.h
10223@@ -115,6 +115,11 @@
10224 SRMMU_EXEC | SRMMU_REF)
10225 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10226 SRMMU_EXEC | SRMMU_REF)
10227+
10228+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10229+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10230+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10231+
10232 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10233 SRMMU_DIRTY | SRMMU_REF)
10234
10235diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10236index 29d64b1..4272fe8 100644
10237--- a/arch/sparc/include/asm/setup.h
10238+++ b/arch/sparc/include/asm/setup.h
10239@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10240 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10241
10242 /* init_64.c */
10243-extern atomic_t dcpage_flushes;
10244-extern atomic_t dcpage_flushes_xcall;
10245+extern atomic_unchecked_t dcpage_flushes;
10246+extern atomic_unchecked_t dcpage_flushes_xcall;
10247
10248 extern int sysctl_tsb_ratio;
10249 #endif
10250diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10251index 9689176..63c18ea 100644
10252--- a/arch/sparc/include/asm/spinlock_64.h
10253+++ b/arch/sparc/include/asm/spinlock_64.h
10254@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10255
10256 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10257
10258-static void inline arch_read_lock(arch_rwlock_t *lock)
10259+static inline void arch_read_lock(arch_rwlock_t *lock)
10260 {
10261 unsigned long tmp1, tmp2;
10262
10263 __asm__ __volatile__ (
10264 "1: ldsw [%2], %0\n"
10265 " brlz,pn %0, 2f\n"
10266-"4: add %0, 1, %1\n"
10267+"4: addcc %0, 1, %1\n"
10268+
10269+#ifdef CONFIG_PAX_REFCOUNT
10270+" tvs %%icc, 6\n"
10271+#endif
10272+
10273 " cas [%2], %0, %1\n"
10274 " cmp %0, %1\n"
10275 " bne,pn %%icc, 1b\n"
10276@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10277 " .previous"
10278 : "=&r" (tmp1), "=&r" (tmp2)
10279 : "r" (lock)
10280- : "memory");
10281+ : "memory", "cc");
10282 }
10283
10284-static int inline arch_read_trylock(arch_rwlock_t *lock)
10285+static inline int arch_read_trylock(arch_rwlock_t *lock)
10286 {
10287 int tmp1, tmp2;
10288
10289@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10290 "1: ldsw [%2], %0\n"
10291 " brlz,a,pn %0, 2f\n"
10292 " mov 0, %0\n"
10293-" add %0, 1, %1\n"
10294+" addcc %0, 1, %1\n"
10295+
10296+#ifdef CONFIG_PAX_REFCOUNT
10297+" tvs %%icc, 6\n"
10298+#endif
10299+
10300 " cas [%2], %0, %1\n"
10301 " cmp %0, %1\n"
10302 " bne,pn %%icc, 1b\n"
10303@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10304 return tmp1;
10305 }
10306
10307-static void inline arch_read_unlock(arch_rwlock_t *lock)
10308+static inline void arch_read_unlock(arch_rwlock_t *lock)
10309 {
10310 unsigned long tmp1, tmp2;
10311
10312 __asm__ __volatile__(
10313 "1: lduw [%2], %0\n"
10314-" sub %0, 1, %1\n"
10315+" subcc %0, 1, %1\n"
10316+
10317+#ifdef CONFIG_PAX_REFCOUNT
10318+" tvs %%icc, 6\n"
10319+#endif
10320+
10321 " cas [%2], %0, %1\n"
10322 " cmp %0, %1\n"
10323 " bne,pn %%xcc, 1b\n"
10324@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10325 : "memory");
10326 }
10327
10328-static void inline arch_write_lock(arch_rwlock_t *lock)
10329+static inline void arch_write_lock(arch_rwlock_t *lock)
10330 {
10331 unsigned long mask, tmp1, tmp2;
10332
10333@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10334 : "memory");
10335 }
10336
10337-static void inline arch_write_unlock(arch_rwlock_t *lock)
10338+static inline void arch_write_unlock(arch_rwlock_t *lock)
10339 {
10340 __asm__ __volatile__(
10341 " stw %%g0, [%0]"
10342@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10343 : "memory");
10344 }
10345
10346-static int inline arch_write_trylock(arch_rwlock_t *lock)
10347+static inline int arch_write_trylock(arch_rwlock_t *lock)
10348 {
10349 unsigned long mask, tmp1, tmp2, result;
10350
10351diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10352index 025c984..a216504 100644
10353--- a/arch/sparc/include/asm/thread_info_32.h
10354+++ b/arch/sparc/include/asm/thread_info_32.h
10355@@ -49,6 +49,8 @@ struct thread_info {
10356 unsigned long w_saved;
10357
10358 struct restart_block restart_block;
10359+
10360+ unsigned long lowest_stack;
10361 };
10362
10363 /*
10364diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10365index 798f027..b009941 100644
10366--- a/arch/sparc/include/asm/thread_info_64.h
10367+++ b/arch/sparc/include/asm/thread_info_64.h
10368@@ -63,6 +63,8 @@ struct thread_info {
10369 struct pt_regs *kern_una_regs;
10370 unsigned int kern_una_insn;
10371
10372+ unsigned long lowest_stack;
10373+
10374 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10375 __attribute__ ((aligned(64)));
10376 };
10377@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10378 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10379 /* flag bit 4 is available */
10380 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10381-/* flag bit 6 is available */
10382+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10383 #define TIF_32BIT 7 /* 32-bit binary */
10384 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10385 #define TIF_SECCOMP 9 /* secure computing */
10386 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10387 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10388+
10389 /* NOTE: Thread flags >= 12 should be ones we have no interest
10390 * in using in assembly, else we can't use the mask as
10391 * an immediate value in instructions such as andcc.
10392@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10393 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10394 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10395 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10396+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10397
10398 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10399 _TIF_DO_NOTIFY_RESUME_MASK | \
10400 _TIF_NEED_RESCHED)
10401 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10402
10403+#define _TIF_WORK_SYSCALL \
10404+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10405+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10406+
10407 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10408
10409 /*
10410diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10411index bd56c28..4b63d83 100644
10412--- a/arch/sparc/include/asm/uaccess.h
10413+++ b/arch/sparc/include/asm/uaccess.h
10414@@ -1,5 +1,6 @@
10415 #ifndef ___ASM_SPARC_UACCESS_H
10416 #define ___ASM_SPARC_UACCESS_H
10417+
10418 #if defined(__sparc__) && defined(__arch64__)
10419 #include <asm/uaccess_64.h>
10420 #else
10421diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10422index 9634d08..f55fe4f 100644
10423--- a/arch/sparc/include/asm/uaccess_32.h
10424+++ b/arch/sparc/include/asm/uaccess_32.h
10425@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10426
10427 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10428 {
10429- if (n && __access_ok((unsigned long) to, n))
10430+ if ((long)n < 0)
10431+ return n;
10432+
10433+ if (n && __access_ok((unsigned long) to, n)) {
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(from, n, true);
10436 return __copy_user(to, (__force void __user *) from, n);
10437- else
10438+ } else
10439 return n;
10440 }
10441
10442 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10443 {
10444+ if ((long)n < 0)
10445+ return n;
10446+
10447+ if (!__builtin_constant_p(n))
10448+ check_object_size(from, n, true);
10449+
10450 return __copy_user(to, (__force void __user *) from, n);
10451 }
10452
10453 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10454 {
10455- if (n && __access_ok((unsigned long) from, n))
10456+ if ((long)n < 0)
10457+ return n;
10458+
10459+ if (n && __access_ok((unsigned long) from, n)) {
10460+ if (!__builtin_constant_p(n))
10461+ check_object_size(to, n, false);
10462 return __copy_user((__force void __user *) to, from, n);
10463- else
10464+ } else
10465 return n;
10466 }
10467
10468 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10469 {
10470+ if ((long)n < 0)
10471+ return n;
10472+
10473 return __copy_user((__force void __user *) to, from, n);
10474 }
10475
10476diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10477index c990a5e..f17b9c1 100644
10478--- a/arch/sparc/include/asm/uaccess_64.h
10479+++ b/arch/sparc/include/asm/uaccess_64.h
10480@@ -10,6 +10,7 @@
10481 #include <linux/compiler.h>
10482 #include <linux/string.h>
10483 #include <linux/thread_info.h>
10484+#include <linux/kernel.h>
10485 #include <asm/asi.h>
10486 #include <asm/spitfire.h>
10487 #include <asm-generic/uaccess-unaligned.h>
10488@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10489 static inline unsigned long __must_check
10490 copy_from_user(void *to, const void __user *from, unsigned long size)
10491 {
10492- unsigned long ret = ___copy_from_user(to, from, size);
10493+ unsigned long ret;
10494
10495+ if ((long)size < 0 || size > INT_MAX)
10496+ return size;
10497+
10498+ if (!__builtin_constant_p(size))
10499+ check_object_size(to, size, false);
10500+
10501+ ret = ___copy_from_user(to, from, size);
10502 if (unlikely(ret))
10503 ret = copy_from_user_fixup(to, from, size);
10504
10505@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10506 static inline unsigned long __must_check
10507 copy_to_user(void __user *to, const void *from, unsigned long size)
10508 {
10509- unsigned long ret = ___copy_to_user(to, from, size);
10510+ unsigned long ret;
10511
10512+ if ((long)size < 0 || size > INT_MAX)
10513+ return size;
10514+
10515+ if (!__builtin_constant_p(size))
10516+ check_object_size(from, size, true);
10517+
10518+ ret = ___copy_to_user(to, from, size);
10519 if (unlikely(ret))
10520 ret = copy_to_user_fixup(to, from, size);
10521 return ret;
10522diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10523index 7cf9c6e..6206648 100644
10524--- a/arch/sparc/kernel/Makefile
10525+++ b/arch/sparc/kernel/Makefile
10526@@ -4,7 +4,7 @@
10527 #
10528
10529 asflags-y := -ansi
10530-ccflags-y := -Werror
10531+#ccflags-y := -Werror
10532
10533 extra-y := head_$(BITS).o
10534
10535diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10536index 50e7b62..79fae35 100644
10537--- a/arch/sparc/kernel/process_32.c
10538+++ b/arch/sparc/kernel/process_32.c
10539@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10540
10541 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10542 r->psr, r->pc, r->npc, r->y, print_tainted());
10543- printk("PC: <%pS>\n", (void *) r->pc);
10544+ printk("PC: <%pA>\n", (void *) r->pc);
10545 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10546 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10547 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10548 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10549 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10550 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10551- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10552+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10553
10554 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10555 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10556@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10557 rw = (struct reg_window32 *) fp;
10558 pc = rw->ins[7];
10559 printk("[%08lx : ", pc);
10560- printk("%pS ] ", (void *) pc);
10561+ printk("%pA ] ", (void *) pc);
10562 fp = rw->ins[6];
10563 } while (++count < 16);
10564 printk("\n");
10565diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10566index 0be7bf9..2b1cba8 100644
10567--- a/arch/sparc/kernel/process_64.c
10568+++ b/arch/sparc/kernel/process_64.c
10569@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10570 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10571 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10572 if (regs->tstate & TSTATE_PRIV)
10573- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10574+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10575 }
10576
10577 void show_regs(struct pt_regs *regs)
10578@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10579
10580 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10581 regs->tpc, regs->tnpc, regs->y, print_tainted());
10582- printk("TPC: <%pS>\n", (void *) regs->tpc);
10583+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10584 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10585 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10586 regs->u_regs[3]);
10587@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10588 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10589 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10590 regs->u_regs[15]);
10591- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10592+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10593 show_regwindow(regs);
10594 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10595 }
10596@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10597 ((tp && tp->task) ? tp->task->pid : -1));
10598
10599 if (gp->tstate & TSTATE_PRIV) {
10600- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10601+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10602 (void *) gp->tpc,
10603 (void *) gp->o7,
10604 (void *) gp->i7,
10605diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10606index 79cc0d1..ec62734 100644
10607--- a/arch/sparc/kernel/prom_common.c
10608+++ b/arch/sparc/kernel/prom_common.c
10609@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10610
10611 unsigned int prom_early_allocated __initdata;
10612
10613-static struct of_pdt_ops prom_sparc_ops __initdata = {
10614+static struct of_pdt_ops prom_sparc_ops __initconst = {
10615 .nextprop = prom_common_nextprop,
10616 .getproplen = prom_getproplen,
10617 .getproperty = prom_getproperty,
10618diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10619index 9ddc492..27a5619 100644
10620--- a/arch/sparc/kernel/ptrace_64.c
10621+++ b/arch/sparc/kernel/ptrace_64.c
10622@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10623 return ret;
10624 }
10625
10626+#ifdef CONFIG_GRKERNSEC_SETXID
10627+extern void gr_delayed_cred_worker(void);
10628+#endif
10629+
10630 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10631 {
10632 int ret = 0;
10633@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10634 if (test_thread_flag(TIF_NOHZ))
10635 user_exit();
10636
10637+#ifdef CONFIG_GRKERNSEC_SETXID
10638+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10639+ gr_delayed_cred_worker();
10640+#endif
10641+
10642 if (test_thread_flag(TIF_SYSCALL_TRACE))
10643 ret = tracehook_report_syscall_entry(regs);
10644
10645@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10646 if (test_thread_flag(TIF_NOHZ))
10647 user_exit();
10648
10649+#ifdef CONFIG_GRKERNSEC_SETXID
10650+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10651+ gr_delayed_cred_worker();
10652+#endif
10653+
10654 audit_syscall_exit(regs);
10655
10656 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10657diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10658index da6f1a7..e5dea8f 100644
10659--- a/arch/sparc/kernel/smp_64.c
10660+++ b/arch/sparc/kernel/smp_64.c
10661@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10662 return;
10663
10664 #ifdef CONFIG_DEBUG_DCFLUSH
10665- atomic_inc(&dcpage_flushes);
10666+ atomic_inc_unchecked(&dcpage_flushes);
10667 #endif
10668
10669 this_cpu = get_cpu();
10670@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10671 xcall_deliver(data0, __pa(pg_addr),
10672 (u64) pg_addr, cpumask_of(cpu));
10673 #ifdef CONFIG_DEBUG_DCFLUSH
10674- atomic_inc(&dcpage_flushes_xcall);
10675+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10676 #endif
10677 }
10678 }
10679@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10680 preempt_disable();
10681
10682 #ifdef CONFIG_DEBUG_DCFLUSH
10683- atomic_inc(&dcpage_flushes);
10684+ atomic_inc_unchecked(&dcpage_flushes);
10685 #endif
10686 data0 = 0;
10687 pg_addr = page_address(page);
10688@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10689 xcall_deliver(data0, __pa(pg_addr),
10690 (u64) pg_addr, cpu_online_mask);
10691 #ifdef CONFIG_DEBUG_DCFLUSH
10692- atomic_inc(&dcpage_flushes_xcall);
10693+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10694 #endif
10695 }
10696 __local_flush_dcache_page(page);
10697diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10698index 646988d..b88905f 100644
10699--- a/arch/sparc/kernel/sys_sparc_32.c
10700+++ b/arch/sparc/kernel/sys_sparc_32.c
10701@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10702 if (len > TASK_SIZE - PAGE_SIZE)
10703 return -ENOMEM;
10704 if (!addr)
10705- addr = TASK_UNMAPPED_BASE;
10706+ addr = current->mm->mmap_base;
10707
10708 info.flags = 0;
10709 info.length = len;
10710diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10711index c85403d..6af95c9 100644
10712--- a/arch/sparc/kernel/sys_sparc_64.c
10713+++ b/arch/sparc/kernel/sys_sparc_64.c
10714@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10715 struct vm_area_struct * vma;
10716 unsigned long task_size = TASK_SIZE;
10717 int do_color_align;
10718+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10719 struct vm_unmapped_area_info info;
10720
10721 if (flags & MAP_FIXED) {
10722 /* We do not accept a shared mapping if it would violate
10723 * cache aliasing constraints.
10724 */
10725- if ((flags & MAP_SHARED) &&
10726+ if ((filp || (flags & MAP_SHARED)) &&
10727 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10728 return -EINVAL;
10729 return addr;
10730@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10731 if (filp || (flags & MAP_SHARED))
10732 do_color_align = 1;
10733
10734+#ifdef CONFIG_PAX_RANDMMAP
10735+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10736+#endif
10737+
10738 if (addr) {
10739 if (do_color_align)
10740 addr = COLOR_ALIGN(addr, pgoff);
10741@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10742 addr = PAGE_ALIGN(addr);
10743
10744 vma = find_vma(mm, addr);
10745- if (task_size - len >= addr &&
10746- (!vma || addr + len <= vma->vm_start))
10747+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10748 return addr;
10749 }
10750
10751 info.flags = 0;
10752 info.length = len;
10753- info.low_limit = TASK_UNMAPPED_BASE;
10754+ info.low_limit = mm->mmap_base;
10755 info.high_limit = min(task_size, VA_EXCLUDE_START);
10756 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10757 info.align_offset = pgoff << PAGE_SHIFT;
10758+ info.threadstack_offset = offset;
10759 addr = vm_unmapped_area(&info);
10760
10761 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10762 VM_BUG_ON(addr != -ENOMEM);
10763 info.low_limit = VA_EXCLUDE_END;
10764+
10765+#ifdef CONFIG_PAX_RANDMMAP
10766+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10767+ info.low_limit += mm->delta_mmap;
10768+#endif
10769+
10770 info.high_limit = task_size;
10771 addr = vm_unmapped_area(&info);
10772 }
10773@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10774 unsigned long task_size = STACK_TOP32;
10775 unsigned long addr = addr0;
10776 int do_color_align;
10777+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10778 struct vm_unmapped_area_info info;
10779
10780 /* This should only ever run for 32-bit processes. */
10781@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10782 /* We do not accept a shared mapping if it would violate
10783 * cache aliasing constraints.
10784 */
10785- if ((flags & MAP_SHARED) &&
10786+ if ((filp || (flags & MAP_SHARED)) &&
10787 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10788 return -EINVAL;
10789 return addr;
10790@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10791 if (filp || (flags & MAP_SHARED))
10792 do_color_align = 1;
10793
10794+#ifdef CONFIG_PAX_RANDMMAP
10795+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10796+#endif
10797+
10798 /* requesting a specific address */
10799 if (addr) {
10800 if (do_color_align)
10801@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10802 addr = PAGE_ALIGN(addr);
10803
10804 vma = find_vma(mm, addr);
10805- if (task_size - len >= addr &&
10806- (!vma || addr + len <= vma->vm_start))
10807+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10808 return addr;
10809 }
10810
10811@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10812 info.high_limit = mm->mmap_base;
10813 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10814 info.align_offset = pgoff << PAGE_SHIFT;
10815+ info.threadstack_offset = offset;
10816 addr = vm_unmapped_area(&info);
10817
10818 /*
10819@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10820 VM_BUG_ON(addr != -ENOMEM);
10821 info.flags = 0;
10822 info.low_limit = TASK_UNMAPPED_BASE;
10823+
10824+#ifdef CONFIG_PAX_RANDMMAP
10825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10826+ info.low_limit += mm->delta_mmap;
10827+#endif
10828+
10829 info.high_limit = STACK_TOP32;
10830 addr = vm_unmapped_area(&info);
10831 }
10832@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10833 EXPORT_SYMBOL(get_fb_unmapped_area);
10834
10835 /* Essentially the same as PowerPC. */
10836-static unsigned long mmap_rnd(void)
10837+static unsigned long mmap_rnd(struct mm_struct *mm)
10838 {
10839 unsigned long rnd = 0UL;
10840
10841+#ifdef CONFIG_PAX_RANDMMAP
10842+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10843+#endif
10844+
10845 if (current->flags & PF_RANDOMIZE) {
10846 unsigned long val = get_random_int();
10847 if (test_thread_flag(TIF_32BIT))
10848@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10849
10850 void arch_pick_mmap_layout(struct mm_struct *mm)
10851 {
10852- unsigned long random_factor = mmap_rnd();
10853+ unsigned long random_factor = mmap_rnd(mm);
10854 unsigned long gap;
10855
10856 /*
10857@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10858 gap == RLIM_INFINITY ||
10859 sysctl_legacy_va_layout) {
10860 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10861+
10862+#ifdef CONFIG_PAX_RANDMMAP
10863+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10864+ mm->mmap_base += mm->delta_mmap;
10865+#endif
10866+
10867 mm->get_unmapped_area = arch_get_unmapped_area;
10868 } else {
10869 /* We know it's 32-bit */
10870@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10871 gap = (task_size / 6 * 5);
10872
10873 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10874+
10875+#ifdef CONFIG_PAX_RANDMMAP
10876+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10877+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10878+#endif
10879+
10880 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10881 }
10882 }
10883diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10884index bb00089..e0ea580 100644
10885--- a/arch/sparc/kernel/syscalls.S
10886+++ b/arch/sparc/kernel/syscalls.S
10887@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10888 #endif
10889 .align 32
10890 1: ldx [%g6 + TI_FLAGS], %l5
10891- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10892+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10893 be,pt %icc, rtrap
10894 nop
10895 call syscall_trace_leave
10896@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10897
10898 srl %i3, 0, %o3 ! IEU0
10899 srl %i2, 0, %o2 ! IEU0 Group
10900- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10901+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10902 bne,pn %icc, linux_syscall_trace32 ! CTI
10903 mov %i0, %l5 ! IEU1
10904 5: call %l7 ! CTI Group brk forced
10905@@ -218,7 +218,7 @@ linux_sparc_syscall:
10906
10907 mov %i3, %o3 ! IEU1
10908 mov %i4, %o4 ! IEU0 Group
10909- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10910+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10911 bne,pn %icc, linux_syscall_trace ! CTI Group
10912 mov %i0, %l5 ! IEU0
10913 2: call %l7 ! CTI Group brk forced
10914@@ -233,7 +233,7 @@ ret_sys_call:
10915
10916 cmp %o0, -ERESTART_RESTARTBLOCK
10917 bgeu,pn %xcc, 1f
10918- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10919+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10920 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10921
10922 2:
10923diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10924index 6fd386c5..6907d81 100644
10925--- a/arch/sparc/kernel/traps_32.c
10926+++ b/arch/sparc/kernel/traps_32.c
10927@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10928 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10929 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10930
10931+extern void gr_handle_kernel_exploit(void);
10932+
10933 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10934 {
10935 static int die_counter;
10936@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10937 count++ < 30 &&
10938 (((unsigned long) rw) >= PAGE_OFFSET) &&
10939 !(((unsigned long) rw) & 0x7)) {
10940- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10941+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10942 (void *) rw->ins[7]);
10943 rw = (struct reg_window32 *)rw->ins[6];
10944 }
10945 }
10946 printk("Instruction DUMP:");
10947 instruction_dump ((unsigned long *) regs->pc);
10948- if(regs->psr & PSR_PS)
10949+ if(regs->psr & PSR_PS) {
10950+ gr_handle_kernel_exploit();
10951 do_exit(SIGKILL);
10952+ }
10953 do_exit(SIGSEGV);
10954 }
10955
10956diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10957index 981a769..d906eda 100644
10958--- a/arch/sparc/kernel/traps_64.c
10959+++ b/arch/sparc/kernel/traps_64.c
10960@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10961 i + 1,
10962 p->trapstack[i].tstate, p->trapstack[i].tpc,
10963 p->trapstack[i].tnpc, p->trapstack[i].tt);
10964- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10965+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10966 }
10967 }
10968
10969@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10970
10971 lvl -= 0x100;
10972 if (regs->tstate & TSTATE_PRIV) {
10973+
10974+#ifdef CONFIG_PAX_REFCOUNT
10975+ if (lvl == 6)
10976+ pax_report_refcount_overflow(regs);
10977+#endif
10978+
10979 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10980 die_if_kernel(buffer, regs);
10981 }
10982@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10983 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10984 {
10985 char buffer[32];
10986-
10987+
10988 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10989 0, lvl, SIGTRAP) == NOTIFY_STOP)
10990 return;
10991
10992+#ifdef CONFIG_PAX_REFCOUNT
10993+ if (lvl == 6)
10994+ pax_report_refcount_overflow(regs);
10995+#endif
10996+
10997 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10998
10999 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11000@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11001 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11002 printk("%s" "ERROR(%d): ",
11003 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11004- printk("TPC<%pS>\n", (void *) regs->tpc);
11005+ printk("TPC<%pA>\n", (void *) regs->tpc);
11006 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11007 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11008 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11009@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11010 smp_processor_id(),
11011 (type & 0x1) ? 'I' : 'D',
11012 regs->tpc);
11013- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11014+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11015 panic("Irrecoverable Cheetah+ parity error.");
11016 }
11017
11018@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11019 smp_processor_id(),
11020 (type & 0x1) ? 'I' : 'D',
11021 regs->tpc);
11022- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11023+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11024 }
11025
11026 struct sun4v_error_entry {
11027@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11028 /*0x38*/u64 reserved_5;
11029 };
11030
11031-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11032-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11033+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11034+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11035
11036 static const char *sun4v_err_type_to_str(u8 type)
11037 {
11038@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11039 }
11040
11041 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11042- int cpu, const char *pfx, atomic_t *ocnt)
11043+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11044 {
11045 u64 *raw_ptr = (u64 *) ent;
11046 u32 attrs;
11047@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11048
11049 show_regs(regs);
11050
11051- if ((cnt = atomic_read(ocnt)) != 0) {
11052- atomic_set(ocnt, 0);
11053+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11054+ atomic_set_unchecked(ocnt, 0);
11055 wmb();
11056 printk("%s: Queue overflowed %d times.\n",
11057 pfx, cnt);
11058@@ -2048,7 +2059,7 @@ out:
11059 */
11060 void sun4v_resum_overflow(struct pt_regs *regs)
11061 {
11062- atomic_inc(&sun4v_resum_oflow_cnt);
11063+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11064 }
11065
11066 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11067@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11068 /* XXX Actually even this can make not that much sense. Perhaps
11069 * XXX we should just pull the plug and panic directly from here?
11070 */
11071- atomic_inc(&sun4v_nonresum_oflow_cnt);
11072+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11073 }
11074
11075 static void sun4v_tlb_error(struct pt_regs *regs)
11076@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11077
11078 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11079 regs->tpc, tl);
11080- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11081+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11082 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11083- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11084+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11085 (void *) regs->u_regs[UREG_I7]);
11086 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11087 "pte[%lx] error[%lx]\n",
11088@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11089
11090 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11091 regs->tpc, tl);
11092- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11093+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11094 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11095- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11096+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11097 (void *) regs->u_regs[UREG_I7]);
11098 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11099 "pte[%lx] error[%lx]\n",
11100@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11101 fp = (unsigned long)sf->fp + STACK_BIAS;
11102 }
11103
11104- printk(" [%016lx] %pS\n", pc, (void *) pc);
11105+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11106 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11107 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11108 int index = tsk->curr_ret_stack;
11109 if (tsk->ret_stack && index >= graph) {
11110 pc = tsk->ret_stack[index - graph].ret;
11111- printk(" [%016lx] %pS\n", pc, (void *) pc);
11112+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11113 graph++;
11114 }
11115 }
11116@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11117 return (struct reg_window *) (fp + STACK_BIAS);
11118 }
11119
11120+extern void gr_handle_kernel_exploit(void);
11121+
11122 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11123 {
11124 static int die_counter;
11125@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11126 while (rw &&
11127 count++ < 30 &&
11128 kstack_valid(tp, (unsigned long) rw)) {
11129- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11130+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11131 (void *) rw->ins[7]);
11132
11133 rw = kernel_stack_up(rw);
11134@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11135 }
11136 user_instruction_dump ((unsigned int __user *) regs->tpc);
11137 }
11138- if (regs->tstate & TSTATE_PRIV)
11139+ if (regs->tstate & TSTATE_PRIV) {
11140+ gr_handle_kernel_exploit();
11141 do_exit(SIGKILL);
11142+ }
11143 do_exit(SIGSEGV);
11144 }
11145 EXPORT_SYMBOL(die_if_kernel);
11146diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11147index 62098a8..547ab2c 100644
11148--- a/arch/sparc/kernel/unaligned_64.c
11149+++ b/arch/sparc/kernel/unaligned_64.c
11150@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11151 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11152
11153 if (__ratelimit(&ratelimit)) {
11154- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11155+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11156 regs->tpc, (void *) regs->tpc);
11157 }
11158 }
11159diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11160index 3269b02..64f5231 100644
11161--- a/arch/sparc/lib/Makefile
11162+++ b/arch/sparc/lib/Makefile
11163@@ -2,7 +2,7 @@
11164 #
11165
11166 asflags-y := -ansi -DST_DIV0=0x02
11167-ccflags-y := -Werror
11168+#ccflags-y := -Werror
11169
11170 lib-$(CONFIG_SPARC32) += ashrdi3.o
11171 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11172diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11173index 05dac43..76f8ed4 100644
11174--- a/arch/sparc/lib/atomic_64.S
11175+++ b/arch/sparc/lib/atomic_64.S
11176@@ -15,11 +15,22 @@
11177 * a value and does the barriers.
11178 */
11179
11180-#define ATOMIC_OP(op) \
11181-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11182+#ifdef CONFIG_PAX_REFCOUNT
11183+#define __REFCOUNT_OP(op) op##cc
11184+#define __OVERFLOW_IOP tvs %icc, 6;
11185+#define __OVERFLOW_XOP tvs %xcc, 6;
11186+#else
11187+#define __REFCOUNT_OP(op) op
11188+#define __OVERFLOW_IOP
11189+#define __OVERFLOW_XOP
11190+#endif
11191+
11192+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11193+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11194 BACKOFF_SETUP(%o2); \
11195 1: lduw [%o1], %g1; \
11196- op %g1, %o0, %g7; \
11197+ asm_op %g1, %o0, %g7; \
11198+ post_op \
11199 cas [%o1], %g1, %g7; \
11200 cmp %g1, %g7; \
11201 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11202@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11203 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11204 ENDPROC(atomic_##op); \
11205
11206-#define ATOMIC_OP_RETURN(op) \
11207-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11208+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11209+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11210+
11211+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11212+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11213 BACKOFF_SETUP(%o2); \
11214 1: lduw [%o1], %g1; \
11215- op %g1, %o0, %g7; \
11216+ asm_op %g1, %o0, %g7; \
11217+ post_op \
11218 cas [%o1], %g1, %g7; \
11219 cmp %g1, %g7; \
11220 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11221@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11222 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11223 ENDPROC(atomic_##op##_return);
11224
11225+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11226+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11227+
11228 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11229
11230 ATOMIC_OPS(add)
11231@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11232
11233 #undef ATOMIC_OPS
11234 #undef ATOMIC_OP_RETURN
11235+#undef __ATOMIC_OP_RETURN
11236 #undef ATOMIC_OP
11237+#undef __ATOMIC_OP
11238
11239-#define ATOMIC64_OP(op) \
11240-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11241+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11242+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11243 BACKOFF_SETUP(%o2); \
11244 1: ldx [%o1], %g1; \
11245- op %g1, %o0, %g7; \
11246+ asm_op %g1, %o0, %g7; \
11247+ post_op \
11248 casx [%o1], %g1, %g7; \
11249 cmp %g1, %g7; \
11250 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11251@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11252 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11253 ENDPROC(atomic64_##op); \
11254
11255-#define ATOMIC64_OP_RETURN(op) \
11256-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11257+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11258+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11259+
11260+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11261+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11262 BACKOFF_SETUP(%o2); \
11263 1: ldx [%o1], %g1; \
11264- op %g1, %o0, %g7; \
11265+ asm_op %g1, %o0, %g7; \
11266+ post_op \
11267 casx [%o1], %g1, %g7; \
11268 cmp %g1, %g7; \
11269 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11270@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11271 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11272 ENDPROC(atomic64_##op##_return);
11273
11274+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11275+ __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11276+
11277 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11278
11279 ATOMIC64_OPS(add)
11280@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11281
11282 #undef ATOMIC64_OPS
11283 #undef ATOMIC64_OP_RETURN
11284+#undef __ATOMIC64_OP_RETURN
11285 #undef ATOMIC64_OP
11286+#undef __ATOMIC64_OP
11287+#undef __OVERFLOW_XOP
11288+#undef __OVERFLOW_IOP
11289+#undef __REFCOUNT_OP
11290
11291 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11292 BACKOFF_SETUP(%o2)
11293diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11294index 1d649a9..fbc5bfc 100644
11295--- a/arch/sparc/lib/ksyms.c
11296+++ b/arch/sparc/lib/ksyms.c
11297@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11298 /* Atomic counter implementation. */
11299 #define ATOMIC_OP(op) \
11300 EXPORT_SYMBOL(atomic_##op); \
11301-EXPORT_SYMBOL(atomic64_##op);
11302+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11303+EXPORT_SYMBOL(atomic64_##op); \
11304+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11305
11306 #define ATOMIC_OP_RETURN(op) \
11307 EXPORT_SYMBOL(atomic_##op##_return); \
11308@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11309 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11310
11311 ATOMIC_OPS(add)
11312+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11313+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11314 ATOMIC_OPS(sub)
11315
11316 #undef ATOMIC_OPS
11317diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11318index 30c3ecc..736f015 100644
11319--- a/arch/sparc/mm/Makefile
11320+++ b/arch/sparc/mm/Makefile
11321@@ -2,7 +2,7 @@
11322 #
11323
11324 asflags-y := -ansi
11325-ccflags-y := -Werror
11326+#ccflags-y := -Werror
11327
11328 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11329 obj-y += fault_$(BITS).o
11330diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11331index 70d8171..274c6c0 100644
11332--- a/arch/sparc/mm/fault_32.c
11333+++ b/arch/sparc/mm/fault_32.c
11334@@ -21,6 +21,9 @@
11335 #include <linux/perf_event.h>
11336 #include <linux/interrupt.h>
11337 #include <linux/kdebug.h>
11338+#include <linux/slab.h>
11339+#include <linux/pagemap.h>
11340+#include <linux/compiler.h>
11341
11342 #include <asm/page.h>
11343 #include <asm/pgtable.h>
11344@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11345 return safe_compute_effective_address(regs, insn);
11346 }
11347
11348+#ifdef CONFIG_PAX_PAGEEXEC
11349+#ifdef CONFIG_PAX_DLRESOLVE
11350+static void pax_emuplt_close(struct vm_area_struct *vma)
11351+{
11352+ vma->vm_mm->call_dl_resolve = 0UL;
11353+}
11354+
11355+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11356+{
11357+ unsigned int *kaddr;
11358+
11359+ vmf->page = alloc_page(GFP_HIGHUSER);
11360+ if (!vmf->page)
11361+ return VM_FAULT_OOM;
11362+
11363+ kaddr = kmap(vmf->page);
11364+ memset(kaddr, 0, PAGE_SIZE);
11365+ kaddr[0] = 0x9DE3BFA8U; /* save */
11366+ flush_dcache_page(vmf->page);
11367+ kunmap(vmf->page);
11368+ return VM_FAULT_MAJOR;
11369+}
11370+
11371+static const struct vm_operations_struct pax_vm_ops = {
11372+ .close = pax_emuplt_close,
11373+ .fault = pax_emuplt_fault
11374+};
11375+
11376+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11377+{
11378+ int ret;
11379+
11380+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11381+ vma->vm_mm = current->mm;
11382+ vma->vm_start = addr;
11383+ vma->vm_end = addr + PAGE_SIZE;
11384+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11385+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11386+ vma->vm_ops = &pax_vm_ops;
11387+
11388+ ret = insert_vm_struct(current->mm, vma);
11389+ if (ret)
11390+ return ret;
11391+
11392+ ++current->mm->total_vm;
11393+ return 0;
11394+}
11395+#endif
11396+
11397+/*
11398+ * PaX: decide what to do with offenders (regs->pc = fault address)
11399+ *
11400+ * returns 1 when task should be killed
11401+ * 2 when patched PLT trampoline was detected
11402+ * 3 when unpatched PLT trampoline was detected
11403+ */
11404+static int pax_handle_fetch_fault(struct pt_regs *regs)
11405+{
11406+
11407+#ifdef CONFIG_PAX_EMUPLT
11408+ int err;
11409+
11410+ do { /* PaX: patched PLT emulation #1 */
11411+ unsigned int sethi1, sethi2, jmpl;
11412+
11413+ err = get_user(sethi1, (unsigned int *)regs->pc);
11414+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11415+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11416+
11417+ if (err)
11418+ break;
11419+
11420+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11421+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11422+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11423+ {
11424+ unsigned int addr;
11425+
11426+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11427+ addr = regs->u_regs[UREG_G1];
11428+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11429+ regs->pc = addr;
11430+ regs->npc = addr+4;
11431+ return 2;
11432+ }
11433+ } while (0);
11434+
11435+ do { /* PaX: patched PLT emulation #2 */
11436+ unsigned int ba;
11437+
11438+ err = get_user(ba, (unsigned int *)regs->pc);
11439+
11440+ if (err)
11441+ break;
11442+
11443+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11444+ unsigned int addr;
11445+
11446+ if ((ba & 0xFFC00000U) == 0x30800000U)
11447+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11448+ else
11449+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11450+ regs->pc = addr;
11451+ regs->npc = addr+4;
11452+ return 2;
11453+ }
11454+ } while (0);
11455+
11456+ do { /* PaX: patched PLT emulation #3 */
11457+ unsigned int sethi, bajmpl, nop;
11458+
11459+ err = get_user(sethi, (unsigned int *)regs->pc);
11460+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11461+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11462+
11463+ if (err)
11464+ break;
11465+
11466+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11467+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11468+ nop == 0x01000000U)
11469+ {
11470+ unsigned int addr;
11471+
11472+ addr = (sethi & 0x003FFFFFU) << 10;
11473+ regs->u_regs[UREG_G1] = addr;
11474+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11475+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11476+ else
11477+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11478+ regs->pc = addr;
11479+ regs->npc = addr+4;
11480+ return 2;
11481+ }
11482+ } while (0);
11483+
11484+ do { /* PaX: unpatched PLT emulation step 1 */
11485+ unsigned int sethi, ba, nop;
11486+
11487+ err = get_user(sethi, (unsigned int *)regs->pc);
11488+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11489+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11490+
11491+ if (err)
11492+ break;
11493+
11494+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11495+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11496+ nop == 0x01000000U)
11497+ {
11498+ unsigned int addr, save, call;
11499+
11500+ if ((ba & 0xFFC00000U) == 0x30800000U)
11501+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11502+ else
11503+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11504+
11505+ err = get_user(save, (unsigned int *)addr);
11506+ err |= get_user(call, (unsigned int *)(addr+4));
11507+ err |= get_user(nop, (unsigned int *)(addr+8));
11508+ if (err)
11509+ break;
11510+
11511+#ifdef CONFIG_PAX_DLRESOLVE
11512+ if (save == 0x9DE3BFA8U &&
11513+ (call & 0xC0000000U) == 0x40000000U &&
11514+ nop == 0x01000000U)
11515+ {
11516+ struct vm_area_struct *vma;
11517+ unsigned long call_dl_resolve;
11518+
11519+ down_read(&current->mm->mmap_sem);
11520+ call_dl_resolve = current->mm->call_dl_resolve;
11521+ up_read(&current->mm->mmap_sem);
11522+ if (likely(call_dl_resolve))
11523+ goto emulate;
11524+
11525+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11526+
11527+ down_write(&current->mm->mmap_sem);
11528+ if (current->mm->call_dl_resolve) {
11529+ call_dl_resolve = current->mm->call_dl_resolve;
11530+ up_write(&current->mm->mmap_sem);
11531+ if (vma)
11532+ kmem_cache_free(vm_area_cachep, vma);
11533+ goto emulate;
11534+ }
11535+
11536+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11537+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11538+ up_write(&current->mm->mmap_sem);
11539+ if (vma)
11540+ kmem_cache_free(vm_area_cachep, vma);
11541+ return 1;
11542+ }
11543+
11544+ if (pax_insert_vma(vma, call_dl_resolve)) {
11545+ up_write(&current->mm->mmap_sem);
11546+ kmem_cache_free(vm_area_cachep, vma);
11547+ return 1;
11548+ }
11549+
11550+ current->mm->call_dl_resolve = call_dl_resolve;
11551+ up_write(&current->mm->mmap_sem);
11552+
11553+emulate:
11554+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11555+ regs->pc = call_dl_resolve;
11556+ regs->npc = addr+4;
11557+ return 3;
11558+ }
11559+#endif
11560+
11561+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11562+ if ((save & 0xFFC00000U) == 0x05000000U &&
11563+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11564+ nop == 0x01000000U)
11565+ {
11566+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11567+ regs->u_regs[UREG_G2] = addr + 4;
11568+ addr = (save & 0x003FFFFFU) << 10;
11569+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11570+ regs->pc = addr;
11571+ regs->npc = addr+4;
11572+ return 3;
11573+ }
11574+ }
11575+ } while (0);
11576+
11577+ do { /* PaX: unpatched PLT emulation step 2 */
11578+ unsigned int save, call, nop;
11579+
11580+ err = get_user(save, (unsigned int *)(regs->pc-4));
11581+ err |= get_user(call, (unsigned int *)regs->pc);
11582+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11583+ if (err)
11584+ break;
11585+
11586+ if (save == 0x9DE3BFA8U &&
11587+ (call & 0xC0000000U) == 0x40000000U &&
11588+ nop == 0x01000000U)
11589+ {
11590+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11591+
11592+ regs->u_regs[UREG_RETPC] = regs->pc;
11593+ regs->pc = dl_resolve;
11594+ regs->npc = dl_resolve+4;
11595+ return 3;
11596+ }
11597+ } while (0);
11598+#endif
11599+
11600+ return 1;
11601+}
11602+
11603+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11604+{
11605+ unsigned long i;
11606+
11607+ printk(KERN_ERR "PAX: bytes at PC: ");
11608+ for (i = 0; i < 8; i++) {
11609+ unsigned int c;
11610+ if (get_user(c, (unsigned int *)pc+i))
11611+ printk(KERN_CONT "???????? ");
11612+ else
11613+ printk(KERN_CONT "%08x ", c);
11614+ }
11615+ printk("\n");
11616+}
11617+#endif
11618+
11619 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11620 int text_fault)
11621 {
11622@@ -226,6 +500,24 @@ good_area:
11623 if (!(vma->vm_flags & VM_WRITE))
11624 goto bad_area;
11625 } else {
11626+
11627+#ifdef CONFIG_PAX_PAGEEXEC
11628+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11629+ up_read(&mm->mmap_sem);
11630+ switch (pax_handle_fetch_fault(regs)) {
11631+
11632+#ifdef CONFIG_PAX_EMUPLT
11633+ case 2:
11634+ case 3:
11635+ return;
11636+#endif
11637+
11638+ }
11639+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11640+ do_group_exit(SIGKILL);
11641+ }
11642+#endif
11643+
11644 /* Allow reads even for write-only mappings */
11645 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11646 goto bad_area;
11647diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11648index 4798232..f76e3aa 100644
11649--- a/arch/sparc/mm/fault_64.c
11650+++ b/arch/sparc/mm/fault_64.c
11651@@ -22,6 +22,9 @@
11652 #include <linux/kdebug.h>
11653 #include <linux/percpu.h>
11654 #include <linux/context_tracking.h>
11655+#include <linux/slab.h>
11656+#include <linux/pagemap.h>
11657+#include <linux/compiler.h>
11658
11659 #include <asm/page.h>
11660 #include <asm/pgtable.h>
11661@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11662 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11663 regs->tpc);
11664 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11665- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11666+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11667 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11668 dump_stack();
11669 unhandled_fault(regs->tpc, current, regs);
11670@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11671 show_regs(regs);
11672 }
11673
11674+#ifdef CONFIG_PAX_PAGEEXEC
11675+#ifdef CONFIG_PAX_DLRESOLVE
11676+static void pax_emuplt_close(struct vm_area_struct *vma)
11677+{
11678+ vma->vm_mm->call_dl_resolve = 0UL;
11679+}
11680+
11681+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11682+{
11683+ unsigned int *kaddr;
11684+
11685+ vmf->page = alloc_page(GFP_HIGHUSER);
11686+ if (!vmf->page)
11687+ return VM_FAULT_OOM;
11688+
11689+ kaddr = kmap(vmf->page);
11690+ memset(kaddr, 0, PAGE_SIZE);
11691+ kaddr[0] = 0x9DE3BFA8U; /* save */
11692+ flush_dcache_page(vmf->page);
11693+ kunmap(vmf->page);
11694+ return VM_FAULT_MAJOR;
11695+}
11696+
11697+static const struct vm_operations_struct pax_vm_ops = {
11698+ .close = pax_emuplt_close,
11699+ .fault = pax_emuplt_fault
11700+};
11701+
11702+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11703+{
11704+ int ret;
11705+
11706+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11707+ vma->vm_mm = current->mm;
11708+ vma->vm_start = addr;
11709+ vma->vm_end = addr + PAGE_SIZE;
11710+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11711+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11712+ vma->vm_ops = &pax_vm_ops;
11713+
11714+ ret = insert_vm_struct(current->mm, vma);
11715+ if (ret)
11716+ return ret;
11717+
11718+ ++current->mm->total_vm;
11719+ return 0;
11720+}
11721+#endif
11722+
11723+/*
11724+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11725+ *
11726+ * returns 1 when task should be killed
11727+ * 2 when patched PLT trampoline was detected
11728+ * 3 when unpatched PLT trampoline was detected
11729+ */
11730+static int pax_handle_fetch_fault(struct pt_regs *regs)
11731+{
11732+
11733+#ifdef CONFIG_PAX_EMUPLT
11734+ int err;
11735+
11736+ do { /* PaX: patched PLT emulation #1 */
11737+ unsigned int sethi1, sethi2, jmpl;
11738+
11739+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11740+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11741+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11742+
11743+ if (err)
11744+ break;
11745+
11746+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11747+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11748+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11749+ {
11750+ unsigned long addr;
11751+
11752+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11753+ addr = regs->u_regs[UREG_G1];
11754+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11755+
11756+ if (test_thread_flag(TIF_32BIT))
11757+ addr &= 0xFFFFFFFFUL;
11758+
11759+ regs->tpc = addr;
11760+ regs->tnpc = addr+4;
11761+ return 2;
11762+ }
11763+ } while (0);
11764+
11765+ do { /* PaX: patched PLT emulation #2 */
11766+ unsigned int ba;
11767+
11768+ err = get_user(ba, (unsigned int *)regs->tpc);
11769+
11770+ if (err)
11771+ break;
11772+
11773+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11774+ unsigned long addr;
11775+
11776+ if ((ba & 0xFFC00000U) == 0x30800000U)
11777+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11778+ else
11779+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11780+
11781+ if (test_thread_flag(TIF_32BIT))
11782+ addr &= 0xFFFFFFFFUL;
11783+
11784+ regs->tpc = addr;
11785+ regs->tnpc = addr+4;
11786+ return 2;
11787+ }
11788+ } while (0);
11789+
11790+ do { /* PaX: patched PLT emulation #3 */
11791+ unsigned int sethi, bajmpl, nop;
11792+
11793+ err = get_user(sethi, (unsigned int *)regs->tpc);
11794+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11795+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11796+
11797+ if (err)
11798+ break;
11799+
11800+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11801+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11802+ nop == 0x01000000U)
11803+ {
11804+ unsigned long addr;
11805+
11806+ addr = (sethi & 0x003FFFFFU) << 10;
11807+ regs->u_regs[UREG_G1] = addr;
11808+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11809+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11810+ else
11811+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11812+
11813+ if (test_thread_flag(TIF_32BIT))
11814+ addr &= 0xFFFFFFFFUL;
11815+
11816+ regs->tpc = addr;
11817+ regs->tnpc = addr+4;
11818+ return 2;
11819+ }
11820+ } while (0);
11821+
11822+ do { /* PaX: patched PLT emulation #4 */
11823+ unsigned int sethi, mov1, call, mov2;
11824+
11825+ err = get_user(sethi, (unsigned int *)regs->tpc);
11826+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11827+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11828+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11829+
11830+ if (err)
11831+ break;
11832+
11833+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11834+ mov1 == 0x8210000FU &&
11835+ (call & 0xC0000000U) == 0x40000000U &&
11836+ mov2 == 0x9E100001U)
11837+ {
11838+ unsigned long addr;
11839+
11840+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11841+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11842+
11843+ if (test_thread_flag(TIF_32BIT))
11844+ addr &= 0xFFFFFFFFUL;
11845+
11846+ regs->tpc = addr;
11847+ regs->tnpc = addr+4;
11848+ return 2;
11849+ }
11850+ } while (0);
11851+
11852+ do { /* PaX: patched PLT emulation #5 */
11853+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11854+
11855+ err = get_user(sethi, (unsigned int *)regs->tpc);
11856+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11857+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11858+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11859+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11860+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11861+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11862+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11863+
11864+ if (err)
11865+ break;
11866+
11867+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11868+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11869+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11870+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11871+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11872+ sllx == 0x83287020U &&
11873+ jmpl == 0x81C04005U &&
11874+ nop == 0x01000000U)
11875+ {
11876+ unsigned long addr;
11877+
11878+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11879+ regs->u_regs[UREG_G1] <<= 32;
11880+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11881+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11882+ regs->tpc = addr;
11883+ regs->tnpc = addr+4;
11884+ return 2;
11885+ }
11886+ } while (0);
11887+
11888+ do { /* PaX: patched PLT emulation #6 */
11889+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11890+
11891+ err = get_user(sethi, (unsigned int *)regs->tpc);
11892+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11893+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11894+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11895+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11896+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11897+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11898+
11899+ if (err)
11900+ break;
11901+
11902+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11903+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11904+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11905+ sllx == 0x83287020U &&
11906+ (or & 0xFFFFE000U) == 0x8A116000U &&
11907+ jmpl == 0x81C04005U &&
11908+ nop == 0x01000000U)
11909+ {
11910+ unsigned long addr;
11911+
11912+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11913+ regs->u_regs[UREG_G1] <<= 32;
11914+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11915+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11916+ regs->tpc = addr;
11917+ regs->tnpc = addr+4;
11918+ return 2;
11919+ }
11920+ } while (0);
11921+
11922+ do { /* PaX: unpatched PLT emulation step 1 */
11923+ unsigned int sethi, ba, nop;
11924+
11925+ err = get_user(sethi, (unsigned int *)regs->tpc);
11926+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11927+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11928+
11929+ if (err)
11930+ break;
11931+
11932+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11933+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11934+ nop == 0x01000000U)
11935+ {
11936+ unsigned long addr;
11937+ unsigned int save, call;
11938+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11939+
11940+ if ((ba & 0xFFC00000U) == 0x30800000U)
11941+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11942+ else
11943+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11944+
11945+ if (test_thread_flag(TIF_32BIT))
11946+ addr &= 0xFFFFFFFFUL;
11947+
11948+ err = get_user(save, (unsigned int *)addr);
11949+ err |= get_user(call, (unsigned int *)(addr+4));
11950+ err |= get_user(nop, (unsigned int *)(addr+8));
11951+ if (err)
11952+ break;
11953+
11954+#ifdef CONFIG_PAX_DLRESOLVE
11955+ if (save == 0x9DE3BFA8U &&
11956+ (call & 0xC0000000U) == 0x40000000U &&
11957+ nop == 0x01000000U)
11958+ {
11959+ struct vm_area_struct *vma;
11960+ unsigned long call_dl_resolve;
11961+
11962+ down_read(&current->mm->mmap_sem);
11963+ call_dl_resolve = current->mm->call_dl_resolve;
11964+ up_read(&current->mm->mmap_sem);
11965+ if (likely(call_dl_resolve))
11966+ goto emulate;
11967+
11968+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11969+
11970+ down_write(&current->mm->mmap_sem);
11971+ if (current->mm->call_dl_resolve) {
11972+ call_dl_resolve = current->mm->call_dl_resolve;
11973+ up_write(&current->mm->mmap_sem);
11974+ if (vma)
11975+ kmem_cache_free(vm_area_cachep, vma);
11976+ goto emulate;
11977+ }
11978+
11979+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11980+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11981+ up_write(&current->mm->mmap_sem);
11982+ if (vma)
11983+ kmem_cache_free(vm_area_cachep, vma);
11984+ return 1;
11985+ }
11986+
11987+ if (pax_insert_vma(vma, call_dl_resolve)) {
11988+ up_write(&current->mm->mmap_sem);
11989+ kmem_cache_free(vm_area_cachep, vma);
11990+ return 1;
11991+ }
11992+
11993+ current->mm->call_dl_resolve = call_dl_resolve;
11994+ up_write(&current->mm->mmap_sem);
11995+
11996+emulate:
11997+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11998+ regs->tpc = call_dl_resolve;
11999+ regs->tnpc = addr+4;
12000+ return 3;
12001+ }
12002+#endif
12003+
12004+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12005+ if ((save & 0xFFC00000U) == 0x05000000U &&
12006+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12007+ nop == 0x01000000U)
12008+ {
12009+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12010+ regs->u_regs[UREG_G2] = addr + 4;
12011+ addr = (save & 0x003FFFFFU) << 10;
12012+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12013+
12014+ if (test_thread_flag(TIF_32BIT))
12015+ addr &= 0xFFFFFFFFUL;
12016+
12017+ regs->tpc = addr;
12018+ regs->tnpc = addr+4;
12019+ return 3;
12020+ }
12021+
12022+ /* PaX: 64-bit PLT stub */
12023+ err = get_user(sethi1, (unsigned int *)addr);
12024+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12025+ err |= get_user(or1, (unsigned int *)(addr+8));
12026+ err |= get_user(or2, (unsigned int *)(addr+12));
12027+ err |= get_user(sllx, (unsigned int *)(addr+16));
12028+ err |= get_user(add, (unsigned int *)(addr+20));
12029+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12030+ err |= get_user(nop, (unsigned int *)(addr+28));
12031+ if (err)
12032+ break;
12033+
12034+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12035+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12036+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12037+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12038+ sllx == 0x89293020U &&
12039+ add == 0x8A010005U &&
12040+ jmpl == 0x89C14000U &&
12041+ nop == 0x01000000U)
12042+ {
12043+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12044+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12045+ regs->u_regs[UREG_G4] <<= 32;
12046+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12047+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12048+ regs->u_regs[UREG_G4] = addr + 24;
12049+ addr = regs->u_regs[UREG_G5];
12050+ regs->tpc = addr;
12051+ regs->tnpc = addr+4;
12052+ return 3;
12053+ }
12054+ }
12055+ } while (0);
12056+
12057+#ifdef CONFIG_PAX_DLRESOLVE
12058+ do { /* PaX: unpatched PLT emulation step 2 */
12059+ unsigned int save, call, nop;
12060+
12061+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12062+ err |= get_user(call, (unsigned int *)regs->tpc);
12063+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12064+ if (err)
12065+ break;
12066+
12067+ if (save == 0x9DE3BFA8U &&
12068+ (call & 0xC0000000U) == 0x40000000U &&
12069+ nop == 0x01000000U)
12070+ {
12071+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12072+
12073+ if (test_thread_flag(TIF_32BIT))
12074+ dl_resolve &= 0xFFFFFFFFUL;
12075+
12076+ regs->u_regs[UREG_RETPC] = regs->tpc;
12077+ regs->tpc = dl_resolve;
12078+ regs->tnpc = dl_resolve+4;
12079+ return 3;
12080+ }
12081+ } while (0);
12082+#endif
12083+
12084+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12085+ unsigned int sethi, ba, nop;
12086+
12087+ err = get_user(sethi, (unsigned int *)regs->tpc);
12088+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12089+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12090+
12091+ if (err)
12092+ break;
12093+
12094+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12095+ (ba & 0xFFF00000U) == 0x30600000U &&
12096+ nop == 0x01000000U)
12097+ {
12098+ unsigned long addr;
12099+
12100+ addr = (sethi & 0x003FFFFFU) << 10;
12101+ regs->u_regs[UREG_G1] = addr;
12102+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12103+
12104+ if (test_thread_flag(TIF_32BIT))
12105+ addr &= 0xFFFFFFFFUL;
12106+
12107+ regs->tpc = addr;
12108+ regs->tnpc = addr+4;
12109+ return 2;
12110+ }
12111+ } while (0);
12112+
12113+#endif
12114+
12115+ return 1;
12116+}
12117+
12118+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12119+{
12120+ unsigned long i;
12121+
12122+ printk(KERN_ERR "PAX: bytes at PC: ");
12123+ for (i = 0; i < 8; i++) {
12124+ unsigned int c;
12125+ if (get_user(c, (unsigned int *)pc+i))
12126+ printk(KERN_CONT "???????? ");
12127+ else
12128+ printk(KERN_CONT "%08x ", c);
12129+ }
12130+ printk("\n");
12131+}
12132+#endif
12133+
12134 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12135 {
12136 enum ctx_state prev_state = exception_enter();
12137@@ -353,6 +816,29 @@ retry:
12138 if (!vma)
12139 goto bad_area;
12140
12141+#ifdef CONFIG_PAX_PAGEEXEC
12142+ /* PaX: detect ITLB misses on non-exec pages */
12143+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12144+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12145+ {
12146+ if (address != regs->tpc)
12147+ goto good_area;
12148+
12149+ up_read(&mm->mmap_sem);
12150+ switch (pax_handle_fetch_fault(regs)) {
12151+
12152+#ifdef CONFIG_PAX_EMUPLT
12153+ case 2:
12154+ case 3:
12155+ return;
12156+#endif
12157+
12158+ }
12159+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12160+ do_group_exit(SIGKILL);
12161+ }
12162+#endif
12163+
12164 /* Pure DTLB misses do not tell us whether the fault causing
12165 * load/store/atomic was a write or not, it only says that there
12166 * was no match. So in such a case we (carefully) read the
12167diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12168index d329537..2c3746a 100644
12169--- a/arch/sparc/mm/hugetlbpage.c
12170+++ b/arch/sparc/mm/hugetlbpage.c
12171@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12172 unsigned long addr,
12173 unsigned long len,
12174 unsigned long pgoff,
12175- unsigned long flags)
12176+ unsigned long flags,
12177+ unsigned long offset)
12178 {
12179+ struct mm_struct *mm = current->mm;
12180 unsigned long task_size = TASK_SIZE;
12181 struct vm_unmapped_area_info info;
12182
12183@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12184
12185 info.flags = 0;
12186 info.length = len;
12187- info.low_limit = TASK_UNMAPPED_BASE;
12188+ info.low_limit = mm->mmap_base;
12189 info.high_limit = min(task_size, VA_EXCLUDE_START);
12190 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12191 info.align_offset = 0;
12192+ info.threadstack_offset = offset;
12193 addr = vm_unmapped_area(&info);
12194
12195 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12196 VM_BUG_ON(addr != -ENOMEM);
12197 info.low_limit = VA_EXCLUDE_END;
12198+
12199+#ifdef CONFIG_PAX_RANDMMAP
12200+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12201+ info.low_limit += mm->delta_mmap;
12202+#endif
12203+
12204 info.high_limit = task_size;
12205 addr = vm_unmapped_area(&info);
12206 }
12207@@ -55,7 +64,8 @@ static unsigned long
12208 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12209 const unsigned long len,
12210 const unsigned long pgoff,
12211- const unsigned long flags)
12212+ const unsigned long flags,
12213+ const unsigned long offset)
12214 {
12215 struct mm_struct *mm = current->mm;
12216 unsigned long addr = addr0;
12217@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12218 info.high_limit = mm->mmap_base;
12219 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12220 info.align_offset = 0;
12221+ info.threadstack_offset = offset;
12222 addr = vm_unmapped_area(&info);
12223
12224 /*
12225@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12226 VM_BUG_ON(addr != -ENOMEM);
12227 info.flags = 0;
12228 info.low_limit = TASK_UNMAPPED_BASE;
12229+
12230+#ifdef CONFIG_PAX_RANDMMAP
12231+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12232+ info.low_limit += mm->delta_mmap;
12233+#endif
12234+
12235 info.high_limit = STACK_TOP32;
12236 addr = vm_unmapped_area(&info);
12237 }
12238@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12239 struct mm_struct *mm = current->mm;
12240 struct vm_area_struct *vma;
12241 unsigned long task_size = TASK_SIZE;
12242+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12243
12244 if (test_thread_flag(TIF_32BIT))
12245 task_size = STACK_TOP32;
12246@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12247 return addr;
12248 }
12249
12250+#ifdef CONFIG_PAX_RANDMMAP
12251+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12252+#endif
12253+
12254 if (addr) {
12255 addr = ALIGN(addr, HPAGE_SIZE);
12256 vma = find_vma(mm, addr);
12257- if (task_size - len >= addr &&
12258- (!vma || addr + len <= vma->vm_start))
12259+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12260 return addr;
12261 }
12262 if (mm->get_unmapped_area == arch_get_unmapped_area)
12263 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12264- pgoff, flags);
12265+ pgoff, flags, offset);
12266 else
12267 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12268- pgoff, flags);
12269+ pgoff, flags, offset);
12270 }
12271
12272 pte_t *huge_pte_alloc(struct mm_struct *mm,
12273diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12274index 3ea267c..93f0659 100644
12275--- a/arch/sparc/mm/init_64.c
12276+++ b/arch/sparc/mm/init_64.c
12277@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12278 int num_kernel_image_mappings;
12279
12280 #ifdef CONFIG_DEBUG_DCFLUSH
12281-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12282+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12283 #ifdef CONFIG_SMP
12284-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12285+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12286 #endif
12287 #endif
12288
12289@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12290 {
12291 BUG_ON(tlb_type == hypervisor);
12292 #ifdef CONFIG_DEBUG_DCFLUSH
12293- atomic_inc(&dcpage_flushes);
12294+ atomic_inc_unchecked(&dcpage_flushes);
12295 #endif
12296
12297 #ifdef DCACHE_ALIASING_POSSIBLE
12298@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12299
12300 #ifdef CONFIG_DEBUG_DCFLUSH
12301 seq_printf(m, "DCPageFlushes\t: %d\n",
12302- atomic_read(&dcpage_flushes));
12303+ atomic_read_unchecked(&dcpage_flushes));
12304 #ifdef CONFIG_SMP
12305 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12306- atomic_read(&dcpage_flushes_xcall));
12307+ atomic_read_unchecked(&dcpage_flushes_xcall));
12308 #endif /* CONFIG_SMP */
12309 #endif /* CONFIG_DEBUG_DCFLUSH */
12310 }
12311diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12312index 7cca418..53fc030 100644
12313--- a/arch/tile/Kconfig
12314+++ b/arch/tile/Kconfig
12315@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12316
12317 config KEXEC
12318 bool "kexec system call"
12319+ depends on !GRKERNSEC_KMEM
12320 ---help---
12321 kexec is a system call that implements the ability to shutdown your
12322 current kernel, and to start another kernel. It is like a reboot
12323diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12324index 7b11c5f..755a026 100644
12325--- a/arch/tile/include/asm/atomic_64.h
12326+++ b/arch/tile/include/asm/atomic_64.h
12327@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12328
12329 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12330
12331+#define atomic64_read_unchecked(v) atomic64_read(v)
12332+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12333+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12334+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12335+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12336+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12337+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12338+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12339+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12340+
12341 /* Define this to indicate that cmpxchg is an efficient operation. */
12342 #define __HAVE_ARCH_CMPXCHG
12343
12344diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12345index 6160761..00cac88 100644
12346--- a/arch/tile/include/asm/cache.h
12347+++ b/arch/tile/include/asm/cache.h
12348@@ -15,11 +15,12 @@
12349 #ifndef _ASM_TILE_CACHE_H
12350 #define _ASM_TILE_CACHE_H
12351
12352+#include <linux/const.h>
12353 #include <arch/chip.h>
12354
12355 /* bytes per L1 data cache line */
12356 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12357-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12358+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12359
12360 /* bytes per L2 cache line */
12361 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12362diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12363index b6cde32..c0cb736 100644
12364--- a/arch/tile/include/asm/uaccess.h
12365+++ b/arch/tile/include/asm/uaccess.h
12366@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12367 const void __user *from,
12368 unsigned long n)
12369 {
12370- int sz = __compiletime_object_size(to);
12371+ size_t sz = __compiletime_object_size(to);
12372
12373- if (likely(sz == -1 || sz >= n))
12374+ if (likely(sz == (size_t)-1 || sz >= n))
12375 n = _copy_from_user(to, from, n);
12376 else
12377 copy_from_user_overflow();
12378diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12379index 3270e00..a77236e 100644
12380--- a/arch/tile/mm/hugetlbpage.c
12381+++ b/arch/tile/mm/hugetlbpage.c
12382@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12383 info.high_limit = TASK_SIZE;
12384 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12385 info.align_offset = 0;
12386+ info.threadstack_offset = 0;
12387 return vm_unmapped_area(&info);
12388 }
12389
12390@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12391 info.high_limit = current->mm->mmap_base;
12392 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12393 info.align_offset = 0;
12394+ info.threadstack_offset = 0;
12395 addr = vm_unmapped_area(&info);
12396
12397 /*
12398diff --git a/arch/um/Makefile b/arch/um/Makefile
12399index e4b1a96..16162f8 100644
12400--- a/arch/um/Makefile
12401+++ b/arch/um/Makefile
12402@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12403 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12404 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12405
12406+ifdef CONSTIFY_PLUGIN
12407+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12408+endif
12409+
12410 #This will adjust *FLAGS accordingly to the platform.
12411 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12412
12413diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12414index 19e1bdd..3665b77 100644
12415--- a/arch/um/include/asm/cache.h
12416+++ b/arch/um/include/asm/cache.h
12417@@ -1,6 +1,7 @@
12418 #ifndef __UM_CACHE_H
12419 #define __UM_CACHE_H
12420
12421+#include <linux/const.h>
12422
12423 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12424 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12425@@ -12,6 +13,6 @@
12426 # define L1_CACHE_SHIFT 5
12427 #endif
12428
12429-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12430+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12431
12432 #endif
12433diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12434index 2e0a6b1..a64d0f5 100644
12435--- a/arch/um/include/asm/kmap_types.h
12436+++ b/arch/um/include/asm/kmap_types.h
12437@@ -8,6 +8,6 @@
12438
12439 /* No more #include "asm/arch/kmap_types.h" ! */
12440
12441-#define KM_TYPE_NR 14
12442+#define KM_TYPE_NR 15
12443
12444 #endif
12445diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12446index 71c5d13..4c7b9f1 100644
12447--- a/arch/um/include/asm/page.h
12448+++ b/arch/um/include/asm/page.h
12449@@ -14,6 +14,9 @@
12450 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12451 #define PAGE_MASK (~(PAGE_SIZE-1))
12452
12453+#define ktla_ktva(addr) (addr)
12454+#define ktva_ktla(addr) (addr)
12455+
12456 #ifndef __ASSEMBLY__
12457
12458 struct page;
12459diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12460index 0032f92..cd151e0 100644
12461--- a/arch/um/include/asm/pgtable-3level.h
12462+++ b/arch/um/include/asm/pgtable-3level.h
12463@@ -58,6 +58,7 @@
12464 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12465 #define pud_populate(mm, pud, pmd) \
12466 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12467+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12468
12469 #ifdef CONFIG_64BIT
12470 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12471diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12472index f17bca8..48adb87 100644
12473--- a/arch/um/kernel/process.c
12474+++ b/arch/um/kernel/process.c
12475@@ -356,22 +356,6 @@ int singlestepping(void * t)
12476 return 2;
12477 }
12478
12479-/*
12480- * Only x86 and x86_64 have an arch_align_stack().
12481- * All other arches have "#define arch_align_stack(x) (x)"
12482- * in their asm/exec.h
12483- * As this is included in UML from asm-um/system-generic.h,
12484- * we can use it to behave as the subarch does.
12485- */
12486-#ifndef arch_align_stack
12487-unsigned long arch_align_stack(unsigned long sp)
12488-{
12489- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12490- sp -= get_random_int() % 8192;
12491- return sp & ~0xf;
12492-}
12493-#endif
12494-
12495 unsigned long get_wchan(struct task_struct *p)
12496 {
12497 unsigned long stack_page, sp, ip;
12498diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12499index ad8f795..2c7eec6 100644
12500--- a/arch/unicore32/include/asm/cache.h
12501+++ b/arch/unicore32/include/asm/cache.h
12502@@ -12,8 +12,10 @@
12503 #ifndef __UNICORE_CACHE_H__
12504 #define __UNICORE_CACHE_H__
12505
12506-#define L1_CACHE_SHIFT (5)
12507-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12508+#include <linux/const.h>
12509+
12510+#define L1_CACHE_SHIFT 5
12511+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12512
12513 /*
12514 * Memory returned by kmalloc() may be used for DMA, so we must make
12515diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12516index 0dc9d01..98df103 100644
12517--- a/arch/x86/Kconfig
12518+++ b/arch/x86/Kconfig
12519@@ -130,7 +130,7 @@ config X86
12520 select RTC_LIB
12521 select HAVE_DEBUG_STACKOVERFLOW
12522 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12523- select HAVE_CC_STACKPROTECTOR
12524+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12525 select GENERIC_CPU_AUTOPROBE
12526 select HAVE_ARCH_AUDITSYSCALL
12527 select ARCH_SUPPORTS_ATOMIC_RMW
12528@@ -263,7 +263,7 @@ config X86_HT
12529
12530 config X86_32_LAZY_GS
12531 def_bool y
12532- depends on X86_32 && !CC_STACKPROTECTOR
12533+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12534
12535 config ARCH_HWEIGHT_CFLAGS
12536 string
12537@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
12538
12539 menuconfig HYPERVISOR_GUEST
12540 bool "Linux guest support"
12541+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12542 ---help---
12543 Say Y here to enable options for running Linux under various hyper-
12544 visors. This option enables basic hypervisor detection and platform
12545@@ -978,6 +979,7 @@ config VM86
12546
12547 config X86_16BIT
12548 bool "Enable support for 16-bit segments" if EXPERT
12549+ depends on !GRKERNSEC
12550 default y
12551 ---help---
12552 This option is required by programs like Wine to run 16-bit
12553@@ -1151,6 +1153,7 @@ choice
12554
12555 config NOHIGHMEM
12556 bool "off"
12557+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12558 ---help---
12559 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12560 However, the address space of 32-bit x86 processors is only 4
12561@@ -1187,6 +1190,7 @@ config NOHIGHMEM
12562
12563 config HIGHMEM4G
12564 bool "4GB"
12565+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12566 ---help---
12567 Select this if you have a 32-bit processor and between 1 and 4
12568 gigabytes of physical RAM.
12569@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
12570 hex
12571 default 0xB0000000 if VMSPLIT_3G_OPT
12572 default 0x80000000 if VMSPLIT_2G
12573- default 0x78000000 if VMSPLIT_2G_OPT
12574+ default 0x70000000 if VMSPLIT_2G_OPT
12575 default 0x40000000 if VMSPLIT_1G
12576 default 0xC0000000
12577 depends on X86_32
12578@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
12579
12580 config KEXEC
12581 bool "kexec system call"
12582+ depends on !GRKERNSEC_KMEM
12583 ---help---
12584 kexec is a system call that implements the ability to shutdown your
12585 current kernel, and to start another kernel. It is like a reboot
12586@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
12587
12588 config PHYSICAL_ALIGN
12589 hex "Alignment value to which kernel should be aligned"
12590- default "0x200000"
12591+ default "0x1000000"
12592+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12593+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12594 range 0x2000 0x1000000 if X86_32
12595 range 0x200000 0x1000000 if X86_64
12596 ---help---
12597@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
12598 def_bool n
12599 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12600 depends on X86_32 || IA32_EMULATION
12601+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12602 ---help---
12603 Certain buggy versions of glibc will crash if they are
12604 presented with a 32-bit vDSO that is not mapped at the address
12605diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12606index 6983314..54ad7e8 100644
12607--- a/arch/x86/Kconfig.cpu
12608+++ b/arch/x86/Kconfig.cpu
12609@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12610
12611 config X86_F00F_BUG
12612 def_bool y
12613- depends on M586MMX || M586TSC || M586 || M486
12614+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12615
12616 config X86_INVD_BUG
12617 def_bool y
12618@@ -327,7 +327,7 @@ config X86_INVD_BUG
12619
12620 config X86_ALIGNMENT_16
12621 def_bool y
12622- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12623+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12624
12625 config X86_INTEL_USERCOPY
12626 def_bool y
12627@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12628 # generates cmov.
12629 config X86_CMOV
12630 def_bool y
12631- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12632+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12633
12634 config X86_MINIMUM_CPU_FAMILY
12635 int
12636diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12637index 61bd2ad..50b625d 100644
12638--- a/arch/x86/Kconfig.debug
12639+++ b/arch/x86/Kconfig.debug
12640@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12641 config DEBUG_RODATA
12642 bool "Write protect kernel read-only data structures"
12643 default y
12644- depends on DEBUG_KERNEL
12645+ depends on DEBUG_KERNEL && BROKEN
12646 ---help---
12647 Mark the kernel read-only data as write-protected in the pagetables,
12648 in order to catch accidental (and incorrect) writes to such const
12649@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12650
12651 config DEBUG_SET_MODULE_RONX
12652 bool "Set loadable kernel module data as NX and text as RO"
12653- depends on MODULES
12654+ depends on MODULES && BROKEN
12655 ---help---
12656 This option helps catch unintended modifications to loadable
12657 kernel module's text and read-only data. It also prevents execution
12658diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12659index 920e616..ac3d4df 100644
12660--- a/arch/x86/Makefile
12661+++ b/arch/x86/Makefile
12662@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12663 # CPU-specific tuning. Anything which can be shared with UML should go here.
12664 include $(srctree)/arch/x86/Makefile_32.cpu
12665 KBUILD_CFLAGS += $(cflags-y)
12666-
12667- # temporary until string.h is fixed
12668- KBUILD_CFLAGS += -ffreestanding
12669 else
12670 BITS := 64
12671 UTS_MACHINE := x86_64
12672@@ -107,6 +104,9 @@ else
12673 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12674 endif
12675
12676+# temporary until string.h is fixed
12677+KBUILD_CFLAGS += -ffreestanding
12678+
12679 # Make sure compiler does not have buggy stack-protector support.
12680 ifdef CONFIG_CC_STACKPROTECTOR
12681 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12682@@ -180,6 +180,7 @@ archheaders:
12683 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12684
12685 archprepare:
12686+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12687 ifeq ($(CONFIG_KEXEC_FILE),y)
12688 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12689 endif
12690@@ -263,3 +264,9 @@ define archhelp
12691 echo ' FDARGS="..." arguments for the booted kernel'
12692 echo ' FDINITRD=file initrd for the booted kernel'
12693 endef
12694+
12695+define OLD_LD
12696+
12697+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12698+*** Please upgrade your binutils to 2.18 or newer
12699+endef
12700diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12701index 3db07f3..9d81d0f 100644
12702--- a/arch/x86/boot/Makefile
12703+++ b/arch/x86/boot/Makefile
12704@@ -56,6 +56,9 @@ clean-files += cpustr.h
12705 # ---------------------------------------------------------------------------
12706
12707 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12708+ifdef CONSTIFY_PLUGIN
12709+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12710+endif
12711 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12712 GCOV_PROFILE := n
12713
12714diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12715index 878e4b9..20537ab 100644
12716--- a/arch/x86/boot/bitops.h
12717+++ b/arch/x86/boot/bitops.h
12718@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12719 u8 v;
12720 const u32 *p = (const u32 *)addr;
12721
12722- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12723+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12724 return v;
12725 }
12726
12727@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12728
12729 static inline void set_bit(int nr, void *addr)
12730 {
12731- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12732+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12733 }
12734
12735 #endif /* BOOT_BITOPS_H */
12736diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12737index bd49ec6..94c7f58 100644
12738--- a/arch/x86/boot/boot.h
12739+++ b/arch/x86/boot/boot.h
12740@@ -84,7 +84,7 @@ static inline void io_delay(void)
12741 static inline u16 ds(void)
12742 {
12743 u16 seg;
12744- asm("movw %%ds,%0" : "=rm" (seg));
12745+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12746 return seg;
12747 }
12748
12749diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12750index 8bd44e8..6b111e9 100644
12751--- a/arch/x86/boot/compressed/Makefile
12752+++ b/arch/x86/boot/compressed/Makefile
12753@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
12754 KBUILD_CFLAGS += -mno-mmx -mno-sse
12755 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12756 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12757+ifdef CONSTIFY_PLUGIN
12758+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12759+endif
12760
12761 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12762 GCOV_PROFILE := n
12763diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12764index a53440e..c3dbf1e 100644
12765--- a/arch/x86/boot/compressed/efi_stub_32.S
12766+++ b/arch/x86/boot/compressed/efi_stub_32.S
12767@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12768 * parameter 2, ..., param n. To make things easy, we save the return
12769 * address of efi_call_phys in a global variable.
12770 */
12771- popl %ecx
12772- movl %ecx, saved_return_addr(%edx)
12773- /* get the function pointer into ECX*/
12774- popl %ecx
12775- movl %ecx, efi_rt_function_ptr(%edx)
12776+ popl saved_return_addr(%edx)
12777+ popl efi_rt_function_ptr(%edx)
12778
12779 /*
12780 * 3. Call the physical function.
12781 */
12782- call *%ecx
12783+ call *efi_rt_function_ptr(%edx)
12784
12785 /*
12786 * 4. Balance the stack. And because EAX contain the return value,
12787@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12788 1: popl %edx
12789 subl $1b, %edx
12790
12791- movl efi_rt_function_ptr(%edx), %ecx
12792- pushl %ecx
12793+ pushl efi_rt_function_ptr(%edx)
12794
12795 /*
12796 * 10. Push the saved return address onto the stack and return.
12797 */
12798- movl saved_return_addr(%edx), %ecx
12799- pushl %ecx
12800- ret
12801+ jmpl *saved_return_addr(%edx)
12802 ENDPROC(efi_call_phys)
12803 .previous
12804
12805diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12806index 630384a..278e788 100644
12807--- a/arch/x86/boot/compressed/efi_thunk_64.S
12808+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12809@@ -189,8 +189,8 @@ efi_gdt64:
12810 .long 0 /* Filled out by user */
12811 .word 0
12812 .quad 0x0000000000000000 /* NULL descriptor */
12813- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12814- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12815+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12816+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12817 .quad 0x0080890000000000 /* TS descriptor */
12818 .quad 0x0000000000000000 /* TS continued */
12819 efi_gdt64_end:
12820diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12821index 1d7fbbc..36ecd58 100644
12822--- a/arch/x86/boot/compressed/head_32.S
12823+++ b/arch/x86/boot/compressed/head_32.S
12824@@ -140,10 +140,10 @@ preferred_addr:
12825 addl %eax, %ebx
12826 notl %eax
12827 andl %eax, %ebx
12828- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12829+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12830 jge 1f
12831 #endif
12832- movl $LOAD_PHYSICAL_ADDR, %ebx
12833+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12834 1:
12835
12836 /* Target address to relocate to for decompression */
12837diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12838index 6b1766c..ad465c9 100644
12839--- a/arch/x86/boot/compressed/head_64.S
12840+++ b/arch/x86/boot/compressed/head_64.S
12841@@ -94,10 +94,10 @@ ENTRY(startup_32)
12842 addl %eax, %ebx
12843 notl %eax
12844 andl %eax, %ebx
12845- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12846+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12847 jge 1f
12848 #endif
12849- movl $LOAD_PHYSICAL_ADDR, %ebx
12850+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12851 1:
12852
12853 /* Target address to relocate to for decompression */
12854@@ -322,10 +322,10 @@ preferred_addr:
12855 addq %rax, %rbp
12856 notq %rax
12857 andq %rax, %rbp
12858- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12859+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12860 jge 1f
12861 #endif
12862- movq $LOAD_PHYSICAL_ADDR, %rbp
12863+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12864 1:
12865
12866 /* Target address to relocate to for decompression */
12867@@ -434,8 +434,8 @@ gdt:
12868 .long gdt
12869 .word 0
12870 .quad 0x0000000000000000 /* NULL descriptor */
12871- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12872- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12873+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12874+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12875 .quad 0x0080890000000000 /* TS descriptor */
12876 .quad 0x0000000000000000 /* TS continued */
12877 gdt_end:
12878diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12879index a950864..c710239 100644
12880--- a/arch/x86/boot/compressed/misc.c
12881+++ b/arch/x86/boot/compressed/misc.c
12882@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12883 * Calculate the delta between where vmlinux was linked to load
12884 * and where it was actually loaded.
12885 */
12886- delta = min_addr - LOAD_PHYSICAL_ADDR;
12887+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12888 if (!delta) {
12889 debug_putstr("No relocation needed... ");
12890 return;
12891@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12892 Elf32_Ehdr ehdr;
12893 Elf32_Phdr *phdrs, *phdr;
12894 #endif
12895- void *dest;
12896+ void *dest, *prev;
12897 int i;
12898
12899 memcpy(&ehdr, output, sizeof(ehdr));
12900@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12901 case PT_LOAD:
12902 #ifdef CONFIG_RELOCATABLE
12903 dest = output;
12904- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12905+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12906 #else
12907 dest = (void *)(phdr->p_paddr);
12908 #endif
12909 memcpy(dest,
12910 output + phdr->p_offset,
12911 phdr->p_filesz);
12912+ if (i)
12913+ memset(prev, 0xff, dest - prev);
12914+ prev = dest + phdr->p_filesz;
12915 break;
12916 default: /* Ignore other PT_* */ break;
12917 }
12918@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12919 error("Destination address too large");
12920 #endif
12921 #ifndef CONFIG_RELOCATABLE
12922- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12923+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12924 error("Wrong destination address");
12925 #endif
12926
12927diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12928index 1fd7d57..0f7d096 100644
12929--- a/arch/x86/boot/cpucheck.c
12930+++ b/arch/x86/boot/cpucheck.c
12931@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12932 u32 ecx = MSR_K7_HWCR;
12933 u32 eax, edx;
12934
12935- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12936+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12937 eax &= ~(1 << 15);
12938- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12939+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12940
12941 get_cpuflags(); /* Make sure it really did something */
12942 err = check_cpuflags();
12943@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12944 u32 ecx = MSR_VIA_FCR;
12945 u32 eax, edx;
12946
12947- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12948+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12949 eax |= (1<<1)|(1<<7);
12950- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12951+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12952
12953 set_bit(X86_FEATURE_CX8, cpu.flags);
12954 err = check_cpuflags();
12955@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12956 u32 eax, edx;
12957 u32 level = 1;
12958
12959- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12960- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12961- asm("cpuid"
12962+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12963+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12964+ asm volatile("cpuid"
12965 : "+a" (level), "=d" (cpu.flags[0])
12966 : : "ecx", "ebx");
12967- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12968+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12969
12970 err = check_cpuflags();
12971 } else if (err == 0x01 &&
12972diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12973index 16ef025..91e033b 100644
12974--- a/arch/x86/boot/header.S
12975+++ b/arch/x86/boot/header.S
12976@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12977 # single linked list of
12978 # struct setup_data
12979
12980-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12981+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12982
12983 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12984+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12985+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12986+#else
12987 #define VO_INIT_SIZE (VO__end - VO__text)
12988+#endif
12989 #if ZO_INIT_SIZE > VO_INIT_SIZE
12990 #define INIT_SIZE ZO_INIT_SIZE
12991 #else
12992diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12993index db75d07..8e6d0af 100644
12994--- a/arch/x86/boot/memory.c
12995+++ b/arch/x86/boot/memory.c
12996@@ -19,7 +19,7 @@
12997
12998 static int detect_memory_e820(void)
12999 {
13000- int count = 0;
13001+ unsigned int count = 0;
13002 struct biosregs ireg, oreg;
13003 struct e820entry *desc = boot_params.e820_map;
13004 static struct e820entry buf; /* static so it is zeroed */
13005diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13006index ba3e100..6501b8f 100644
13007--- a/arch/x86/boot/video-vesa.c
13008+++ b/arch/x86/boot/video-vesa.c
13009@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13010
13011 boot_params.screen_info.vesapm_seg = oreg.es;
13012 boot_params.screen_info.vesapm_off = oreg.di;
13013+ boot_params.screen_info.vesapm_size = oreg.cx;
13014 }
13015
13016 /*
13017diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13018index 43eda28..5ab5fdb 100644
13019--- a/arch/x86/boot/video.c
13020+++ b/arch/x86/boot/video.c
13021@@ -96,7 +96,7 @@ static void store_mode_params(void)
13022 static unsigned int get_entry(void)
13023 {
13024 char entry_buf[4];
13025- int i, len = 0;
13026+ unsigned int i, len = 0;
13027 int key;
13028 unsigned int v;
13029
13030diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13031index 9105655..41779c1 100644
13032--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13033+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13034@@ -8,6 +8,8 @@
13035 * including this sentence is retained in full.
13036 */
13037
13038+#include <asm/alternative-asm.h>
13039+
13040 .extern crypto_ft_tab
13041 .extern crypto_it_tab
13042 .extern crypto_fl_tab
13043@@ -70,6 +72,8 @@
13044 je B192; \
13045 leaq 32(r9),r9;
13046
13047+#define ret pax_force_retaddr; ret
13048+
13049 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13050 movq r1,r2; \
13051 movq r3,r4; \
13052diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13053index 477e9d7..c92c7d8 100644
13054--- a/arch/x86/crypto/aesni-intel_asm.S
13055+++ b/arch/x86/crypto/aesni-intel_asm.S
13056@@ -31,6 +31,7 @@
13057
13058 #include <linux/linkage.h>
13059 #include <asm/inst.h>
13060+#include <asm/alternative-asm.h>
13061
13062 #ifdef __x86_64__
13063 .data
13064@@ -205,7 +206,7 @@ enc: .octa 0x2
13065 * num_initial_blocks = b mod 4
13066 * encrypt the initial num_initial_blocks blocks and apply ghash on
13067 * the ciphertext
13068-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13069+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13070 * are clobbered
13071 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13072 */
13073@@ -214,8 +215,8 @@ enc: .octa 0x2
13074 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13075 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13076 mov arg7, %r10 # %r10 = AAD
13077- mov arg8, %r12 # %r12 = aadLen
13078- mov %r12, %r11
13079+ mov arg8, %r15 # %r15 = aadLen
13080+ mov %r15, %r11
13081 pxor %xmm\i, %xmm\i
13082 _get_AAD_loop\num_initial_blocks\operation:
13083 movd (%r10), \TMP1
13084@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13085 psrldq $4, %xmm\i
13086 pxor \TMP1, %xmm\i
13087 add $4, %r10
13088- sub $4, %r12
13089+ sub $4, %r15
13090 jne _get_AAD_loop\num_initial_blocks\operation
13091 cmp $16, %r11
13092 je _get_AAD_loop2_done\num_initial_blocks\operation
13093- mov $16, %r12
13094+ mov $16, %r15
13095 _get_AAD_loop2\num_initial_blocks\operation:
13096 psrldq $4, %xmm\i
13097- sub $4, %r12
13098- cmp %r11, %r12
13099+ sub $4, %r15
13100+ cmp %r11, %r15
13101 jne _get_AAD_loop2\num_initial_blocks\operation
13102 _get_AAD_loop2_done\num_initial_blocks\operation:
13103 movdqa SHUF_MASK(%rip), %xmm14
13104@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13105 * num_initial_blocks = b mod 4
13106 * encrypt the initial num_initial_blocks blocks and apply ghash on
13107 * the ciphertext
13108-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13109+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13110 * are clobbered
13111 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13112 */
13113@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13114 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13115 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13116 mov arg7, %r10 # %r10 = AAD
13117- mov arg8, %r12 # %r12 = aadLen
13118- mov %r12, %r11
13119+ mov arg8, %r15 # %r15 = aadLen
13120+ mov %r15, %r11
13121 pxor %xmm\i, %xmm\i
13122 _get_AAD_loop\num_initial_blocks\operation:
13123 movd (%r10), \TMP1
13124@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13125 psrldq $4, %xmm\i
13126 pxor \TMP1, %xmm\i
13127 add $4, %r10
13128- sub $4, %r12
13129+ sub $4, %r15
13130 jne _get_AAD_loop\num_initial_blocks\operation
13131 cmp $16, %r11
13132 je _get_AAD_loop2_done\num_initial_blocks\operation
13133- mov $16, %r12
13134+ mov $16, %r15
13135 _get_AAD_loop2\num_initial_blocks\operation:
13136 psrldq $4, %xmm\i
13137- sub $4, %r12
13138- cmp %r11, %r12
13139+ sub $4, %r15
13140+ cmp %r11, %r15
13141 jne _get_AAD_loop2\num_initial_blocks\operation
13142 _get_AAD_loop2_done\num_initial_blocks\operation:
13143 movdqa SHUF_MASK(%rip), %xmm14
13144@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13145 *
13146 *****************************************************************************/
13147 ENTRY(aesni_gcm_dec)
13148- push %r12
13149+ push %r15
13150 push %r13
13151 push %r14
13152 mov %rsp, %r14
13153@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13154 */
13155 sub $VARIABLE_OFFSET, %rsp
13156 and $~63, %rsp # align rsp to 64 bytes
13157- mov %arg6, %r12
13158- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13159+ mov %arg6, %r15
13160+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13161 movdqa SHUF_MASK(%rip), %xmm2
13162 PSHUFB_XMM %xmm2, %xmm13
13163
13164@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13165 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13166 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13167 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13168- mov %r13, %r12
13169- and $(3<<4), %r12
13170+ mov %r13, %r15
13171+ and $(3<<4), %r15
13172 jz _initial_num_blocks_is_0_decrypt
13173- cmp $(2<<4), %r12
13174+ cmp $(2<<4), %r15
13175 jb _initial_num_blocks_is_1_decrypt
13176 je _initial_num_blocks_is_2_decrypt
13177 _initial_num_blocks_is_3_decrypt:
13178@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13179 sub $16, %r11
13180 add %r13, %r11
13181 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13182- lea SHIFT_MASK+16(%rip), %r12
13183- sub %r13, %r12
13184+ lea SHIFT_MASK+16(%rip), %r15
13185+ sub %r13, %r15
13186 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13187 # (%r13 is the number of bytes in plaintext mod 16)
13188- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13189+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13190 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13191
13192 movdqa %xmm1, %xmm2
13193 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13194- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13195+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13196 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13197 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13198 pand %xmm1, %xmm2
13199@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13200 sub $1, %r13
13201 jne _less_than_8_bytes_left_decrypt
13202 _multiple_of_16_bytes_decrypt:
13203- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13204- shl $3, %r12 # convert into number of bits
13205- movd %r12d, %xmm15 # len(A) in %xmm15
13206+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13207+ shl $3, %r15 # convert into number of bits
13208+ movd %r15d, %xmm15 # len(A) in %xmm15
13209 shl $3, %arg4 # len(C) in bits (*128)
13210 MOVQ_R64_XMM %arg4, %xmm1
13211 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13212@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13213 mov %r14, %rsp
13214 pop %r14
13215 pop %r13
13216- pop %r12
13217+ pop %r15
13218+ pax_force_retaddr
13219 ret
13220 ENDPROC(aesni_gcm_dec)
13221
13222@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13223 * poly = x^128 + x^127 + x^126 + x^121 + 1
13224 ***************************************************************************/
13225 ENTRY(aesni_gcm_enc)
13226- push %r12
13227+ push %r15
13228 push %r13
13229 push %r14
13230 mov %rsp, %r14
13231@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13232 #
13233 sub $VARIABLE_OFFSET, %rsp
13234 and $~63, %rsp
13235- mov %arg6, %r12
13236- movdqu (%r12), %xmm13
13237+ mov %arg6, %r15
13238+ movdqu (%r15), %xmm13
13239 movdqa SHUF_MASK(%rip), %xmm2
13240 PSHUFB_XMM %xmm2, %xmm13
13241
13242@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13243 movdqa %xmm13, HashKey(%rsp)
13244 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13245 and $-16, %r13
13246- mov %r13, %r12
13247+ mov %r13, %r15
13248
13249 # Encrypt first few blocks
13250
13251- and $(3<<4), %r12
13252+ and $(3<<4), %r15
13253 jz _initial_num_blocks_is_0_encrypt
13254- cmp $(2<<4), %r12
13255+ cmp $(2<<4), %r15
13256 jb _initial_num_blocks_is_1_encrypt
13257 je _initial_num_blocks_is_2_encrypt
13258 _initial_num_blocks_is_3_encrypt:
13259@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13260 sub $16, %r11
13261 add %r13, %r11
13262 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13263- lea SHIFT_MASK+16(%rip), %r12
13264- sub %r13, %r12
13265+ lea SHIFT_MASK+16(%rip), %r15
13266+ sub %r13, %r15
13267 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13268 # (%r13 is the number of bytes in plaintext mod 16)
13269- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13270+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13271 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13272 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13273- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13274+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13275 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13276 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13277 movdqa SHUF_MASK(%rip), %xmm10
13278@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13279 sub $1, %r13
13280 jne _less_than_8_bytes_left_encrypt
13281 _multiple_of_16_bytes_encrypt:
13282- mov arg8, %r12 # %r12 = addLen (number of bytes)
13283- shl $3, %r12
13284- movd %r12d, %xmm15 # len(A) in %xmm15
13285+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13286+ shl $3, %r15
13287+ movd %r15d, %xmm15 # len(A) in %xmm15
13288 shl $3, %arg4 # len(C) in bits (*128)
13289 MOVQ_R64_XMM %arg4, %xmm1
13290 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13291@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13292 mov %r14, %rsp
13293 pop %r14
13294 pop %r13
13295- pop %r12
13296+ pop %r15
13297+ pax_force_retaddr
13298 ret
13299 ENDPROC(aesni_gcm_enc)
13300
13301@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13302 pxor %xmm1, %xmm0
13303 movaps %xmm0, (TKEYP)
13304 add $0x10, TKEYP
13305+ pax_force_retaddr
13306 ret
13307 ENDPROC(_key_expansion_128)
13308 ENDPROC(_key_expansion_256a)
13309@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13310 shufps $0b01001110, %xmm2, %xmm1
13311 movaps %xmm1, 0x10(TKEYP)
13312 add $0x20, TKEYP
13313+ pax_force_retaddr
13314 ret
13315 ENDPROC(_key_expansion_192a)
13316
13317@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13318
13319 movaps %xmm0, (TKEYP)
13320 add $0x10, TKEYP
13321+ pax_force_retaddr
13322 ret
13323 ENDPROC(_key_expansion_192b)
13324
13325@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13326 pxor %xmm1, %xmm2
13327 movaps %xmm2, (TKEYP)
13328 add $0x10, TKEYP
13329+ pax_force_retaddr
13330 ret
13331 ENDPROC(_key_expansion_256b)
13332
13333@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13334 #ifndef __x86_64__
13335 popl KEYP
13336 #endif
13337+ pax_force_retaddr
13338 ret
13339 ENDPROC(aesni_set_key)
13340
13341@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13342 popl KLEN
13343 popl KEYP
13344 #endif
13345+ pax_force_retaddr
13346 ret
13347 ENDPROC(aesni_enc)
13348
13349@@ -1974,6 +1983,7 @@ _aesni_enc1:
13350 AESENC KEY STATE
13351 movaps 0x70(TKEYP), KEY
13352 AESENCLAST KEY STATE
13353+ pax_force_retaddr
13354 ret
13355 ENDPROC(_aesni_enc1)
13356
13357@@ -2083,6 +2093,7 @@ _aesni_enc4:
13358 AESENCLAST KEY STATE2
13359 AESENCLAST KEY STATE3
13360 AESENCLAST KEY STATE4
13361+ pax_force_retaddr
13362 ret
13363 ENDPROC(_aesni_enc4)
13364
13365@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13366 popl KLEN
13367 popl KEYP
13368 #endif
13369+ pax_force_retaddr
13370 ret
13371 ENDPROC(aesni_dec)
13372
13373@@ -2164,6 +2176,7 @@ _aesni_dec1:
13374 AESDEC KEY STATE
13375 movaps 0x70(TKEYP), KEY
13376 AESDECLAST KEY STATE
13377+ pax_force_retaddr
13378 ret
13379 ENDPROC(_aesni_dec1)
13380
13381@@ -2273,6 +2286,7 @@ _aesni_dec4:
13382 AESDECLAST KEY STATE2
13383 AESDECLAST KEY STATE3
13384 AESDECLAST KEY STATE4
13385+ pax_force_retaddr
13386 ret
13387 ENDPROC(_aesni_dec4)
13388
13389@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13390 popl KEYP
13391 popl LEN
13392 #endif
13393+ pax_force_retaddr
13394 ret
13395 ENDPROC(aesni_ecb_enc)
13396
13397@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13398 popl KEYP
13399 popl LEN
13400 #endif
13401+ pax_force_retaddr
13402 ret
13403 ENDPROC(aesni_ecb_dec)
13404
13405@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13406 popl LEN
13407 popl IVP
13408 #endif
13409+ pax_force_retaddr
13410 ret
13411 ENDPROC(aesni_cbc_enc)
13412
13413@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13414 popl LEN
13415 popl IVP
13416 #endif
13417+ pax_force_retaddr
13418 ret
13419 ENDPROC(aesni_cbc_dec)
13420
13421@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13422 mov $1, TCTR_LOW
13423 MOVQ_R64_XMM TCTR_LOW INC
13424 MOVQ_R64_XMM CTR TCTR_LOW
13425+ pax_force_retaddr
13426 ret
13427 ENDPROC(_aesni_inc_init)
13428
13429@@ -2579,6 +2598,7 @@ _aesni_inc:
13430 .Linc_low:
13431 movaps CTR, IV
13432 PSHUFB_XMM BSWAP_MASK IV
13433+ pax_force_retaddr
13434 ret
13435 ENDPROC(_aesni_inc)
13436
13437@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13438 .Lctr_enc_ret:
13439 movups IV, (IVP)
13440 .Lctr_enc_just_ret:
13441+ pax_force_retaddr
13442 ret
13443 ENDPROC(aesni_ctr_enc)
13444
13445@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13446 pxor INC, STATE4
13447 movdqu STATE4, 0x70(OUTP)
13448
13449+ pax_force_retaddr
13450 ret
13451 ENDPROC(aesni_xts_crypt8)
13452
13453diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13454index 246c670..466e2d6 100644
13455--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13456+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13457@@ -21,6 +21,7 @@
13458 */
13459
13460 #include <linux/linkage.h>
13461+#include <asm/alternative-asm.h>
13462
13463 .file "blowfish-x86_64-asm.S"
13464 .text
13465@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13466 jnz .L__enc_xor;
13467
13468 write_block();
13469+ pax_force_retaddr
13470 ret;
13471 .L__enc_xor:
13472 xor_block();
13473+ pax_force_retaddr
13474 ret;
13475 ENDPROC(__blowfish_enc_blk)
13476
13477@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13478
13479 movq %r11, %rbp;
13480
13481+ pax_force_retaddr
13482 ret;
13483 ENDPROC(blowfish_dec_blk)
13484
13485@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13486
13487 popq %rbx;
13488 popq %rbp;
13489+ pax_force_retaddr
13490 ret;
13491
13492 .L__enc_xor4:
13493@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13494
13495 popq %rbx;
13496 popq %rbp;
13497+ pax_force_retaddr
13498 ret;
13499 ENDPROC(__blowfish_enc_blk_4way)
13500
13501@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13502 popq %rbx;
13503 popq %rbp;
13504
13505+ pax_force_retaddr
13506 ret;
13507 ENDPROC(blowfish_dec_blk_4way)
13508diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13509index ce71f92..1dce7ec 100644
13510--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13511+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13512@@ -16,6 +16,7 @@
13513 */
13514
13515 #include <linux/linkage.h>
13516+#include <asm/alternative-asm.h>
13517
13518 #define CAMELLIA_TABLE_BYTE_LEN 272
13519
13520@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13521 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13522 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13523 %rcx, (%r9));
13524+ pax_force_retaddr
13525 ret;
13526 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13527
13528@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13529 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13530 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13531 %rax, (%r9));
13532+ pax_force_retaddr
13533 ret;
13534 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13535
13536@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13537 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13538 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13539
13540+ pax_force_retaddr
13541 ret;
13542
13543 .align 8
13544@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13545 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13546 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13547
13548+ pax_force_retaddr
13549 ret;
13550
13551 .align 8
13552@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13553 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13554 %xmm8, %rsi);
13555
13556+ pax_force_retaddr
13557 ret;
13558 ENDPROC(camellia_ecb_enc_16way)
13559
13560@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13561 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13562 %xmm8, %rsi);
13563
13564+ pax_force_retaddr
13565 ret;
13566 ENDPROC(camellia_ecb_dec_16way)
13567
13568@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13569 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13570 %xmm8, %rsi);
13571
13572+ pax_force_retaddr
13573 ret;
13574 ENDPROC(camellia_cbc_dec_16way)
13575
13576@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13577 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13578 %xmm8, %rsi);
13579
13580+ pax_force_retaddr
13581 ret;
13582 ENDPROC(camellia_ctr_16way)
13583
13584@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13585 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13586 %xmm8, %rsi);
13587
13588+ pax_force_retaddr
13589 ret;
13590 ENDPROC(camellia_xts_crypt_16way)
13591
13592diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13593index 0e0b886..5a3123c 100644
13594--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13595+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13596@@ -11,6 +11,7 @@
13597 */
13598
13599 #include <linux/linkage.h>
13600+#include <asm/alternative-asm.h>
13601
13602 #define CAMELLIA_TABLE_BYTE_LEN 272
13603
13604@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13605 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13606 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13607 %rcx, (%r9));
13608+ pax_force_retaddr
13609 ret;
13610 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13611
13612@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13613 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13614 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13615 %rax, (%r9));
13616+ pax_force_retaddr
13617 ret;
13618 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13619
13620@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13621 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13622 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13623
13624+ pax_force_retaddr
13625 ret;
13626
13627 .align 8
13628@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13629 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13630 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13631
13632+ pax_force_retaddr
13633 ret;
13634
13635 .align 8
13636@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13637
13638 vzeroupper;
13639
13640+ pax_force_retaddr
13641 ret;
13642 ENDPROC(camellia_ecb_enc_32way)
13643
13644@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13645
13646 vzeroupper;
13647
13648+ pax_force_retaddr
13649 ret;
13650 ENDPROC(camellia_ecb_dec_32way)
13651
13652@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13653
13654 vzeroupper;
13655
13656+ pax_force_retaddr
13657 ret;
13658 ENDPROC(camellia_cbc_dec_32way)
13659
13660@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13661
13662 vzeroupper;
13663
13664+ pax_force_retaddr
13665 ret;
13666 ENDPROC(camellia_ctr_32way)
13667
13668@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13669
13670 vzeroupper;
13671
13672+ pax_force_retaddr
13673 ret;
13674 ENDPROC(camellia_xts_crypt_32way)
13675
13676diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13677index 310319c..db3d7b5 100644
13678--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13679+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13680@@ -21,6 +21,7 @@
13681 */
13682
13683 #include <linux/linkage.h>
13684+#include <asm/alternative-asm.h>
13685
13686 .file "camellia-x86_64-asm_64.S"
13687 .text
13688@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13689 enc_outunpack(mov, RT1);
13690
13691 movq RRBP, %rbp;
13692+ pax_force_retaddr
13693 ret;
13694
13695 .L__enc_xor:
13696 enc_outunpack(xor, RT1);
13697
13698 movq RRBP, %rbp;
13699+ pax_force_retaddr
13700 ret;
13701 ENDPROC(__camellia_enc_blk)
13702
13703@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13704 dec_outunpack();
13705
13706 movq RRBP, %rbp;
13707+ pax_force_retaddr
13708 ret;
13709 ENDPROC(camellia_dec_blk)
13710
13711@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13712
13713 movq RRBP, %rbp;
13714 popq %rbx;
13715+ pax_force_retaddr
13716 ret;
13717
13718 .L__enc2_xor:
13719@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13720
13721 movq RRBP, %rbp;
13722 popq %rbx;
13723+ pax_force_retaddr
13724 ret;
13725 ENDPROC(__camellia_enc_blk_2way)
13726
13727@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13728
13729 movq RRBP, %rbp;
13730 movq RXOR, %rbx;
13731+ pax_force_retaddr
13732 ret;
13733 ENDPROC(camellia_dec_blk_2way)
13734diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13735index c35fd5d..2d8c7db 100644
13736--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13737+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13738@@ -24,6 +24,7 @@
13739 */
13740
13741 #include <linux/linkage.h>
13742+#include <asm/alternative-asm.h>
13743
13744 .file "cast5-avx-x86_64-asm_64.S"
13745
13746@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13747 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13748 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13749
13750+ pax_force_retaddr
13751 ret;
13752 ENDPROC(__cast5_enc_blk16)
13753
13754@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13755 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13756 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13757
13758+ pax_force_retaddr
13759 ret;
13760
13761 .L__skip_dec:
13762@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13763 vmovdqu RR4, (6*4*4)(%r11);
13764 vmovdqu RL4, (7*4*4)(%r11);
13765
13766+ pax_force_retaddr
13767 ret;
13768 ENDPROC(cast5_ecb_enc_16way)
13769
13770@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13771 vmovdqu RR4, (6*4*4)(%r11);
13772 vmovdqu RL4, (7*4*4)(%r11);
13773
13774+ pax_force_retaddr
13775 ret;
13776 ENDPROC(cast5_ecb_dec_16way)
13777
13778@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13779 * %rdx: src
13780 */
13781
13782- pushq %r12;
13783+ pushq %r14;
13784
13785 movq %rsi, %r11;
13786- movq %rdx, %r12;
13787+ movq %rdx, %r14;
13788
13789 vmovdqu (0*16)(%rdx), RL1;
13790 vmovdqu (1*16)(%rdx), RR1;
13791@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13792 call __cast5_dec_blk16;
13793
13794 /* xor with src */
13795- vmovq (%r12), RX;
13796+ vmovq (%r14), RX;
13797 vpshufd $0x4f, RX, RX;
13798 vpxor RX, RR1, RR1;
13799- vpxor 0*16+8(%r12), RL1, RL1;
13800- vpxor 1*16+8(%r12), RR2, RR2;
13801- vpxor 2*16+8(%r12), RL2, RL2;
13802- vpxor 3*16+8(%r12), RR3, RR3;
13803- vpxor 4*16+8(%r12), RL3, RL3;
13804- vpxor 5*16+8(%r12), RR4, RR4;
13805- vpxor 6*16+8(%r12), RL4, RL4;
13806+ vpxor 0*16+8(%r14), RL1, RL1;
13807+ vpxor 1*16+8(%r14), RR2, RR2;
13808+ vpxor 2*16+8(%r14), RL2, RL2;
13809+ vpxor 3*16+8(%r14), RR3, RR3;
13810+ vpxor 4*16+8(%r14), RL3, RL3;
13811+ vpxor 5*16+8(%r14), RR4, RR4;
13812+ vpxor 6*16+8(%r14), RL4, RL4;
13813
13814 vmovdqu RR1, (0*16)(%r11);
13815 vmovdqu RL1, (1*16)(%r11);
13816@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13817 vmovdqu RR4, (6*16)(%r11);
13818 vmovdqu RL4, (7*16)(%r11);
13819
13820- popq %r12;
13821+ popq %r14;
13822
13823+ pax_force_retaddr
13824 ret;
13825 ENDPROC(cast5_cbc_dec_16way)
13826
13827@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13828 * %rcx: iv (big endian, 64bit)
13829 */
13830
13831- pushq %r12;
13832+ pushq %r14;
13833
13834 movq %rsi, %r11;
13835- movq %rdx, %r12;
13836+ movq %rdx, %r14;
13837
13838 vpcmpeqd RTMP, RTMP, RTMP;
13839 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13840@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13841 call __cast5_enc_blk16;
13842
13843 /* dst = src ^ iv */
13844- vpxor (0*16)(%r12), RR1, RR1;
13845- vpxor (1*16)(%r12), RL1, RL1;
13846- vpxor (2*16)(%r12), RR2, RR2;
13847- vpxor (3*16)(%r12), RL2, RL2;
13848- vpxor (4*16)(%r12), RR3, RR3;
13849- vpxor (5*16)(%r12), RL3, RL3;
13850- vpxor (6*16)(%r12), RR4, RR4;
13851- vpxor (7*16)(%r12), RL4, RL4;
13852+ vpxor (0*16)(%r14), RR1, RR1;
13853+ vpxor (1*16)(%r14), RL1, RL1;
13854+ vpxor (2*16)(%r14), RR2, RR2;
13855+ vpxor (3*16)(%r14), RL2, RL2;
13856+ vpxor (4*16)(%r14), RR3, RR3;
13857+ vpxor (5*16)(%r14), RL3, RL3;
13858+ vpxor (6*16)(%r14), RR4, RR4;
13859+ vpxor (7*16)(%r14), RL4, RL4;
13860 vmovdqu RR1, (0*16)(%r11);
13861 vmovdqu RL1, (1*16)(%r11);
13862 vmovdqu RR2, (2*16)(%r11);
13863@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13864 vmovdqu RR4, (6*16)(%r11);
13865 vmovdqu RL4, (7*16)(%r11);
13866
13867- popq %r12;
13868+ popq %r14;
13869
13870+ pax_force_retaddr
13871 ret;
13872 ENDPROC(cast5_ctr_16way)
13873diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13874index e3531f8..e123f35 100644
13875--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13876+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13877@@ -24,6 +24,7 @@
13878 */
13879
13880 #include <linux/linkage.h>
13881+#include <asm/alternative-asm.h>
13882 #include "glue_helper-asm-avx.S"
13883
13884 .file "cast6-avx-x86_64-asm_64.S"
13885@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13886 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13887 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13888
13889+ pax_force_retaddr
13890 ret;
13891 ENDPROC(__cast6_enc_blk8)
13892
13893@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13894 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13895 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13896
13897+ pax_force_retaddr
13898 ret;
13899 ENDPROC(__cast6_dec_blk8)
13900
13901@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13902
13903 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13904
13905+ pax_force_retaddr
13906 ret;
13907 ENDPROC(cast6_ecb_enc_8way)
13908
13909@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13910
13911 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13912
13913+ pax_force_retaddr
13914 ret;
13915 ENDPROC(cast6_ecb_dec_8way)
13916
13917@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13918 * %rdx: src
13919 */
13920
13921- pushq %r12;
13922+ pushq %r14;
13923
13924 movq %rsi, %r11;
13925- movq %rdx, %r12;
13926+ movq %rdx, %r14;
13927
13928 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13929
13930 call __cast6_dec_blk8;
13931
13932- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13933+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13934
13935- popq %r12;
13936+ popq %r14;
13937
13938+ pax_force_retaddr
13939 ret;
13940 ENDPROC(cast6_cbc_dec_8way)
13941
13942@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13943 * %rcx: iv (little endian, 128bit)
13944 */
13945
13946- pushq %r12;
13947+ pushq %r14;
13948
13949 movq %rsi, %r11;
13950- movq %rdx, %r12;
13951+ movq %rdx, %r14;
13952
13953 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13954 RD2, RX, RKR, RKM);
13955
13956 call __cast6_enc_blk8;
13957
13958- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13959+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13960
13961- popq %r12;
13962+ popq %r14;
13963
13964+ pax_force_retaddr
13965 ret;
13966 ENDPROC(cast6_ctr_8way)
13967
13968@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13969 /* dst <= regs xor IVs(in dst) */
13970 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13971
13972+ pax_force_retaddr
13973 ret;
13974 ENDPROC(cast6_xts_enc_8way)
13975
13976@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13977 /* dst <= regs xor IVs(in dst) */
13978 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13979
13980+ pax_force_retaddr
13981 ret;
13982 ENDPROC(cast6_xts_dec_8way)
13983diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13984index 26d49eb..c0a8c84 100644
13985--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13986+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13987@@ -45,6 +45,7 @@
13988
13989 #include <asm/inst.h>
13990 #include <linux/linkage.h>
13991+#include <asm/alternative-asm.h>
13992
13993 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13994
13995@@ -309,6 +310,7 @@ do_return:
13996 popq %rsi
13997 popq %rdi
13998 popq %rbx
13999+ pax_force_retaddr
14000 ret
14001
14002 ################################################################
14003diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14004index 5d1e007..098cb4f 100644
14005--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14006+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14007@@ -18,6 +18,7 @@
14008
14009 #include <linux/linkage.h>
14010 #include <asm/inst.h>
14011+#include <asm/alternative-asm.h>
14012
14013 .data
14014
14015@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14016 psrlq $1, T2
14017 pxor T2, T1
14018 pxor T1, DATA
14019+ pax_force_retaddr
14020 ret
14021 ENDPROC(__clmul_gf128mul_ble)
14022
14023@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14024 call __clmul_gf128mul_ble
14025 PSHUFB_XMM BSWAP DATA
14026 movups DATA, (%rdi)
14027+ pax_force_retaddr
14028 ret
14029 ENDPROC(clmul_ghash_mul)
14030
14031@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14032 PSHUFB_XMM BSWAP DATA
14033 movups DATA, (%rdi)
14034 .Lupdate_just_ret:
14035+ pax_force_retaddr
14036 ret
14037 ENDPROC(clmul_ghash_update)
14038diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14039index 9279e0b..c4b3d2c 100644
14040--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14041+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14042@@ -1,4 +1,5 @@
14043 #include <linux/linkage.h>
14044+#include <asm/alternative-asm.h>
14045
14046 # enter salsa20_encrypt_bytes
14047 ENTRY(salsa20_encrypt_bytes)
14048@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14049 add %r11,%rsp
14050 mov %rdi,%rax
14051 mov %rsi,%rdx
14052+ pax_force_retaddr
14053 ret
14054 # bytesatleast65:
14055 ._bytesatleast65:
14056@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14057 add %r11,%rsp
14058 mov %rdi,%rax
14059 mov %rsi,%rdx
14060+ pax_force_retaddr
14061 ret
14062 ENDPROC(salsa20_keysetup)
14063
14064@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14065 add %r11,%rsp
14066 mov %rdi,%rax
14067 mov %rsi,%rdx
14068+ pax_force_retaddr
14069 ret
14070 ENDPROC(salsa20_ivsetup)
14071diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14072index 2f202f4..d9164d6 100644
14073--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14074+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14075@@ -24,6 +24,7 @@
14076 */
14077
14078 #include <linux/linkage.h>
14079+#include <asm/alternative-asm.h>
14080 #include "glue_helper-asm-avx.S"
14081
14082 .file "serpent-avx-x86_64-asm_64.S"
14083@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14084 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14085 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14086
14087+ pax_force_retaddr
14088 ret;
14089 ENDPROC(__serpent_enc_blk8_avx)
14090
14091@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14092 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14093 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14094
14095+ pax_force_retaddr
14096 ret;
14097 ENDPROC(__serpent_dec_blk8_avx)
14098
14099@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14100
14101 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14102
14103+ pax_force_retaddr
14104 ret;
14105 ENDPROC(serpent_ecb_enc_8way_avx)
14106
14107@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14108
14109 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14110
14111+ pax_force_retaddr
14112 ret;
14113 ENDPROC(serpent_ecb_dec_8way_avx)
14114
14115@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14116
14117 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14118
14119+ pax_force_retaddr
14120 ret;
14121 ENDPROC(serpent_cbc_dec_8way_avx)
14122
14123@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14124
14125 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14126
14127+ pax_force_retaddr
14128 ret;
14129 ENDPROC(serpent_ctr_8way_avx)
14130
14131@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14132 /* dst <= regs xor IVs(in dst) */
14133 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14134
14135+ pax_force_retaddr
14136 ret;
14137 ENDPROC(serpent_xts_enc_8way_avx)
14138
14139@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14140 /* dst <= regs xor IVs(in dst) */
14141 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14142
14143+ pax_force_retaddr
14144 ret;
14145 ENDPROC(serpent_xts_dec_8way_avx)
14146diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14147index b222085..abd483c 100644
14148--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14149+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14150@@ -15,6 +15,7 @@
14151 */
14152
14153 #include <linux/linkage.h>
14154+#include <asm/alternative-asm.h>
14155 #include "glue_helper-asm-avx2.S"
14156
14157 .file "serpent-avx2-asm_64.S"
14158@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14159 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14160 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14161
14162+ pax_force_retaddr
14163 ret;
14164 ENDPROC(__serpent_enc_blk16)
14165
14166@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14167 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14168 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14169
14170+ pax_force_retaddr
14171 ret;
14172 ENDPROC(__serpent_dec_blk16)
14173
14174@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14175
14176 vzeroupper;
14177
14178+ pax_force_retaddr
14179 ret;
14180 ENDPROC(serpent_ecb_enc_16way)
14181
14182@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14183
14184 vzeroupper;
14185
14186+ pax_force_retaddr
14187 ret;
14188 ENDPROC(serpent_ecb_dec_16way)
14189
14190@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14191
14192 vzeroupper;
14193
14194+ pax_force_retaddr
14195 ret;
14196 ENDPROC(serpent_cbc_dec_16way)
14197
14198@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14199
14200 vzeroupper;
14201
14202+ pax_force_retaddr
14203 ret;
14204 ENDPROC(serpent_ctr_16way)
14205
14206@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14207
14208 vzeroupper;
14209
14210+ pax_force_retaddr
14211 ret;
14212 ENDPROC(serpent_xts_enc_16way)
14213
14214@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14215
14216 vzeroupper;
14217
14218+ pax_force_retaddr
14219 ret;
14220 ENDPROC(serpent_xts_dec_16way)
14221diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14222index acc066c..1559cc4 100644
14223--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14224+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14225@@ -25,6 +25,7 @@
14226 */
14227
14228 #include <linux/linkage.h>
14229+#include <asm/alternative-asm.h>
14230
14231 .file "serpent-sse2-x86_64-asm_64.S"
14232 .text
14233@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14234 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14235 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14236
14237+ pax_force_retaddr
14238 ret;
14239
14240 .L__enc_xor8:
14241 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14242 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14243
14244+ pax_force_retaddr
14245 ret;
14246 ENDPROC(__serpent_enc_blk_8way)
14247
14248@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14249 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14250 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14251
14252+ pax_force_retaddr
14253 ret;
14254 ENDPROC(serpent_dec_blk_8way)
14255diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14256index a410950..9dfe7ad 100644
14257--- a/arch/x86/crypto/sha1_ssse3_asm.S
14258+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14259@@ -29,6 +29,7 @@
14260 */
14261
14262 #include <linux/linkage.h>
14263+#include <asm/alternative-asm.h>
14264
14265 #define CTX %rdi // arg1
14266 #define BUF %rsi // arg2
14267@@ -75,9 +76,9 @@
14268
14269 push %rbx
14270 push %rbp
14271- push %r12
14272+ push %r14
14273
14274- mov %rsp, %r12
14275+ mov %rsp, %r14
14276 sub $64, %rsp # allocate workspace
14277 and $~15, %rsp # align stack
14278
14279@@ -99,11 +100,12 @@
14280 xor %rax, %rax
14281 rep stosq
14282
14283- mov %r12, %rsp # deallocate workspace
14284+ mov %r14, %rsp # deallocate workspace
14285
14286- pop %r12
14287+ pop %r14
14288 pop %rbp
14289 pop %rbx
14290+ pax_force_retaddr
14291 ret
14292
14293 ENDPROC(\name)
14294diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14295index 642f156..51a513c 100644
14296--- a/arch/x86/crypto/sha256-avx-asm.S
14297+++ b/arch/x86/crypto/sha256-avx-asm.S
14298@@ -49,6 +49,7 @@
14299
14300 #ifdef CONFIG_AS_AVX
14301 #include <linux/linkage.h>
14302+#include <asm/alternative-asm.h>
14303
14304 ## assume buffers not aligned
14305 #define VMOVDQ vmovdqu
14306@@ -460,6 +461,7 @@ done_hash:
14307 popq %r13
14308 popq %rbp
14309 popq %rbx
14310+ pax_force_retaddr
14311 ret
14312 ENDPROC(sha256_transform_avx)
14313
14314diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14315index 9e86944..3795e6a 100644
14316--- a/arch/x86/crypto/sha256-avx2-asm.S
14317+++ b/arch/x86/crypto/sha256-avx2-asm.S
14318@@ -50,6 +50,7 @@
14319
14320 #ifdef CONFIG_AS_AVX2
14321 #include <linux/linkage.h>
14322+#include <asm/alternative-asm.h>
14323
14324 ## assume buffers not aligned
14325 #define VMOVDQ vmovdqu
14326@@ -720,6 +721,7 @@ done_hash:
14327 popq %r12
14328 popq %rbp
14329 popq %rbx
14330+ pax_force_retaddr
14331 ret
14332 ENDPROC(sha256_transform_rorx)
14333
14334diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14335index f833b74..8c62a9e 100644
14336--- a/arch/x86/crypto/sha256-ssse3-asm.S
14337+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14338@@ -47,6 +47,7 @@
14339 ########################################################################
14340
14341 #include <linux/linkage.h>
14342+#include <asm/alternative-asm.h>
14343
14344 ## assume buffers not aligned
14345 #define MOVDQ movdqu
14346@@ -471,6 +472,7 @@ done_hash:
14347 popq %rbp
14348 popq %rbx
14349
14350+ pax_force_retaddr
14351 ret
14352 ENDPROC(sha256_transform_ssse3)
14353
14354diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14355index 974dde9..a823ff9 100644
14356--- a/arch/x86/crypto/sha512-avx-asm.S
14357+++ b/arch/x86/crypto/sha512-avx-asm.S
14358@@ -49,6 +49,7 @@
14359
14360 #ifdef CONFIG_AS_AVX
14361 #include <linux/linkage.h>
14362+#include <asm/alternative-asm.h>
14363
14364 .text
14365
14366@@ -364,6 +365,7 @@ updateblock:
14367 mov frame_RSPSAVE(%rsp), %rsp
14368
14369 nowork:
14370+ pax_force_retaddr
14371 ret
14372 ENDPROC(sha512_transform_avx)
14373
14374diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14375index 568b961..ed20c37 100644
14376--- a/arch/x86/crypto/sha512-avx2-asm.S
14377+++ b/arch/x86/crypto/sha512-avx2-asm.S
14378@@ -51,6 +51,7 @@
14379
14380 #ifdef CONFIG_AS_AVX2
14381 #include <linux/linkage.h>
14382+#include <asm/alternative-asm.h>
14383
14384 .text
14385
14386@@ -678,6 +679,7 @@ done_hash:
14387
14388 # Restore Stack Pointer
14389 mov frame_RSPSAVE(%rsp), %rsp
14390+ pax_force_retaddr
14391 ret
14392 ENDPROC(sha512_transform_rorx)
14393
14394diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14395index fb56855..6edd768 100644
14396--- a/arch/x86/crypto/sha512-ssse3-asm.S
14397+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14398@@ -48,6 +48,7 @@
14399 ########################################################################
14400
14401 #include <linux/linkage.h>
14402+#include <asm/alternative-asm.h>
14403
14404 .text
14405
14406@@ -363,6 +364,7 @@ updateblock:
14407 mov frame_RSPSAVE(%rsp), %rsp
14408
14409 nowork:
14410+ pax_force_retaddr
14411 ret
14412 ENDPROC(sha512_transform_ssse3)
14413
14414diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14415index 0505813..b067311 100644
14416--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14417+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14418@@ -24,6 +24,7 @@
14419 */
14420
14421 #include <linux/linkage.h>
14422+#include <asm/alternative-asm.h>
14423 #include "glue_helper-asm-avx.S"
14424
14425 .file "twofish-avx-x86_64-asm_64.S"
14426@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14427 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14428 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14429
14430+ pax_force_retaddr
14431 ret;
14432 ENDPROC(__twofish_enc_blk8)
14433
14434@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14435 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14436 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14437
14438+ pax_force_retaddr
14439 ret;
14440 ENDPROC(__twofish_dec_blk8)
14441
14442@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14443
14444 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14445
14446+ pax_force_retaddr
14447 ret;
14448 ENDPROC(twofish_ecb_enc_8way)
14449
14450@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14451
14452 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14453
14454+ pax_force_retaddr
14455 ret;
14456 ENDPROC(twofish_ecb_dec_8way)
14457
14458@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14459 * %rdx: src
14460 */
14461
14462- pushq %r12;
14463+ pushq %r14;
14464
14465 movq %rsi, %r11;
14466- movq %rdx, %r12;
14467+ movq %rdx, %r14;
14468
14469 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14470
14471 call __twofish_dec_blk8;
14472
14473- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14474+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14475
14476- popq %r12;
14477+ popq %r14;
14478
14479+ pax_force_retaddr
14480 ret;
14481 ENDPROC(twofish_cbc_dec_8way)
14482
14483@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14484 * %rcx: iv (little endian, 128bit)
14485 */
14486
14487- pushq %r12;
14488+ pushq %r14;
14489
14490 movq %rsi, %r11;
14491- movq %rdx, %r12;
14492+ movq %rdx, %r14;
14493
14494 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14495 RD2, RX0, RX1, RY0);
14496
14497 call __twofish_enc_blk8;
14498
14499- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14500+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14501
14502- popq %r12;
14503+ popq %r14;
14504
14505+ pax_force_retaddr
14506 ret;
14507 ENDPROC(twofish_ctr_8way)
14508
14509@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14510 /* dst <= regs xor IVs(in dst) */
14511 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14512
14513+ pax_force_retaddr
14514 ret;
14515 ENDPROC(twofish_xts_enc_8way)
14516
14517@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14518 /* dst <= regs xor IVs(in dst) */
14519 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14520
14521+ pax_force_retaddr
14522 ret;
14523 ENDPROC(twofish_xts_dec_8way)
14524diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14525index 1c3b7ce..02f578d 100644
14526--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14527+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14528@@ -21,6 +21,7 @@
14529 */
14530
14531 #include <linux/linkage.h>
14532+#include <asm/alternative-asm.h>
14533
14534 .file "twofish-x86_64-asm-3way.S"
14535 .text
14536@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14537 popq %r13;
14538 popq %r14;
14539 popq %r15;
14540+ pax_force_retaddr
14541 ret;
14542
14543 .L__enc_xor3:
14544@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14545 popq %r13;
14546 popq %r14;
14547 popq %r15;
14548+ pax_force_retaddr
14549 ret;
14550 ENDPROC(__twofish_enc_blk_3way)
14551
14552@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14553 popq %r13;
14554 popq %r14;
14555 popq %r15;
14556+ pax_force_retaddr
14557 ret;
14558 ENDPROC(twofish_dec_blk_3way)
14559diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14560index a039d21..524b8b2 100644
14561--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14562+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14563@@ -22,6 +22,7 @@
14564
14565 #include <linux/linkage.h>
14566 #include <asm/asm-offsets.h>
14567+#include <asm/alternative-asm.h>
14568
14569 #define a_offset 0
14570 #define b_offset 4
14571@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14572
14573 popq R1
14574 movq $1,%rax
14575+ pax_force_retaddr
14576 ret
14577 ENDPROC(twofish_enc_blk)
14578
14579@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14580
14581 popq R1
14582 movq $1,%rax
14583+ pax_force_retaddr
14584 ret
14585 ENDPROC(twofish_dec_blk)
14586diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14587index ae6aad1..719d6d9 100644
14588--- a/arch/x86/ia32/ia32_aout.c
14589+++ b/arch/x86/ia32/ia32_aout.c
14590@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14591 unsigned long dump_start, dump_size;
14592 struct user32 dump;
14593
14594+ memset(&dump, 0, sizeof(dump));
14595+
14596 fs = get_fs();
14597 set_fs(KERNEL_DS);
14598 has_dumped = 1;
14599diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14600index f9e181a..300544c 100644
14601--- a/arch/x86/ia32/ia32_signal.c
14602+++ b/arch/x86/ia32/ia32_signal.c
14603@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14604 if (__get_user(set.sig[0], &frame->sc.oldmask)
14605 || (_COMPAT_NSIG_WORDS > 1
14606 && __copy_from_user((((char *) &set.sig) + 4),
14607- &frame->extramask,
14608+ frame->extramask,
14609 sizeof(frame->extramask))))
14610 goto badframe;
14611
14612@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14613 sp -= frame_size;
14614 /* Align the stack pointer according to the i386 ABI,
14615 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14616- sp = ((sp + 4) & -16ul) - 4;
14617+ sp = ((sp - 12) & -16ul) - 4;
14618 return (void __user *) sp;
14619 }
14620
14621@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14622 } else {
14623 /* Return stub is in 32bit vsyscall page */
14624 if (current->mm->context.vdso)
14625- restorer = current->mm->context.vdso +
14626- selected_vdso32->sym___kernel_sigreturn;
14627+ restorer = (void __force_user *)(current->mm->context.vdso +
14628+ selected_vdso32->sym___kernel_sigreturn);
14629 else
14630- restorer = &frame->retcode;
14631+ restorer = frame->retcode;
14632 }
14633
14634 put_user_try {
14635@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14636 * These are actually not used anymore, but left because some
14637 * gdb versions depend on them as a marker.
14638 */
14639- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14640+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14641 } put_user_catch(err);
14642
14643 if (err)
14644@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14645 0xb8,
14646 __NR_ia32_rt_sigreturn,
14647 0x80cd,
14648- 0,
14649+ 0
14650 };
14651
14652 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14653@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14654
14655 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14656 restorer = ksig->ka.sa.sa_restorer;
14657+ else if (current->mm->context.vdso)
14658+ /* Return stub is in 32bit vsyscall page */
14659+ restorer = (void __force_user *)(current->mm->context.vdso +
14660+ selected_vdso32->sym___kernel_rt_sigreturn);
14661 else
14662- restorer = current->mm->context.vdso +
14663- selected_vdso32->sym___kernel_rt_sigreturn;
14664+ restorer = frame->retcode;
14665 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14666
14667 /*
14668 * Not actually used anymore, but left because some gdb
14669 * versions need it.
14670 */
14671- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14672+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14673 } put_user_catch(err);
14674
14675 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14676diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14677index 82e8a1d..4e998d5 100644
14678--- a/arch/x86/ia32/ia32entry.S
14679+++ b/arch/x86/ia32/ia32entry.S
14680@@ -15,8 +15,10 @@
14681 #include <asm/irqflags.h>
14682 #include <asm/asm.h>
14683 #include <asm/smap.h>
14684+#include <asm/pgtable.h>
14685 #include <linux/linkage.h>
14686 #include <linux/err.h>
14687+#include <asm/alternative-asm.h>
14688
14689 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14690 #include <linux/elf-em.h>
14691@@ -62,12 +64,12 @@
14692 */
14693 .macro LOAD_ARGS32 offset, _r9=0
14694 .if \_r9
14695- movl \offset+16(%rsp),%r9d
14696+ movl \offset+R9(%rsp),%r9d
14697 .endif
14698- movl \offset+40(%rsp),%ecx
14699- movl \offset+48(%rsp),%edx
14700- movl \offset+56(%rsp),%esi
14701- movl \offset+64(%rsp),%edi
14702+ movl \offset+RCX(%rsp),%ecx
14703+ movl \offset+RDX(%rsp),%edx
14704+ movl \offset+RSI(%rsp),%esi
14705+ movl \offset+RDI(%rsp),%edi
14706 movl %eax,%eax /* zero extension */
14707 .endm
14708
14709@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14710 ENDPROC(native_irq_enable_sysexit)
14711 #endif
14712
14713+ .macro pax_enter_kernel_user
14714+ pax_set_fptr_mask
14715+#ifdef CONFIG_PAX_MEMORY_UDEREF
14716+ call pax_enter_kernel_user
14717+#endif
14718+ .endm
14719+
14720+ .macro pax_exit_kernel_user
14721+#ifdef CONFIG_PAX_MEMORY_UDEREF
14722+ call pax_exit_kernel_user
14723+#endif
14724+#ifdef CONFIG_PAX_RANDKSTACK
14725+ pushq %rax
14726+ pushq %r11
14727+ call pax_randomize_kstack
14728+ popq %r11
14729+ popq %rax
14730+#endif
14731+ .endm
14732+
14733+ .macro pax_erase_kstack
14734+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14735+ call pax_erase_kstack
14736+#endif
14737+ .endm
14738+
14739 /*
14740 * 32bit SYSENTER instruction entry.
14741 *
14742@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14743 CFI_REGISTER rsp,rbp
14744 SWAPGS_UNSAFE_STACK
14745 movq PER_CPU_VAR(kernel_stack), %rsp
14746- addq $(KERNEL_STACK_OFFSET),%rsp
14747- /*
14748- * No need to follow this irqs on/off section: the syscall
14749- * disabled irqs, here we enable it straight after entry:
14750- */
14751- ENABLE_INTERRUPTS(CLBR_NONE)
14752 movl %ebp,%ebp /* zero extension */
14753 pushq_cfi $__USER32_DS
14754 /*CFI_REL_OFFSET ss,0*/
14755@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14756 CFI_REL_OFFSET rsp,0
14757 pushfq_cfi
14758 /*CFI_REL_OFFSET rflags,0*/
14759- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14760- CFI_REGISTER rip,r10
14761+ orl $X86_EFLAGS_IF,(%rsp)
14762+ GET_THREAD_INFO(%r11)
14763+ movl TI_sysenter_return(%r11), %r11d
14764+ CFI_REGISTER rip,r11
14765 pushq_cfi $__USER32_CS
14766 /*CFI_REL_OFFSET cs,0*/
14767 movl %eax, %eax
14768- pushq_cfi %r10
14769+ pushq_cfi %r11
14770 CFI_REL_OFFSET rip,0
14771 pushq_cfi %rax
14772 cld
14773 SAVE_ARGS 0,1,0
14774+ pax_enter_kernel_user
14775+
14776+#ifdef CONFIG_PAX_RANDKSTACK
14777+ pax_erase_kstack
14778+#endif
14779+
14780+ /*
14781+ * No need to follow this irqs on/off section: the syscall
14782+ * disabled irqs, here we enable it straight after entry:
14783+ */
14784+ ENABLE_INTERRUPTS(CLBR_NONE)
14785 /* no need to do an access_ok check here because rbp has been
14786 32bit zero extended */
14787+
14788+#ifdef CONFIG_PAX_MEMORY_UDEREF
14789+ addq pax_user_shadow_base,%rbp
14790+ ASM_PAX_OPEN_USERLAND
14791+#endif
14792+
14793 ASM_STAC
14794 1: movl (%rbp),%ebp
14795 _ASM_EXTABLE(1b,ia32_badarg)
14796 ASM_CLAC
14797
14798+#ifdef CONFIG_PAX_MEMORY_UDEREF
14799+ ASM_PAX_CLOSE_USERLAND
14800+#endif
14801+
14802 /*
14803 * Sysenter doesn't filter flags, so we need to clear NT
14804 * ourselves. To save a few cycles, we can check whether
14805@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14806 jnz sysenter_fix_flags
14807 sysenter_flags_fixed:
14808
14809- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14810- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14811+ GET_THREAD_INFO(%r11)
14812+ orl $TS_COMPAT,TI_status(%r11)
14813+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14814 CFI_REMEMBER_STATE
14815 jnz sysenter_tracesys
14816 cmpq $(IA32_NR_syscalls-1),%rax
14817@@ -172,15 +218,18 @@ sysenter_do_call:
14818 sysenter_dispatch:
14819 call *ia32_sys_call_table(,%rax,8)
14820 movq %rax,RAX-ARGOFFSET(%rsp)
14821+ GET_THREAD_INFO(%r11)
14822 DISABLE_INTERRUPTS(CLBR_NONE)
14823 TRACE_IRQS_OFF
14824- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14825+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14826 jnz sysexit_audit
14827 sysexit_from_sys_call:
14828- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14829+ pax_exit_kernel_user
14830+ pax_erase_kstack
14831+ andl $~TS_COMPAT,TI_status(%r11)
14832 /* clear IF, that popfq doesn't enable interrupts early */
14833- andl $~0x200,EFLAGS-R11(%rsp)
14834- movl RIP-R11(%rsp),%edx /* User %eip */
14835+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14836+ movl RIP(%rsp),%edx /* User %eip */
14837 CFI_REGISTER rip,rdx
14838 RESTORE_ARGS 0,24,0,0,0,0
14839 xorq %r8,%r8
14840@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14841 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14842 movl %eax,%edi /* 1st arg: syscall number */
14843 call __audit_syscall_entry
14844+
14845+ pax_erase_kstack
14846+
14847 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14848 cmpq $(IA32_NR_syscalls-1),%rax
14849 ja ia32_badsys
14850@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14851 .endm
14852
14853 .macro auditsys_exit exit
14854- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14855+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14856 jnz ia32_ret_from_sys_call
14857 TRACE_IRQS_ON
14858 ENABLE_INTERRUPTS(CLBR_NONE)
14859@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14860 1: setbe %al /* 1 if error, 0 if not */
14861 movzbl %al,%edi /* zero-extend that into %edi */
14862 call __audit_syscall_exit
14863+ GET_THREAD_INFO(%r11)
14864 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14865 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14866 DISABLE_INTERRUPTS(CLBR_NONE)
14867 TRACE_IRQS_OFF
14868- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14869+ testl %edi,TI_flags(%r11)
14870 jz \exit
14871 CLEAR_RREGS -ARGOFFSET
14872 jmp int_with_check
14873@@ -253,7 +306,7 @@ sysenter_fix_flags:
14874
14875 sysenter_tracesys:
14876 #ifdef CONFIG_AUDITSYSCALL
14877- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14878+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14879 jz sysenter_auditsys
14880 #endif
14881 SAVE_REST
14882@@ -265,6 +318,9 @@ sysenter_tracesys:
14883 RESTORE_REST
14884 cmpq $(IA32_NR_syscalls-1),%rax
14885 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14886+
14887+ pax_erase_kstack
14888+
14889 jmp sysenter_do_call
14890 CFI_ENDPROC
14891 ENDPROC(ia32_sysenter_target)
14892@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14893 ENTRY(ia32_cstar_target)
14894 CFI_STARTPROC32 simple
14895 CFI_SIGNAL_FRAME
14896- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14897+ CFI_DEF_CFA rsp,0
14898 CFI_REGISTER rip,rcx
14899 /*CFI_REGISTER rflags,r11*/
14900 SWAPGS_UNSAFE_STACK
14901 movl %esp,%r8d
14902 CFI_REGISTER rsp,r8
14903 movq PER_CPU_VAR(kernel_stack),%rsp
14904+ SAVE_ARGS 8*6,0,0
14905+ pax_enter_kernel_user
14906+
14907+#ifdef CONFIG_PAX_RANDKSTACK
14908+ pax_erase_kstack
14909+#endif
14910+
14911 /*
14912 * No need to follow this irqs on/off section: the syscall
14913 * disabled irqs and here we enable it straight after entry:
14914 */
14915 ENABLE_INTERRUPTS(CLBR_NONE)
14916- SAVE_ARGS 8,0,0
14917 movl %eax,%eax /* zero extension */
14918 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14919 movq %rcx,RIP-ARGOFFSET(%rsp)
14920@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14921 /* no need to do an access_ok check here because r8 has been
14922 32bit zero extended */
14923 /* hardware stack frame is complete now */
14924+
14925+#ifdef CONFIG_PAX_MEMORY_UDEREF
14926+ ASM_PAX_OPEN_USERLAND
14927+ movq pax_user_shadow_base,%r8
14928+ addq RSP-ARGOFFSET(%rsp),%r8
14929+#endif
14930+
14931 ASM_STAC
14932 1: movl (%r8),%r9d
14933 _ASM_EXTABLE(1b,ia32_badarg)
14934 ASM_CLAC
14935- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14936- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14937+
14938+#ifdef CONFIG_PAX_MEMORY_UDEREF
14939+ ASM_PAX_CLOSE_USERLAND
14940+#endif
14941+
14942+ GET_THREAD_INFO(%r11)
14943+ orl $TS_COMPAT,TI_status(%r11)
14944+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14945 CFI_REMEMBER_STATE
14946 jnz cstar_tracesys
14947 cmpq $IA32_NR_syscalls-1,%rax
14948@@ -335,13 +410,16 @@ cstar_do_call:
14949 cstar_dispatch:
14950 call *ia32_sys_call_table(,%rax,8)
14951 movq %rax,RAX-ARGOFFSET(%rsp)
14952+ GET_THREAD_INFO(%r11)
14953 DISABLE_INTERRUPTS(CLBR_NONE)
14954 TRACE_IRQS_OFF
14955- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14956+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14957 jnz sysretl_audit
14958 sysretl_from_sys_call:
14959- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14960- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14961+ pax_exit_kernel_user
14962+ pax_erase_kstack
14963+ andl $~TS_COMPAT,TI_status(%r11)
14964+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14965 movl RIP-ARGOFFSET(%rsp),%ecx
14966 CFI_REGISTER rip,rcx
14967 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14968@@ -368,7 +446,7 @@ sysretl_audit:
14969
14970 cstar_tracesys:
14971 #ifdef CONFIG_AUDITSYSCALL
14972- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14973+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14974 jz cstar_auditsys
14975 #endif
14976 xchgl %r9d,%ebp
14977@@ -382,11 +460,19 @@ cstar_tracesys:
14978 xchgl %ebp,%r9d
14979 cmpq $(IA32_NR_syscalls-1),%rax
14980 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14981+
14982+ pax_erase_kstack
14983+
14984 jmp cstar_do_call
14985 END(ia32_cstar_target)
14986
14987 ia32_badarg:
14988 ASM_CLAC
14989+
14990+#ifdef CONFIG_PAX_MEMORY_UDEREF
14991+ ASM_PAX_CLOSE_USERLAND
14992+#endif
14993+
14994 movq $-EFAULT,%rax
14995 jmp ia32_sysret
14996 CFI_ENDPROC
14997@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14998 CFI_REL_OFFSET rip,RIP-RIP
14999 PARAVIRT_ADJUST_EXCEPTION_FRAME
15000 SWAPGS
15001- /*
15002- * No need to follow this irqs on/off section: the syscall
15003- * disabled irqs and here we enable it straight after entry:
15004- */
15005- ENABLE_INTERRUPTS(CLBR_NONE)
15006 movl %eax,%eax
15007 pushq_cfi %rax
15008 cld
15009 /* note the registers are not zero extended to the sf.
15010 this could be a problem. */
15011 SAVE_ARGS 0,1,0
15012- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15013- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15014+ pax_enter_kernel_user
15015+
15016+#ifdef CONFIG_PAX_RANDKSTACK
15017+ pax_erase_kstack
15018+#endif
15019+
15020+ /*
15021+ * No need to follow this irqs on/off section: the syscall
15022+ * disabled irqs and here we enable it straight after entry:
15023+ */
15024+ ENABLE_INTERRUPTS(CLBR_NONE)
15025+ GET_THREAD_INFO(%r11)
15026+ orl $TS_COMPAT,TI_status(%r11)
15027+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15028 jnz ia32_tracesys
15029 cmpq $(IA32_NR_syscalls-1),%rax
15030 ja ia32_badsys
15031@@ -458,6 +551,9 @@ ia32_tracesys:
15032 RESTORE_REST
15033 cmpq $(IA32_NR_syscalls-1),%rax
15034 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15035+
15036+ pax_erase_kstack
15037+
15038 jmp ia32_do_call
15039 END(ia32_syscall)
15040
15041diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15042index 8e0ceec..af13504 100644
15043--- a/arch/x86/ia32/sys_ia32.c
15044+++ b/arch/x86/ia32/sys_ia32.c
15045@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15046 */
15047 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15048 {
15049- typeof(ubuf->st_uid) uid = 0;
15050- typeof(ubuf->st_gid) gid = 0;
15051+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15052+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15053 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15054 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15055 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15056diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15057index 372231c..51b537d 100644
15058--- a/arch/x86/include/asm/alternative-asm.h
15059+++ b/arch/x86/include/asm/alternative-asm.h
15060@@ -18,6 +18,45 @@
15061 .endm
15062 #endif
15063
15064+#ifdef KERNEXEC_PLUGIN
15065+ .macro pax_force_retaddr_bts rip=0
15066+ btsq $63,\rip(%rsp)
15067+ .endm
15068+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15069+ .macro pax_force_retaddr rip=0, reload=0
15070+ btsq $63,\rip(%rsp)
15071+ .endm
15072+ .macro pax_force_fptr ptr
15073+ btsq $63,\ptr
15074+ .endm
15075+ .macro pax_set_fptr_mask
15076+ .endm
15077+#endif
15078+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15079+ .macro pax_force_retaddr rip=0, reload=0
15080+ .if \reload
15081+ pax_set_fptr_mask
15082+ .endif
15083+ orq %r12,\rip(%rsp)
15084+ .endm
15085+ .macro pax_force_fptr ptr
15086+ orq %r12,\ptr
15087+ .endm
15088+ .macro pax_set_fptr_mask
15089+ movabs $0x8000000000000000,%r12
15090+ .endm
15091+#endif
15092+#else
15093+ .macro pax_force_retaddr rip=0, reload=0
15094+ .endm
15095+ .macro pax_force_fptr ptr
15096+ .endm
15097+ .macro pax_force_retaddr_bts rip=0
15098+ .endm
15099+ .macro pax_set_fptr_mask
15100+ .endm
15101+#endif
15102+
15103 .macro altinstruction_entry orig alt feature orig_len alt_len
15104 .long \orig - .
15105 .long \alt - .
15106diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15107index 473bdbe..b1e3377 100644
15108--- a/arch/x86/include/asm/alternative.h
15109+++ b/arch/x86/include/asm/alternative.h
15110@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15111 ".pushsection .discard,\"aw\",@progbits\n" \
15112 DISCARD_ENTRY(1) \
15113 ".popsection\n" \
15114- ".pushsection .altinstr_replacement, \"ax\"\n" \
15115+ ".pushsection .altinstr_replacement, \"a\"\n" \
15116 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15117 ".popsection"
15118
15119@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15120 DISCARD_ENTRY(1) \
15121 DISCARD_ENTRY(2) \
15122 ".popsection\n" \
15123- ".pushsection .altinstr_replacement, \"ax\"\n" \
15124+ ".pushsection .altinstr_replacement, \"a\"\n" \
15125 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15126 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15127 ".popsection"
15128diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15129index 465b309..ab7e51f 100644
15130--- a/arch/x86/include/asm/apic.h
15131+++ b/arch/x86/include/asm/apic.h
15132@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15133
15134 #ifdef CONFIG_X86_LOCAL_APIC
15135
15136-extern unsigned int apic_verbosity;
15137+extern int apic_verbosity;
15138 extern int local_apic_timer_c2_ok;
15139
15140 extern int disable_apic;
15141diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15142index 20370c6..a2eb9b0 100644
15143--- a/arch/x86/include/asm/apm.h
15144+++ b/arch/x86/include/asm/apm.h
15145@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15146 __asm__ __volatile__(APM_DO_ZERO_SEGS
15147 "pushl %%edi\n\t"
15148 "pushl %%ebp\n\t"
15149- "lcall *%%cs:apm_bios_entry\n\t"
15150+ "lcall *%%ss:apm_bios_entry\n\t"
15151 "setc %%al\n\t"
15152 "popl %%ebp\n\t"
15153 "popl %%edi\n\t"
15154@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15155 __asm__ __volatile__(APM_DO_ZERO_SEGS
15156 "pushl %%edi\n\t"
15157 "pushl %%ebp\n\t"
15158- "lcall *%%cs:apm_bios_entry\n\t"
15159+ "lcall *%%ss:apm_bios_entry\n\t"
15160 "setc %%bl\n\t"
15161 "popl %%ebp\n\t"
15162 "popl %%edi\n\t"
15163diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15164index 5e5cd12..51cdc93 100644
15165--- a/arch/x86/include/asm/atomic.h
15166+++ b/arch/x86/include/asm/atomic.h
15167@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15168 }
15169
15170 /**
15171+ * atomic_read_unchecked - read atomic variable
15172+ * @v: pointer of type atomic_unchecked_t
15173+ *
15174+ * Atomically reads the value of @v.
15175+ */
15176+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15177+{
15178+ return ACCESS_ONCE((v)->counter);
15179+}
15180+
15181+/**
15182 * atomic_set - set atomic variable
15183 * @v: pointer of type atomic_t
15184 * @i: required value
15185@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15186 }
15187
15188 /**
15189+ * atomic_set_unchecked - set atomic variable
15190+ * @v: pointer of type atomic_unchecked_t
15191+ * @i: required value
15192+ *
15193+ * Atomically sets the value of @v to @i.
15194+ */
15195+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15196+{
15197+ v->counter = i;
15198+}
15199+
15200+/**
15201 * atomic_add - add integer to atomic variable
15202 * @i: integer value to add
15203 * @v: pointer of type atomic_t
15204@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15205 */
15206 static inline void atomic_add(int i, atomic_t *v)
15207 {
15208- asm volatile(LOCK_PREFIX "addl %1,%0"
15209+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15210+
15211+#ifdef CONFIG_PAX_REFCOUNT
15212+ "jno 0f\n"
15213+ LOCK_PREFIX "subl %1,%0\n"
15214+ "int $4\n0:\n"
15215+ _ASM_EXTABLE(0b, 0b)
15216+#endif
15217+
15218+ : "+m" (v->counter)
15219+ : "ir" (i));
15220+}
15221+
15222+/**
15223+ * atomic_add_unchecked - add integer to atomic variable
15224+ * @i: integer value to add
15225+ * @v: pointer of type atomic_unchecked_t
15226+ *
15227+ * Atomically adds @i to @v.
15228+ */
15229+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15230+{
15231+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15232 : "+m" (v->counter)
15233 : "ir" (i));
15234 }
15235@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15236 */
15237 static inline void atomic_sub(int i, atomic_t *v)
15238 {
15239- asm volatile(LOCK_PREFIX "subl %1,%0"
15240+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15241+
15242+#ifdef CONFIG_PAX_REFCOUNT
15243+ "jno 0f\n"
15244+ LOCK_PREFIX "addl %1,%0\n"
15245+ "int $4\n0:\n"
15246+ _ASM_EXTABLE(0b, 0b)
15247+#endif
15248+
15249+ : "+m" (v->counter)
15250+ : "ir" (i));
15251+}
15252+
15253+/**
15254+ * atomic_sub_unchecked - subtract integer from atomic variable
15255+ * @i: integer value to subtract
15256+ * @v: pointer of type atomic_unchecked_t
15257+ *
15258+ * Atomically subtracts @i from @v.
15259+ */
15260+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15261+{
15262+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15263 : "+m" (v->counter)
15264 : "ir" (i));
15265 }
15266@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15267 */
15268 static inline int atomic_sub_and_test(int i, atomic_t *v)
15269 {
15270- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15271+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15272 }
15273
15274 /**
15275@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15276 */
15277 static inline void atomic_inc(atomic_t *v)
15278 {
15279- asm volatile(LOCK_PREFIX "incl %0"
15280+ asm volatile(LOCK_PREFIX "incl %0\n"
15281+
15282+#ifdef CONFIG_PAX_REFCOUNT
15283+ "jno 0f\n"
15284+ LOCK_PREFIX "decl %0\n"
15285+ "int $4\n0:\n"
15286+ _ASM_EXTABLE(0b, 0b)
15287+#endif
15288+
15289+ : "+m" (v->counter));
15290+}
15291+
15292+/**
15293+ * atomic_inc_unchecked - increment atomic variable
15294+ * @v: pointer of type atomic_unchecked_t
15295+ *
15296+ * Atomically increments @v by 1.
15297+ */
15298+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15299+{
15300+ asm volatile(LOCK_PREFIX "incl %0\n"
15301 : "+m" (v->counter));
15302 }
15303
15304@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15305 */
15306 static inline void atomic_dec(atomic_t *v)
15307 {
15308- asm volatile(LOCK_PREFIX "decl %0"
15309+ asm volatile(LOCK_PREFIX "decl %0\n"
15310+
15311+#ifdef CONFIG_PAX_REFCOUNT
15312+ "jno 0f\n"
15313+ LOCK_PREFIX "incl %0\n"
15314+ "int $4\n0:\n"
15315+ _ASM_EXTABLE(0b, 0b)
15316+#endif
15317+
15318+ : "+m" (v->counter));
15319+}
15320+
15321+/**
15322+ * atomic_dec_unchecked - decrement atomic variable
15323+ * @v: pointer of type atomic_unchecked_t
15324+ *
15325+ * Atomically decrements @v by 1.
15326+ */
15327+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15328+{
15329+ asm volatile(LOCK_PREFIX "decl %0\n"
15330 : "+m" (v->counter));
15331 }
15332
15333@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15334 */
15335 static inline int atomic_dec_and_test(atomic_t *v)
15336 {
15337- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15338+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15339 }
15340
15341 /**
15342@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15343 */
15344 static inline int atomic_inc_and_test(atomic_t *v)
15345 {
15346- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15347+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15348+}
15349+
15350+/**
15351+ * atomic_inc_and_test_unchecked - increment and test
15352+ * @v: pointer of type atomic_unchecked_t
15353+ *
15354+ * Atomically increments @v by 1
15355+ * and returns true if the result is zero, or false for all
15356+ * other cases.
15357+ */
15358+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15359+{
15360+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15361 }
15362
15363 /**
15364@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15365 */
15366 static inline int atomic_add_negative(int i, atomic_t *v)
15367 {
15368- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15369+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15370 }
15371
15372 /**
15373@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15374 *
15375 * Atomically adds @i to @v and returns @i + @v
15376 */
15377-static inline int atomic_add_return(int i, atomic_t *v)
15378+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15379+{
15380+ return i + xadd_check_overflow(&v->counter, i);
15381+}
15382+
15383+/**
15384+ * atomic_add_return_unchecked - add integer and return
15385+ * @i: integer value to add
15386+ * @v: pointer of type atomic_unchecked_t
15387+ *
15388+ * Atomically adds @i to @v and returns @i + @v
15389+ */
15390+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15391 {
15392 return i + xadd(&v->counter, i);
15393 }
15394@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15395 *
15396 * Atomically subtracts @i from @v and returns @v - @i
15397 */
15398-static inline int atomic_sub_return(int i, atomic_t *v)
15399+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15400 {
15401 return atomic_add_return(-i, v);
15402 }
15403
15404 #define atomic_inc_return(v) (atomic_add_return(1, v))
15405+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15406+{
15407+ return atomic_add_return_unchecked(1, v);
15408+}
15409 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15410
15411-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15412+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15413+{
15414+ return cmpxchg(&v->counter, old, new);
15415+}
15416+
15417+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15418 {
15419 return cmpxchg(&v->counter, old, new);
15420 }
15421@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15422 return xchg(&v->counter, new);
15423 }
15424
15425+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15426+{
15427+ return xchg(&v->counter, new);
15428+}
15429+
15430 /**
15431 * __atomic_add_unless - add unless the number is already a given value
15432 * @v: pointer of type atomic_t
15433@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15434 */
15435 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15436 {
15437- int c, old;
15438+ int c, old, new;
15439 c = atomic_read(v);
15440 for (;;) {
15441- if (unlikely(c == (u)))
15442+ if (unlikely(c == u))
15443 break;
15444- old = atomic_cmpxchg((v), c, c + (a));
15445+
15446+ asm volatile("addl %2,%0\n"
15447+
15448+#ifdef CONFIG_PAX_REFCOUNT
15449+ "jno 0f\n"
15450+ "subl %2,%0\n"
15451+ "int $4\n0:\n"
15452+ _ASM_EXTABLE(0b, 0b)
15453+#endif
15454+
15455+ : "=r" (new)
15456+ : "0" (c), "ir" (a));
15457+
15458+ old = atomic_cmpxchg(v, c, new);
15459 if (likely(old == c))
15460 break;
15461 c = old;
15462@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15463 }
15464
15465 /**
15466+ * atomic_inc_not_zero_hint - increment if not null
15467+ * @v: pointer of type atomic_t
15468+ * @hint: probable value of the atomic before the increment
15469+ *
15470+ * This version of atomic_inc_not_zero() gives a hint of probable
15471+ * value of the atomic. This helps processor to not read the memory
15472+ * before doing the atomic read/modify/write cycle, lowering
15473+ * number of bus transactions on some arches.
15474+ *
15475+ * Returns: 0 if increment was not done, 1 otherwise.
15476+ */
15477+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15478+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15479+{
15480+ int val, c = hint, new;
15481+
15482+ /* sanity test, should be removed by compiler if hint is a constant */
15483+ if (!hint)
15484+ return __atomic_add_unless(v, 1, 0);
15485+
15486+ do {
15487+ asm volatile("incl %0\n"
15488+
15489+#ifdef CONFIG_PAX_REFCOUNT
15490+ "jno 0f\n"
15491+ "decl %0\n"
15492+ "int $4\n0:\n"
15493+ _ASM_EXTABLE(0b, 0b)
15494+#endif
15495+
15496+ : "=r" (new)
15497+ : "0" (c));
15498+
15499+ val = atomic_cmpxchg(v, c, new);
15500+ if (val == c)
15501+ return 1;
15502+ c = val;
15503+ } while (c);
15504+
15505+ return 0;
15506+}
15507+
15508+/**
15509 * atomic_inc_short - increment of a short integer
15510 * @v: pointer to type int
15511 *
15512@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15513 }
15514
15515 /* These are x86-specific, used by some header files */
15516-#define atomic_clear_mask(mask, addr) \
15517- asm volatile(LOCK_PREFIX "andl %0,%1" \
15518- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15519+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15520+{
15521+ asm volatile(LOCK_PREFIX "andl %1,%0"
15522+ : "+m" (v->counter)
15523+ : "r" (~(mask))
15524+ : "memory");
15525+}
15526
15527-#define atomic_set_mask(mask, addr) \
15528- asm volatile(LOCK_PREFIX "orl %0,%1" \
15529- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15530- : "memory")
15531+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15532+{
15533+ asm volatile(LOCK_PREFIX "andl %1,%0"
15534+ : "+m" (v->counter)
15535+ : "r" (~(mask))
15536+ : "memory");
15537+}
15538+
15539+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15540+{
15541+ asm volatile(LOCK_PREFIX "orl %1,%0"
15542+ : "+m" (v->counter)
15543+ : "r" (mask)
15544+ : "memory");
15545+}
15546+
15547+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15548+{
15549+ asm volatile(LOCK_PREFIX "orl %1,%0"
15550+ : "+m" (v->counter)
15551+ : "r" (mask)
15552+ : "memory");
15553+}
15554
15555 #ifdef CONFIG_X86_32
15556 # include <asm/atomic64_32.h>
15557diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15558index b154de7..bf18a5a 100644
15559--- a/arch/x86/include/asm/atomic64_32.h
15560+++ b/arch/x86/include/asm/atomic64_32.h
15561@@ -12,6 +12,14 @@ typedef struct {
15562 u64 __aligned(8) counter;
15563 } atomic64_t;
15564
15565+#ifdef CONFIG_PAX_REFCOUNT
15566+typedef struct {
15567+ u64 __aligned(8) counter;
15568+} atomic64_unchecked_t;
15569+#else
15570+typedef atomic64_t atomic64_unchecked_t;
15571+#endif
15572+
15573 #define ATOMIC64_INIT(val) { (val) }
15574
15575 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15576@@ -37,21 +45,31 @@ typedef struct {
15577 ATOMIC64_DECL_ONE(sym##_386)
15578
15579 ATOMIC64_DECL_ONE(add_386);
15580+ATOMIC64_DECL_ONE(add_unchecked_386);
15581 ATOMIC64_DECL_ONE(sub_386);
15582+ATOMIC64_DECL_ONE(sub_unchecked_386);
15583 ATOMIC64_DECL_ONE(inc_386);
15584+ATOMIC64_DECL_ONE(inc_unchecked_386);
15585 ATOMIC64_DECL_ONE(dec_386);
15586+ATOMIC64_DECL_ONE(dec_unchecked_386);
15587 #endif
15588
15589 #define alternative_atomic64(f, out, in...) \
15590 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15591
15592 ATOMIC64_DECL(read);
15593+ATOMIC64_DECL(read_unchecked);
15594 ATOMIC64_DECL(set);
15595+ATOMIC64_DECL(set_unchecked);
15596 ATOMIC64_DECL(xchg);
15597 ATOMIC64_DECL(add_return);
15598+ATOMIC64_DECL(add_return_unchecked);
15599 ATOMIC64_DECL(sub_return);
15600+ATOMIC64_DECL(sub_return_unchecked);
15601 ATOMIC64_DECL(inc_return);
15602+ATOMIC64_DECL(inc_return_unchecked);
15603 ATOMIC64_DECL(dec_return);
15604+ATOMIC64_DECL(dec_return_unchecked);
15605 ATOMIC64_DECL(dec_if_positive);
15606 ATOMIC64_DECL(inc_not_zero);
15607 ATOMIC64_DECL(add_unless);
15608@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15609 }
15610
15611 /**
15612+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15613+ * @p: pointer to type atomic64_unchecked_t
15614+ * @o: expected value
15615+ * @n: new value
15616+ *
15617+ * Atomically sets @v to @n if it was equal to @o and returns
15618+ * the old value.
15619+ */
15620+
15621+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15622+{
15623+ return cmpxchg64(&v->counter, o, n);
15624+}
15625+
15626+/**
15627 * atomic64_xchg - xchg atomic64 variable
15628 * @v: pointer to type atomic64_t
15629 * @n: value to assign
15630@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15631 }
15632
15633 /**
15634+ * atomic64_set_unchecked - set atomic64 variable
15635+ * @v: pointer to type atomic64_unchecked_t
15636+ * @n: value to assign
15637+ *
15638+ * Atomically sets the value of @v to @n.
15639+ */
15640+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15641+{
15642+ unsigned high = (unsigned)(i >> 32);
15643+ unsigned low = (unsigned)i;
15644+ alternative_atomic64(set, /* no output */,
15645+ "S" (v), "b" (low), "c" (high)
15646+ : "eax", "edx", "memory");
15647+}
15648+
15649+/**
15650 * atomic64_read - read atomic64 variable
15651 * @v: pointer to type atomic64_t
15652 *
15653@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15654 }
15655
15656 /**
15657+ * atomic64_read_unchecked - read atomic64 variable
15658+ * @v: pointer to type atomic64_unchecked_t
15659+ *
15660+ * Atomically reads the value of @v and returns it.
15661+ */
15662+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15663+{
15664+ long long r;
15665+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15666+ return r;
15667+ }
15668+
15669+/**
15670 * atomic64_add_return - add and return
15671 * @i: integer value to add
15672 * @v: pointer to type atomic64_t
15673@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15674 return i;
15675 }
15676
15677+/**
15678+ * atomic64_add_return_unchecked - add and return
15679+ * @i: integer value to add
15680+ * @v: pointer to type atomic64_unchecked_t
15681+ *
15682+ * Atomically adds @i to @v and returns @i + *@v
15683+ */
15684+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15685+{
15686+ alternative_atomic64(add_return_unchecked,
15687+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15688+ ASM_NO_INPUT_CLOBBER("memory"));
15689+ return i;
15690+}
15691+
15692 /*
15693 * Other variants with different arithmetic operators:
15694 */
15695@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15696 return a;
15697 }
15698
15699+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15700+{
15701+ long long a;
15702+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15703+ "S" (v) : "memory", "ecx");
15704+ return a;
15705+}
15706+
15707 static inline long long atomic64_dec_return(atomic64_t *v)
15708 {
15709 long long a;
15710@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15711 }
15712
15713 /**
15714+ * atomic64_add_unchecked - add integer to atomic64 variable
15715+ * @i: integer value to add
15716+ * @v: pointer to type atomic64_unchecked_t
15717+ *
15718+ * Atomically adds @i to @v.
15719+ */
15720+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15721+{
15722+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15723+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15724+ ASM_NO_INPUT_CLOBBER("memory"));
15725+ return i;
15726+}
15727+
15728+/**
15729 * atomic64_sub - subtract the atomic64 variable
15730 * @i: integer value to subtract
15731 * @v: pointer to type atomic64_t
15732diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15733index f8d273e..02f39f3 100644
15734--- a/arch/x86/include/asm/atomic64_64.h
15735+++ b/arch/x86/include/asm/atomic64_64.h
15736@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15737 }
15738
15739 /**
15740+ * atomic64_read_unchecked - read atomic64 variable
15741+ * @v: pointer of type atomic64_unchecked_t
15742+ *
15743+ * Atomically reads the value of @v.
15744+ * Doesn't imply a read memory barrier.
15745+ */
15746+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15747+{
15748+ return ACCESS_ONCE((v)->counter);
15749+}
15750+
15751+/**
15752 * atomic64_set - set atomic64 variable
15753 * @v: pointer to type atomic64_t
15754 * @i: required value
15755@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15756 }
15757
15758 /**
15759+ * atomic64_set_unchecked - set atomic64 variable
15760+ * @v: pointer to type atomic64_unchecked_t
15761+ * @i: required value
15762+ *
15763+ * Atomically sets the value of @v to @i.
15764+ */
15765+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15766+{
15767+ v->counter = i;
15768+}
15769+
15770+/**
15771 * atomic64_add - add integer to atomic64 variable
15772 * @i: integer value to add
15773 * @v: pointer to type atomic64_t
15774@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15775 */
15776 static inline void atomic64_add(long i, atomic64_t *v)
15777 {
15778+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15779+
15780+#ifdef CONFIG_PAX_REFCOUNT
15781+ "jno 0f\n"
15782+ LOCK_PREFIX "subq %1,%0\n"
15783+ "int $4\n0:\n"
15784+ _ASM_EXTABLE(0b, 0b)
15785+#endif
15786+
15787+ : "=m" (v->counter)
15788+ : "er" (i), "m" (v->counter));
15789+}
15790+
15791+/**
15792+ * atomic64_add_unchecked - add integer to atomic64 variable
15793+ * @i: integer value to add
15794+ * @v: pointer to type atomic64_unchecked_t
15795+ *
15796+ * Atomically adds @i to @v.
15797+ */
15798+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15799+{
15800 asm volatile(LOCK_PREFIX "addq %1,%0"
15801 : "=m" (v->counter)
15802 : "er" (i), "m" (v->counter));
15803@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15804 */
15805 static inline void atomic64_sub(long i, atomic64_t *v)
15806 {
15807- asm volatile(LOCK_PREFIX "subq %1,%0"
15808+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15809+
15810+#ifdef CONFIG_PAX_REFCOUNT
15811+ "jno 0f\n"
15812+ LOCK_PREFIX "addq %1,%0\n"
15813+ "int $4\n0:\n"
15814+ _ASM_EXTABLE(0b, 0b)
15815+#endif
15816+
15817+ : "=m" (v->counter)
15818+ : "er" (i), "m" (v->counter));
15819+}
15820+
15821+/**
15822+ * atomic64_sub_unchecked - subtract the atomic64 variable
15823+ * @i: integer value to subtract
15824+ * @v: pointer to type atomic64_unchecked_t
15825+ *
15826+ * Atomically subtracts @i from @v.
15827+ */
15828+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15829+{
15830+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15831 : "=m" (v->counter)
15832 : "er" (i), "m" (v->counter));
15833 }
15834@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15835 */
15836 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15837 {
15838- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15839+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15840 }
15841
15842 /**
15843@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15844 */
15845 static inline void atomic64_inc(atomic64_t *v)
15846 {
15847+ asm volatile(LOCK_PREFIX "incq %0\n"
15848+
15849+#ifdef CONFIG_PAX_REFCOUNT
15850+ "jno 0f\n"
15851+ LOCK_PREFIX "decq %0\n"
15852+ "int $4\n0:\n"
15853+ _ASM_EXTABLE(0b, 0b)
15854+#endif
15855+
15856+ : "=m" (v->counter)
15857+ : "m" (v->counter));
15858+}
15859+
15860+/**
15861+ * atomic64_inc_unchecked - increment atomic64 variable
15862+ * @v: pointer to type atomic64_unchecked_t
15863+ *
15864+ * Atomically increments @v by 1.
15865+ */
15866+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15867+{
15868 asm volatile(LOCK_PREFIX "incq %0"
15869 : "=m" (v->counter)
15870 : "m" (v->counter));
15871@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15872 */
15873 static inline void atomic64_dec(atomic64_t *v)
15874 {
15875- asm volatile(LOCK_PREFIX "decq %0"
15876+ asm volatile(LOCK_PREFIX "decq %0\n"
15877+
15878+#ifdef CONFIG_PAX_REFCOUNT
15879+ "jno 0f\n"
15880+ LOCK_PREFIX "incq %0\n"
15881+ "int $4\n0:\n"
15882+ _ASM_EXTABLE(0b, 0b)
15883+#endif
15884+
15885+ : "=m" (v->counter)
15886+ : "m" (v->counter));
15887+}
15888+
15889+/**
15890+ * atomic64_dec_unchecked - decrement atomic64 variable
15891+ * @v: pointer to type atomic64_t
15892+ *
15893+ * Atomically decrements @v by 1.
15894+ */
15895+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15896+{
15897+ asm volatile(LOCK_PREFIX "decq %0\n"
15898 : "=m" (v->counter)
15899 : "m" (v->counter));
15900 }
15901@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15902 */
15903 static inline int atomic64_dec_and_test(atomic64_t *v)
15904 {
15905- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15906+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15907 }
15908
15909 /**
15910@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15911 */
15912 static inline int atomic64_inc_and_test(atomic64_t *v)
15913 {
15914- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15915+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15916 }
15917
15918 /**
15919@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15920 */
15921 static inline int atomic64_add_negative(long i, atomic64_t *v)
15922 {
15923- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15924+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15925 }
15926
15927 /**
15928@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15929 */
15930 static inline long atomic64_add_return(long i, atomic64_t *v)
15931 {
15932+ return i + xadd_check_overflow(&v->counter, i);
15933+}
15934+
15935+/**
15936+ * atomic64_add_return_unchecked - add and return
15937+ * @i: integer value to add
15938+ * @v: pointer to type atomic64_unchecked_t
15939+ *
15940+ * Atomically adds @i to @v and returns @i + @v
15941+ */
15942+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15943+{
15944 return i + xadd(&v->counter, i);
15945 }
15946
15947@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15948 }
15949
15950 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15951+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15952+{
15953+ return atomic64_add_return_unchecked(1, v);
15954+}
15955 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15956
15957 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15958@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15959 return cmpxchg(&v->counter, old, new);
15960 }
15961
15962+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15963+{
15964+ return cmpxchg(&v->counter, old, new);
15965+}
15966+
15967 static inline long atomic64_xchg(atomic64_t *v, long new)
15968 {
15969 return xchg(&v->counter, new);
15970@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15971 */
15972 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15973 {
15974- long c, old;
15975+ long c, old, new;
15976 c = atomic64_read(v);
15977 for (;;) {
15978- if (unlikely(c == (u)))
15979+ if (unlikely(c == u))
15980 break;
15981- old = atomic64_cmpxchg((v), c, c + (a));
15982+
15983+ asm volatile("add %2,%0\n"
15984+
15985+#ifdef CONFIG_PAX_REFCOUNT
15986+ "jno 0f\n"
15987+ "sub %2,%0\n"
15988+ "int $4\n0:\n"
15989+ _ASM_EXTABLE(0b, 0b)
15990+#endif
15991+
15992+ : "=r" (new)
15993+ : "0" (c), "ir" (a));
15994+
15995+ old = atomic64_cmpxchg(v, c, new);
15996 if (likely(old == c))
15997 break;
15998 c = old;
15999 }
16000- return c != (u);
16001+ return c != u;
16002 }
16003
16004 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16005diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16006index 2ab1eb3..1e8cc5d 100644
16007--- a/arch/x86/include/asm/barrier.h
16008+++ b/arch/x86/include/asm/barrier.h
16009@@ -57,7 +57,7 @@
16010 do { \
16011 compiletime_assert_atomic_type(*p); \
16012 smp_mb(); \
16013- ACCESS_ONCE(*p) = (v); \
16014+ ACCESS_ONCE_RW(*p) = (v); \
16015 } while (0)
16016
16017 #define smp_load_acquire(p) \
16018@@ -74,7 +74,7 @@ do { \
16019 do { \
16020 compiletime_assert_atomic_type(*p); \
16021 barrier(); \
16022- ACCESS_ONCE(*p) = (v); \
16023+ ACCESS_ONCE_RW(*p) = (v); \
16024 } while (0)
16025
16026 #define smp_load_acquire(p) \
16027diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16028index cfe3b95..d01b118 100644
16029--- a/arch/x86/include/asm/bitops.h
16030+++ b/arch/x86/include/asm/bitops.h
16031@@ -50,7 +50,7 @@
16032 * a mask operation on a byte.
16033 */
16034 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16035-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16036+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16037 #define CONST_MASK(nr) (1 << ((nr) & 7))
16038
16039 /**
16040@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16041 */
16042 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16043 {
16044- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16045+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16046 }
16047
16048 /**
16049@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16050 */
16051 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16052 {
16053- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16054+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16055 }
16056
16057 /**
16058@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16059 */
16060 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16061 {
16062- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16063+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16064 }
16065
16066 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16067@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16068 *
16069 * Undefined if no bit exists, so code should check against 0 first.
16070 */
16071-static inline unsigned long __ffs(unsigned long word)
16072+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16073 {
16074 asm("rep; bsf %1,%0"
16075 : "=r" (word)
16076@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16077 *
16078 * Undefined if no zero exists, so code should check against ~0UL first.
16079 */
16080-static inline unsigned long ffz(unsigned long word)
16081+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16082 {
16083 asm("rep; bsf %1,%0"
16084 : "=r" (word)
16085@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16086 *
16087 * Undefined if no set bit exists, so code should check against 0 first.
16088 */
16089-static inline unsigned long __fls(unsigned long word)
16090+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16091 {
16092 asm("bsr %1,%0"
16093 : "=r" (word)
16094@@ -434,7 +434,7 @@ static inline int ffs(int x)
16095 * set bit if value is nonzero. The last (most significant) bit is
16096 * at position 32.
16097 */
16098-static inline int fls(int x)
16099+static inline int __intentional_overflow(-1) fls(int x)
16100 {
16101 int r;
16102
16103@@ -476,7 +476,7 @@ static inline int fls(int x)
16104 * at position 64.
16105 */
16106 #ifdef CONFIG_X86_64
16107-static __always_inline int fls64(__u64 x)
16108+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16109 {
16110 int bitpos = -1;
16111 /*
16112diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16113index 4fa687a..60f2d39 100644
16114--- a/arch/x86/include/asm/boot.h
16115+++ b/arch/x86/include/asm/boot.h
16116@@ -6,10 +6,15 @@
16117 #include <uapi/asm/boot.h>
16118
16119 /* Physical address where kernel should be loaded. */
16120-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16121+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16122 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16123 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16124
16125+#ifndef __ASSEMBLY__
16126+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16127+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16128+#endif
16129+
16130 /* Minimum kernel alignment, as a power of two */
16131 #ifdef CONFIG_X86_64
16132 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16133diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16134index 48f99f1..d78ebf9 100644
16135--- a/arch/x86/include/asm/cache.h
16136+++ b/arch/x86/include/asm/cache.h
16137@@ -5,12 +5,13 @@
16138
16139 /* L1 cache line size */
16140 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16141-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16143
16144 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16145+#define __read_only __attribute__((__section__(".data..read_only")))
16146
16147 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16148-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16149+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16150
16151 #ifdef CONFIG_X86_VSMP
16152 #ifdef CONFIG_SMP
16153diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16154index 76659b6..72b8439 100644
16155--- a/arch/x86/include/asm/calling.h
16156+++ b/arch/x86/include/asm/calling.h
16157@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16158 #define RSP 152
16159 #define SS 160
16160
16161-#define ARGOFFSET R11
16162-#define SWFRAME ORIG_RAX
16163+#define ARGOFFSET R15
16164
16165 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16166- subq $9*8+\addskip, %rsp
16167- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16168- movq_cfi rdi, 8*8
16169- movq_cfi rsi, 7*8
16170- movq_cfi rdx, 6*8
16171+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16172+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16173+ movq_cfi rdi, RDI
16174+ movq_cfi rsi, RSI
16175+ movq_cfi rdx, RDX
16176
16177 .if \save_rcx
16178- movq_cfi rcx, 5*8
16179+ movq_cfi rcx, RCX
16180 .endif
16181
16182 .if \rax_enosys
16183- movq $-ENOSYS, 4*8(%rsp)
16184+ movq $-ENOSYS, RAX(%rsp)
16185 .else
16186- movq_cfi rax, 4*8
16187+ movq_cfi rax, RAX
16188 .endif
16189
16190 .if \save_r891011
16191- movq_cfi r8, 3*8
16192- movq_cfi r9, 2*8
16193- movq_cfi r10, 1*8
16194- movq_cfi r11, 0*8
16195+ movq_cfi r8, R8
16196+ movq_cfi r9, R9
16197+ movq_cfi r10, R10
16198+ movq_cfi r11, R11
16199 .endif
16200
16201+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16202+ movq_cfi r12, R12
16203+#endif
16204+
16205 .endm
16206
16207-#define ARG_SKIP (9*8)
16208+#define ARG_SKIP ORIG_RAX
16209
16210 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16211 rstor_r8910=1, rstor_rdx=1
16212+
16213+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16214+ movq_cfi_restore R12, r12
16215+#endif
16216+
16217 .if \rstor_r11
16218- movq_cfi_restore 0*8, r11
16219+ movq_cfi_restore R11, r11
16220 .endif
16221
16222 .if \rstor_r8910
16223- movq_cfi_restore 1*8, r10
16224- movq_cfi_restore 2*8, r9
16225- movq_cfi_restore 3*8, r8
16226+ movq_cfi_restore R10, r10
16227+ movq_cfi_restore R9, r9
16228+ movq_cfi_restore R8, r8
16229 .endif
16230
16231 .if \rstor_rax
16232- movq_cfi_restore 4*8, rax
16233+ movq_cfi_restore RAX, rax
16234 .endif
16235
16236 .if \rstor_rcx
16237- movq_cfi_restore 5*8, rcx
16238+ movq_cfi_restore RCX, rcx
16239 .endif
16240
16241 .if \rstor_rdx
16242- movq_cfi_restore 6*8, rdx
16243+ movq_cfi_restore RDX, rdx
16244 .endif
16245
16246- movq_cfi_restore 7*8, rsi
16247- movq_cfi_restore 8*8, rdi
16248+ movq_cfi_restore RSI, rsi
16249+ movq_cfi_restore RDI, rdi
16250
16251- .if ARG_SKIP+\addskip > 0
16252- addq $ARG_SKIP+\addskip, %rsp
16253- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16254+ .if ORIG_RAX+\addskip > 0
16255+ addq $ORIG_RAX+\addskip, %rsp
16256+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16257 .endif
16258 .endm
16259
16260- .macro LOAD_ARGS offset, skiprax=0
16261- movq \offset(%rsp), %r11
16262- movq \offset+8(%rsp), %r10
16263- movq \offset+16(%rsp), %r9
16264- movq \offset+24(%rsp), %r8
16265- movq \offset+40(%rsp), %rcx
16266- movq \offset+48(%rsp), %rdx
16267- movq \offset+56(%rsp), %rsi
16268- movq \offset+64(%rsp), %rdi
16269+ .macro LOAD_ARGS skiprax=0
16270+ movq R11(%rsp), %r11
16271+ movq R10(%rsp), %r10
16272+ movq R9(%rsp), %r9
16273+ movq R8(%rsp), %r8
16274+ movq RCX(%rsp), %rcx
16275+ movq RDX(%rsp), %rdx
16276+ movq RSI(%rsp), %rsi
16277+ movq RDI(%rsp), %rdi
16278 .if \skiprax
16279 .else
16280- movq \offset+72(%rsp), %rax
16281+ movq ORIG_RAX(%rsp), %rax
16282 .endif
16283 .endm
16284
16285-#define REST_SKIP (6*8)
16286-
16287 .macro SAVE_REST
16288- subq $REST_SKIP, %rsp
16289- CFI_ADJUST_CFA_OFFSET REST_SKIP
16290- movq_cfi rbx, 5*8
16291- movq_cfi rbp, 4*8
16292- movq_cfi r12, 3*8
16293- movq_cfi r13, 2*8
16294- movq_cfi r14, 1*8
16295- movq_cfi r15, 0*8
16296+ movq_cfi rbx, RBX
16297+ movq_cfi rbp, RBP
16298+
16299+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16300+ movq_cfi r12, R12
16301+#endif
16302+
16303+ movq_cfi r13, R13
16304+ movq_cfi r14, R14
16305+ movq_cfi r15, R15
16306 .endm
16307
16308 .macro RESTORE_REST
16309- movq_cfi_restore 0*8, r15
16310- movq_cfi_restore 1*8, r14
16311- movq_cfi_restore 2*8, r13
16312- movq_cfi_restore 3*8, r12
16313- movq_cfi_restore 4*8, rbp
16314- movq_cfi_restore 5*8, rbx
16315- addq $REST_SKIP, %rsp
16316- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16317+ movq_cfi_restore R15, r15
16318+ movq_cfi_restore R14, r14
16319+ movq_cfi_restore R13, r13
16320+
16321+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16322+ movq_cfi_restore R12, r12
16323+#endif
16324+
16325+ movq_cfi_restore RBP, rbp
16326+ movq_cfi_restore RBX, rbx
16327 .endm
16328
16329 .macro SAVE_ALL
16330diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16331index f50de69..2b0a458 100644
16332--- a/arch/x86/include/asm/checksum_32.h
16333+++ b/arch/x86/include/asm/checksum_32.h
16334@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16335 int len, __wsum sum,
16336 int *src_err_ptr, int *dst_err_ptr);
16337
16338+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16339+ int len, __wsum sum,
16340+ int *src_err_ptr, int *dst_err_ptr);
16341+
16342+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16343+ int len, __wsum sum,
16344+ int *src_err_ptr, int *dst_err_ptr);
16345+
16346 /*
16347 * Note: when you get a NULL pointer exception here this means someone
16348 * passed in an incorrect kernel address to one of these functions.
16349@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16350
16351 might_sleep();
16352 stac();
16353- ret = csum_partial_copy_generic((__force void *)src, dst,
16354+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16355 len, sum, err_ptr, NULL);
16356 clac();
16357
16358@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16359 might_sleep();
16360 if (access_ok(VERIFY_WRITE, dst, len)) {
16361 stac();
16362- ret = csum_partial_copy_generic(src, (__force void *)dst,
16363+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16364 len, sum, NULL, err_ptr);
16365 clac();
16366 return ret;
16367diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16368index 99c105d7..2f667ac 100644
16369--- a/arch/x86/include/asm/cmpxchg.h
16370+++ b/arch/x86/include/asm/cmpxchg.h
16371@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16372 __compiletime_error("Bad argument size for cmpxchg");
16373 extern void __xadd_wrong_size(void)
16374 __compiletime_error("Bad argument size for xadd");
16375+extern void __xadd_check_overflow_wrong_size(void)
16376+ __compiletime_error("Bad argument size for xadd_check_overflow");
16377 extern void __add_wrong_size(void)
16378 __compiletime_error("Bad argument size for add");
16379+extern void __add_check_overflow_wrong_size(void)
16380+ __compiletime_error("Bad argument size for add_check_overflow");
16381
16382 /*
16383 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16384@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16385 __ret; \
16386 })
16387
16388+#ifdef CONFIG_PAX_REFCOUNT
16389+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16390+ ({ \
16391+ __typeof__ (*(ptr)) __ret = (arg); \
16392+ switch (sizeof(*(ptr))) { \
16393+ case __X86_CASE_L: \
16394+ asm volatile (lock #op "l %0, %1\n" \
16395+ "jno 0f\n" \
16396+ "mov %0,%1\n" \
16397+ "int $4\n0:\n" \
16398+ _ASM_EXTABLE(0b, 0b) \
16399+ : "+r" (__ret), "+m" (*(ptr)) \
16400+ : : "memory", "cc"); \
16401+ break; \
16402+ case __X86_CASE_Q: \
16403+ asm volatile (lock #op "q %q0, %1\n" \
16404+ "jno 0f\n" \
16405+ "mov %0,%1\n" \
16406+ "int $4\n0:\n" \
16407+ _ASM_EXTABLE(0b, 0b) \
16408+ : "+r" (__ret), "+m" (*(ptr)) \
16409+ : : "memory", "cc"); \
16410+ break; \
16411+ default: \
16412+ __ ## op ## _check_overflow_wrong_size(); \
16413+ } \
16414+ __ret; \
16415+ })
16416+#else
16417+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16418+#endif
16419+
16420 /*
16421 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16422 * Since this is generally used to protect other memory information, we
16423@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16424 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16425 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16426
16427+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16428+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16429+
16430 #define __add(ptr, inc, lock) \
16431 ({ \
16432 __typeof__ (*(ptr)) __ret = (inc); \
16433diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16434index 59c6c40..5e0b22c 100644
16435--- a/arch/x86/include/asm/compat.h
16436+++ b/arch/x86/include/asm/compat.h
16437@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16438 typedef u32 compat_uint_t;
16439 typedef u32 compat_ulong_t;
16440 typedef u64 __attribute__((aligned(4))) compat_u64;
16441-typedef u32 compat_uptr_t;
16442+typedef u32 __user compat_uptr_t;
16443
16444 struct compat_timespec {
16445 compat_time_t tv_sec;
16446diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16447index aede2c3..40d7a8f 100644
16448--- a/arch/x86/include/asm/cpufeature.h
16449+++ b/arch/x86/include/asm/cpufeature.h
16450@@ -212,7 +212,7 @@
16451 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16452 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16453 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16454-
16455+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16456
16457 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16458 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16459@@ -220,7 +220,7 @@
16460 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16461 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16462 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16463-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16464+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16465 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16466 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16467 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16468@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16469 #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
16470 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16471 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16472+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16473
16474 #if __GNUC__ >= 4
16475 extern void warn_pre_alternatives(void);
16476@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16477
16478 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16479 t_warn:
16480- warn_pre_alternatives();
16481+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16482+ warn_pre_alternatives();
16483 return false;
16484 #endif
16485
16486@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16487 ".section .discard,\"aw\",@progbits\n"
16488 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16489 ".previous\n"
16490- ".section .altinstr_replacement,\"ax\"\n"
16491+ ".section .altinstr_replacement,\"a\"\n"
16492 "3: movb $1,%0\n"
16493 "4:\n"
16494 ".previous\n"
16495@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16496 " .byte 2b - 1b\n" /* src len */
16497 " .byte 4f - 3f\n" /* repl len */
16498 ".previous\n"
16499- ".section .altinstr_replacement,\"ax\"\n"
16500+ ".section .altinstr_replacement,\"a\"\n"
16501 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16502 "4:\n"
16503 ".previous\n"
16504@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16505 ".section .discard,\"aw\",@progbits\n"
16506 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16507 ".previous\n"
16508- ".section .altinstr_replacement,\"ax\"\n"
16509+ ".section .altinstr_replacement,\"a\"\n"
16510 "3: movb $0,%0\n"
16511 "4:\n"
16512 ".previous\n"
16513@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16514 ".section .discard,\"aw\",@progbits\n"
16515 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16516 ".previous\n"
16517- ".section .altinstr_replacement,\"ax\"\n"
16518+ ".section .altinstr_replacement,\"a\"\n"
16519 "5: movb $1,%0\n"
16520 "6:\n"
16521 ".previous\n"
16522diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16523index a94b82e..59ecefa 100644
16524--- a/arch/x86/include/asm/desc.h
16525+++ b/arch/x86/include/asm/desc.h
16526@@ -4,6 +4,7 @@
16527 #include <asm/desc_defs.h>
16528 #include <asm/ldt.h>
16529 #include <asm/mmu.h>
16530+#include <asm/pgtable.h>
16531
16532 #include <linux/smp.h>
16533 #include <linux/percpu.h>
16534@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16535
16536 desc->type = (info->read_exec_only ^ 1) << 1;
16537 desc->type |= info->contents << 2;
16538+ desc->type |= info->seg_not_present ^ 1;
16539
16540 desc->s = 1;
16541 desc->dpl = 0x3;
16542@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16543 }
16544
16545 extern struct desc_ptr idt_descr;
16546-extern gate_desc idt_table[];
16547-extern struct desc_ptr debug_idt_descr;
16548-extern gate_desc debug_idt_table[];
16549-
16550-struct gdt_page {
16551- struct desc_struct gdt[GDT_ENTRIES];
16552-} __attribute__((aligned(PAGE_SIZE)));
16553-
16554-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16555+extern gate_desc idt_table[IDT_ENTRIES];
16556+extern const struct desc_ptr debug_idt_descr;
16557+extern gate_desc debug_idt_table[IDT_ENTRIES];
16558
16559+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16560 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16561 {
16562- return per_cpu(gdt_page, cpu).gdt;
16563+ return cpu_gdt_table[cpu];
16564 }
16565
16566 #ifdef CONFIG_X86_64
16567@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16568 unsigned long base, unsigned dpl, unsigned flags,
16569 unsigned short seg)
16570 {
16571- gate->a = (seg << 16) | (base & 0xffff);
16572- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16573+ gate->gate.offset_low = base;
16574+ gate->gate.seg = seg;
16575+ gate->gate.reserved = 0;
16576+ gate->gate.type = type;
16577+ gate->gate.s = 0;
16578+ gate->gate.dpl = dpl;
16579+ gate->gate.p = 1;
16580+ gate->gate.offset_high = base >> 16;
16581 }
16582
16583 #endif
16584@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16585
16586 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16587 {
16588+ pax_open_kernel();
16589 memcpy(&idt[entry], gate, sizeof(*gate));
16590+ pax_close_kernel();
16591 }
16592
16593 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16594 {
16595+ pax_open_kernel();
16596 memcpy(&ldt[entry], desc, 8);
16597+ pax_close_kernel();
16598 }
16599
16600 static inline void
16601@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16602 default: size = sizeof(*gdt); break;
16603 }
16604
16605+ pax_open_kernel();
16606 memcpy(&gdt[entry], desc, size);
16607+ pax_close_kernel();
16608 }
16609
16610 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16611@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16612
16613 static inline void native_load_tr_desc(void)
16614 {
16615+ pax_open_kernel();
16616 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16617+ pax_close_kernel();
16618 }
16619
16620 static inline void native_load_gdt(const struct desc_ptr *dtr)
16621@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16622 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16623 unsigned int i;
16624
16625+ pax_open_kernel();
16626 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16627 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16628+ pax_close_kernel();
16629 }
16630
16631 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16632@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16633 preempt_enable();
16634 }
16635
16636-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16637+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16638 {
16639 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16640 }
16641@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16642 }
16643
16644 #ifdef CONFIG_X86_64
16645-static inline void set_nmi_gate(int gate, void *addr)
16646+static inline void set_nmi_gate(int gate, const void *addr)
16647 {
16648 gate_desc s;
16649
16650@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16651 #endif
16652
16653 #ifdef CONFIG_TRACING
16654-extern struct desc_ptr trace_idt_descr;
16655-extern gate_desc trace_idt_table[];
16656+extern const struct desc_ptr trace_idt_descr;
16657+extern gate_desc trace_idt_table[IDT_ENTRIES];
16658 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16659 {
16660 write_idt_entry(trace_idt_table, entry, gate);
16661 }
16662
16663-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16664+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16665 unsigned dpl, unsigned ist, unsigned seg)
16666 {
16667 gate_desc s;
16668@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16669 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16670 #endif
16671
16672-static inline void _set_gate(int gate, unsigned type, void *addr,
16673+static inline void _set_gate(int gate, unsigned type, const void *addr,
16674 unsigned dpl, unsigned ist, unsigned seg)
16675 {
16676 gate_desc s;
16677@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16678 #define set_intr_gate(n, addr) \
16679 do { \
16680 BUG_ON((unsigned)n > 0xFF); \
16681- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16682+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16683 __KERNEL_CS); \
16684- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16685+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16686 0, 0, __KERNEL_CS); \
16687 } while (0)
16688
16689@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16690 /*
16691 * This routine sets up an interrupt gate at directory privilege level 3.
16692 */
16693-static inline void set_system_intr_gate(unsigned int n, void *addr)
16694+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16695 {
16696 BUG_ON((unsigned)n > 0xFF);
16697 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16698 }
16699
16700-static inline void set_system_trap_gate(unsigned int n, void *addr)
16701+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16702 {
16703 BUG_ON((unsigned)n > 0xFF);
16704 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16705 }
16706
16707-static inline void set_trap_gate(unsigned int n, void *addr)
16708+static inline void set_trap_gate(unsigned int n, const void *addr)
16709 {
16710 BUG_ON((unsigned)n > 0xFF);
16711 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16712@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16713 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16714 {
16715 BUG_ON((unsigned)n > 0xFF);
16716- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16717+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16718 }
16719
16720-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16721+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16722 {
16723 BUG_ON((unsigned)n > 0xFF);
16724 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16725 }
16726
16727-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16728+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16729 {
16730 BUG_ON((unsigned)n > 0xFF);
16731 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16732@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16733 else
16734 load_idt((const struct desc_ptr *)&idt_descr);
16735 }
16736+
16737+#ifdef CONFIG_X86_32
16738+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16739+{
16740+ struct desc_struct d;
16741+
16742+ if (likely(limit))
16743+ limit = (limit - 1UL) >> PAGE_SHIFT;
16744+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16745+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16746+}
16747+#endif
16748+
16749 #endif /* _ASM_X86_DESC_H */
16750diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16751index 278441f..b95a174 100644
16752--- a/arch/x86/include/asm/desc_defs.h
16753+++ b/arch/x86/include/asm/desc_defs.h
16754@@ -31,6 +31,12 @@ struct desc_struct {
16755 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16756 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16757 };
16758+ struct {
16759+ u16 offset_low;
16760+ u16 seg;
16761+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16762+ unsigned offset_high: 16;
16763+ } gate;
16764 };
16765 } __attribute__((packed));
16766
16767diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16768index ced283a..ffe04cc 100644
16769--- a/arch/x86/include/asm/div64.h
16770+++ b/arch/x86/include/asm/div64.h
16771@@ -39,7 +39,7 @@
16772 __mod; \
16773 })
16774
16775-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16776+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16777 {
16778 union {
16779 u64 v64;
16780diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16781index ca3347a..1a5082a 100644
16782--- a/arch/x86/include/asm/elf.h
16783+++ b/arch/x86/include/asm/elf.h
16784@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16785
16786 #include <asm/vdso.h>
16787
16788-#ifdef CONFIG_X86_64
16789-extern unsigned int vdso64_enabled;
16790-#endif
16791 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16792 extern unsigned int vdso32_enabled;
16793 #endif
16794@@ -249,7 +246,25 @@ extern int force_personality32;
16795 the loader. We need to make sure that it is out of the way of the program
16796 that it will "exec", and that there is sufficient room for the brk. */
16797
16798+#ifdef CONFIG_PAX_SEGMEXEC
16799+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16800+#else
16801 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16802+#endif
16803+
16804+#ifdef CONFIG_PAX_ASLR
16805+#ifdef CONFIG_X86_32
16806+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16807+
16808+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16809+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16810+#else
16811+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16812+
16813+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16814+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16815+#endif
16816+#endif
16817
16818 /* This yields a mask that user programs can use to figure out what
16819 instruction set this CPU supports. This could be done in user space,
16820@@ -298,17 +313,13 @@ do { \
16821
16822 #define ARCH_DLINFO \
16823 do { \
16824- if (vdso64_enabled) \
16825- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16826- (unsigned long __force)current->mm->context.vdso); \
16827+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16828 } while (0)
16829
16830 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16831 #define ARCH_DLINFO_X32 \
16832 do { \
16833- if (vdso64_enabled) \
16834- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16835- (unsigned long __force)current->mm->context.vdso); \
16836+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16837 } while (0)
16838
16839 #define AT_SYSINFO 32
16840@@ -323,10 +334,10 @@ else \
16841
16842 #endif /* !CONFIG_X86_32 */
16843
16844-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16845+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16846
16847 #define VDSO_ENTRY \
16848- ((unsigned long)current->mm->context.vdso + \
16849+ (current->mm->context.vdso + \
16850 selected_vdso32->sym___kernel_vsyscall)
16851
16852 struct linux_binprm;
16853@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16854 int uses_interp);
16855 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16856
16857-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16858-#define arch_randomize_brk arch_randomize_brk
16859-
16860 /*
16861 * True on X86_32 or when emulating IA32 on X86_64
16862 */
16863diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16864index 77a99ac..39ff7f5 100644
16865--- a/arch/x86/include/asm/emergency-restart.h
16866+++ b/arch/x86/include/asm/emergency-restart.h
16867@@ -1,6 +1,6 @@
16868 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16869 #define _ASM_X86_EMERGENCY_RESTART_H
16870
16871-extern void machine_emergency_restart(void);
16872+extern void machine_emergency_restart(void) __noreturn;
16873
16874 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16875diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16876index 1c7eefe..d0e4702 100644
16877--- a/arch/x86/include/asm/floppy.h
16878+++ b/arch/x86/include/asm/floppy.h
16879@@ -229,18 +229,18 @@ static struct fd_routine_l {
16880 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16881 } fd_routine[] = {
16882 {
16883- request_dma,
16884- free_dma,
16885- get_dma_residue,
16886- dma_mem_alloc,
16887- hard_dma_setup
16888+ ._request_dma = request_dma,
16889+ ._free_dma = free_dma,
16890+ ._get_dma_residue = get_dma_residue,
16891+ ._dma_mem_alloc = dma_mem_alloc,
16892+ ._dma_setup = hard_dma_setup
16893 },
16894 {
16895- vdma_request_dma,
16896- vdma_nop,
16897- vdma_get_dma_residue,
16898- vdma_mem_alloc,
16899- vdma_dma_setup
16900+ ._request_dma = vdma_request_dma,
16901+ ._free_dma = vdma_nop,
16902+ ._get_dma_residue = vdma_get_dma_residue,
16903+ ._dma_mem_alloc = vdma_mem_alloc,
16904+ ._dma_setup = vdma_dma_setup
16905 }
16906 };
16907
16908diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16909index e97622f..d0ba77a 100644
16910--- a/arch/x86/include/asm/fpu-internal.h
16911+++ b/arch/x86/include/asm/fpu-internal.h
16912@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16913 #define user_insn(insn, output, input...) \
16914 ({ \
16915 int err; \
16916+ pax_open_userland(); \
16917 asm volatile(ASM_STAC "\n" \
16918- "1:" #insn "\n\t" \
16919+ "1:" \
16920+ __copyuser_seg \
16921+ #insn "\n\t" \
16922 "2: " ASM_CLAC "\n" \
16923 ".section .fixup,\"ax\"\n" \
16924 "3: movl $-1,%[err]\n" \
16925@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16926 _ASM_EXTABLE(1b, 3b) \
16927 : [err] "=r" (err), output \
16928 : "0"(0), input); \
16929+ pax_close_userland(); \
16930 err; \
16931 })
16932
16933@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16934 "fnclex\n\t"
16935 "emms\n\t"
16936 "fildl %P[addr]" /* set F?P to defined value */
16937- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16938+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16939 }
16940
16941 return fpu_restore_checking(&tsk->thread.fpu);
16942diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16943index b4c1f54..e290c08 100644
16944--- a/arch/x86/include/asm/futex.h
16945+++ b/arch/x86/include/asm/futex.h
16946@@ -12,6 +12,7 @@
16947 #include <asm/smap.h>
16948
16949 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16950+ typecheck(u32 __user *, uaddr); \
16951 asm volatile("\t" ASM_STAC "\n" \
16952 "1:\t" insn "\n" \
16953 "2:\t" ASM_CLAC "\n" \
16954@@ -20,15 +21,16 @@
16955 "\tjmp\t2b\n" \
16956 "\t.previous\n" \
16957 _ASM_EXTABLE(1b, 3b) \
16958- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16959+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16960 : "i" (-EFAULT), "0" (oparg), "1" (0))
16961
16962 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16963+ typecheck(u32 __user *, uaddr); \
16964 asm volatile("\t" ASM_STAC "\n" \
16965 "1:\tmovl %2, %0\n" \
16966 "\tmovl\t%0, %3\n" \
16967 "\t" insn "\n" \
16968- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16969+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16970 "\tjnz\t1b\n" \
16971 "3:\t" ASM_CLAC "\n" \
16972 "\t.section .fixup,\"ax\"\n" \
16973@@ -38,7 +40,7 @@
16974 _ASM_EXTABLE(1b, 4b) \
16975 _ASM_EXTABLE(2b, 4b) \
16976 : "=&a" (oldval), "=&r" (ret), \
16977- "+m" (*uaddr), "=&r" (tem) \
16978+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16979 : "r" (oparg), "i" (-EFAULT), "1" (0))
16980
16981 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16982@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16983
16984 pagefault_disable();
16985
16986+ pax_open_userland();
16987 switch (op) {
16988 case FUTEX_OP_SET:
16989- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16990+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16991 break;
16992 case FUTEX_OP_ADD:
16993- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16994+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16995 uaddr, oparg);
16996 break;
16997 case FUTEX_OP_OR:
16998@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16999 default:
17000 ret = -ENOSYS;
17001 }
17002+ pax_close_userland();
17003
17004 pagefault_enable();
17005
17006diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17007index 9662290..49ca5e5 100644
17008--- a/arch/x86/include/asm/hw_irq.h
17009+++ b/arch/x86/include/asm/hw_irq.h
17010@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17011 #endif /* CONFIG_X86_LOCAL_APIC */
17012
17013 /* Statistics */
17014-extern atomic_t irq_err_count;
17015-extern atomic_t irq_mis_count;
17016+extern atomic_unchecked_t irq_err_count;
17017+extern atomic_unchecked_t irq_mis_count;
17018
17019 /* EISA */
17020 extern void eisa_set_level_irq(unsigned int irq);
17021diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17022index ccffa53..3c90c87 100644
17023--- a/arch/x86/include/asm/i8259.h
17024+++ b/arch/x86/include/asm/i8259.h
17025@@ -62,7 +62,7 @@ struct legacy_pic {
17026 void (*init)(int auto_eoi);
17027 int (*irq_pending)(unsigned int irq);
17028 void (*make_irq)(unsigned int irq);
17029-};
17030+} __do_const;
17031
17032 extern struct legacy_pic *legacy_pic;
17033 extern struct legacy_pic null_legacy_pic;
17034diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17035index 34a5b93..27e40a6 100644
17036--- a/arch/x86/include/asm/io.h
17037+++ b/arch/x86/include/asm/io.h
17038@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17039 "m" (*(volatile type __force *)addr) barrier); }
17040
17041 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17042-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17043-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17044+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17045+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17046
17047 build_mmio_read(__readb, "b", unsigned char, "=q", )
17048-build_mmio_read(__readw, "w", unsigned short, "=r", )
17049-build_mmio_read(__readl, "l", unsigned int, "=r", )
17050+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17051+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17052
17053 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17054 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17055@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17056 * this function
17057 */
17058
17059-static inline phys_addr_t virt_to_phys(volatile void *address)
17060+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17061 {
17062 return __pa(address);
17063 }
17064@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17065 return ioremap_nocache(offset, size);
17066 }
17067
17068-extern void iounmap(volatile void __iomem *addr);
17069+extern void iounmap(const volatile void __iomem *addr);
17070
17071 extern void set_iounmap_nonlazy(void);
17072
17073@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17074
17075 #include <linux/vmalloc.h>
17076
17077+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17078+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17079+{
17080+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17081+}
17082+
17083+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17084+{
17085+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17086+}
17087+
17088 /*
17089 * Convert a virtual cached pointer to an uncached pointer
17090 */
17091diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17092index 0a8b519..80e7d5b 100644
17093--- a/arch/x86/include/asm/irqflags.h
17094+++ b/arch/x86/include/asm/irqflags.h
17095@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17096 sti; \
17097 sysexit
17098
17099+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17100+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17101+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17102+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17103+
17104 #else
17105 #define INTERRUPT_RETURN iret
17106 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17107diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17108index 4421b5d..8543006 100644
17109--- a/arch/x86/include/asm/kprobes.h
17110+++ b/arch/x86/include/asm/kprobes.h
17111@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17112 #define RELATIVEJUMP_SIZE 5
17113 #define RELATIVECALL_OPCODE 0xe8
17114 #define RELATIVE_ADDR_SIZE 4
17115-#define MAX_STACK_SIZE 64
17116-#define MIN_STACK_SIZE(ADDR) \
17117- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17118- THREAD_SIZE - (unsigned long)(ADDR))) \
17119- ? (MAX_STACK_SIZE) \
17120- : (((unsigned long)current_thread_info()) + \
17121- THREAD_SIZE - (unsigned long)(ADDR)))
17122+#define MAX_STACK_SIZE 64UL
17123+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17124
17125 #define flush_insn_slot(p) do { } while (0)
17126
17127diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17128index d89c6b8..e711c69 100644
17129--- a/arch/x86/include/asm/kvm_host.h
17130+++ b/arch/x86/include/asm/kvm_host.h
17131@@ -51,7 +51,7 @@
17132 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
17133
17134 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
17135-#define CR3_PCID_INVD (1UL << 63)
17136+#define CR3_PCID_INVD (1ULL << 63)
17137 #define CR4_RESERVED_BITS \
17138 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
17139 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
17140diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17141index 4ad6560..75c7bdd 100644
17142--- a/arch/x86/include/asm/local.h
17143+++ b/arch/x86/include/asm/local.h
17144@@ -10,33 +10,97 @@ typedef struct {
17145 atomic_long_t a;
17146 } local_t;
17147
17148+typedef struct {
17149+ atomic_long_unchecked_t a;
17150+} local_unchecked_t;
17151+
17152 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17153
17154 #define local_read(l) atomic_long_read(&(l)->a)
17155+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17156 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17157+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17158
17159 static inline void local_inc(local_t *l)
17160 {
17161- asm volatile(_ASM_INC "%0"
17162+ asm volatile(_ASM_INC "%0\n"
17163+
17164+#ifdef CONFIG_PAX_REFCOUNT
17165+ "jno 0f\n"
17166+ _ASM_DEC "%0\n"
17167+ "int $4\n0:\n"
17168+ _ASM_EXTABLE(0b, 0b)
17169+#endif
17170+
17171+ : "+m" (l->a.counter));
17172+}
17173+
17174+static inline void local_inc_unchecked(local_unchecked_t *l)
17175+{
17176+ asm volatile(_ASM_INC "%0\n"
17177 : "+m" (l->a.counter));
17178 }
17179
17180 static inline void local_dec(local_t *l)
17181 {
17182- asm volatile(_ASM_DEC "%0"
17183+ asm volatile(_ASM_DEC "%0\n"
17184+
17185+#ifdef CONFIG_PAX_REFCOUNT
17186+ "jno 0f\n"
17187+ _ASM_INC "%0\n"
17188+ "int $4\n0:\n"
17189+ _ASM_EXTABLE(0b, 0b)
17190+#endif
17191+
17192+ : "+m" (l->a.counter));
17193+}
17194+
17195+static inline void local_dec_unchecked(local_unchecked_t *l)
17196+{
17197+ asm volatile(_ASM_DEC "%0\n"
17198 : "+m" (l->a.counter));
17199 }
17200
17201 static inline void local_add(long i, local_t *l)
17202 {
17203- asm volatile(_ASM_ADD "%1,%0"
17204+ asm volatile(_ASM_ADD "%1,%0\n"
17205+
17206+#ifdef CONFIG_PAX_REFCOUNT
17207+ "jno 0f\n"
17208+ _ASM_SUB "%1,%0\n"
17209+ "int $4\n0:\n"
17210+ _ASM_EXTABLE(0b, 0b)
17211+#endif
17212+
17213+ : "+m" (l->a.counter)
17214+ : "ir" (i));
17215+}
17216+
17217+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17218+{
17219+ asm volatile(_ASM_ADD "%1,%0\n"
17220 : "+m" (l->a.counter)
17221 : "ir" (i));
17222 }
17223
17224 static inline void local_sub(long i, local_t *l)
17225 {
17226- asm volatile(_ASM_SUB "%1,%0"
17227+ asm volatile(_ASM_SUB "%1,%0\n"
17228+
17229+#ifdef CONFIG_PAX_REFCOUNT
17230+ "jno 0f\n"
17231+ _ASM_ADD "%1,%0\n"
17232+ "int $4\n0:\n"
17233+ _ASM_EXTABLE(0b, 0b)
17234+#endif
17235+
17236+ : "+m" (l->a.counter)
17237+ : "ir" (i));
17238+}
17239+
17240+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17241+{
17242+ asm volatile(_ASM_SUB "%1,%0\n"
17243 : "+m" (l->a.counter)
17244 : "ir" (i));
17245 }
17246@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17247 */
17248 static inline int local_sub_and_test(long i, local_t *l)
17249 {
17250- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17251+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17252 }
17253
17254 /**
17255@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17256 */
17257 static inline int local_dec_and_test(local_t *l)
17258 {
17259- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17260+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17261 }
17262
17263 /**
17264@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17265 */
17266 static inline int local_inc_and_test(local_t *l)
17267 {
17268- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17269+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17270 }
17271
17272 /**
17273@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17274 */
17275 static inline int local_add_negative(long i, local_t *l)
17276 {
17277- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17278+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17279 }
17280
17281 /**
17282@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17283 static inline long local_add_return(long i, local_t *l)
17284 {
17285 long __i = i;
17286+ asm volatile(_ASM_XADD "%0, %1\n"
17287+
17288+#ifdef CONFIG_PAX_REFCOUNT
17289+ "jno 0f\n"
17290+ _ASM_MOV "%0,%1\n"
17291+ "int $4\n0:\n"
17292+ _ASM_EXTABLE(0b, 0b)
17293+#endif
17294+
17295+ : "+r" (i), "+m" (l->a.counter)
17296+ : : "memory");
17297+ return i + __i;
17298+}
17299+
17300+/**
17301+ * local_add_return_unchecked - add and return
17302+ * @i: integer value to add
17303+ * @l: pointer to type local_unchecked_t
17304+ *
17305+ * Atomically adds @i to @l and returns @i + @l
17306+ */
17307+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17308+{
17309+ long __i = i;
17310 asm volatile(_ASM_XADD "%0, %1;"
17311 : "+r" (i), "+m" (l->a.counter)
17312 : : "memory");
17313@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17314
17315 #define local_cmpxchg(l, o, n) \
17316 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17317+#define local_cmpxchg_unchecked(l, o, n) \
17318+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17319 /* Always has a lock prefix */
17320 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17321
17322diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17323new file mode 100644
17324index 0000000..2bfd3ba
17325--- /dev/null
17326+++ b/arch/x86/include/asm/mman.h
17327@@ -0,0 +1,15 @@
17328+#ifndef _X86_MMAN_H
17329+#define _X86_MMAN_H
17330+
17331+#include <uapi/asm/mman.h>
17332+
17333+#ifdef __KERNEL__
17334+#ifndef __ASSEMBLY__
17335+#ifdef CONFIG_X86_32
17336+#define arch_mmap_check i386_mmap_check
17337+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17338+#endif
17339+#endif
17340+#endif
17341+
17342+#endif /* X86_MMAN_H */
17343diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17344index 876e74e..e20bfb1 100644
17345--- a/arch/x86/include/asm/mmu.h
17346+++ b/arch/x86/include/asm/mmu.h
17347@@ -9,7 +9,7 @@
17348 * we put the segment information here.
17349 */
17350 typedef struct {
17351- void *ldt;
17352+ struct desc_struct *ldt;
17353 int size;
17354
17355 #ifdef CONFIG_X86_64
17356@@ -18,7 +18,19 @@ typedef struct {
17357 #endif
17358
17359 struct mutex lock;
17360- void __user *vdso;
17361+ unsigned long vdso;
17362+
17363+#ifdef CONFIG_X86_32
17364+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17365+ unsigned long user_cs_base;
17366+ unsigned long user_cs_limit;
17367+
17368+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17369+ cpumask_t cpu_user_cs_mask;
17370+#endif
17371+
17372+#endif
17373+#endif
17374 } mm_context_t;
17375
17376 #ifdef CONFIG_SMP
17377diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17378index 4b75d59..8ffacb6 100644
17379--- a/arch/x86/include/asm/mmu_context.h
17380+++ b/arch/x86/include/asm/mmu_context.h
17381@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
17382
17383 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17384 {
17385+
17386+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17387+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17388+ unsigned int i;
17389+ pgd_t *pgd;
17390+
17391+ pax_open_kernel();
17392+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17393+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17394+ set_pgd_batched(pgd+i, native_make_pgd(0));
17395+ pax_close_kernel();
17396+ }
17397+#endif
17398+
17399 #ifdef CONFIG_SMP
17400 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17401 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17402@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17403 struct task_struct *tsk)
17404 {
17405 unsigned cpu = smp_processor_id();
17406+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17407+ int tlbstate = TLBSTATE_OK;
17408+#endif
17409
17410 if (likely(prev != next)) {
17411 #ifdef CONFIG_SMP
17412+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17413+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17414+#endif
17415 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17416 this_cpu_write(cpu_tlbstate.active_mm, next);
17417 #endif
17418 cpumask_set_cpu(cpu, mm_cpumask(next));
17419
17420 /* Re-load page tables */
17421+#ifdef CONFIG_PAX_PER_CPU_PGD
17422+ pax_open_kernel();
17423+
17424+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17425+ if (static_cpu_has(X86_FEATURE_PCID))
17426+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17427+ else
17428+#endif
17429+
17430+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17431+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17432+ pax_close_kernel();
17433+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17434+
17435+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17436+ if (static_cpu_has(X86_FEATURE_PCID)) {
17437+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17438+ u64 descriptor[2];
17439+ descriptor[0] = PCID_USER;
17440+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17441+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17442+ descriptor[0] = PCID_KERNEL;
17443+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17444+ }
17445+ } else {
17446+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17447+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17448+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17449+ else
17450+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17451+ }
17452+ } else
17453+#endif
17454+
17455+ load_cr3(get_cpu_pgd(cpu, kernel));
17456+#else
17457 load_cr3(next->pgd);
17458+#endif
17459 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17460
17461 /* Stop flush ipis for the previous mm */
17462@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17463 */
17464 if (unlikely(prev->context.ldt != next->context.ldt))
17465 load_LDT_nolock(&next->context);
17466+
17467+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17468+ if (!(__supported_pte_mask & _PAGE_NX)) {
17469+ smp_mb__before_atomic();
17470+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17471+ smp_mb__after_atomic();
17472+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17473+ }
17474+#endif
17475+
17476+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17477+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17478+ prev->context.user_cs_limit != next->context.user_cs_limit))
17479+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17480+#ifdef CONFIG_SMP
17481+ else if (unlikely(tlbstate != TLBSTATE_OK))
17482+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17483+#endif
17484+#endif
17485+
17486 }
17487+ else {
17488+
17489+#ifdef CONFIG_PAX_PER_CPU_PGD
17490+ pax_open_kernel();
17491+
17492+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17493+ if (static_cpu_has(X86_FEATURE_PCID))
17494+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17495+ else
17496+#endif
17497+
17498+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17499+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17500+ pax_close_kernel();
17501+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17502+
17503+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17504+ if (static_cpu_has(X86_FEATURE_PCID)) {
17505+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17506+ u64 descriptor[2];
17507+ descriptor[0] = PCID_USER;
17508+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17509+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17510+ descriptor[0] = PCID_KERNEL;
17511+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17512+ }
17513+ } else {
17514+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17515+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17516+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17517+ else
17518+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17519+ }
17520+ } else
17521+#endif
17522+
17523+ load_cr3(get_cpu_pgd(cpu, kernel));
17524+#endif
17525+
17526 #ifdef CONFIG_SMP
17527- else {
17528 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17529 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17530
17531@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17532 * tlb flush IPI delivery. We must reload CR3
17533 * to make sure to use no freed page tables.
17534 */
17535+
17536+#ifndef CONFIG_PAX_PER_CPU_PGD
17537 load_cr3(next->pgd);
17538 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17539+#endif
17540+
17541 load_LDT_nolock(&next->context);
17542+
17543+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17544+ if (!(__supported_pte_mask & _PAGE_NX))
17545+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17546+#endif
17547+
17548+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17549+#ifdef CONFIG_PAX_PAGEEXEC
17550+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17551+#endif
17552+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17553+#endif
17554+
17555 }
17556+#endif
17557 }
17558-#endif
17559 }
17560
17561 #define activate_mm(prev, next) \
17562diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17563index e3b7819..b257c64 100644
17564--- a/arch/x86/include/asm/module.h
17565+++ b/arch/x86/include/asm/module.h
17566@@ -5,6 +5,7 @@
17567
17568 #ifdef CONFIG_X86_64
17569 /* X86_64 does not define MODULE_PROC_FAMILY */
17570+#define MODULE_PROC_FAMILY ""
17571 #elif defined CONFIG_M486
17572 #define MODULE_PROC_FAMILY "486 "
17573 #elif defined CONFIG_M586
17574@@ -57,8 +58,20 @@
17575 #error unknown processor family
17576 #endif
17577
17578-#ifdef CONFIG_X86_32
17579-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17580+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17581+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17582+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17583+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17584+#else
17585+#define MODULE_PAX_KERNEXEC ""
17586 #endif
17587
17588+#ifdef CONFIG_PAX_MEMORY_UDEREF
17589+#define MODULE_PAX_UDEREF "UDEREF "
17590+#else
17591+#define MODULE_PAX_UDEREF ""
17592+#endif
17593+
17594+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17595+
17596 #endif /* _ASM_X86_MODULE_H */
17597diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17598index 5f2fc44..106caa6 100644
17599--- a/arch/x86/include/asm/nmi.h
17600+++ b/arch/x86/include/asm/nmi.h
17601@@ -36,26 +36,35 @@ enum {
17602
17603 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17604
17605+struct nmiaction;
17606+
17607+struct nmiwork {
17608+ const struct nmiaction *action;
17609+ u64 max_duration;
17610+ struct irq_work irq_work;
17611+};
17612+
17613 struct nmiaction {
17614 struct list_head list;
17615 nmi_handler_t handler;
17616- u64 max_duration;
17617- struct irq_work irq_work;
17618 unsigned long flags;
17619 const char *name;
17620-};
17621+ struct nmiwork *work;
17622+} __do_const;
17623
17624 #define register_nmi_handler(t, fn, fg, n, init...) \
17625 ({ \
17626- static struct nmiaction init fn##_na = { \
17627+ static struct nmiwork fn##_nw; \
17628+ static const struct nmiaction init fn##_na = { \
17629 .handler = (fn), \
17630 .name = (n), \
17631 .flags = (fg), \
17632+ .work = &fn##_nw, \
17633 }; \
17634 __register_nmi_handler((t), &fn##_na); \
17635 })
17636
17637-int __register_nmi_handler(unsigned int, struct nmiaction *);
17638+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17639
17640 void unregister_nmi_handler(unsigned int, const char *);
17641
17642diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17643index 802dde3..9183e68 100644
17644--- a/arch/x86/include/asm/page.h
17645+++ b/arch/x86/include/asm/page.h
17646@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17647 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17648
17649 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17650+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17651
17652 #define __boot_va(x) __va(x)
17653 #define __boot_pa(x) __pa(x)
17654@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17655 * virt_to_page(kaddr) returns a valid pointer if and only if
17656 * virt_addr_valid(kaddr) returns true.
17657 */
17658-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17659 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17660 extern bool __virt_addr_valid(unsigned long kaddr);
17661 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17662
17663+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17664+#define virt_to_page(kaddr) \
17665+ ({ \
17666+ const void *__kaddr = (const void *)(kaddr); \
17667+ BUG_ON(!virt_addr_valid(__kaddr)); \
17668+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17669+ })
17670+#else
17671+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17672+#endif
17673+
17674 #endif /* __ASSEMBLY__ */
17675
17676 #include <asm-generic/memory_model.h>
17677diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17678index b3bebf9..e1f5d95 100644
17679--- a/arch/x86/include/asm/page_64.h
17680+++ b/arch/x86/include/asm/page_64.h
17681@@ -7,9 +7,9 @@
17682
17683 /* duplicated to the one in bootmem.h */
17684 extern unsigned long max_pfn;
17685-extern unsigned long phys_base;
17686+extern const unsigned long phys_base;
17687
17688-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17689+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17690 {
17691 unsigned long y = x - __START_KERNEL_map;
17692
17693diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17694index 32444ae..1a1624b 100644
17695--- a/arch/x86/include/asm/paravirt.h
17696+++ b/arch/x86/include/asm/paravirt.h
17697@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17698 return (pmd_t) { ret };
17699 }
17700
17701-static inline pmdval_t pmd_val(pmd_t pmd)
17702+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17703 {
17704 pmdval_t ret;
17705
17706@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17707 val);
17708 }
17709
17710+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17711+{
17712+ pgdval_t val = native_pgd_val(pgd);
17713+
17714+ if (sizeof(pgdval_t) > sizeof(long))
17715+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17716+ val, (u64)val >> 32);
17717+ else
17718+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17719+ val);
17720+}
17721+
17722 static inline void pgd_clear(pgd_t *pgdp)
17723 {
17724 set_pgd(pgdp, __pgd(0));
17725@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17726 pv_mmu_ops.set_fixmap(idx, phys, flags);
17727 }
17728
17729+#ifdef CONFIG_PAX_KERNEXEC
17730+static inline unsigned long pax_open_kernel(void)
17731+{
17732+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17733+}
17734+
17735+static inline unsigned long pax_close_kernel(void)
17736+{
17737+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17738+}
17739+#else
17740+static inline unsigned long pax_open_kernel(void) { return 0; }
17741+static inline unsigned long pax_close_kernel(void) { return 0; }
17742+#endif
17743+
17744 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17745
17746 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17747@@ -906,7 +933,7 @@ extern void default_banner(void);
17748
17749 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17750 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17751-#define PARA_INDIRECT(addr) *%cs:addr
17752+#define PARA_INDIRECT(addr) *%ss:addr
17753 #endif
17754
17755 #define INTERRUPT_RETURN \
17756@@ -981,6 +1008,21 @@ extern void default_banner(void);
17757 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17758 CLBR_NONE, \
17759 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17760+
17761+#define GET_CR0_INTO_RDI \
17762+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17763+ mov %rax,%rdi
17764+
17765+#define SET_RDI_INTO_CR0 \
17766+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17767+
17768+#define GET_CR3_INTO_RDI \
17769+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17770+ mov %rax,%rdi
17771+
17772+#define SET_RDI_INTO_CR3 \
17773+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17774+
17775 #endif /* CONFIG_X86_32 */
17776
17777 #endif /* __ASSEMBLY__ */
17778diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17779index 7549b8b..f0edfda 100644
17780--- a/arch/x86/include/asm/paravirt_types.h
17781+++ b/arch/x86/include/asm/paravirt_types.h
17782@@ -84,7 +84,7 @@ struct pv_init_ops {
17783 */
17784 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17785 unsigned long addr, unsigned len);
17786-};
17787+} __no_const __no_randomize_layout;
17788
17789
17790 struct pv_lazy_ops {
17791@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17792 void (*enter)(void);
17793 void (*leave)(void);
17794 void (*flush)(void);
17795-};
17796+} __no_randomize_layout;
17797
17798 struct pv_time_ops {
17799 unsigned long long (*sched_clock)(void);
17800 unsigned long long (*steal_clock)(int cpu);
17801 unsigned long (*get_tsc_khz)(void);
17802-};
17803+} __no_const __no_randomize_layout;
17804
17805 struct pv_cpu_ops {
17806 /* hooks for various privileged instructions */
17807@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17808
17809 void (*start_context_switch)(struct task_struct *prev);
17810 void (*end_context_switch)(struct task_struct *next);
17811-};
17812+} __no_const __no_randomize_layout;
17813
17814 struct pv_irq_ops {
17815 /*
17816@@ -215,7 +215,7 @@ struct pv_irq_ops {
17817 #ifdef CONFIG_X86_64
17818 void (*adjust_exception_frame)(void);
17819 #endif
17820-};
17821+} __no_randomize_layout;
17822
17823 struct pv_apic_ops {
17824 #ifdef CONFIG_X86_LOCAL_APIC
17825@@ -223,7 +223,7 @@ struct pv_apic_ops {
17826 unsigned long start_eip,
17827 unsigned long start_esp);
17828 #endif
17829-};
17830+} __no_const __no_randomize_layout;
17831
17832 struct pv_mmu_ops {
17833 unsigned long (*read_cr2)(void);
17834@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17835 struct paravirt_callee_save make_pud;
17836
17837 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17838+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17839 #endif /* PAGETABLE_LEVELS == 4 */
17840 #endif /* PAGETABLE_LEVELS >= 3 */
17841
17842@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17843 an mfn. We can tell which is which from the index. */
17844 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17845 phys_addr_t phys, pgprot_t flags);
17846-};
17847+
17848+#ifdef CONFIG_PAX_KERNEXEC
17849+ unsigned long (*pax_open_kernel)(void);
17850+ unsigned long (*pax_close_kernel)(void);
17851+#endif
17852+
17853+} __no_randomize_layout;
17854
17855 struct arch_spinlock;
17856 #ifdef CONFIG_SMP
17857@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17858 struct pv_lock_ops {
17859 struct paravirt_callee_save lock_spinning;
17860 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17861-};
17862+} __no_randomize_layout;
17863
17864 /* This contains all the paravirt structures: we get a convenient
17865 * number for each function using the offset which we use to indicate
17866- * what to patch. */
17867+ * what to patch.
17868+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17869+ */
17870+
17871 struct paravirt_patch_template {
17872 struct pv_init_ops pv_init_ops;
17873 struct pv_time_ops pv_time_ops;
17874@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17875 struct pv_apic_ops pv_apic_ops;
17876 struct pv_mmu_ops pv_mmu_ops;
17877 struct pv_lock_ops pv_lock_ops;
17878-};
17879+} __no_randomize_layout;
17880
17881 extern struct pv_info pv_info;
17882 extern struct pv_init_ops pv_init_ops;
17883diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17884index c4412e9..90e88c5 100644
17885--- a/arch/x86/include/asm/pgalloc.h
17886+++ b/arch/x86/include/asm/pgalloc.h
17887@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17888 pmd_t *pmd, pte_t *pte)
17889 {
17890 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17891+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17892+}
17893+
17894+static inline void pmd_populate_user(struct mm_struct *mm,
17895+ pmd_t *pmd, pte_t *pte)
17896+{
17897+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17898 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17899 }
17900
17901@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17902
17903 #ifdef CONFIG_X86_PAE
17904 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17905+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17906+{
17907+ pud_populate(mm, pudp, pmd);
17908+}
17909 #else /* !CONFIG_X86_PAE */
17910 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17911 {
17912 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17913 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17914 }
17915+
17916+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17917+{
17918+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17919+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17920+}
17921 #endif /* CONFIG_X86_PAE */
17922
17923 #if PAGETABLE_LEVELS > 3
17924@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17925 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17926 }
17927
17928+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17929+{
17930+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17931+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17932+}
17933+
17934 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17935 {
17936 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17937diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17938index 206a87f..1623b06 100644
17939--- a/arch/x86/include/asm/pgtable-2level.h
17940+++ b/arch/x86/include/asm/pgtable-2level.h
17941@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17942
17943 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17944 {
17945+ pax_open_kernel();
17946 *pmdp = pmd;
17947+ pax_close_kernel();
17948 }
17949
17950 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17951diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17952index 81bb91b..9392125 100644
17953--- a/arch/x86/include/asm/pgtable-3level.h
17954+++ b/arch/x86/include/asm/pgtable-3level.h
17955@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17956
17957 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17958 {
17959+ pax_open_kernel();
17960 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17961+ pax_close_kernel();
17962 }
17963
17964 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17965 {
17966+ pax_open_kernel();
17967 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17968+ pax_close_kernel();
17969 }
17970
17971 /*
17972diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17973index e8a5454..1539359 100644
17974--- a/arch/x86/include/asm/pgtable.h
17975+++ b/arch/x86/include/asm/pgtable.h
17976@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17977
17978 #ifndef __PAGETABLE_PUD_FOLDED
17979 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17980+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17981 #define pgd_clear(pgd) native_pgd_clear(pgd)
17982 #endif
17983
17984@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17985
17986 #define arch_end_context_switch(prev) do {} while(0)
17987
17988+#define pax_open_kernel() native_pax_open_kernel()
17989+#define pax_close_kernel() native_pax_close_kernel()
17990 #endif /* CONFIG_PARAVIRT */
17991
17992+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17993+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17994+
17995+#ifdef CONFIG_PAX_KERNEXEC
17996+static inline unsigned long native_pax_open_kernel(void)
17997+{
17998+ unsigned long cr0;
17999+
18000+ preempt_disable();
18001+ barrier();
18002+ cr0 = read_cr0() ^ X86_CR0_WP;
18003+ BUG_ON(cr0 & X86_CR0_WP);
18004+ write_cr0(cr0);
18005+ barrier();
18006+ return cr0 ^ X86_CR0_WP;
18007+}
18008+
18009+static inline unsigned long native_pax_close_kernel(void)
18010+{
18011+ unsigned long cr0;
18012+
18013+ barrier();
18014+ cr0 = read_cr0() ^ X86_CR0_WP;
18015+ BUG_ON(!(cr0 & X86_CR0_WP));
18016+ write_cr0(cr0);
18017+ barrier();
18018+ preempt_enable_no_resched();
18019+ return cr0 ^ X86_CR0_WP;
18020+}
18021+#else
18022+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18023+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18024+#endif
18025+
18026 /*
18027 * The following only work if pte_present() is true.
18028 * Undefined behaviour if not..
18029 */
18030+static inline int pte_user(pte_t pte)
18031+{
18032+ return pte_val(pte) & _PAGE_USER;
18033+}
18034+
18035 static inline int pte_dirty(pte_t pte)
18036 {
18037 return pte_flags(pte) & _PAGE_DIRTY;
18038@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18039 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18040 }
18041
18042+static inline unsigned long pgd_pfn(pgd_t pgd)
18043+{
18044+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18045+}
18046+
18047 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18048
18049 static inline int pmd_large(pmd_t pte)
18050@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18051 return pte_clear_flags(pte, _PAGE_RW);
18052 }
18053
18054+static inline pte_t pte_mkread(pte_t pte)
18055+{
18056+ return __pte(pte_val(pte) | _PAGE_USER);
18057+}
18058+
18059 static inline pte_t pte_mkexec(pte_t pte)
18060 {
18061- return pte_clear_flags(pte, _PAGE_NX);
18062+#ifdef CONFIG_X86_PAE
18063+ if (__supported_pte_mask & _PAGE_NX)
18064+ return pte_clear_flags(pte, _PAGE_NX);
18065+ else
18066+#endif
18067+ return pte_set_flags(pte, _PAGE_USER);
18068+}
18069+
18070+static inline pte_t pte_exprotect(pte_t pte)
18071+{
18072+#ifdef CONFIG_X86_PAE
18073+ if (__supported_pte_mask & _PAGE_NX)
18074+ return pte_set_flags(pte, _PAGE_NX);
18075+ else
18076+#endif
18077+ return pte_clear_flags(pte, _PAGE_USER);
18078 }
18079
18080 static inline pte_t pte_mkdirty(pte_t pte)
18081@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18082 #endif
18083
18084 #ifndef __ASSEMBLY__
18085+
18086+#ifdef CONFIG_PAX_PER_CPU_PGD
18087+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18088+enum cpu_pgd_type {kernel = 0, user = 1};
18089+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18090+{
18091+ return cpu_pgd[cpu][type];
18092+}
18093+#endif
18094+
18095 #include <linux/mm_types.h>
18096 #include <linux/mmdebug.h>
18097 #include <linux/log2.h>
18098@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18099 * Currently stuck as a macro due to indirect forward reference to
18100 * linux/mmzone.h's __section_mem_map_addr() definition:
18101 */
18102-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18103+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18104
18105 /* Find an entry in the second-level page table.. */
18106 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18107@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18108 * Currently stuck as a macro due to indirect forward reference to
18109 * linux/mmzone.h's __section_mem_map_addr() definition:
18110 */
18111-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18112+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18113
18114 /* to find an entry in a page-table-directory. */
18115 static inline unsigned long pud_index(unsigned long address)
18116@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18117
18118 static inline int pgd_bad(pgd_t pgd)
18119 {
18120- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18121+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18122 }
18123
18124 static inline int pgd_none(pgd_t pgd)
18125@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
18126 * pgd_offset() returns a (pgd_t *)
18127 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18128 */
18129-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18130+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18131+
18132+#ifdef CONFIG_PAX_PER_CPU_PGD
18133+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18134+#endif
18135+
18136 /*
18137 * a shortcut which implies the use of the kernel's pgd, instead
18138 * of a process's
18139@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
18140 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18141 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18142
18143+#ifdef CONFIG_X86_32
18144+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18145+#else
18146+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18147+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18148+
18149+#ifdef CONFIG_PAX_MEMORY_UDEREF
18150+#ifdef __ASSEMBLY__
18151+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18152+#else
18153+extern unsigned long pax_user_shadow_base;
18154+extern pgdval_t clone_pgd_mask;
18155+#endif
18156+#endif
18157+
18158+#endif
18159+
18160 #ifndef __ASSEMBLY__
18161
18162 extern int direct_gbpages;
18163@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18164 * dst and src can be on the same page, but the range must not overlap,
18165 * and must not cross a page boundary.
18166 */
18167-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18168+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18169 {
18170- memcpy(dst, src, count * sizeof(pgd_t));
18171+ pax_open_kernel();
18172+ while (count--)
18173+ *dst++ = *src++;
18174+ pax_close_kernel();
18175 }
18176
18177+#ifdef CONFIG_PAX_PER_CPU_PGD
18178+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18179+#endif
18180+
18181+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18182+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18183+#else
18184+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18185+#endif
18186+
18187 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18188 static inline int page_level_shift(enum pg_level level)
18189 {
18190diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18191index b6c0b40..3535d47 100644
18192--- a/arch/x86/include/asm/pgtable_32.h
18193+++ b/arch/x86/include/asm/pgtable_32.h
18194@@ -25,9 +25,6 @@
18195 struct mm_struct;
18196 struct vm_area_struct;
18197
18198-extern pgd_t swapper_pg_dir[1024];
18199-extern pgd_t initial_page_table[1024];
18200-
18201 static inline void pgtable_cache_init(void) { }
18202 static inline void check_pgt_cache(void) { }
18203 void paging_init(void);
18204@@ -45,6 +42,12 @@ void paging_init(void);
18205 # include <asm/pgtable-2level.h>
18206 #endif
18207
18208+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18209+extern pgd_t initial_page_table[PTRS_PER_PGD];
18210+#ifdef CONFIG_X86_PAE
18211+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18212+#endif
18213+
18214 #if defined(CONFIG_HIGHPTE)
18215 #define pte_offset_map(dir, address) \
18216 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18217@@ -59,12 +62,17 @@ void paging_init(void);
18218 /* Clear a kernel PTE and flush it from the TLB */
18219 #define kpte_clear_flush(ptep, vaddr) \
18220 do { \
18221+ pax_open_kernel(); \
18222 pte_clear(&init_mm, (vaddr), (ptep)); \
18223+ pax_close_kernel(); \
18224 __flush_tlb_one((vaddr)); \
18225 } while (0)
18226
18227 #endif /* !__ASSEMBLY__ */
18228
18229+#define HAVE_ARCH_UNMAPPED_AREA
18230+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18231+
18232 /*
18233 * kern_addr_valid() is (1) for FLATMEM and (0) for
18234 * SPARSEMEM and DISCONTIGMEM
18235diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18236index 9fb2f2b..b04b4bf 100644
18237--- a/arch/x86/include/asm/pgtable_32_types.h
18238+++ b/arch/x86/include/asm/pgtable_32_types.h
18239@@ -8,7 +8,7 @@
18240 */
18241 #ifdef CONFIG_X86_PAE
18242 # include <asm/pgtable-3level_types.h>
18243-# define PMD_SIZE (1UL << PMD_SHIFT)
18244+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18245 # define PMD_MASK (~(PMD_SIZE - 1))
18246 #else
18247 # include <asm/pgtable-2level_types.h>
18248@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18249 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18250 #endif
18251
18252+#ifdef CONFIG_PAX_KERNEXEC
18253+#ifndef __ASSEMBLY__
18254+extern unsigned char MODULES_EXEC_VADDR[];
18255+extern unsigned char MODULES_EXEC_END[];
18256+#endif
18257+#include <asm/boot.h>
18258+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18259+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18260+#else
18261+#define ktla_ktva(addr) (addr)
18262+#define ktva_ktla(addr) (addr)
18263+#endif
18264+
18265 #define MODULES_VADDR VMALLOC_START
18266 #define MODULES_END VMALLOC_END
18267 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18268diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18269index 4572b2f..4430113 100644
18270--- a/arch/x86/include/asm/pgtable_64.h
18271+++ b/arch/x86/include/asm/pgtable_64.h
18272@@ -16,11 +16,16 @@
18273
18274 extern pud_t level3_kernel_pgt[512];
18275 extern pud_t level3_ident_pgt[512];
18276+extern pud_t level3_vmalloc_start_pgt[512];
18277+extern pud_t level3_vmalloc_end_pgt[512];
18278+extern pud_t level3_vmemmap_pgt[512];
18279+extern pud_t level2_vmemmap_pgt[512];
18280 extern pmd_t level2_kernel_pgt[512];
18281 extern pmd_t level2_fixmap_pgt[512];
18282-extern pmd_t level2_ident_pgt[512];
18283+extern pmd_t level2_ident_pgt[512*2];
18284 extern pte_t level1_fixmap_pgt[512];
18285-extern pgd_t init_level4_pgt[];
18286+extern pte_t level1_vsyscall_pgt[512];
18287+extern pgd_t init_level4_pgt[512];
18288
18289 #define swapper_pg_dir init_level4_pgt
18290
18291@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18292
18293 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18294 {
18295+ pax_open_kernel();
18296 *pmdp = pmd;
18297+ pax_close_kernel();
18298 }
18299
18300 static inline void native_pmd_clear(pmd_t *pmd)
18301@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18302
18303 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18304 {
18305+ pax_open_kernel();
18306 *pudp = pud;
18307+ pax_close_kernel();
18308 }
18309
18310 static inline void native_pud_clear(pud_t *pud)
18311@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18312
18313 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18314 {
18315+ pax_open_kernel();
18316+ *pgdp = pgd;
18317+ pax_close_kernel();
18318+}
18319+
18320+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18321+{
18322 *pgdp = pgd;
18323 }
18324
18325diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18326index 602b602..acb53ed 100644
18327--- a/arch/x86/include/asm/pgtable_64_types.h
18328+++ b/arch/x86/include/asm/pgtable_64_types.h
18329@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18330 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18331 #define MODULES_END _AC(0xffffffffff000000, UL)
18332 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18333+#define MODULES_EXEC_VADDR MODULES_VADDR
18334+#define MODULES_EXEC_END MODULES_END
18335 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18336 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18337 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18338 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18339
18340+#define ktla_ktva(addr) (addr)
18341+#define ktva_ktla(addr) (addr)
18342+
18343 #define EARLY_DYNAMIC_PAGE_TABLES 64
18344
18345 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18346diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18347index 25bcd4a..bf3f815 100644
18348--- a/arch/x86/include/asm/pgtable_types.h
18349+++ b/arch/x86/include/asm/pgtable_types.h
18350@@ -110,8 +110,10 @@
18351
18352 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18353 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18354-#else
18355+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18356 #define _PAGE_NX (_AT(pteval_t, 0))
18357+#else
18358+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18359 #endif
18360
18361 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18362@@ -167,6 +169,9 @@ enum page_cache_mode {
18363 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18364 _PAGE_ACCESSED)
18365
18366+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18367+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18368+
18369 #define __PAGE_KERNEL_EXEC \
18370 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18371 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18372@@ -174,7 +179,7 @@ enum page_cache_mode {
18373 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18374 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18375 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18376-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18377+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18378 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18379 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18380 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18381@@ -220,7 +225,7 @@ enum page_cache_mode {
18382 #ifdef CONFIG_X86_64
18383 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18384 #else
18385-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18386+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18387 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18388 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18389 #endif
18390@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18391 {
18392 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18393 }
18394+#endif
18395
18396+#if PAGETABLE_LEVELS == 3
18397+#include <asm-generic/pgtable-nopud.h>
18398+#endif
18399+
18400+#if PAGETABLE_LEVELS == 2
18401+#include <asm-generic/pgtable-nopmd.h>
18402+#endif
18403+
18404+#ifndef __ASSEMBLY__
18405 #if PAGETABLE_LEVELS > 3
18406 typedef struct { pudval_t pud; } pud_t;
18407
18408@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18409 return pud.pud;
18410 }
18411 #else
18412-#include <asm-generic/pgtable-nopud.h>
18413-
18414 static inline pudval_t native_pud_val(pud_t pud)
18415 {
18416 return native_pgd_val(pud.pgd);
18417@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18418 return pmd.pmd;
18419 }
18420 #else
18421-#include <asm-generic/pgtable-nopmd.h>
18422-
18423 static inline pmdval_t native_pmd_val(pmd_t pmd)
18424 {
18425 return native_pgd_val(pmd.pud.pgd);
18426@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
18427
18428 extern pteval_t __supported_pte_mask;
18429 extern void set_nx(void);
18430-extern int nx_enabled;
18431
18432 #define pgprot_writecombine pgprot_writecombine
18433 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18434diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18435index 8f327184..368fb29 100644
18436--- a/arch/x86/include/asm/preempt.h
18437+++ b/arch/x86/include/asm/preempt.h
18438@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18439 */
18440 static __always_inline bool __preempt_count_dec_and_test(void)
18441 {
18442- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18443+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18444 }
18445
18446 /*
18447diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18448index a092a0c..8e9640b 100644
18449--- a/arch/x86/include/asm/processor.h
18450+++ b/arch/x86/include/asm/processor.h
18451@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18452 /* Index into per_cpu list: */
18453 u16 cpu_index;
18454 u32 microcode;
18455-};
18456+} __randomize_layout;
18457
18458 #define X86_VENDOR_INTEL 0
18459 #define X86_VENDOR_CYRIX 1
18460@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18461 : "memory");
18462 }
18463
18464+/* invpcid (%rdx),%rax */
18465+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18466+
18467+#define INVPCID_SINGLE_ADDRESS 0UL
18468+#define INVPCID_SINGLE_CONTEXT 1UL
18469+#define INVPCID_ALL_GLOBAL 2UL
18470+#define INVPCID_ALL_NONGLOBAL 3UL
18471+
18472+#define PCID_KERNEL 0UL
18473+#define PCID_USER 1UL
18474+#define PCID_NOFLUSH (1UL << 63)
18475+
18476 static inline void load_cr3(pgd_t *pgdir)
18477 {
18478- write_cr3(__pa(pgdir));
18479+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18480 }
18481
18482 #ifdef CONFIG_X86_32
18483@@ -282,7 +294,7 @@ struct tss_struct {
18484
18485 } ____cacheline_aligned;
18486
18487-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18488+extern struct tss_struct init_tss[NR_CPUS];
18489
18490 /*
18491 * Save the original ist values for checking stack pointers during debugging
18492@@ -479,6 +491,7 @@ struct thread_struct {
18493 unsigned short ds;
18494 unsigned short fsindex;
18495 unsigned short gsindex;
18496+ unsigned short ss;
18497 #endif
18498 #ifdef CONFIG_X86_32
18499 unsigned long ip;
18500@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18501 extern unsigned long mmu_cr4_features;
18502 extern u32 *trampoline_cr4_features;
18503
18504-static inline void set_in_cr4(unsigned long mask)
18505-{
18506- unsigned long cr4;
18507-
18508- mmu_cr4_features |= mask;
18509- if (trampoline_cr4_features)
18510- *trampoline_cr4_features = mmu_cr4_features;
18511- cr4 = read_cr4();
18512- cr4 |= mask;
18513- write_cr4(cr4);
18514-}
18515-
18516-static inline void clear_in_cr4(unsigned long mask)
18517-{
18518- unsigned long cr4;
18519-
18520- mmu_cr4_features &= ~mask;
18521- if (trampoline_cr4_features)
18522- *trampoline_cr4_features = mmu_cr4_features;
18523- cr4 = read_cr4();
18524- cr4 &= ~mask;
18525- write_cr4(cr4);
18526-}
18527+extern void set_in_cr4(unsigned long mask);
18528+extern void clear_in_cr4(unsigned long mask);
18529
18530 typedef struct {
18531 unsigned long seg;
18532@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
18533 */
18534 #define TASK_SIZE PAGE_OFFSET
18535 #define TASK_SIZE_MAX TASK_SIZE
18536+
18537+#ifdef CONFIG_PAX_SEGMEXEC
18538+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18539+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18540+#else
18541 #define STACK_TOP TASK_SIZE
18542-#define STACK_TOP_MAX STACK_TOP
18543+#endif
18544+
18545+#define STACK_TOP_MAX TASK_SIZE
18546
18547 #define INIT_THREAD { \
18548- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18549+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18550 .vm86_info = NULL, \
18551 .sysenter_cs = __KERNEL_CS, \
18552 .io_bitmap_ptr = NULL, \
18553@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18554 */
18555 #define INIT_TSS { \
18556 .x86_tss = { \
18557- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18558+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18559 .ss0 = __KERNEL_DS, \
18560 .ss1 = __KERNEL_CS, \
18561 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18562@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
18563 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18564
18565 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18566-#define KSTK_TOP(info) \
18567-({ \
18568- unsigned long *__ptr = (unsigned long *)(info); \
18569- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18570-})
18571+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18572
18573 /*
18574 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18575@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18576 #define task_pt_regs(task) \
18577 ({ \
18578 struct pt_regs *__regs__; \
18579- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18580+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18581 __regs__ - 1; \
18582 })
18583
18584@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18585 * particular problem by preventing anything from being mapped
18586 * at the maximum canonical address.
18587 */
18588-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18589+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18590
18591 /* This decides where the kernel will search for a free chunk of vm
18592 * space during mmap's.
18593 */
18594 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18595- 0xc0000000 : 0xFFFFe000)
18596+ 0xc0000000 : 0xFFFFf000)
18597
18598 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18599 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18600@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18601 #define STACK_TOP_MAX TASK_SIZE_MAX
18602
18603 #define INIT_THREAD { \
18604- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18605+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18606 }
18607
18608 #define INIT_TSS { \
18609- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18610+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18611 }
18612
18613 /*
18614@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18615 */
18616 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18617
18618+#ifdef CONFIG_PAX_SEGMEXEC
18619+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18620+#endif
18621+
18622 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18623
18624 /* Get/set a process' ability to use the timestamp counter instruction */
18625@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18626 return 0;
18627 }
18628
18629-extern unsigned long arch_align_stack(unsigned long sp);
18630+#define arch_align_stack(x) ((x) & ~0xfUL)
18631 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18632
18633 void default_idle(void);
18634@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
18635 #define xen_set_default_idle 0
18636 #endif
18637
18638-void stop_this_cpu(void *dummy);
18639+void stop_this_cpu(void *dummy) __noreturn;
18640 void df_debug(struct pt_regs *regs, long error_code);
18641 #endif /* _ASM_X86_PROCESSOR_H */
18642diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18643index 86fc2bb..bd5049a 100644
18644--- a/arch/x86/include/asm/ptrace.h
18645+++ b/arch/x86/include/asm/ptrace.h
18646@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18647 }
18648
18649 /*
18650- * user_mode_vm(regs) determines whether a register set came from user mode.
18651+ * user_mode(regs) determines whether a register set came from user mode.
18652 * This is true if V8086 mode was enabled OR if the register set was from
18653 * protected mode with RPL-3 CS value. This tricky test checks that with
18654 * one comparison. Many places in the kernel can bypass this full check
18655- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18656+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18657+ * be used.
18658 */
18659-static inline int user_mode(struct pt_regs *regs)
18660+static inline int user_mode_novm(struct pt_regs *regs)
18661 {
18662 #ifdef CONFIG_X86_32
18663 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18664 #else
18665- return !!(regs->cs & 3);
18666+ return !!(regs->cs & SEGMENT_RPL_MASK);
18667 #endif
18668 }
18669
18670-static inline int user_mode_vm(struct pt_regs *regs)
18671+static inline int user_mode(struct pt_regs *regs)
18672 {
18673 #ifdef CONFIG_X86_32
18674 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18675 USER_RPL;
18676 #else
18677- return user_mode(regs);
18678+ return user_mode_novm(regs);
18679 #endif
18680 }
18681
18682@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18683 #ifdef CONFIG_X86_64
18684 static inline bool user_64bit_mode(struct pt_regs *regs)
18685 {
18686+ unsigned long cs = regs->cs & 0xffff;
18687 #ifndef CONFIG_PARAVIRT
18688 /*
18689 * On non-paravirt systems, this is the only long mode CPL 3
18690 * selector. We do not allow long mode selectors in the LDT.
18691 */
18692- return regs->cs == __USER_CS;
18693+ return cs == __USER_CS;
18694 #else
18695 /* Headers are too twisted for this to go in paravirt.h. */
18696- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18697+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18698 #endif
18699 }
18700
18701@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18702 * Traps from the kernel do not save sp and ss.
18703 * Use the helper function to retrieve sp.
18704 */
18705- if (offset == offsetof(struct pt_regs, sp) &&
18706- regs->cs == __KERNEL_CS)
18707- return kernel_stack_pointer(regs);
18708+ if (offset == offsetof(struct pt_regs, sp)) {
18709+ unsigned long cs = regs->cs & 0xffff;
18710+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18711+ return kernel_stack_pointer(regs);
18712+ }
18713 #endif
18714 return *(unsigned long *)((unsigned long)regs + offset);
18715 }
18716diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18717index ae0e241..e80b10b 100644
18718--- a/arch/x86/include/asm/qrwlock.h
18719+++ b/arch/x86/include/asm/qrwlock.h
18720@@ -7,8 +7,8 @@
18721 #define queue_write_unlock queue_write_unlock
18722 static inline void queue_write_unlock(struct qrwlock *lock)
18723 {
18724- barrier();
18725- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18726+ barrier();
18727+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18728 }
18729 #endif
18730
18731diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18732index 9c6b890..5305f53 100644
18733--- a/arch/x86/include/asm/realmode.h
18734+++ b/arch/x86/include/asm/realmode.h
18735@@ -22,16 +22,14 @@ struct real_mode_header {
18736 #endif
18737 /* APM/BIOS reboot */
18738 u32 machine_real_restart_asm;
18739-#ifdef CONFIG_X86_64
18740 u32 machine_real_restart_seg;
18741-#endif
18742 };
18743
18744 /* This must match data at trampoline_32/64.S */
18745 struct trampoline_header {
18746 #ifdef CONFIG_X86_32
18747 u32 start;
18748- u16 gdt_pad;
18749+ u16 boot_cs;
18750 u16 gdt_limit;
18751 u32 gdt_base;
18752 #else
18753diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18754index a82c4f1..ac45053 100644
18755--- a/arch/x86/include/asm/reboot.h
18756+++ b/arch/x86/include/asm/reboot.h
18757@@ -6,13 +6,13 @@
18758 struct pt_regs;
18759
18760 struct machine_ops {
18761- void (*restart)(char *cmd);
18762- void (*halt)(void);
18763- void (*power_off)(void);
18764+ void (* __noreturn restart)(char *cmd);
18765+ void (* __noreturn halt)(void);
18766+ void (* __noreturn power_off)(void);
18767 void (*shutdown)(void);
18768 void (*crash_shutdown)(struct pt_regs *);
18769- void (*emergency_restart)(void);
18770-};
18771+ void (* __noreturn emergency_restart)(void);
18772+} __no_const;
18773
18774 extern struct machine_ops machine_ops;
18775
18776diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18777index 8f7866a..e442f20 100644
18778--- a/arch/x86/include/asm/rmwcc.h
18779+++ b/arch/x86/include/asm/rmwcc.h
18780@@ -3,7 +3,34 @@
18781
18782 #ifdef CC_HAVE_ASM_GOTO
18783
18784-#define __GEN_RMWcc(fullop, var, cc, ...) \
18785+#ifdef CONFIG_PAX_REFCOUNT
18786+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18787+do { \
18788+ asm_volatile_goto (fullop \
18789+ ";jno 0f\n" \
18790+ fullantiop \
18791+ ";int $4\n0:\n" \
18792+ _ASM_EXTABLE(0b, 0b) \
18793+ ";j" cc " %l[cc_label]" \
18794+ : : "m" (var), ## __VA_ARGS__ \
18795+ : "memory" : cc_label); \
18796+ return 0; \
18797+cc_label: \
18798+ return 1; \
18799+} while (0)
18800+#else
18801+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18802+do { \
18803+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18804+ : : "m" (var), ## __VA_ARGS__ \
18805+ : "memory" : cc_label); \
18806+ return 0; \
18807+cc_label: \
18808+ return 1; \
18809+} while (0)
18810+#endif
18811+
18812+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18813 do { \
18814 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18815 : : "m" (var), ## __VA_ARGS__ \
18816@@ -13,15 +40,46 @@ cc_label: \
18817 return 1; \
18818 } while (0)
18819
18820-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18821- __GEN_RMWcc(op " " arg0, var, cc)
18822+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18823+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18824
18825-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18826- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18827+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18828+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18829+
18830+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18831+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18832+
18833+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18834+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18835
18836 #else /* !CC_HAVE_ASM_GOTO */
18837
18838-#define __GEN_RMWcc(fullop, var, cc, ...) \
18839+#ifdef CONFIG_PAX_REFCOUNT
18840+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18841+do { \
18842+ char c; \
18843+ asm volatile (fullop \
18844+ ";jno 0f\n" \
18845+ fullantiop \
18846+ ";int $4\n0:\n" \
18847+ _ASM_EXTABLE(0b, 0b) \
18848+ "; set" cc " %1" \
18849+ : "+m" (var), "=qm" (c) \
18850+ : __VA_ARGS__ : "memory"); \
18851+ return c != 0; \
18852+} while (0)
18853+#else
18854+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18855+do { \
18856+ char c; \
18857+ asm volatile (fullop "; set" cc " %1" \
18858+ : "+m" (var), "=qm" (c) \
18859+ : __VA_ARGS__ : "memory"); \
18860+ return c != 0; \
18861+} while (0)
18862+#endif
18863+
18864+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18865 do { \
18866 char c; \
18867 asm volatile (fullop "; set" cc " %1" \
18868@@ -30,11 +88,17 @@ do { \
18869 return c != 0; \
18870 } while (0)
18871
18872-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18873- __GEN_RMWcc(op " " arg0, var, cc)
18874+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18875+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18876+
18877+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18878+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18879+
18880+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18881+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18882
18883-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18884- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18885+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18886+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18887
18888 #endif /* CC_HAVE_ASM_GOTO */
18889
18890diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18891index cad82c9..2e5c5c1 100644
18892--- a/arch/x86/include/asm/rwsem.h
18893+++ b/arch/x86/include/asm/rwsem.h
18894@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18895 {
18896 asm volatile("# beginning down_read\n\t"
18897 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18898+
18899+#ifdef CONFIG_PAX_REFCOUNT
18900+ "jno 0f\n"
18901+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18902+ "int $4\n0:\n"
18903+ _ASM_EXTABLE(0b, 0b)
18904+#endif
18905+
18906 /* adds 0x00000001 */
18907 " jns 1f\n"
18908 " call call_rwsem_down_read_failed\n"
18909@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18910 "1:\n\t"
18911 " mov %1,%2\n\t"
18912 " add %3,%2\n\t"
18913+
18914+#ifdef CONFIG_PAX_REFCOUNT
18915+ "jno 0f\n"
18916+ "sub %3,%2\n"
18917+ "int $4\n0:\n"
18918+ _ASM_EXTABLE(0b, 0b)
18919+#endif
18920+
18921 " jle 2f\n\t"
18922 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18923 " jnz 1b\n\t"
18924@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18925 long tmp;
18926 asm volatile("# beginning down_write\n\t"
18927 LOCK_PREFIX " xadd %1,(%2)\n\t"
18928+
18929+#ifdef CONFIG_PAX_REFCOUNT
18930+ "jno 0f\n"
18931+ "mov %1,(%2)\n"
18932+ "int $4\n0:\n"
18933+ _ASM_EXTABLE(0b, 0b)
18934+#endif
18935+
18936 /* adds 0xffff0001, returns the old value */
18937 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18938 /* was the active mask 0 before? */
18939@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18940 long tmp;
18941 asm volatile("# beginning __up_read\n\t"
18942 LOCK_PREFIX " xadd %1,(%2)\n\t"
18943+
18944+#ifdef CONFIG_PAX_REFCOUNT
18945+ "jno 0f\n"
18946+ "mov %1,(%2)\n"
18947+ "int $4\n0:\n"
18948+ _ASM_EXTABLE(0b, 0b)
18949+#endif
18950+
18951 /* subtracts 1, returns the old value */
18952 " jns 1f\n\t"
18953 " call call_rwsem_wake\n" /* expects old value in %edx */
18954@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18955 long tmp;
18956 asm volatile("# beginning __up_write\n\t"
18957 LOCK_PREFIX " xadd %1,(%2)\n\t"
18958+
18959+#ifdef CONFIG_PAX_REFCOUNT
18960+ "jno 0f\n"
18961+ "mov %1,(%2)\n"
18962+ "int $4\n0:\n"
18963+ _ASM_EXTABLE(0b, 0b)
18964+#endif
18965+
18966 /* subtracts 0xffff0001, returns the old value */
18967 " jns 1f\n\t"
18968 " call call_rwsem_wake\n" /* expects old value in %edx */
18969@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18970 {
18971 asm volatile("# beginning __downgrade_write\n\t"
18972 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18973+
18974+#ifdef CONFIG_PAX_REFCOUNT
18975+ "jno 0f\n"
18976+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18977+ "int $4\n0:\n"
18978+ _ASM_EXTABLE(0b, 0b)
18979+#endif
18980+
18981 /*
18982 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18983 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18984@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18985 */
18986 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18987 {
18988- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18989+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18990+
18991+#ifdef CONFIG_PAX_REFCOUNT
18992+ "jno 0f\n"
18993+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18994+ "int $4\n0:\n"
18995+ _ASM_EXTABLE(0b, 0b)
18996+#endif
18997+
18998 : "+m" (sem->count)
18999 : "er" (delta));
19000 }
19001@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19002 */
19003 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19004 {
19005- return delta + xadd(&sem->count, delta);
19006+ return delta + xadd_check_overflow(&sem->count, delta);
19007 }
19008
19009 #endif /* __KERNEL__ */
19010diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19011index db257a5..b91bc77 100644
19012--- a/arch/x86/include/asm/segment.h
19013+++ b/arch/x86/include/asm/segment.h
19014@@ -73,10 +73,15 @@
19015 * 26 - ESPFIX small SS
19016 * 27 - per-cpu [ offset to per-cpu data area ]
19017 * 28 - stack_canary-20 [ for stack protector ]
19018- * 29 - unused
19019- * 30 - unused
19020+ * 29 - PCI BIOS CS
19021+ * 30 - PCI BIOS DS
19022 * 31 - TSS for double fault handler
19023 */
19024+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19025+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19026+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19027+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19028+
19029 #define GDT_ENTRY_TLS_MIN 6
19030 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19031
19032@@ -88,6 +93,8 @@
19033
19034 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19035
19036+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19037+
19038 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19039
19040 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19041@@ -113,6 +120,12 @@
19042 #define __KERNEL_STACK_CANARY 0
19043 #endif
19044
19045+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19046+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19047+
19048+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19049+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19050+
19051 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19052
19053 /*
19054@@ -140,7 +153,7 @@
19055 */
19056
19057 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19058-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19059+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19060
19061
19062 #else
19063@@ -164,6 +177,8 @@
19064 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19065 #define __USER32_DS __USER_DS
19066
19067+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19068+
19069 #define GDT_ENTRY_TSS 8 /* needs two entries */
19070 #define GDT_ENTRY_LDT 10 /* needs two entries */
19071 #define GDT_ENTRY_TLS_MIN 12
19072@@ -172,6 +187,8 @@
19073 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19074 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19075
19076+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19077+
19078 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19079 #define FS_TLS 0
19080 #define GS_TLS 1
19081@@ -179,12 +196,14 @@
19082 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19083 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19084
19085-#define GDT_ENTRIES 16
19086+#define GDT_ENTRIES 17
19087
19088 #endif
19089
19090 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19091+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19092 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19093+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19094 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19095 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19096 #ifndef CONFIG_PARAVIRT
19097@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19098 {
19099 unsigned long __limit;
19100 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19101- return __limit + 1;
19102+ return __limit;
19103 }
19104
19105 #endif /* !__ASSEMBLY__ */
19106diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19107index 8d3120f..352b440 100644
19108--- a/arch/x86/include/asm/smap.h
19109+++ b/arch/x86/include/asm/smap.h
19110@@ -25,11 +25,40 @@
19111
19112 #include <asm/alternative-asm.h>
19113
19114+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19115+#define ASM_PAX_OPEN_USERLAND \
19116+ 661: jmp 663f; \
19117+ .pushsection .altinstr_replacement, "a" ; \
19118+ 662: pushq %rax; nop; \
19119+ .popsection ; \
19120+ .pushsection .altinstructions, "a" ; \
19121+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19122+ .popsection ; \
19123+ call __pax_open_userland; \
19124+ popq %rax; \
19125+ 663:
19126+
19127+#define ASM_PAX_CLOSE_USERLAND \
19128+ 661: jmp 663f; \
19129+ .pushsection .altinstr_replacement, "a" ; \
19130+ 662: pushq %rax; nop; \
19131+ .popsection; \
19132+ .pushsection .altinstructions, "a" ; \
19133+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19134+ .popsection; \
19135+ call __pax_close_userland; \
19136+ popq %rax; \
19137+ 663:
19138+#else
19139+#define ASM_PAX_OPEN_USERLAND
19140+#define ASM_PAX_CLOSE_USERLAND
19141+#endif
19142+
19143 #ifdef CONFIG_X86_SMAP
19144
19145 #define ASM_CLAC \
19146 661: ASM_NOP3 ; \
19147- .pushsection .altinstr_replacement, "ax" ; \
19148+ .pushsection .altinstr_replacement, "a" ; \
19149 662: __ASM_CLAC ; \
19150 .popsection ; \
19151 .pushsection .altinstructions, "a" ; \
19152@@ -38,7 +67,7 @@
19153
19154 #define ASM_STAC \
19155 661: ASM_NOP3 ; \
19156- .pushsection .altinstr_replacement, "ax" ; \
19157+ .pushsection .altinstr_replacement, "a" ; \
19158 662: __ASM_STAC ; \
19159 .popsection ; \
19160 .pushsection .altinstructions, "a" ; \
19161@@ -56,6 +85,37 @@
19162
19163 #include <asm/alternative.h>
19164
19165+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19166+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19167+
19168+extern void __pax_open_userland(void);
19169+static __always_inline unsigned long pax_open_userland(void)
19170+{
19171+
19172+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19173+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19174+ :
19175+ : [open] "i" (__pax_open_userland)
19176+ : "memory", "rax");
19177+#endif
19178+
19179+ return 0;
19180+}
19181+
19182+extern void __pax_close_userland(void);
19183+static __always_inline unsigned long pax_close_userland(void)
19184+{
19185+
19186+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19187+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19188+ :
19189+ : [close] "i" (__pax_close_userland)
19190+ : "memory", "rax");
19191+#endif
19192+
19193+ return 0;
19194+}
19195+
19196 #ifdef CONFIG_X86_SMAP
19197
19198 static __always_inline void clac(void)
19199diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19200index 8cd1cc3..827e09e 100644
19201--- a/arch/x86/include/asm/smp.h
19202+++ b/arch/x86/include/asm/smp.h
19203@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19204 /* cpus sharing the last level cache: */
19205 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19206 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19207-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19208+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19209
19210 static inline struct cpumask *cpu_sibling_mask(int cpu)
19211 {
19212@@ -78,7 +78,7 @@ struct smp_ops {
19213
19214 void (*send_call_func_ipi)(const struct cpumask *mask);
19215 void (*send_call_func_single_ipi)(int cpu);
19216-};
19217+} __no_const;
19218
19219 /* Globals due to paravirt */
19220 extern void set_cpu_sibling_map(int cpu);
19221@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19222 extern int safe_smp_processor_id(void);
19223
19224 #elif defined(CONFIG_X86_64_SMP)
19225-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19226-
19227-#define stack_smp_processor_id() \
19228-({ \
19229- struct thread_info *ti; \
19230- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19231- ti->cpu; \
19232-})
19233+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19234+#define stack_smp_processor_id() raw_smp_processor_id()
19235 #define safe_smp_processor_id() smp_processor_id()
19236
19237 #endif
19238diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19239index 6a99859..03cb807 100644
19240--- a/arch/x86/include/asm/stackprotector.h
19241+++ b/arch/x86/include/asm/stackprotector.h
19242@@ -47,7 +47,7 @@
19243 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19244 */
19245 #define GDT_STACK_CANARY_INIT \
19246- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19247+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19248
19249 /*
19250 * Initialize the stackprotector canary value.
19251@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19252
19253 static inline void load_stack_canary_segment(void)
19254 {
19255-#ifdef CONFIG_X86_32
19256+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19257 asm volatile ("mov %0, %%gs" : : "r" (0));
19258 #endif
19259 }
19260diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19261index 70bbe39..4ae2bd4 100644
19262--- a/arch/x86/include/asm/stacktrace.h
19263+++ b/arch/x86/include/asm/stacktrace.h
19264@@ -11,28 +11,20 @@
19265
19266 extern int kstack_depth_to_print;
19267
19268-struct thread_info;
19269+struct task_struct;
19270 struct stacktrace_ops;
19271
19272-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19273- unsigned long *stack,
19274- unsigned long bp,
19275- const struct stacktrace_ops *ops,
19276- void *data,
19277- unsigned long *end,
19278- int *graph);
19279+typedef unsigned long walk_stack_t(struct task_struct *task,
19280+ void *stack_start,
19281+ unsigned long *stack,
19282+ unsigned long bp,
19283+ const struct stacktrace_ops *ops,
19284+ void *data,
19285+ unsigned long *end,
19286+ int *graph);
19287
19288-extern unsigned long
19289-print_context_stack(struct thread_info *tinfo,
19290- unsigned long *stack, unsigned long bp,
19291- const struct stacktrace_ops *ops, void *data,
19292- unsigned long *end, int *graph);
19293-
19294-extern unsigned long
19295-print_context_stack_bp(struct thread_info *tinfo,
19296- unsigned long *stack, unsigned long bp,
19297- const struct stacktrace_ops *ops, void *data,
19298- unsigned long *end, int *graph);
19299+extern walk_stack_t print_context_stack;
19300+extern walk_stack_t print_context_stack_bp;
19301
19302 /* Generic stack tracer with callbacks */
19303
19304@@ -40,7 +32,7 @@ struct stacktrace_ops {
19305 void (*address)(void *data, unsigned long address, int reliable);
19306 /* On negative return stop dumping */
19307 int (*stack)(void *data, char *name);
19308- walk_stack_t walk_stack;
19309+ walk_stack_t *walk_stack;
19310 };
19311
19312 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19313diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19314index 751bf4b..a1278b5 100644
19315--- a/arch/x86/include/asm/switch_to.h
19316+++ b/arch/x86/include/asm/switch_to.h
19317@@ -112,7 +112,7 @@ do { \
19318 "call __switch_to\n\t" \
19319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19320 __switch_canary \
19321- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19322+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19323 "movq %%rax,%%rdi\n\t" \
19324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19325 "jnz ret_from_fork\n\t" \
19326@@ -123,7 +123,7 @@ do { \
19327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19329 [_tif_fork] "i" (_TIF_FORK), \
19330- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19331+ [thread_info] "m" (current_tinfo), \
19332 [current_task] "m" (current_task) \
19333 __switch_canary_iparam \
19334 : "memory", "cc" __EXTRA_CLOBBER)
19335diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19336index 547e344..6be1175 100644
19337--- a/arch/x86/include/asm/thread_info.h
19338+++ b/arch/x86/include/asm/thread_info.h
19339@@ -24,7 +24,6 @@ struct exec_domain;
19340 #include <linux/atomic.h>
19341
19342 struct thread_info {
19343- struct task_struct *task; /* main task structure */
19344 struct exec_domain *exec_domain; /* execution domain */
19345 __u32 flags; /* low level flags */
19346 __u32 status; /* thread synchronous flags */
19347@@ -33,13 +32,13 @@ struct thread_info {
19348 mm_segment_t addr_limit;
19349 struct restart_block restart_block;
19350 void __user *sysenter_return;
19351+ unsigned long lowest_stack;
19352 unsigned int sig_on_uaccess_error:1;
19353 unsigned int uaccess_err:1; /* uaccess failed */
19354 };
19355
19356-#define INIT_THREAD_INFO(tsk) \
19357+#define INIT_THREAD_INFO \
19358 { \
19359- .task = &tsk, \
19360 .exec_domain = &default_exec_domain, \
19361 .flags = 0, \
19362 .cpu = 0, \
19363@@ -50,7 +49,7 @@ struct thread_info {
19364 }, \
19365 }
19366
19367-#define init_thread_info (init_thread_union.thread_info)
19368+#define init_thread_info (init_thread_union.stack)
19369 #define init_stack (init_thread_union.stack)
19370
19371 #else /* !__ASSEMBLY__ */
19372@@ -91,6 +90,7 @@ struct thread_info {
19373 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19374 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19375 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19376+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19377
19378 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19379 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19380@@ -115,17 +115,18 @@ struct thread_info {
19381 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19382 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19383 #define _TIF_X32 (1 << TIF_X32)
19384+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19385
19386 /* work to do in syscall_trace_enter() */
19387 #define _TIF_WORK_SYSCALL_ENTRY \
19388 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19389 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19390- _TIF_NOHZ)
19391+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19392
19393 /* work to do in syscall_trace_leave() */
19394 #define _TIF_WORK_SYSCALL_EXIT \
19395 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19396- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19397+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19398
19399 /* work to do on interrupt/exception return */
19400 #define _TIF_WORK_MASK \
19401@@ -136,7 +137,7 @@ struct thread_info {
19402 /* work to do on any return to user space */
19403 #define _TIF_ALLWORK_MASK \
19404 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19405- _TIF_NOHZ)
19406+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19407
19408 /* Only used for 64 bit */
19409 #define _TIF_DO_NOTIFY_MASK \
19410@@ -151,7 +152,6 @@ struct thread_info {
19411 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19412
19413 #define STACK_WARN (THREAD_SIZE/8)
19414-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19415
19416 /*
19417 * macros/functions for gaining access to the thread information structure
19418@@ -162,26 +162,18 @@ struct thread_info {
19419
19420 DECLARE_PER_CPU(unsigned long, kernel_stack);
19421
19422+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19423+
19424 static inline struct thread_info *current_thread_info(void)
19425 {
19426- struct thread_info *ti;
19427- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19428- KERNEL_STACK_OFFSET - THREAD_SIZE);
19429- return ti;
19430+ return this_cpu_read_stable(current_tinfo);
19431 }
19432
19433 #else /* !__ASSEMBLY__ */
19434
19435 /* how to get the thread information struct from ASM */
19436 #define GET_THREAD_INFO(reg) \
19437- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19438- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19439-
19440-/*
19441- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19442- * a certain register (to be used in assembler memory operands).
19443- */
19444-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19445+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19446
19447 #endif
19448
19449@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19450 extern void arch_task_cache_init(void);
19451 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19452 extern void arch_release_task_struct(struct task_struct *tsk);
19453+
19454+#define __HAVE_THREAD_FUNCTIONS
19455+#define task_thread_info(task) (&(task)->tinfo)
19456+#define task_stack_page(task) ((task)->stack)
19457+#define setup_thread_stack(p, org) do {} while (0)
19458+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19459+
19460 #endif
19461 #endif /* _ASM_X86_THREAD_INFO_H */
19462diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19463index 04905bf..1178cdf 100644
19464--- a/arch/x86/include/asm/tlbflush.h
19465+++ b/arch/x86/include/asm/tlbflush.h
19466@@ -17,18 +17,44 @@
19467
19468 static inline void __native_flush_tlb(void)
19469 {
19470+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19471+ u64 descriptor[2];
19472+
19473+ descriptor[0] = PCID_KERNEL;
19474+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19475+ return;
19476+ }
19477+
19478+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19479+ if (static_cpu_has(X86_FEATURE_PCID)) {
19480+ unsigned int cpu = raw_get_cpu();
19481+
19482+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19483+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19484+ raw_put_cpu_no_resched();
19485+ return;
19486+ }
19487+#endif
19488+
19489 native_write_cr3(native_read_cr3());
19490 }
19491
19492 static inline void __native_flush_tlb_global_irq_disabled(void)
19493 {
19494- unsigned long cr4;
19495+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19496+ u64 descriptor[2];
19497
19498- cr4 = native_read_cr4();
19499- /* clear PGE */
19500- native_write_cr4(cr4 & ~X86_CR4_PGE);
19501- /* write old PGE again and flush TLBs */
19502- native_write_cr4(cr4);
19503+ descriptor[0] = PCID_KERNEL;
19504+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19505+ } else {
19506+ unsigned long cr4;
19507+
19508+ cr4 = native_read_cr4();
19509+ /* clear PGE */
19510+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19511+ /* write old PGE again and flush TLBs */
19512+ native_write_cr4(cr4);
19513+ }
19514 }
19515
19516 static inline void __native_flush_tlb_global(void)
19517@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19518
19519 static inline void __native_flush_tlb_single(unsigned long addr)
19520 {
19521+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19522+ u64 descriptor[2];
19523+
19524+ descriptor[0] = PCID_KERNEL;
19525+ descriptor[1] = addr;
19526+
19527+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19528+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19529+ if (addr < TASK_SIZE_MAX)
19530+ descriptor[1] += pax_user_shadow_base;
19531+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19532+ }
19533+
19534+ descriptor[0] = PCID_USER;
19535+ descriptor[1] = addr;
19536+#endif
19537+
19538+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19539+ return;
19540+ }
19541+
19542+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19543+ if (static_cpu_has(X86_FEATURE_PCID)) {
19544+ unsigned int cpu = raw_get_cpu();
19545+
19546+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19547+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19548+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19549+ raw_put_cpu_no_resched();
19550+
19551+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19552+ addr += pax_user_shadow_base;
19553+ }
19554+#endif
19555+
19556 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19557 }
19558
19559diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19560index 0d592e0..7430aad 100644
19561--- a/arch/x86/include/asm/uaccess.h
19562+++ b/arch/x86/include/asm/uaccess.h
19563@@ -7,6 +7,7 @@
19564 #include <linux/compiler.h>
19565 #include <linux/thread_info.h>
19566 #include <linux/string.h>
19567+#include <linux/spinlock.h>
19568 #include <asm/asm.h>
19569 #include <asm/page.h>
19570 #include <asm/smap.h>
19571@@ -29,7 +30,12 @@
19572
19573 #define get_ds() (KERNEL_DS)
19574 #define get_fs() (current_thread_info()->addr_limit)
19575+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19576+void __set_fs(mm_segment_t x);
19577+void set_fs(mm_segment_t x);
19578+#else
19579 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19580+#endif
19581
19582 #define segment_eq(a, b) ((a).seg == (b).seg)
19583
19584@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19585 * checks that the pointer is in the user space range - after calling
19586 * this function, memory access functions may still return -EFAULT.
19587 */
19588-#define access_ok(type, addr, size) \
19589- likely(!__range_not_ok(addr, size, user_addr_max()))
19590+extern int _cond_resched(void);
19591+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19592+#define access_ok(type, addr, size) \
19593+({ \
19594+ unsigned long __size = size; \
19595+ unsigned long __addr = (unsigned long)addr; \
19596+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19597+ if (__ret_ao && __size) { \
19598+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19599+ unsigned long __end_ao = __addr + __size - 1; \
19600+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19601+ while (__addr_ao <= __end_ao) { \
19602+ char __c_ao; \
19603+ __addr_ao += PAGE_SIZE; \
19604+ if (__size > PAGE_SIZE) \
19605+ _cond_resched(); \
19606+ if (__get_user(__c_ao, (char __user *)__addr)) \
19607+ break; \
19608+ if (type != VERIFY_WRITE) { \
19609+ __addr = __addr_ao; \
19610+ continue; \
19611+ } \
19612+ if (__put_user(__c_ao, (char __user *)__addr)) \
19613+ break; \
19614+ __addr = __addr_ao; \
19615+ } \
19616+ } \
19617+ } \
19618+ __ret_ao; \
19619+})
19620
19621 /*
19622 * The exception table consists of pairs of addresses relative to the
19623@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19624 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19625 __chk_user_ptr(ptr); \
19626 might_fault(); \
19627+ pax_open_userland(); \
19628 asm volatile("call __get_user_%P3" \
19629 : "=a" (__ret_gu), "=r" (__val_gu) \
19630 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19631 (x) = (__typeof__(*(ptr))) __val_gu; \
19632+ pax_close_userland(); \
19633 __ret_gu; \
19634 })
19635
19636@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19637 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19638 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19639
19640-
19641+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19642+#define __copyuser_seg "gs;"
19643+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19644+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19645+#else
19646+#define __copyuser_seg
19647+#define __COPYUSER_SET_ES
19648+#define __COPYUSER_RESTORE_ES
19649+#endif
19650
19651 #ifdef CONFIG_X86_32
19652 #define __put_user_asm_u64(x, addr, err, errret) \
19653 asm volatile(ASM_STAC "\n" \
19654- "1: movl %%eax,0(%2)\n" \
19655- "2: movl %%edx,4(%2)\n" \
19656+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19657+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19658 "3: " ASM_CLAC "\n" \
19659 ".section .fixup,\"ax\"\n" \
19660 "4: movl %3,%0\n" \
19661@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19662
19663 #define __put_user_asm_ex_u64(x, addr) \
19664 asm volatile(ASM_STAC "\n" \
19665- "1: movl %%eax,0(%1)\n" \
19666- "2: movl %%edx,4(%1)\n" \
19667+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19668+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19669 "3: " ASM_CLAC "\n" \
19670 _ASM_EXTABLE_EX(1b, 2b) \
19671 _ASM_EXTABLE_EX(2b, 3b) \
19672@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19673 __typeof__(*(ptr)) __pu_val; \
19674 __chk_user_ptr(ptr); \
19675 might_fault(); \
19676- __pu_val = x; \
19677+ __pu_val = (x); \
19678+ pax_open_userland(); \
19679 switch (sizeof(*(ptr))) { \
19680 case 1: \
19681 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19682@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19683 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19684 break; \
19685 } \
19686+ pax_close_userland(); \
19687 __ret_pu; \
19688 })
19689
19690@@ -355,8 +401,10 @@ do { \
19691 } while (0)
19692
19693 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19694+do { \
19695+ pax_open_userland(); \
19696 asm volatile(ASM_STAC "\n" \
19697- "1: mov"itype" %2,%"rtype"1\n" \
19698+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19699 "2: " ASM_CLAC "\n" \
19700 ".section .fixup,\"ax\"\n" \
19701 "3: mov %3,%0\n" \
19702@@ -364,8 +412,10 @@ do { \
19703 " jmp 2b\n" \
19704 ".previous\n" \
19705 _ASM_EXTABLE(1b, 3b) \
19706- : "=r" (err), ltype(x) \
19707- : "m" (__m(addr)), "i" (errret), "0" (err))
19708+ : "=r" (err), ltype (x) \
19709+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19710+ pax_close_userland(); \
19711+} while (0)
19712
19713 #define __get_user_size_ex(x, ptr, size) \
19714 do { \
19715@@ -389,7 +439,7 @@ do { \
19716 } while (0)
19717
19718 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19719- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19720+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19721 "2:\n" \
19722 _ASM_EXTABLE_EX(1b, 2b) \
19723 : ltype(x) : "m" (__m(addr)))
19724@@ -406,13 +456,24 @@ do { \
19725 int __gu_err; \
19726 unsigned long __gu_val; \
19727 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19728- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19729+ (x) = (__typeof__(*(ptr)))__gu_val; \
19730 __gu_err; \
19731 })
19732
19733 /* FIXME: this hack is definitely wrong -AK */
19734 struct __large_struct { unsigned long buf[100]; };
19735-#define __m(x) (*(struct __large_struct __user *)(x))
19736+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19737+#define ____m(x) \
19738+({ \
19739+ unsigned long ____x = (unsigned long)(x); \
19740+ if (____x < pax_user_shadow_base) \
19741+ ____x += pax_user_shadow_base; \
19742+ (typeof(x))____x; \
19743+})
19744+#else
19745+#define ____m(x) (x)
19746+#endif
19747+#define __m(x) (*(struct __large_struct __user *)____m(x))
19748
19749 /*
19750 * Tell gcc we read from memory instead of writing: this is because
19751@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
19752 * aliasing issues.
19753 */
19754 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19755+do { \
19756+ pax_open_userland(); \
19757 asm volatile(ASM_STAC "\n" \
19758- "1: mov"itype" %"rtype"1,%2\n" \
19759+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19760 "2: " ASM_CLAC "\n" \
19761 ".section .fixup,\"ax\"\n" \
19762 "3: mov %3,%0\n" \
19763@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
19764 ".previous\n" \
19765 _ASM_EXTABLE(1b, 3b) \
19766 : "=r"(err) \
19767- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19768+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19769+ pax_close_userland(); \
19770+} while (0)
19771
19772 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19773- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19774+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19775 "2:\n" \
19776 _ASM_EXTABLE_EX(1b, 2b) \
19777 : : ltype(x), "m" (__m(addr)))
19778@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
19779 */
19780 #define uaccess_try do { \
19781 current_thread_info()->uaccess_err = 0; \
19782+ pax_open_userland(); \
19783 stac(); \
19784 barrier();
19785
19786 #define uaccess_catch(err) \
19787 clac(); \
19788+ pax_close_userland(); \
19789 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19790 } while (0)
19791
19792@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
19793 * On error, the variable @x is set to zero.
19794 */
19795
19796+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19797+#define __get_user(x, ptr) get_user((x), (ptr))
19798+#else
19799 #define __get_user(x, ptr) \
19800 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19801+#endif
19802
19803 /**
19804 * __put_user: - Write a simple value into user space, with less checking.
19805@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
19806 * Returns zero on success, or -EFAULT on error.
19807 */
19808
19809+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19810+#define __put_user(x, ptr) put_user((x), (ptr))
19811+#else
19812 #define __put_user(x, ptr) \
19813 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19814+#endif
19815
19816 #define __get_user_unaligned __get_user
19817 #define __put_user_unaligned __put_user
19818@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
19819 #define get_user_ex(x, ptr) do { \
19820 unsigned long __gue_val; \
19821 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19822- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19823+ (x) = (__typeof__(*(ptr)))__gue_val; \
19824 } while (0)
19825
19826 #define put_user_try uaccess_try
19827@@ -531,7 +606,7 @@ extern __must_check long strlen_user(const char __user *str);
19828 extern __must_check long strnlen_user(const char __user *str, long n);
19829
19830 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19831-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19832+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19833
19834 extern void __cmpxchg_wrong_size(void)
19835 __compiletime_error("Bad argument size for cmpxchg");
19836@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
19837 __typeof__(ptr) __uval = (uval); \
19838 __typeof__(*(ptr)) __old = (old); \
19839 __typeof__(*(ptr)) __new = (new); \
19840+ pax_open_userland(); \
19841 switch (size) { \
19842 case 1: \
19843 { \
19844 asm volatile("\t" ASM_STAC "\n" \
19845- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19846+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19847 "2:\t" ASM_CLAC "\n" \
19848 "\t.section .fixup, \"ax\"\n" \
19849 "3:\tmov %3, %0\n" \
19850 "\tjmp 2b\n" \
19851 "\t.previous\n" \
19852 _ASM_EXTABLE(1b, 3b) \
19853- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19854+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19855 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19856 : "memory" \
19857 ); \
19858@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
19859 case 2: \
19860 { \
19861 asm volatile("\t" ASM_STAC "\n" \
19862- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19863+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19864 "2:\t" ASM_CLAC "\n" \
19865 "\t.section .fixup, \"ax\"\n" \
19866 "3:\tmov %3, %0\n" \
19867 "\tjmp 2b\n" \
19868 "\t.previous\n" \
19869 _ASM_EXTABLE(1b, 3b) \
19870- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19871+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19872 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19873 : "memory" \
19874 ); \
19875@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
19876 case 4: \
19877 { \
19878 asm volatile("\t" ASM_STAC "\n" \
19879- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19880+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19881 "2:\t" ASM_CLAC "\n" \
19882 "\t.section .fixup, \"ax\"\n" \
19883 "3:\tmov %3, %0\n" \
19884 "\tjmp 2b\n" \
19885 "\t.previous\n" \
19886 _ASM_EXTABLE(1b, 3b) \
19887- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19888+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19889 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19890 : "memory" \
19891 ); \
19892@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
19893 __cmpxchg_wrong_size(); \
19894 \
19895 asm volatile("\t" ASM_STAC "\n" \
19896- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19897+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19898 "2:\t" ASM_CLAC "\n" \
19899 "\t.section .fixup, \"ax\"\n" \
19900 "3:\tmov %3, %0\n" \
19901 "\tjmp 2b\n" \
19902 "\t.previous\n" \
19903 _ASM_EXTABLE(1b, 3b) \
19904- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19905+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19906 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19907 : "memory" \
19908 ); \
19909@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
19910 default: \
19911 __cmpxchg_wrong_size(); \
19912 } \
19913+ pax_close_userland(); \
19914 *__uval = __old; \
19915 __ret; \
19916 })
19917@@ -636,17 +713,6 @@ extern struct movsl_mask {
19918
19919 #define ARCH_HAS_NOCACHE_UACCESS 1
19920
19921-#ifdef CONFIG_X86_32
19922-# include <asm/uaccess_32.h>
19923-#else
19924-# include <asm/uaccess_64.h>
19925-#endif
19926-
19927-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19928- unsigned n);
19929-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19930- unsigned n);
19931-
19932 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19933 # define copy_user_diag __compiletime_error
19934 #else
19935@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19936 extern void copy_user_diag("copy_from_user() buffer size is too small")
19937 copy_from_user_overflow(void);
19938 extern void copy_user_diag("copy_to_user() buffer size is too small")
19939-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19940+copy_to_user_overflow(void);
19941
19942 #undef copy_user_diag
19943
19944@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19945
19946 extern void
19947 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19948-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19949+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19950 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19951
19952 #else
19953@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19954
19955 #endif
19956
19957+#ifdef CONFIG_X86_32
19958+# include <asm/uaccess_32.h>
19959+#else
19960+# include <asm/uaccess_64.h>
19961+#endif
19962+
19963 static inline unsigned long __must_check
19964 copy_from_user(void *to, const void __user *from, unsigned long n)
19965 {
19966- int sz = __compiletime_object_size(to);
19967+ size_t sz = __compiletime_object_size(to);
19968
19969 might_fault();
19970
19971@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19972 * case, and do only runtime checking for non-constant sizes.
19973 */
19974
19975- if (likely(sz < 0 || sz >= n))
19976- n = _copy_from_user(to, from, n);
19977- else if(__builtin_constant_p(n))
19978- copy_from_user_overflow();
19979- else
19980- __copy_from_user_overflow(sz, n);
19981+ if (likely(sz != (size_t)-1 && sz < n)) {
19982+ if(__builtin_constant_p(n))
19983+ copy_from_user_overflow();
19984+ else
19985+ __copy_from_user_overflow(sz, n);
19986+ } else if (access_ok(VERIFY_READ, from, n))
19987+ n = __copy_from_user(to, from, n);
19988+ else if ((long)n > 0)
19989+ memset(to, 0, n);
19990
19991 return n;
19992 }
19993@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19994 static inline unsigned long __must_check
19995 copy_to_user(void __user *to, const void *from, unsigned long n)
19996 {
19997- int sz = __compiletime_object_size(from);
19998+ size_t sz = __compiletime_object_size(from);
19999
20000 might_fault();
20001
20002 /* See the comment in copy_from_user() above. */
20003- if (likely(sz < 0 || sz >= n))
20004- n = _copy_to_user(to, from, n);
20005- else if(__builtin_constant_p(n))
20006- copy_to_user_overflow();
20007- else
20008- __copy_to_user_overflow(sz, n);
20009+ if (likely(sz != (size_t)-1 && sz < n)) {
20010+ if(__builtin_constant_p(n))
20011+ copy_to_user_overflow();
20012+ else
20013+ __copy_to_user_overflow(sz, n);
20014+ } else if (access_ok(VERIFY_WRITE, to, n))
20015+ n = __copy_to_user(to, from, n);
20016
20017 return n;
20018 }
20019diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20020index 3c03a5d..edb68ae 100644
20021--- a/arch/x86/include/asm/uaccess_32.h
20022+++ b/arch/x86/include/asm/uaccess_32.h
20023@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20024 * anything, so this is accurate.
20025 */
20026
20027-static __always_inline unsigned long __must_check
20028+static __always_inline __size_overflow(3) unsigned long __must_check
20029 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20030 {
20031+ if ((long)n < 0)
20032+ return n;
20033+
20034+ check_object_size(from, n, true);
20035+
20036 if (__builtin_constant_p(n)) {
20037 unsigned long ret;
20038
20039@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20040 __copy_to_user(void __user *to, const void *from, unsigned long n)
20041 {
20042 might_fault();
20043+
20044 return __copy_to_user_inatomic(to, from, n);
20045 }
20046
20047-static __always_inline unsigned long
20048+static __always_inline __size_overflow(3) unsigned long
20049 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20050 {
20051+ if ((long)n < 0)
20052+ return n;
20053+
20054 /* Avoid zeroing the tail if the copy fails..
20055 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20056 * but as the zeroing behaviour is only significant when n is not
20057@@ -137,6 +146,12 @@ static __always_inline unsigned long
20058 __copy_from_user(void *to, const void __user *from, unsigned long n)
20059 {
20060 might_fault();
20061+
20062+ if ((long)n < 0)
20063+ return n;
20064+
20065+ check_object_size(to, n, false);
20066+
20067 if (__builtin_constant_p(n)) {
20068 unsigned long ret;
20069
20070@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20071 const void __user *from, unsigned long n)
20072 {
20073 might_fault();
20074+
20075+ if ((long)n < 0)
20076+ return n;
20077+
20078 if (__builtin_constant_p(n)) {
20079 unsigned long ret;
20080
20081@@ -181,7 +200,10 @@ static __always_inline unsigned long
20082 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20083 unsigned long n)
20084 {
20085- return __copy_from_user_ll_nocache_nozero(to, from, n);
20086+ if ((long)n < 0)
20087+ return n;
20088+
20089+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20090 }
20091
20092 #endif /* _ASM_X86_UACCESS_32_H */
20093diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20094index 12a26b9..c36fff5 100644
20095--- a/arch/x86/include/asm/uaccess_64.h
20096+++ b/arch/x86/include/asm/uaccess_64.h
20097@@ -10,6 +10,9 @@
20098 #include <asm/alternative.h>
20099 #include <asm/cpufeature.h>
20100 #include <asm/page.h>
20101+#include <asm/pgtable.h>
20102+
20103+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20104
20105 /*
20106 * Copy To/From Userspace
20107@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20108 __must_check unsigned long
20109 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20110
20111-static __always_inline __must_check unsigned long
20112-copy_user_generic(void *to, const void *from, unsigned len)
20113+static __always_inline __must_check __size_overflow(3) unsigned long
20114+copy_user_generic(void *to, const void *from, unsigned long len)
20115 {
20116 unsigned ret;
20117
20118@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20119 }
20120
20121 __must_check unsigned long
20122-copy_in_user(void __user *to, const void __user *from, unsigned len);
20123+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20124
20125 static __always_inline __must_check
20126-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20127+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20128 {
20129- int ret = 0;
20130+ size_t sz = __compiletime_object_size(dst);
20131+ unsigned ret = 0;
20132+
20133+ if (size > INT_MAX)
20134+ return size;
20135+
20136+ check_object_size(dst, size, false);
20137+
20138+#ifdef CONFIG_PAX_MEMORY_UDEREF
20139+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20140+ return size;
20141+#endif
20142+
20143+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20144+ if(__builtin_constant_p(size))
20145+ copy_from_user_overflow();
20146+ else
20147+ __copy_from_user_overflow(sz, size);
20148+ return size;
20149+ }
20150
20151 if (!__builtin_constant_p(size))
20152- return copy_user_generic(dst, (__force void *)src, size);
20153+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20154 switch (size) {
20155- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20156+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20157 ret, "b", "b", "=q", 1);
20158 return ret;
20159- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20160+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20161 ret, "w", "w", "=r", 2);
20162 return ret;
20163- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20164+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20165 ret, "l", "k", "=r", 4);
20166 return ret;
20167- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20168+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20169 ret, "q", "", "=r", 8);
20170 return ret;
20171 case 10:
20172- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20173+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20174 ret, "q", "", "=r", 10);
20175 if (unlikely(ret))
20176 return ret;
20177 __get_user_asm(*(u16 *)(8 + (char *)dst),
20178- (u16 __user *)(8 + (char __user *)src),
20179+ (const u16 __user *)(8 + (const char __user *)src),
20180 ret, "w", "w", "=r", 2);
20181 return ret;
20182 case 16:
20183- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20184+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20185 ret, "q", "", "=r", 16);
20186 if (unlikely(ret))
20187 return ret;
20188 __get_user_asm(*(u64 *)(8 + (char *)dst),
20189- (u64 __user *)(8 + (char __user *)src),
20190+ (const u64 __user *)(8 + (const char __user *)src),
20191 ret, "q", "", "=r", 8);
20192 return ret;
20193 default:
20194- return copy_user_generic(dst, (__force void *)src, size);
20195+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20196 }
20197 }
20198
20199 static __always_inline __must_check
20200-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20201+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20202 {
20203 might_fault();
20204 return __copy_from_user_nocheck(dst, src, size);
20205 }
20206
20207 static __always_inline __must_check
20208-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20209+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20210 {
20211- int ret = 0;
20212+ size_t sz = __compiletime_object_size(src);
20213+ unsigned ret = 0;
20214+
20215+ if (size > INT_MAX)
20216+ return size;
20217+
20218+ check_object_size(src, size, true);
20219+
20220+#ifdef CONFIG_PAX_MEMORY_UDEREF
20221+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20222+ return size;
20223+#endif
20224+
20225+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20226+ if(__builtin_constant_p(size))
20227+ copy_to_user_overflow();
20228+ else
20229+ __copy_to_user_overflow(sz, size);
20230+ return size;
20231+ }
20232
20233 if (!__builtin_constant_p(size))
20234- return copy_user_generic((__force void *)dst, src, size);
20235+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20236 switch (size) {
20237- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20238+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20239 ret, "b", "b", "iq", 1);
20240 return ret;
20241- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20242+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20243 ret, "w", "w", "ir", 2);
20244 return ret;
20245- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20246+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20247 ret, "l", "k", "ir", 4);
20248 return ret;
20249- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20250+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20251 ret, "q", "", "er", 8);
20252 return ret;
20253 case 10:
20254- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20255+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20256 ret, "q", "", "er", 10);
20257 if (unlikely(ret))
20258 return ret;
20259 asm("":::"memory");
20260- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20261+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20262 ret, "w", "w", "ir", 2);
20263 return ret;
20264 case 16:
20265- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20266+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20267 ret, "q", "", "er", 16);
20268 if (unlikely(ret))
20269 return ret;
20270 asm("":::"memory");
20271- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20272+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20273 ret, "q", "", "er", 8);
20274 return ret;
20275 default:
20276- return copy_user_generic((__force void *)dst, src, size);
20277+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20278 }
20279 }
20280
20281 static __always_inline __must_check
20282-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20283+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20284 {
20285 might_fault();
20286 return __copy_to_user_nocheck(dst, src, size);
20287 }
20288
20289 static __always_inline __must_check
20290-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20291+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20292 {
20293- int ret = 0;
20294+ unsigned ret = 0;
20295
20296 might_fault();
20297+
20298+ if (size > INT_MAX)
20299+ return size;
20300+
20301+#ifdef CONFIG_PAX_MEMORY_UDEREF
20302+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20303+ return size;
20304+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20305+ return size;
20306+#endif
20307+
20308 if (!__builtin_constant_p(size))
20309- return copy_user_generic((__force void *)dst,
20310- (__force void *)src, size);
20311+ return copy_user_generic((__force_kernel void *)____m(dst),
20312+ (__force_kernel const void *)____m(src), size);
20313 switch (size) {
20314 case 1: {
20315 u8 tmp;
20316- __get_user_asm(tmp, (u8 __user *)src,
20317+ __get_user_asm(tmp, (const u8 __user *)src,
20318 ret, "b", "b", "=q", 1);
20319 if (likely(!ret))
20320 __put_user_asm(tmp, (u8 __user *)dst,
20321@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20322 }
20323 case 2: {
20324 u16 tmp;
20325- __get_user_asm(tmp, (u16 __user *)src,
20326+ __get_user_asm(tmp, (const u16 __user *)src,
20327 ret, "w", "w", "=r", 2);
20328 if (likely(!ret))
20329 __put_user_asm(tmp, (u16 __user *)dst,
20330@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20331
20332 case 4: {
20333 u32 tmp;
20334- __get_user_asm(tmp, (u32 __user *)src,
20335+ __get_user_asm(tmp, (const u32 __user *)src,
20336 ret, "l", "k", "=r", 4);
20337 if (likely(!ret))
20338 __put_user_asm(tmp, (u32 __user *)dst,
20339@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20340 }
20341 case 8: {
20342 u64 tmp;
20343- __get_user_asm(tmp, (u64 __user *)src,
20344+ __get_user_asm(tmp, (const u64 __user *)src,
20345 ret, "q", "", "=r", 8);
20346 if (likely(!ret))
20347 __put_user_asm(tmp, (u64 __user *)dst,
20348@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20349 return ret;
20350 }
20351 default:
20352- return copy_user_generic((__force void *)dst,
20353- (__force void *)src, size);
20354+ return copy_user_generic((__force_kernel void *)____m(dst),
20355+ (__force_kernel const void *)____m(src), size);
20356 }
20357 }
20358
20359-static __must_check __always_inline int
20360-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20361+static __must_check __always_inline unsigned long
20362+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20363 {
20364 return __copy_from_user_nocheck(dst, src, size);
20365 }
20366
20367-static __must_check __always_inline int
20368-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20369+static __must_check __always_inline unsigned long
20370+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20371 {
20372 return __copy_to_user_nocheck(dst, src, size);
20373 }
20374
20375-extern long __copy_user_nocache(void *dst, const void __user *src,
20376- unsigned size, int zerorest);
20377+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20378+ unsigned long size, int zerorest);
20379
20380-static inline int
20381-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20382+static inline unsigned long
20383+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20384 {
20385 might_fault();
20386+
20387+ if (size > INT_MAX)
20388+ return size;
20389+
20390+#ifdef CONFIG_PAX_MEMORY_UDEREF
20391+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20392+ return size;
20393+#endif
20394+
20395 return __copy_user_nocache(dst, src, size, 1);
20396 }
20397
20398-static inline int
20399+static inline unsigned long
20400 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20401- unsigned size)
20402+ unsigned long size)
20403 {
20404+ if (size > INT_MAX)
20405+ return size;
20406+
20407+#ifdef CONFIG_PAX_MEMORY_UDEREF
20408+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20409+ return size;
20410+#endif
20411+
20412 return __copy_user_nocache(dst, src, size, 0);
20413 }
20414
20415 unsigned long
20416-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20417+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20418
20419 #endif /* _ASM_X86_UACCESS_64_H */
20420diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20421index 5b238981..77fdd78 100644
20422--- a/arch/x86/include/asm/word-at-a-time.h
20423+++ b/arch/x86/include/asm/word-at-a-time.h
20424@@ -11,7 +11,7 @@
20425 * and shift, for example.
20426 */
20427 struct word_at_a_time {
20428- const unsigned long one_bits, high_bits;
20429+ unsigned long one_bits, high_bits;
20430 };
20431
20432 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20433diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20434index f58a9c7..dc378042a 100644
20435--- a/arch/x86/include/asm/x86_init.h
20436+++ b/arch/x86/include/asm/x86_init.h
20437@@ -129,7 +129,7 @@ struct x86_init_ops {
20438 struct x86_init_timers timers;
20439 struct x86_init_iommu iommu;
20440 struct x86_init_pci pci;
20441-};
20442+} __no_const;
20443
20444 /**
20445 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20446@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20447 void (*setup_percpu_clockev)(void);
20448 void (*early_percpu_clock_init)(void);
20449 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20450-};
20451+} __no_const;
20452
20453 struct timespec;
20454
20455@@ -168,7 +168,7 @@ struct x86_platform_ops {
20456 void (*save_sched_clock_state)(void);
20457 void (*restore_sched_clock_state)(void);
20458 void (*apic_post_init)(void);
20459-};
20460+} __no_const;
20461
20462 struct pci_dev;
20463 struct msi_msg;
20464@@ -182,7 +182,7 @@ struct x86_msi_ops {
20465 void (*teardown_msi_irqs)(struct pci_dev *dev);
20466 void (*restore_msi_irqs)(struct pci_dev *dev);
20467 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20468-};
20469+} __no_const;
20470
20471 struct IO_APIC_route_entry;
20472 struct io_apic_irq_attr;
20473@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20474 unsigned int destination, int vector,
20475 struct io_apic_irq_attr *attr);
20476 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20477-};
20478+} __no_const;
20479
20480 extern struct x86_init_ops x86_init;
20481 extern struct x86_cpuinit_ops x86_cpuinit;
20482diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20483index 5eea099..ff7ef8d 100644
20484--- a/arch/x86/include/asm/xen/page.h
20485+++ b/arch/x86/include/asm/xen/page.h
20486@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20487 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20488 * cases needing an extended handling.
20489 */
20490-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20491+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20492 {
20493 unsigned long mfn;
20494
20495diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20496index c9a6d68..cb57f42 100644
20497--- a/arch/x86/include/asm/xsave.h
20498+++ b/arch/x86/include/asm/xsave.h
20499@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20500 if (unlikely(err))
20501 return -EFAULT;
20502
20503+ pax_open_userland();
20504 __asm__ __volatile__(ASM_STAC "\n"
20505- "1:"XSAVE"\n"
20506+ "1:"
20507+ __copyuser_seg
20508+ XSAVE"\n"
20509 "2: " ASM_CLAC "\n"
20510 xstate_fault
20511 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20512 : "memory");
20513+ pax_close_userland();
20514 return err;
20515 }
20516
20517@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20518 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20519 {
20520 int err = 0;
20521- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20522+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20523 u32 lmask = mask;
20524 u32 hmask = mask >> 32;
20525
20526+ pax_open_userland();
20527 __asm__ __volatile__(ASM_STAC "\n"
20528- "1:"XRSTOR"\n"
20529+ "1:"
20530+ __copyuser_seg
20531+ XRSTOR"\n"
20532 "2: " ASM_CLAC "\n"
20533 xstate_fault
20534 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20535 : "memory"); /* memory required? */
20536+ pax_close_userland();
20537 return err;
20538 }
20539
20540diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20541index d993e33..8db1b18 100644
20542--- a/arch/x86/include/uapi/asm/e820.h
20543+++ b/arch/x86/include/uapi/asm/e820.h
20544@@ -58,7 +58,7 @@ struct e820map {
20545 #define ISA_START_ADDRESS 0xa0000
20546 #define ISA_END_ADDRESS 0x100000
20547
20548-#define BIOS_BEGIN 0x000a0000
20549+#define BIOS_BEGIN 0x000c0000
20550 #define BIOS_END 0x00100000
20551
20552 #define BIOS_ROM_BASE 0xffe00000
20553diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20554index 7b0a55a..ad115bf 100644
20555--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20556+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20557@@ -49,7 +49,6 @@
20558 #define EFLAGS 144
20559 #define RSP 152
20560 #define SS 160
20561-#define ARGOFFSET R11
20562 #endif /* __ASSEMBLY__ */
20563
20564 /* top of stack page */
20565diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20566index 5d4502c..a567e09 100644
20567--- a/arch/x86/kernel/Makefile
20568+++ b/arch/x86/kernel/Makefile
20569@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20570 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20571 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20572 obj-y += probe_roms.o
20573-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20574+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20575 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20576 obj-$(CONFIG_X86_64) += mcount_64.o
20577 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20578diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20579index b5ddc96..490b4e4 100644
20580--- a/arch/x86/kernel/acpi/boot.c
20581+++ b/arch/x86/kernel/acpi/boot.c
20582@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20583 * If your system is blacklisted here, but you find that acpi=force
20584 * works for you, please contact linux-acpi@vger.kernel.org
20585 */
20586-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20587+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20588 /*
20589 * Boxes that need ACPI disabled
20590 */
20591@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20592 };
20593
20594 /* second table for DMI checks that should run after early-quirks */
20595-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20596+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20597 /*
20598 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20599 * which includes some code which overrides all temperature
20600diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20601index 3136820..e2c6577 100644
20602--- a/arch/x86/kernel/acpi/sleep.c
20603+++ b/arch/x86/kernel/acpi/sleep.c
20604@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20605 #else /* CONFIG_64BIT */
20606 #ifdef CONFIG_SMP
20607 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20608+
20609+ pax_open_kernel();
20610 early_gdt_descr.address =
20611 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20612+ pax_close_kernel();
20613+
20614 initial_gs = per_cpu_offset(smp_processor_id());
20615 #endif
20616 initial_code = (unsigned long)wakeup_long64;
20617diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20618index 665c6b7..eae4d56 100644
20619--- a/arch/x86/kernel/acpi/wakeup_32.S
20620+++ b/arch/x86/kernel/acpi/wakeup_32.S
20621@@ -29,13 +29,11 @@ wakeup_pmode_return:
20622 # and restore the stack ... but you need gdt for this to work
20623 movl saved_context_esp, %esp
20624
20625- movl %cs:saved_magic, %eax
20626- cmpl $0x12345678, %eax
20627+ cmpl $0x12345678, saved_magic
20628 jne bogus_magic
20629
20630 # jump to place where we left off
20631- movl saved_eip, %eax
20632- jmp *%eax
20633+ jmp *(saved_eip)
20634
20635 bogus_magic:
20636 jmp bogus_magic
20637diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20638index 703130f..27a155d 100644
20639--- a/arch/x86/kernel/alternative.c
20640+++ b/arch/x86/kernel/alternative.c
20641@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20642 */
20643 for (a = start; a < end; a++) {
20644 instr = (u8 *)&a->instr_offset + a->instr_offset;
20645+
20646+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20647+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20648+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20649+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20650+#endif
20651+
20652 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20653 BUG_ON(a->replacementlen > a->instrlen);
20654 BUG_ON(a->instrlen > sizeof(insnbuf));
20655@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20656 add_nops(insnbuf + a->replacementlen,
20657 a->instrlen - a->replacementlen);
20658
20659+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20660+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20661+ instr = ktva_ktla(instr);
20662+#endif
20663+
20664 text_poke_early(instr, insnbuf, a->instrlen);
20665 }
20666 }
20667@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20668 for (poff = start; poff < end; poff++) {
20669 u8 *ptr = (u8 *)poff + *poff;
20670
20671+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20672+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20673+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20674+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20675+#endif
20676+
20677 if (!*poff || ptr < text || ptr >= text_end)
20678 continue;
20679 /* turn DS segment override prefix into lock prefix */
20680- if (*ptr == 0x3e)
20681+ if (*ktla_ktva(ptr) == 0x3e)
20682 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20683 }
20684 mutex_unlock(&text_mutex);
20685@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20686 for (poff = start; poff < end; poff++) {
20687 u8 *ptr = (u8 *)poff + *poff;
20688
20689+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20690+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20691+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20692+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20693+#endif
20694+
20695 if (!*poff || ptr < text || ptr >= text_end)
20696 continue;
20697 /* turn lock prefix into DS segment override prefix */
20698- if (*ptr == 0xf0)
20699+ if (*ktla_ktva(ptr) == 0xf0)
20700 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20701 }
20702 mutex_unlock(&text_mutex);
20703@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20704
20705 BUG_ON(p->len > MAX_PATCH_LEN);
20706 /* prep the buffer with the original instructions */
20707- memcpy(insnbuf, p->instr, p->len);
20708+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20709 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20710 (unsigned long)p->instr, p->len);
20711
20712@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20713 if (!uniproc_patched || num_possible_cpus() == 1)
20714 free_init_pages("SMP alternatives",
20715 (unsigned long)__smp_locks,
20716- (unsigned long)__smp_locks_end);
20717+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20718 #endif
20719
20720 apply_paravirt(__parainstructions, __parainstructions_end);
20721@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20722 * instructions. And on the local CPU you need to be protected again NMI or MCE
20723 * handlers seeing an inconsistent instruction while you patch.
20724 */
20725-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20726+void *__kprobes text_poke_early(void *addr, const void *opcode,
20727 size_t len)
20728 {
20729 unsigned long flags;
20730 local_irq_save(flags);
20731- memcpy(addr, opcode, len);
20732+
20733+ pax_open_kernel();
20734+ memcpy(ktla_ktva(addr), opcode, len);
20735 sync_core();
20736+ pax_close_kernel();
20737+
20738 local_irq_restore(flags);
20739 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20740 that causes hangs on some VIA CPUs. */
20741@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20742 */
20743 void *text_poke(void *addr, const void *opcode, size_t len)
20744 {
20745- unsigned long flags;
20746- char *vaddr;
20747+ unsigned char *vaddr = ktla_ktva(addr);
20748 struct page *pages[2];
20749- int i;
20750+ size_t i;
20751
20752 if (!core_kernel_text((unsigned long)addr)) {
20753- pages[0] = vmalloc_to_page(addr);
20754- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20755+ pages[0] = vmalloc_to_page(vaddr);
20756+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20757 } else {
20758- pages[0] = virt_to_page(addr);
20759+ pages[0] = virt_to_page(vaddr);
20760 WARN_ON(!PageReserved(pages[0]));
20761- pages[1] = virt_to_page(addr + PAGE_SIZE);
20762+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20763 }
20764 BUG_ON(!pages[0]);
20765- local_irq_save(flags);
20766- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20767- if (pages[1])
20768- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20769- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20770- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20771- clear_fixmap(FIX_TEXT_POKE0);
20772- if (pages[1])
20773- clear_fixmap(FIX_TEXT_POKE1);
20774- local_flush_tlb();
20775- sync_core();
20776- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20777- that causes hangs on some VIA CPUs. */
20778+ text_poke_early(addr, opcode, len);
20779 for (i = 0; i < len; i++)
20780- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20781- local_irq_restore(flags);
20782+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20783 return addr;
20784 }
20785
20786@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20787 if (likely(!bp_patching_in_progress))
20788 return 0;
20789
20790- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20791+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20792 return 0;
20793
20794 /* set up the specified breakpoint handler */
20795@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20796 */
20797 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20798 {
20799- unsigned char int3 = 0xcc;
20800+ const unsigned char int3 = 0xcc;
20801
20802 bp_int3_handler = handler;
20803 bp_int3_addr = (u8 *)addr + sizeof(int3);
20804diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20805index 29b5b18..3bdfc29 100644
20806--- a/arch/x86/kernel/apic/apic.c
20807+++ b/arch/x86/kernel/apic/apic.c
20808@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20809 /*
20810 * Debug level, exported for io_apic.c
20811 */
20812-unsigned int apic_verbosity;
20813+int apic_verbosity;
20814
20815 int pic_mode;
20816
20817@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20818 apic_write(APIC_ESR, 0);
20819 v = apic_read(APIC_ESR);
20820 ack_APIC_irq();
20821- atomic_inc(&irq_err_count);
20822+ atomic_inc_unchecked(&irq_err_count);
20823
20824 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20825 smp_processor_id(), v);
20826diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20827index de918c4..32eed23 100644
20828--- a/arch/x86/kernel/apic/apic_flat_64.c
20829+++ b/arch/x86/kernel/apic/apic_flat_64.c
20830@@ -154,7 +154,7 @@ static int flat_probe(void)
20831 return 1;
20832 }
20833
20834-static struct apic apic_flat = {
20835+static struct apic apic_flat __read_only = {
20836 .name = "flat",
20837 .probe = flat_probe,
20838 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20839@@ -260,7 +260,7 @@ static int physflat_probe(void)
20840 return 0;
20841 }
20842
20843-static struct apic apic_physflat = {
20844+static struct apic apic_physflat __read_only = {
20845
20846 .name = "physical flat",
20847 .probe = physflat_probe,
20848diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20849index b205cdb..d8503ff 100644
20850--- a/arch/x86/kernel/apic/apic_noop.c
20851+++ b/arch/x86/kernel/apic/apic_noop.c
20852@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20853 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20854 }
20855
20856-struct apic apic_noop = {
20857+struct apic apic_noop __read_only = {
20858 .name = "noop",
20859 .probe = noop_probe,
20860 .acpi_madt_oem_check = NULL,
20861diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20862index c4a8d63..fe893ac 100644
20863--- a/arch/x86/kernel/apic/bigsmp_32.c
20864+++ b/arch/x86/kernel/apic/bigsmp_32.c
20865@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20866 return dmi_bigsmp;
20867 }
20868
20869-static struct apic apic_bigsmp = {
20870+static struct apic apic_bigsmp __read_only = {
20871
20872 .name = "bigsmp",
20873 .probe = probe_bigsmp,
20874diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20875index 3f5f604..309c0e6 100644
20876--- a/arch/x86/kernel/apic/io_apic.c
20877+++ b/arch/x86/kernel/apic/io_apic.c
20878@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20879 return ret;
20880 }
20881
20882-atomic_t irq_mis_count;
20883+atomic_unchecked_t irq_mis_count;
20884
20885 #ifdef CONFIG_GENERIC_PENDING_IRQ
20886 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20887@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
20888 * at the cpu.
20889 */
20890 if (!(v & (1 << (i & 0x1f)))) {
20891- atomic_inc(&irq_mis_count);
20892+ atomic_inc_unchecked(&irq_mis_count);
20893
20894 eoi_ioapic_irq(irq, cfg);
20895 }
20896diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20897index bda4886..f9c7195 100644
20898--- a/arch/x86/kernel/apic/probe_32.c
20899+++ b/arch/x86/kernel/apic/probe_32.c
20900@@ -72,7 +72,7 @@ static int probe_default(void)
20901 return 1;
20902 }
20903
20904-static struct apic apic_default = {
20905+static struct apic apic_default __read_only = {
20906
20907 .name = "default",
20908 .probe = probe_default,
20909diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20910index 6cedd79..023ff8e 100644
20911--- a/arch/x86/kernel/apic/vector.c
20912+++ b/arch/x86/kernel/apic/vector.c
20913@@ -21,7 +21,7 @@
20914
20915 static DEFINE_RAW_SPINLOCK(vector_lock);
20916
20917-void lock_vector_lock(void)
20918+void lock_vector_lock(void) __acquires(vector_lock)
20919 {
20920 /* Used to the online set of cpus does not change
20921 * during assign_irq_vector.
20922@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20923 raw_spin_lock(&vector_lock);
20924 }
20925
20926-void unlock_vector_lock(void)
20927+void unlock_vector_lock(void) __releases(vector_lock)
20928 {
20929 raw_spin_unlock(&vector_lock);
20930 }
20931diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20932index e658f21..b695a1a 100644
20933--- a/arch/x86/kernel/apic/x2apic_cluster.c
20934+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20935@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20936 return notifier_from_errno(err);
20937 }
20938
20939-static struct notifier_block __refdata x2apic_cpu_notifier = {
20940+static struct notifier_block x2apic_cpu_notifier = {
20941 .notifier_call = update_clusterinfo,
20942 };
20943
20944@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20945 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20946 }
20947
20948-static struct apic apic_x2apic_cluster = {
20949+static struct apic apic_x2apic_cluster __read_only = {
20950
20951 .name = "cluster x2apic",
20952 .probe = x2apic_cluster_probe,
20953diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20954index 6fae733..5ca17af 100644
20955--- a/arch/x86/kernel/apic/x2apic_phys.c
20956+++ b/arch/x86/kernel/apic/x2apic_phys.c
20957@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20958 return apic == &apic_x2apic_phys;
20959 }
20960
20961-static struct apic apic_x2apic_phys = {
20962+static struct apic apic_x2apic_phys __read_only = {
20963
20964 .name = "physical x2apic",
20965 .probe = x2apic_phys_probe,
20966diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20967index 8e9dcfd..c61b3e4 100644
20968--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20969+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20970@@ -348,7 +348,7 @@ static int uv_probe(void)
20971 return apic == &apic_x2apic_uv_x;
20972 }
20973
20974-static struct apic __refdata apic_x2apic_uv_x = {
20975+static struct apic apic_x2apic_uv_x __read_only = {
20976
20977 .name = "UV large system",
20978 .probe = uv_probe,
20979diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20980index 927ec92..0dc3bd4 100644
20981--- a/arch/x86/kernel/apm_32.c
20982+++ b/arch/x86/kernel/apm_32.c
20983@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20984 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20985 * even though they are called in protected mode.
20986 */
20987-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20988+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20989 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20990
20991 static const char driver_version[] = "1.16ac"; /* no spaces */
20992@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20993 BUG_ON(cpu != 0);
20994 gdt = get_cpu_gdt_table(cpu);
20995 save_desc_40 = gdt[0x40 / 8];
20996+
20997+ pax_open_kernel();
20998 gdt[0x40 / 8] = bad_bios_desc;
20999+ pax_close_kernel();
21000
21001 apm_irq_save(flags);
21002 APM_DO_SAVE_SEGS;
21003@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21004 &call->esi);
21005 APM_DO_RESTORE_SEGS;
21006 apm_irq_restore(flags);
21007+
21008+ pax_open_kernel();
21009 gdt[0x40 / 8] = save_desc_40;
21010+ pax_close_kernel();
21011+
21012 put_cpu();
21013
21014 return call->eax & 0xff;
21015@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21016 BUG_ON(cpu != 0);
21017 gdt = get_cpu_gdt_table(cpu);
21018 save_desc_40 = gdt[0x40 / 8];
21019+
21020+ pax_open_kernel();
21021 gdt[0x40 / 8] = bad_bios_desc;
21022+ pax_close_kernel();
21023
21024 apm_irq_save(flags);
21025 APM_DO_SAVE_SEGS;
21026@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21027 &call->eax);
21028 APM_DO_RESTORE_SEGS;
21029 apm_irq_restore(flags);
21030+
21031+ pax_open_kernel();
21032 gdt[0x40 / 8] = save_desc_40;
21033+ pax_close_kernel();
21034+
21035 put_cpu();
21036 return error;
21037 }
21038@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21039 * code to that CPU.
21040 */
21041 gdt = get_cpu_gdt_table(0);
21042+
21043+ pax_open_kernel();
21044 set_desc_base(&gdt[APM_CS >> 3],
21045 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21046 set_desc_base(&gdt[APM_CS_16 >> 3],
21047 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21048 set_desc_base(&gdt[APM_DS >> 3],
21049 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21050+ pax_close_kernel();
21051
21052 proc_create("apm", 0, NULL, &apm_file_ops);
21053
21054diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21055index 9f6b934..cf5ffb3 100644
21056--- a/arch/x86/kernel/asm-offsets.c
21057+++ b/arch/x86/kernel/asm-offsets.c
21058@@ -32,6 +32,8 @@ void common(void) {
21059 OFFSET(TI_flags, thread_info, flags);
21060 OFFSET(TI_status, thread_info, status);
21061 OFFSET(TI_addr_limit, thread_info, addr_limit);
21062+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21063+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21064
21065 BLANK();
21066 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21067@@ -52,8 +54,26 @@ void common(void) {
21068 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21069 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21070 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21071+
21072+#ifdef CONFIG_PAX_KERNEXEC
21073+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21074 #endif
21075
21076+#ifdef CONFIG_PAX_MEMORY_UDEREF
21077+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21078+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21079+#ifdef CONFIG_X86_64
21080+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21081+#endif
21082+#endif
21083+
21084+#endif
21085+
21086+ BLANK();
21087+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21088+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21089+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21090+
21091 #ifdef CONFIG_XEN
21092 BLANK();
21093 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21094diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21095index fdcbb4d..036dd93 100644
21096--- a/arch/x86/kernel/asm-offsets_64.c
21097+++ b/arch/x86/kernel/asm-offsets_64.c
21098@@ -80,6 +80,7 @@ int main(void)
21099 BLANK();
21100 #undef ENTRY
21101
21102+ DEFINE(TSS_size, sizeof(struct tss_struct));
21103 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21104 BLANK();
21105
21106diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21107index 80091ae..0c5184f 100644
21108--- a/arch/x86/kernel/cpu/Makefile
21109+++ b/arch/x86/kernel/cpu/Makefile
21110@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21111 CFLAGS_REMOVE_perf_event.o = -pg
21112 endif
21113
21114-# Make sure load_percpu_segment has no stackprotector
21115-nostackp := $(call cc-option, -fno-stack-protector)
21116-CFLAGS_common.o := $(nostackp)
21117-
21118 obj-y := intel_cacheinfo.o scattered.o topology.o
21119 obj-y += common.o
21120 obj-y += rdrand.o
21121diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21122index 15c5df9..d9a604a 100644
21123--- a/arch/x86/kernel/cpu/amd.c
21124+++ b/arch/x86/kernel/cpu/amd.c
21125@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21126 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21127 {
21128 /* AMD errata T13 (order #21922) */
21129- if ((c->x86 == 6)) {
21130+ if (c->x86 == 6) {
21131 /* Duron Rev A0 */
21132 if (c->x86_model == 3 && c->x86_mask == 0)
21133 size = 64;
21134diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21135index c604965..0b0e28a 100644
21136--- a/arch/x86/kernel/cpu/common.c
21137+++ b/arch/x86/kernel/cpu/common.c
21138@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21139
21140 static const struct cpu_dev *this_cpu = &default_cpu;
21141
21142-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21143-#ifdef CONFIG_X86_64
21144- /*
21145- * We need valid kernel segments for data and code in long mode too
21146- * IRET will check the segment types kkeil 2000/10/28
21147- * Also sysret mandates a special GDT layout
21148- *
21149- * TLS descriptors are currently at a different place compared to i386.
21150- * Hopefully nobody expects them at a fixed place (Wine?)
21151- */
21152- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21153- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21154- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21155- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21156- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21157- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21158-#else
21159- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21160- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21161- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21162- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21163- /*
21164- * Segments used for calling PnP BIOS have byte granularity.
21165- * They code segments and data segments have fixed 64k limits,
21166- * the transfer segment sizes are set at run time.
21167- */
21168- /* 32-bit code */
21169- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21170- /* 16-bit code */
21171- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21172- /* 16-bit data */
21173- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21174- /* 16-bit data */
21175- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21176- /* 16-bit data */
21177- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21178- /*
21179- * The APM segments have byte granularity and their bases
21180- * are set at run time. All have 64k limits.
21181- */
21182- /* 32-bit code */
21183- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21184- /* 16-bit code */
21185- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21186- /* data */
21187- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21188-
21189- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21190- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21191- GDT_STACK_CANARY_INIT
21192-#endif
21193-} };
21194-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21195-
21196 static int __init x86_xsave_setup(char *s)
21197 {
21198 if (strlen(s))
21199@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21200 }
21201 }
21202
21203+#ifdef CONFIG_X86_64
21204+static __init int setup_disable_pcid(char *arg)
21205+{
21206+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21207+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21208+
21209+#ifdef CONFIG_PAX_MEMORY_UDEREF
21210+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21211+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21212+#endif
21213+
21214+ return 1;
21215+}
21216+__setup("nopcid", setup_disable_pcid);
21217+
21218+static void setup_pcid(struct cpuinfo_x86 *c)
21219+{
21220+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21221+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21222+
21223+#ifdef CONFIG_PAX_MEMORY_UDEREF
21224+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21225+ pax_open_kernel();
21226+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21227+ pax_close_kernel();
21228+ printk("PAX: slow and weak UDEREF enabled\n");
21229+ } else
21230+ printk("PAX: UDEREF disabled\n");
21231+#endif
21232+
21233+ return;
21234+ }
21235+
21236+ printk("PAX: PCID detected\n");
21237+ set_in_cr4(X86_CR4_PCIDE);
21238+
21239+#ifdef CONFIG_PAX_MEMORY_UDEREF
21240+ pax_open_kernel();
21241+ clone_pgd_mask = ~(pgdval_t)0UL;
21242+ pax_close_kernel();
21243+ if (pax_user_shadow_base)
21244+ printk("PAX: weak UDEREF enabled\n");
21245+ else {
21246+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21247+ printk("PAX: strong UDEREF enabled\n");
21248+ }
21249+#endif
21250+
21251+ if (cpu_has(c, X86_FEATURE_INVPCID))
21252+ printk("PAX: INVPCID detected\n");
21253+}
21254+#endif
21255+
21256 /*
21257 * Some CPU features depend on higher CPUID levels, which may not always
21258 * be available due to CPUID level capping or broken virtualization
21259@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
21260 {
21261 struct desc_ptr gdt_descr;
21262
21263- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21264+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21265 gdt_descr.size = GDT_SIZE - 1;
21266 load_gdt(&gdt_descr);
21267 /* Reload the per-cpu base */
21268@@ -895,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21269 setup_smep(c);
21270 setup_smap(c);
21271
21272+#ifdef CONFIG_X86_64
21273+ setup_pcid(c);
21274+#endif
21275+
21276 /*
21277 * The vendor-specific functions might have changed features.
21278 * Now we do "generic changes."
21279@@ -903,6 +906,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21280 /* Filter out anything that depends on CPUID levels we don't have */
21281 filter_cpuid_features(c, true);
21282
21283+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21284+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21285+#endif
21286+
21287 /* If the model name is still unset, do table lookup. */
21288 if (!c->x86_model_id[0]) {
21289 const char *p;
21290@@ -977,7 +984,7 @@ static void syscall32_cpu_init(void)
21291 void enable_sep_cpu(void)
21292 {
21293 int cpu = get_cpu();
21294- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21295+ struct tss_struct *tss = init_tss + cpu;
21296
21297 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21298 put_cpu();
21299@@ -1115,14 +1122,16 @@ static __init int setup_disablecpuid(char *arg)
21300 }
21301 __setup("clearcpuid=", setup_disablecpuid);
21302
21303+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21304+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21305+
21306 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21307- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21308+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21309 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21310
21311 #ifdef CONFIG_X86_64
21312-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21313-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21314- (unsigned long) debug_idt_table };
21315+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21316+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21317
21318 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21319 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21320@@ -1299,7 +1308,7 @@ void cpu_init(void)
21321 */
21322 load_ucode_ap();
21323
21324- t = &per_cpu(init_tss, cpu);
21325+ t = init_tss + cpu;
21326 oist = &per_cpu(orig_ist, cpu);
21327
21328 #ifdef CONFIG_NUMA
21329@@ -1331,7 +1340,6 @@ void cpu_init(void)
21330 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21331 barrier();
21332
21333- x86_configure_nx();
21334 enable_x2apic();
21335
21336 /*
21337@@ -1383,7 +1391,7 @@ void cpu_init(void)
21338 {
21339 int cpu = smp_processor_id();
21340 struct task_struct *curr = current;
21341- struct tss_struct *t = &per_cpu(init_tss, cpu);
21342+ struct tss_struct *t = init_tss + cpu;
21343 struct thread_struct *thread = &curr->thread;
21344
21345 wait_for_master_cpu(cpu);
21346diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21347index c703507..28535e3 100644
21348--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21349+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21350@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21351 };
21352
21353 #ifdef CONFIG_AMD_NB
21354+static struct attribute *default_attrs_amd_nb[] = {
21355+ &type.attr,
21356+ &level.attr,
21357+ &coherency_line_size.attr,
21358+ &physical_line_partition.attr,
21359+ &ways_of_associativity.attr,
21360+ &number_of_sets.attr,
21361+ &size.attr,
21362+ &shared_cpu_map.attr,
21363+ &shared_cpu_list.attr,
21364+ NULL,
21365+ NULL,
21366+ NULL,
21367+ NULL
21368+};
21369+
21370 static struct attribute **amd_l3_attrs(void)
21371 {
21372 static struct attribute **attrs;
21373@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21374
21375 n = ARRAY_SIZE(default_attrs);
21376
21377- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21378- n += 2;
21379-
21380- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21381- n += 1;
21382-
21383- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21384- if (attrs == NULL)
21385- return attrs = default_attrs;
21386-
21387- for (n = 0; default_attrs[n]; n++)
21388- attrs[n] = default_attrs[n];
21389+ attrs = default_attrs_amd_nb;
21390
21391 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21392 attrs[n++] = &cache_disable_0.attr;
21393@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21394 .default_attrs = default_attrs,
21395 };
21396
21397+#ifdef CONFIG_AMD_NB
21398+static struct kobj_type ktype_cache_amd_nb = {
21399+ .sysfs_ops = &sysfs_ops,
21400+ .default_attrs = default_attrs_amd_nb,
21401+};
21402+#endif
21403+
21404 static struct kobj_type ktype_percpu_entry = {
21405 .sysfs_ops = &sysfs_ops,
21406 };
21407@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21408 return retval;
21409 }
21410
21411+#ifdef CONFIG_AMD_NB
21412+ amd_l3_attrs();
21413+#endif
21414+
21415 for (i = 0; i < num_cache_leaves; i++) {
21416+ struct kobj_type *ktype;
21417+
21418 this_object = INDEX_KOBJECT_PTR(cpu, i);
21419 this_object->cpu = cpu;
21420 this_object->index = i;
21421
21422 this_leaf = CPUID4_INFO_IDX(cpu, i);
21423
21424- ktype_cache.default_attrs = default_attrs;
21425+ ktype = &ktype_cache;
21426 #ifdef CONFIG_AMD_NB
21427 if (this_leaf->base.nb)
21428- ktype_cache.default_attrs = amd_l3_attrs();
21429+ ktype = &ktype_cache_amd_nb;
21430 #endif
21431 retval = kobject_init_and_add(&(this_object->kobj),
21432- &ktype_cache,
21433+ ktype,
21434 per_cpu(ici_cache_kobject, cpu),
21435 "index%1lu", i);
21436 if (unlikely(retval)) {
21437diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21438index d2c6116..62fd7aa 100644
21439--- a/arch/x86/kernel/cpu/mcheck/mce.c
21440+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21441@@ -45,6 +45,7 @@
21442 #include <asm/processor.h>
21443 #include <asm/mce.h>
21444 #include <asm/msr.h>
21445+#include <asm/local.h>
21446
21447 #include "mce-internal.h"
21448
21449@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21450 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21451 m->cs, m->ip);
21452
21453- if (m->cs == __KERNEL_CS)
21454+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21455 print_symbol("{%s}", m->ip);
21456 pr_cont("\n");
21457 }
21458@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21459
21460 #define PANIC_TIMEOUT 5 /* 5 seconds */
21461
21462-static atomic_t mce_panicked;
21463+static atomic_unchecked_t mce_panicked;
21464
21465 static int fake_panic;
21466-static atomic_t mce_fake_panicked;
21467+static atomic_unchecked_t mce_fake_panicked;
21468
21469 /* Panic in progress. Enable interrupts and wait for final IPI */
21470 static void wait_for_panic(void)
21471@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21472 /*
21473 * Make sure only one CPU runs in machine check panic
21474 */
21475- if (atomic_inc_return(&mce_panicked) > 1)
21476+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21477 wait_for_panic();
21478 barrier();
21479
21480@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21481 console_verbose();
21482 } else {
21483 /* Don't log too much for fake panic */
21484- if (atomic_inc_return(&mce_fake_panicked) > 1)
21485+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21486 return;
21487 }
21488 /* First print corrected ones that are still unlogged */
21489@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21490 if (!fake_panic) {
21491 if (panic_timeout == 0)
21492 panic_timeout = mca_cfg.panic_timeout;
21493- panic(msg);
21494+ panic("%s", msg);
21495 } else
21496 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21497 }
21498@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
21499 * might have been modified by someone else.
21500 */
21501 rmb();
21502- if (atomic_read(&mce_panicked))
21503+ if (atomic_read_unchecked(&mce_panicked))
21504 wait_for_panic();
21505 if (!mca_cfg.monarch_timeout)
21506 goto out;
21507@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21508 }
21509
21510 /* Call the installed machine check handler for this CPU setup. */
21511-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21512+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21513 unexpected_machine_check;
21514
21515 /*
21516@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21517 return;
21518 }
21519
21520+ pax_open_kernel();
21521 machine_check_vector = do_machine_check;
21522+ pax_close_kernel();
21523
21524 __mcheck_cpu_init_generic();
21525 __mcheck_cpu_init_vendor(c);
21526@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21527 */
21528
21529 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21530-static int mce_chrdev_open_count; /* #times opened */
21531+static local_t mce_chrdev_open_count; /* #times opened */
21532 static int mce_chrdev_open_exclu; /* already open exclusive? */
21533
21534 static int mce_chrdev_open(struct inode *inode, struct file *file)
21535@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21536 spin_lock(&mce_chrdev_state_lock);
21537
21538 if (mce_chrdev_open_exclu ||
21539- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21540+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21541 spin_unlock(&mce_chrdev_state_lock);
21542
21543 return -EBUSY;
21544@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21545
21546 if (file->f_flags & O_EXCL)
21547 mce_chrdev_open_exclu = 1;
21548- mce_chrdev_open_count++;
21549+ local_inc(&mce_chrdev_open_count);
21550
21551 spin_unlock(&mce_chrdev_state_lock);
21552
21553@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21554 {
21555 spin_lock(&mce_chrdev_state_lock);
21556
21557- mce_chrdev_open_count--;
21558+ local_dec(&mce_chrdev_open_count);
21559 mce_chrdev_open_exclu = 0;
21560
21561 spin_unlock(&mce_chrdev_state_lock);
21562@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
21563
21564 for (i = 0; i < mca_cfg.banks; i++) {
21565 struct mce_bank *b = &mce_banks[i];
21566- struct device_attribute *a = &b->attr;
21567+ device_attribute_no_const *a = &b->attr;
21568
21569 sysfs_attr_init(&a->attr);
21570 a->attr.name = b->attrname;
21571@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
21572 static void mce_reset(void)
21573 {
21574 cpu_missing = 0;
21575- atomic_set(&mce_fake_panicked, 0);
21576+ atomic_set_unchecked(&mce_fake_panicked, 0);
21577 atomic_set(&mce_executing, 0);
21578 atomic_set(&mce_callin, 0);
21579 atomic_set(&global_nwo, 0);
21580diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21581index a304298..49b6d06 100644
21582--- a/arch/x86/kernel/cpu/mcheck/p5.c
21583+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21584@@ -10,6 +10,7 @@
21585 #include <asm/processor.h>
21586 #include <asm/mce.h>
21587 #include <asm/msr.h>
21588+#include <asm/pgtable.h>
21589
21590 /* By default disabled */
21591 int mce_p5_enabled __read_mostly;
21592@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21593 if (!cpu_has(c, X86_FEATURE_MCE))
21594 return;
21595
21596+ pax_open_kernel();
21597 machine_check_vector = pentium_machine_check;
21598+ pax_close_kernel();
21599 /* Make sure the vector pointer is visible before we enable MCEs: */
21600 wmb();
21601
21602diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21603index 7dc5564..1273569 100644
21604--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21605+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21606@@ -9,6 +9,7 @@
21607 #include <asm/processor.h>
21608 #include <asm/mce.h>
21609 #include <asm/msr.h>
21610+#include <asm/pgtable.h>
21611
21612 /* Machine check handler for WinChip C6: */
21613 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21614@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21615 {
21616 u32 lo, hi;
21617
21618+ pax_open_kernel();
21619 machine_check_vector = winchip_machine_check;
21620+ pax_close_kernel();
21621 /* Make sure the vector pointer is visible before we enable MCEs: */
21622 wmb();
21623
21624diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21625index 36a8361..e7058c2 100644
21626--- a/arch/x86/kernel/cpu/microcode/core.c
21627+++ b/arch/x86/kernel/cpu/microcode/core.c
21628@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21629 return NOTIFY_OK;
21630 }
21631
21632-static struct notifier_block __refdata mc_cpu_notifier = {
21633+static struct notifier_block mc_cpu_notifier = {
21634 .notifier_call = mc_cpu_callback,
21635 };
21636
21637diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21638index c6826d1..8dc677e 100644
21639--- a/arch/x86/kernel/cpu/microcode/intel.c
21640+++ b/arch/x86/kernel/cpu/microcode/intel.c
21641@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
21642 struct microcode_header_intel mc_header;
21643 unsigned int mc_size;
21644
21645+ if (leftover < sizeof(mc_header)) {
21646+ pr_err("error! Truncated header in microcode data file\n");
21647+ break;
21648+ }
21649+
21650 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
21651 break;
21652
21653@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21654
21655 static int get_ucode_user(void *to, const void *from, size_t n)
21656 {
21657- return copy_from_user(to, from, n);
21658+ return copy_from_user(to, (const void __force_user *)from, n);
21659 }
21660
21661 static enum ucode_state
21662 request_microcode_user(int cpu, const void __user *buf, size_t size)
21663 {
21664- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21665+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21666 }
21667
21668 static void microcode_fini_cpu(int cpu)
21669diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
21670index ec9df6f..420eb93 100644
21671--- a/arch/x86/kernel/cpu/microcode/intel_early.c
21672+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
21673@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
21674 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
21675 int i;
21676
21677- while (leftover) {
21678+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
21679+
21680+ if (leftover < sizeof(mc_header))
21681+ break;
21682+
21683 mc_header = (struct microcode_header_intel *)ucode_ptr;
21684
21685 mc_size = get_totalsize(mc_header);
21686diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21687index ea5f363..cb0e905 100644
21688--- a/arch/x86/kernel/cpu/mtrr/main.c
21689+++ b/arch/x86/kernel/cpu/mtrr/main.c
21690@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21691 u64 size_or_mask, size_and_mask;
21692 static bool mtrr_aps_delayed_init;
21693
21694-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21695+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21696
21697 const struct mtrr_ops *mtrr_if;
21698
21699diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21700index df5e41f..816c719 100644
21701--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21702+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21703@@ -25,7 +25,7 @@ struct mtrr_ops {
21704 int (*validate_add_page)(unsigned long base, unsigned long size,
21705 unsigned int type);
21706 int (*have_wrcomb)(void);
21707-};
21708+} __do_const;
21709
21710 extern int generic_get_free_region(unsigned long base, unsigned long size,
21711 int replace_reg);
21712diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21713index 143e5f5..5825081 100644
21714--- a/arch/x86/kernel/cpu/perf_event.c
21715+++ b/arch/x86/kernel/cpu/perf_event.c
21716@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
21717
21718 }
21719
21720-static struct attribute_group x86_pmu_format_group = {
21721+static attribute_group_no_const x86_pmu_format_group = {
21722 .name = "format",
21723 .attrs = NULL,
21724 };
21725@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
21726 NULL,
21727 };
21728
21729-static struct attribute_group x86_pmu_events_group = {
21730+static attribute_group_no_const x86_pmu_events_group = {
21731 .name = "events",
21732 .attrs = events_attr,
21733 };
21734@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
21735 if (idx > GDT_ENTRIES)
21736 return 0;
21737
21738- desc = raw_cpu_ptr(gdt_page.gdt);
21739+ desc = get_cpu_gdt_table(smp_processor_id());
21740 }
21741
21742 return get_desc_base(desc + idx);
21743@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21744 break;
21745
21746 perf_callchain_store(entry, frame.return_address);
21747- fp = frame.next_frame;
21748+ fp = (const void __force_user *)frame.next_frame;
21749 }
21750 }
21751
21752diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21753index 97242a9..cf9c30e 100644
21754--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21755+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21756@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21757 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21758 {
21759 struct attribute **attrs;
21760- struct attribute_group *attr_group;
21761+ attribute_group_no_const *attr_group;
21762 int i = 0, j;
21763
21764 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21765diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21766index 498b6d9..4126515 100644
21767--- a/arch/x86/kernel/cpu/perf_event_intel.c
21768+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21769@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21770 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21771
21772 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21773- u64 capabilities;
21774+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21775
21776- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21777- x86_pmu.intel_cap.capabilities = capabilities;
21778+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21779+ x86_pmu.intel_cap.capabilities = capabilities;
21780 }
21781
21782 intel_ds_init();
21783diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21784index c4bb8b8..9f7384d 100644
21785--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21786+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21787@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21788 NULL,
21789 };
21790
21791-static struct attribute_group rapl_pmu_events_group = {
21792+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21793 .name = "events",
21794 .attrs = NULL, /* patched at runtime */
21795 };
21796diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21797index c635b8b..b78835e 100644
21798--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21799+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21800@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21801 static int __init uncore_type_init(struct intel_uncore_type *type)
21802 {
21803 struct intel_uncore_pmu *pmus;
21804- struct attribute_group *attr_group;
21805+ attribute_group_no_const *attr_group;
21806 struct attribute **attrs;
21807 int i, j;
21808
21809diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21810index 6c8c1e7..515b98a 100644
21811--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21812+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21813@@ -114,7 +114,7 @@ struct intel_uncore_box {
21814 struct uncore_event_desc {
21815 struct kobj_attribute attr;
21816 const char *config;
21817-};
21818+} __do_const;
21819
21820 ssize_t uncore_event_show(struct kobject *kobj,
21821 struct kobj_attribute *attr, char *buf);
21822diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21823index 83741a7..bd3507d 100644
21824--- a/arch/x86/kernel/cpuid.c
21825+++ b/arch/x86/kernel/cpuid.c
21826@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21827 return notifier_from_errno(err);
21828 }
21829
21830-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21831+static struct notifier_block cpuid_class_cpu_notifier =
21832 {
21833 .notifier_call = cpuid_class_cpu_callback,
21834 };
21835diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21836index aceb2f9..c76d3e3 100644
21837--- a/arch/x86/kernel/crash.c
21838+++ b/arch/x86/kernel/crash.c
21839@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21840 #ifdef CONFIG_X86_32
21841 struct pt_regs fixed_regs;
21842
21843- if (!user_mode_vm(regs)) {
21844+ if (!user_mode(regs)) {
21845 crash_fixup_ss_esp(&fixed_regs, regs);
21846 regs = &fixed_regs;
21847 }
21848diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21849index afa64ad..dce67dd 100644
21850--- a/arch/x86/kernel/crash_dump_64.c
21851+++ b/arch/x86/kernel/crash_dump_64.c
21852@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21853 return -ENOMEM;
21854
21855 if (userbuf) {
21856- if (copy_to_user(buf, vaddr + offset, csize)) {
21857+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21858 iounmap(vaddr);
21859 return -EFAULT;
21860 }
21861diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21862index f6dfd93..892ade4 100644
21863--- a/arch/x86/kernel/doublefault.c
21864+++ b/arch/x86/kernel/doublefault.c
21865@@ -12,7 +12,7 @@
21866
21867 #define DOUBLEFAULT_STACKSIZE (1024)
21868 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21869-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21870+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21871
21872 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21873
21874@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21875 unsigned long gdt, tss;
21876
21877 native_store_gdt(&gdt_desc);
21878- gdt = gdt_desc.address;
21879+ gdt = (unsigned long)gdt_desc.address;
21880
21881 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21882
21883@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21884 /* 0x2 bit is always set */
21885 .flags = X86_EFLAGS_SF | 0x2,
21886 .sp = STACK_START,
21887- .es = __USER_DS,
21888+ .es = __KERNEL_DS,
21889 .cs = __KERNEL_CS,
21890 .ss = __KERNEL_DS,
21891- .ds = __USER_DS,
21892+ .ds = __KERNEL_DS,
21893 .fs = __KERNEL_PERCPU,
21894
21895 .__cr3 = __pa_nodebug(swapper_pg_dir),
21896diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21897index b74ebc7..2c95874 100644
21898--- a/arch/x86/kernel/dumpstack.c
21899+++ b/arch/x86/kernel/dumpstack.c
21900@@ -2,6 +2,9 @@
21901 * Copyright (C) 1991, 1992 Linus Torvalds
21902 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21903 */
21904+#ifdef CONFIG_GRKERNSEC_HIDESYM
21905+#define __INCLUDED_BY_HIDESYM 1
21906+#endif
21907 #include <linux/kallsyms.h>
21908 #include <linux/kprobes.h>
21909 #include <linux/uaccess.h>
21910@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21911
21912 void printk_address(unsigned long address)
21913 {
21914- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21915+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21916 }
21917
21918 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21919 static void
21920 print_ftrace_graph_addr(unsigned long addr, void *data,
21921 const struct stacktrace_ops *ops,
21922- struct thread_info *tinfo, int *graph)
21923+ struct task_struct *task, int *graph)
21924 {
21925- struct task_struct *task;
21926 unsigned long ret_addr;
21927 int index;
21928
21929 if (addr != (unsigned long)return_to_handler)
21930 return;
21931
21932- task = tinfo->task;
21933 index = task->curr_ret_stack;
21934
21935 if (!task->ret_stack || index < *graph)
21936@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21937 static inline void
21938 print_ftrace_graph_addr(unsigned long addr, void *data,
21939 const struct stacktrace_ops *ops,
21940- struct thread_info *tinfo, int *graph)
21941+ struct task_struct *task, int *graph)
21942 { }
21943 #endif
21944
21945@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21946 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21947 */
21948
21949-static inline int valid_stack_ptr(struct thread_info *tinfo,
21950- void *p, unsigned int size, void *end)
21951+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21952 {
21953- void *t = tinfo;
21954 if (end) {
21955 if (p < end && p >= (end-THREAD_SIZE))
21956 return 1;
21957@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21958 }
21959
21960 unsigned long
21961-print_context_stack(struct thread_info *tinfo,
21962+print_context_stack(struct task_struct *task, void *stack_start,
21963 unsigned long *stack, unsigned long bp,
21964 const struct stacktrace_ops *ops, void *data,
21965 unsigned long *end, int *graph)
21966 {
21967 struct stack_frame *frame = (struct stack_frame *)bp;
21968
21969- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21970+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21971 unsigned long addr;
21972
21973 addr = *stack;
21974@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21975 } else {
21976 ops->address(data, addr, 0);
21977 }
21978- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21979+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21980 }
21981 stack++;
21982 }
21983@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21984 EXPORT_SYMBOL_GPL(print_context_stack);
21985
21986 unsigned long
21987-print_context_stack_bp(struct thread_info *tinfo,
21988+print_context_stack_bp(struct task_struct *task, void *stack_start,
21989 unsigned long *stack, unsigned long bp,
21990 const struct stacktrace_ops *ops, void *data,
21991 unsigned long *end, int *graph)
21992@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21993 struct stack_frame *frame = (struct stack_frame *)bp;
21994 unsigned long *ret_addr = &frame->return_address;
21995
21996- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21997+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21998 unsigned long addr = *ret_addr;
21999
22000 if (!__kernel_text_address(addr))
22001@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22002 ops->address(data, addr, 1);
22003 frame = frame->next_frame;
22004 ret_addr = &frame->return_address;
22005- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22006+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22007 }
22008
22009 return (unsigned long)frame;
22010@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22011 static void print_trace_address(void *data, unsigned long addr, int reliable)
22012 {
22013 touch_nmi_watchdog();
22014- printk(data);
22015+ printk("%s", (char *)data);
22016 printk_stack_address(addr, reliable);
22017 }
22018
22019@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22020 EXPORT_SYMBOL_GPL(oops_begin);
22021 NOKPROBE_SYMBOL(oops_begin);
22022
22023+extern void gr_handle_kernel_exploit(void);
22024+
22025 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22026 {
22027 if (regs && kexec_should_crash(current))
22028@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22029 panic("Fatal exception in interrupt");
22030 if (panic_on_oops)
22031 panic("Fatal exception");
22032- do_exit(signr);
22033+
22034+ gr_handle_kernel_exploit();
22035+
22036+ do_group_exit(signr);
22037 }
22038 NOKPROBE_SYMBOL(oops_end);
22039
22040@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22041 print_modules();
22042 show_regs(regs);
22043 #ifdef CONFIG_X86_32
22044- if (user_mode_vm(regs)) {
22045+ if (user_mode(regs)) {
22046 sp = regs->sp;
22047 ss = regs->ss & 0xffff;
22048 } else {
22049@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22050 unsigned long flags = oops_begin();
22051 int sig = SIGSEGV;
22052
22053- if (!user_mode_vm(regs))
22054+ if (!user_mode(regs))
22055 report_bug(regs->ip, regs);
22056
22057 if (__die(str, regs, err))
22058diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22059index 5abd4cd..c65733b 100644
22060--- a/arch/x86/kernel/dumpstack_32.c
22061+++ b/arch/x86/kernel/dumpstack_32.c
22062@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22063 bp = stack_frame(task, regs);
22064
22065 for (;;) {
22066- struct thread_info *context;
22067+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22068 void *end_stack;
22069
22070 end_stack = is_hardirq_stack(stack, cpu);
22071 if (!end_stack)
22072 end_stack = is_softirq_stack(stack, cpu);
22073
22074- context = task_thread_info(task);
22075- bp = ops->walk_stack(context, stack, bp, ops, data,
22076+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22077 end_stack, &graph);
22078
22079 /* Stop if not on irq stack */
22080@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22081 int i;
22082
22083 show_regs_print_info(KERN_EMERG);
22084- __show_regs(regs, !user_mode_vm(regs));
22085+ __show_regs(regs, !user_mode(regs));
22086
22087 /*
22088 * When in-kernel, we also print out the stack and code at the
22089 * time of the fault..
22090 */
22091- if (!user_mode_vm(regs)) {
22092+ if (!user_mode(regs)) {
22093 unsigned int code_prologue = code_bytes * 43 / 64;
22094 unsigned int code_len = code_bytes;
22095 unsigned char c;
22096 u8 *ip;
22097+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22098
22099 pr_emerg("Stack:\n");
22100 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22101
22102 pr_emerg("Code:");
22103
22104- ip = (u8 *)regs->ip - code_prologue;
22105+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22106 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22107 /* try starting at IP */
22108- ip = (u8 *)regs->ip;
22109+ ip = (u8 *)regs->ip + cs_base;
22110 code_len = code_len - code_prologue + 1;
22111 }
22112 for (i = 0; i < code_len; i++, ip++) {
22113@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22114 pr_cont(" Bad EIP value.");
22115 break;
22116 }
22117- if (ip == (u8 *)regs->ip)
22118+ if (ip == (u8 *)regs->ip + cs_base)
22119 pr_cont(" <%02x>", c);
22120 else
22121 pr_cont(" %02x", c);
22122@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22123 {
22124 unsigned short ud2;
22125
22126+ ip = ktla_ktva(ip);
22127 if (ip < PAGE_OFFSET)
22128 return 0;
22129 if (probe_kernel_address((unsigned short *)ip, ud2))
22130@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22131
22132 return ud2 == 0x0b0f;
22133 }
22134+
22135+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22136+void pax_check_alloca(unsigned long size)
22137+{
22138+ unsigned long sp = (unsigned long)&sp, stack_left;
22139+
22140+ /* all kernel stacks are of the same size */
22141+ stack_left = sp & (THREAD_SIZE - 1);
22142+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22143+}
22144+EXPORT_SYMBOL(pax_check_alloca);
22145+#endif
22146diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22147index ff86f19..73eabf4 100644
22148--- a/arch/x86/kernel/dumpstack_64.c
22149+++ b/arch/x86/kernel/dumpstack_64.c
22150@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22151 const struct stacktrace_ops *ops, void *data)
22152 {
22153 const unsigned cpu = get_cpu();
22154- struct thread_info *tinfo;
22155 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22156 unsigned long dummy;
22157 unsigned used = 0;
22158 int graph = 0;
22159 int done = 0;
22160+ void *stack_start;
22161
22162 if (!task)
22163 task = current;
22164@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22165 * current stack address. If the stacks consist of nested
22166 * exceptions
22167 */
22168- tinfo = task_thread_info(task);
22169 while (!done) {
22170 unsigned long *stack_end;
22171 enum stack_type stype;
22172@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22173 if (ops->stack(data, id) < 0)
22174 break;
22175
22176- bp = ops->walk_stack(tinfo, stack, bp, ops,
22177+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22178 data, stack_end, &graph);
22179 ops->stack(data, "<EOE>");
22180 /*
22181@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22182 * second-to-last pointer (index -2 to end) in the
22183 * exception stack:
22184 */
22185+ if ((u16)stack_end[-1] != __KERNEL_DS)
22186+ goto out;
22187 stack = (unsigned long *) stack_end[-2];
22188 done = 0;
22189 break;
22190@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22191
22192 if (ops->stack(data, "IRQ") < 0)
22193 break;
22194- bp = ops->walk_stack(tinfo, stack, bp,
22195+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22196 ops, data, stack_end, &graph);
22197 /*
22198 * We link to the next stack (which would be
22199@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22200 /*
22201 * This handles the process stack:
22202 */
22203- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22204+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22205+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22206+out:
22207 put_cpu();
22208 }
22209 EXPORT_SYMBOL(dump_trace);
22210@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22211 {
22212 unsigned short ud2;
22213
22214- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22215+ if (probe_kernel_address((unsigned short *)ip, ud2))
22216 return 0;
22217
22218 return ud2 == 0x0b0f;
22219 }
22220+
22221+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22222+void pax_check_alloca(unsigned long size)
22223+{
22224+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22225+ unsigned cpu, used;
22226+ char *id;
22227+
22228+ /* check the process stack first */
22229+ stack_start = (unsigned long)task_stack_page(current);
22230+ stack_end = stack_start + THREAD_SIZE;
22231+ if (likely(stack_start <= sp && sp < stack_end)) {
22232+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22233+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22234+ return;
22235+ }
22236+
22237+ cpu = get_cpu();
22238+
22239+ /* check the irq stacks */
22240+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22241+ stack_start = stack_end - IRQ_STACK_SIZE;
22242+ if (stack_start <= sp && sp < stack_end) {
22243+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22244+ put_cpu();
22245+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22246+ return;
22247+ }
22248+
22249+ /* check the exception stacks */
22250+ used = 0;
22251+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22252+ stack_start = stack_end - EXCEPTION_STKSZ;
22253+ if (stack_end && stack_start <= sp && sp < stack_end) {
22254+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22255+ put_cpu();
22256+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22257+ return;
22258+ }
22259+
22260+ put_cpu();
22261+
22262+ /* unknown stack */
22263+ BUG();
22264+}
22265+EXPORT_SYMBOL(pax_check_alloca);
22266+#endif
22267diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22268index dd2f07a..845dc05 100644
22269--- a/arch/x86/kernel/e820.c
22270+++ b/arch/x86/kernel/e820.c
22271@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22272
22273 static void early_panic(char *msg)
22274 {
22275- early_printk(msg);
22276- panic(msg);
22277+ early_printk("%s", msg);
22278+ panic("%s", msg);
22279 }
22280
22281 static int userdef __initdata;
22282diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22283index 01d1c18..8073693 100644
22284--- a/arch/x86/kernel/early_printk.c
22285+++ b/arch/x86/kernel/early_printk.c
22286@@ -7,6 +7,7 @@
22287 #include <linux/pci_regs.h>
22288 #include <linux/pci_ids.h>
22289 #include <linux/errno.h>
22290+#include <linux/sched.h>
22291 #include <asm/io.h>
22292 #include <asm/processor.h>
22293 #include <asm/fcntl.h>
22294diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22295index 000d419..8f66802 100644
22296--- a/arch/x86/kernel/entry_32.S
22297+++ b/arch/x86/kernel/entry_32.S
22298@@ -177,13 +177,154 @@
22299 /*CFI_REL_OFFSET gs, PT_GS*/
22300 .endm
22301 .macro SET_KERNEL_GS reg
22302+
22303+#ifdef CONFIG_CC_STACKPROTECTOR
22304 movl $(__KERNEL_STACK_CANARY), \reg
22305+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22306+ movl $(__USER_DS), \reg
22307+#else
22308+ xorl \reg, \reg
22309+#endif
22310+
22311 movl \reg, %gs
22312 .endm
22313
22314 #endif /* CONFIG_X86_32_LAZY_GS */
22315
22316-.macro SAVE_ALL
22317+.macro pax_enter_kernel
22318+#ifdef CONFIG_PAX_KERNEXEC
22319+ call pax_enter_kernel
22320+#endif
22321+.endm
22322+
22323+.macro pax_exit_kernel
22324+#ifdef CONFIG_PAX_KERNEXEC
22325+ call pax_exit_kernel
22326+#endif
22327+.endm
22328+
22329+#ifdef CONFIG_PAX_KERNEXEC
22330+ENTRY(pax_enter_kernel)
22331+#ifdef CONFIG_PARAVIRT
22332+ pushl %eax
22333+ pushl %ecx
22334+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22335+ mov %eax, %esi
22336+#else
22337+ mov %cr0, %esi
22338+#endif
22339+ bts $16, %esi
22340+ jnc 1f
22341+ mov %cs, %esi
22342+ cmp $__KERNEL_CS, %esi
22343+ jz 3f
22344+ ljmp $__KERNEL_CS, $3f
22345+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22346+2:
22347+#ifdef CONFIG_PARAVIRT
22348+ mov %esi, %eax
22349+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22350+#else
22351+ mov %esi, %cr0
22352+#endif
22353+3:
22354+#ifdef CONFIG_PARAVIRT
22355+ popl %ecx
22356+ popl %eax
22357+#endif
22358+ ret
22359+ENDPROC(pax_enter_kernel)
22360+
22361+ENTRY(pax_exit_kernel)
22362+#ifdef CONFIG_PARAVIRT
22363+ pushl %eax
22364+ pushl %ecx
22365+#endif
22366+ mov %cs, %esi
22367+ cmp $__KERNEXEC_KERNEL_CS, %esi
22368+ jnz 2f
22369+#ifdef CONFIG_PARAVIRT
22370+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22371+ mov %eax, %esi
22372+#else
22373+ mov %cr0, %esi
22374+#endif
22375+ btr $16, %esi
22376+ ljmp $__KERNEL_CS, $1f
22377+1:
22378+#ifdef CONFIG_PARAVIRT
22379+ mov %esi, %eax
22380+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22381+#else
22382+ mov %esi, %cr0
22383+#endif
22384+2:
22385+#ifdef CONFIG_PARAVIRT
22386+ popl %ecx
22387+ popl %eax
22388+#endif
22389+ ret
22390+ENDPROC(pax_exit_kernel)
22391+#endif
22392+
22393+ .macro pax_erase_kstack
22394+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22395+ call pax_erase_kstack
22396+#endif
22397+ .endm
22398+
22399+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22400+/*
22401+ * ebp: thread_info
22402+ */
22403+ENTRY(pax_erase_kstack)
22404+ pushl %edi
22405+ pushl %ecx
22406+ pushl %eax
22407+
22408+ mov TI_lowest_stack(%ebp), %edi
22409+ mov $-0xBEEF, %eax
22410+ std
22411+
22412+1: mov %edi, %ecx
22413+ and $THREAD_SIZE_asm - 1, %ecx
22414+ shr $2, %ecx
22415+ repne scasl
22416+ jecxz 2f
22417+
22418+ cmp $2*16, %ecx
22419+ jc 2f
22420+
22421+ mov $2*16, %ecx
22422+ repe scasl
22423+ jecxz 2f
22424+ jne 1b
22425+
22426+2: cld
22427+ or $2*4, %edi
22428+ mov %esp, %ecx
22429+ sub %edi, %ecx
22430+
22431+ cmp $THREAD_SIZE_asm, %ecx
22432+ jb 3f
22433+ ud2
22434+3:
22435+
22436+ shr $2, %ecx
22437+ rep stosl
22438+
22439+ mov TI_task_thread_sp0(%ebp), %edi
22440+ sub $128, %edi
22441+ mov %edi, TI_lowest_stack(%ebp)
22442+
22443+ popl %eax
22444+ popl %ecx
22445+ popl %edi
22446+ ret
22447+ENDPROC(pax_erase_kstack)
22448+#endif
22449+
22450+.macro __SAVE_ALL _DS
22451 cld
22452 PUSH_GS
22453 pushl_cfi %fs
22454@@ -206,7 +347,7 @@
22455 CFI_REL_OFFSET ecx, 0
22456 pushl_cfi %ebx
22457 CFI_REL_OFFSET ebx, 0
22458- movl $(__USER_DS), %edx
22459+ movl $\_DS, %edx
22460 movl %edx, %ds
22461 movl %edx, %es
22462 movl $(__KERNEL_PERCPU), %edx
22463@@ -214,6 +355,15 @@
22464 SET_KERNEL_GS %edx
22465 .endm
22466
22467+.macro SAVE_ALL
22468+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22469+ __SAVE_ALL __KERNEL_DS
22470+ pax_enter_kernel
22471+#else
22472+ __SAVE_ALL __USER_DS
22473+#endif
22474+.endm
22475+
22476 .macro RESTORE_INT_REGS
22477 popl_cfi %ebx
22478 CFI_RESTORE ebx
22479@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22480 popfl_cfi
22481 jmp syscall_exit
22482 CFI_ENDPROC
22483-END(ret_from_fork)
22484+ENDPROC(ret_from_fork)
22485
22486 ENTRY(ret_from_kernel_thread)
22487 CFI_STARTPROC
22488@@ -340,7 +490,15 @@ ret_from_intr:
22489 andl $SEGMENT_RPL_MASK, %eax
22490 #endif
22491 cmpl $USER_RPL, %eax
22492+
22493+#ifdef CONFIG_PAX_KERNEXEC
22494+ jae resume_userspace
22495+
22496+ pax_exit_kernel
22497+ jmp resume_kernel
22498+#else
22499 jb resume_kernel # not returning to v8086 or userspace
22500+#endif
22501
22502 ENTRY(resume_userspace)
22503 LOCKDEP_SYS_EXIT
22504@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22505 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22506 # int/exception return?
22507 jne work_pending
22508- jmp restore_all
22509-END(ret_from_exception)
22510+ jmp restore_all_pax
22511+ENDPROC(ret_from_exception)
22512
22513 #ifdef CONFIG_PREEMPT
22514 ENTRY(resume_kernel)
22515@@ -365,7 +523,7 @@ need_resched:
22516 jz restore_all
22517 call preempt_schedule_irq
22518 jmp need_resched
22519-END(resume_kernel)
22520+ENDPROC(resume_kernel)
22521 #endif
22522 CFI_ENDPROC
22523
22524@@ -395,30 +553,45 @@ sysenter_past_esp:
22525 /*CFI_REL_OFFSET cs, 0*/
22526 /*
22527 * Push current_thread_info()->sysenter_return to the stack.
22528- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22529- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22530 */
22531- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22532+ pushl_cfi $0
22533 CFI_REL_OFFSET eip, 0
22534
22535 pushl_cfi %eax
22536 SAVE_ALL
22537+ GET_THREAD_INFO(%ebp)
22538+ movl TI_sysenter_return(%ebp),%ebp
22539+ movl %ebp,PT_EIP(%esp)
22540 ENABLE_INTERRUPTS(CLBR_NONE)
22541
22542 /*
22543 * Load the potential sixth argument from user stack.
22544 * Careful about security.
22545 */
22546+ movl PT_OLDESP(%esp),%ebp
22547+
22548+#ifdef CONFIG_PAX_MEMORY_UDEREF
22549+ mov PT_OLDSS(%esp),%ds
22550+1: movl %ds:(%ebp),%ebp
22551+ push %ss
22552+ pop %ds
22553+#else
22554 cmpl $__PAGE_OFFSET-3,%ebp
22555 jae syscall_fault
22556 ASM_STAC
22557 1: movl (%ebp),%ebp
22558 ASM_CLAC
22559+#endif
22560+
22561 movl %ebp,PT_EBP(%esp)
22562 _ASM_EXTABLE(1b,syscall_fault)
22563
22564 GET_THREAD_INFO(%ebp)
22565
22566+#ifdef CONFIG_PAX_RANDKSTACK
22567+ pax_erase_kstack
22568+#endif
22569+
22570 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22571 jnz sysenter_audit
22572 sysenter_do_call:
22573@@ -434,12 +607,24 @@ sysenter_after_call:
22574 testl $_TIF_ALLWORK_MASK, %ecx
22575 jne sysexit_audit
22576 sysenter_exit:
22577+
22578+#ifdef CONFIG_PAX_RANDKSTACK
22579+ pushl_cfi %eax
22580+ movl %esp, %eax
22581+ call pax_randomize_kstack
22582+ popl_cfi %eax
22583+#endif
22584+
22585+ pax_erase_kstack
22586+
22587 /* if something modifies registers it must also disable sysexit */
22588 movl PT_EIP(%esp), %edx
22589 movl PT_OLDESP(%esp), %ecx
22590 xorl %ebp,%ebp
22591 TRACE_IRQS_ON
22592 1: mov PT_FS(%esp), %fs
22593+2: mov PT_DS(%esp), %ds
22594+3: mov PT_ES(%esp), %es
22595 PTGS_TO_GS
22596 ENABLE_INTERRUPTS_SYSEXIT
22597
22598@@ -453,6 +638,9 @@ sysenter_audit:
22599 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22600 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22601 call __audit_syscall_entry
22602+
22603+ pax_erase_kstack
22604+
22605 popl_cfi %ecx /* get that remapped edx off the stack */
22606 popl_cfi %ecx /* get that remapped esi off the stack */
22607 movl PT_EAX(%esp),%eax /* reload syscall number */
22608@@ -479,10 +667,16 @@ sysexit_audit:
22609
22610 CFI_ENDPROC
22611 .pushsection .fixup,"ax"
22612-2: movl $0,PT_FS(%esp)
22613+4: movl $0,PT_FS(%esp)
22614+ jmp 1b
22615+5: movl $0,PT_DS(%esp)
22616+ jmp 1b
22617+6: movl $0,PT_ES(%esp)
22618 jmp 1b
22619 .popsection
22620- _ASM_EXTABLE(1b,2b)
22621+ _ASM_EXTABLE(1b,4b)
22622+ _ASM_EXTABLE(2b,5b)
22623+ _ASM_EXTABLE(3b,6b)
22624 PTGS_TO_GS_EX
22625 ENDPROC(ia32_sysenter_target)
22626
22627@@ -493,6 +687,11 @@ ENTRY(system_call)
22628 pushl_cfi %eax # save orig_eax
22629 SAVE_ALL
22630 GET_THREAD_INFO(%ebp)
22631+
22632+#ifdef CONFIG_PAX_RANDKSTACK
22633+ pax_erase_kstack
22634+#endif
22635+
22636 # system call tracing in operation / emulation
22637 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22638 jnz syscall_trace_entry
22639@@ -512,6 +711,15 @@ syscall_exit:
22640 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22641 jne syscall_exit_work
22642
22643+restore_all_pax:
22644+
22645+#ifdef CONFIG_PAX_RANDKSTACK
22646+ movl %esp, %eax
22647+ call pax_randomize_kstack
22648+#endif
22649+
22650+ pax_erase_kstack
22651+
22652 restore_all:
22653 TRACE_IRQS_IRET
22654 restore_all_notrace:
22655@@ -566,14 +774,34 @@ ldt_ss:
22656 * compensating for the offset by changing to the ESPFIX segment with
22657 * a base address that matches for the difference.
22658 */
22659-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22660+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22661 mov %esp, %edx /* load kernel esp */
22662 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22663 mov %dx, %ax /* eax: new kernel esp */
22664 sub %eax, %edx /* offset (low word is 0) */
22665+#ifdef CONFIG_SMP
22666+ movl PER_CPU_VAR(cpu_number), %ebx
22667+ shll $PAGE_SHIFT_asm, %ebx
22668+ addl $cpu_gdt_table, %ebx
22669+#else
22670+ movl $cpu_gdt_table, %ebx
22671+#endif
22672 shr $16, %edx
22673- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22674- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22675+
22676+#ifdef CONFIG_PAX_KERNEXEC
22677+ mov %cr0, %esi
22678+ btr $16, %esi
22679+ mov %esi, %cr0
22680+#endif
22681+
22682+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22683+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22684+
22685+#ifdef CONFIG_PAX_KERNEXEC
22686+ bts $16, %esi
22687+ mov %esi, %cr0
22688+#endif
22689+
22690 pushl_cfi $__ESPFIX_SS
22691 pushl_cfi %eax /* new kernel esp */
22692 /* Disable interrupts, but do not irqtrace this section: we
22693@@ -603,20 +831,18 @@ work_resched:
22694 movl TI_flags(%ebp), %ecx
22695 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22696 # than syscall tracing?
22697- jz restore_all
22698+ jz restore_all_pax
22699 testb $_TIF_NEED_RESCHED, %cl
22700 jnz work_resched
22701
22702 work_notifysig: # deal with pending signals and
22703 # notify-resume requests
22704+ movl %esp, %eax
22705 #ifdef CONFIG_VM86
22706 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22707- movl %esp, %eax
22708 jne work_notifysig_v86 # returning to kernel-space or
22709 # vm86-space
22710 1:
22711-#else
22712- movl %esp, %eax
22713 #endif
22714 TRACE_IRQS_ON
22715 ENABLE_INTERRUPTS(CLBR_NONE)
22716@@ -637,7 +863,7 @@ work_notifysig_v86:
22717 movl %eax, %esp
22718 jmp 1b
22719 #endif
22720-END(work_pending)
22721+ENDPROC(work_pending)
22722
22723 # perform syscall exit tracing
22724 ALIGN
22725@@ -645,11 +871,14 @@ syscall_trace_entry:
22726 movl $-ENOSYS,PT_EAX(%esp)
22727 movl %esp, %eax
22728 call syscall_trace_enter
22729+
22730+ pax_erase_kstack
22731+
22732 /* What it returned is what we'll actually use. */
22733 cmpl $(NR_syscalls), %eax
22734 jnae syscall_call
22735 jmp syscall_exit
22736-END(syscall_trace_entry)
22737+ENDPROC(syscall_trace_entry)
22738
22739 # perform syscall exit tracing
22740 ALIGN
22741@@ -662,26 +891,30 @@ syscall_exit_work:
22742 movl %esp, %eax
22743 call syscall_trace_leave
22744 jmp resume_userspace
22745-END(syscall_exit_work)
22746+ENDPROC(syscall_exit_work)
22747 CFI_ENDPROC
22748
22749 RING0_INT_FRAME # can't unwind into user space anyway
22750 syscall_fault:
22751+#ifdef CONFIG_PAX_MEMORY_UDEREF
22752+ push %ss
22753+ pop %ds
22754+#endif
22755 ASM_CLAC
22756 GET_THREAD_INFO(%ebp)
22757 movl $-EFAULT,PT_EAX(%esp)
22758 jmp resume_userspace
22759-END(syscall_fault)
22760+ENDPROC(syscall_fault)
22761
22762 syscall_badsys:
22763 movl $-ENOSYS,%eax
22764 jmp syscall_after_call
22765-END(syscall_badsys)
22766+ENDPROC(syscall_badsys)
22767
22768 sysenter_badsys:
22769 movl $-ENOSYS,%eax
22770 jmp sysenter_after_call
22771-END(sysenter_badsys)
22772+ENDPROC(sysenter_badsys)
22773 CFI_ENDPROC
22774
22775 .macro FIXUP_ESPFIX_STACK
22776@@ -694,8 +927,15 @@ END(sysenter_badsys)
22777 */
22778 #ifdef CONFIG_X86_ESPFIX32
22779 /* fixup the stack */
22780- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22781- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22782+#ifdef CONFIG_SMP
22783+ movl PER_CPU_VAR(cpu_number), %ebx
22784+ shll $PAGE_SHIFT_asm, %ebx
22785+ addl $cpu_gdt_table, %ebx
22786+#else
22787+ movl $cpu_gdt_table, %ebx
22788+#endif
22789+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22790+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22791 shl $16, %eax
22792 addl %esp, %eax /* the adjusted stack pointer */
22793 pushl_cfi $__KERNEL_DS
22794@@ -751,7 +991,7 @@ vector=vector+1
22795 .endr
22796 2: jmp common_interrupt
22797 .endr
22798-END(irq_entries_start)
22799+ENDPROC(irq_entries_start)
22800
22801 .previous
22802 END(interrupt)
22803@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22804 pushl_cfi $do_coprocessor_error
22805 jmp error_code
22806 CFI_ENDPROC
22807-END(coprocessor_error)
22808+ENDPROC(coprocessor_error)
22809
22810 ENTRY(simd_coprocessor_error)
22811 RING0_INT_FRAME
22812@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22813 .section .altinstructions,"a"
22814 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22815 .previous
22816-.section .altinstr_replacement,"ax"
22817+.section .altinstr_replacement,"a"
22818 663: pushl $do_simd_coprocessor_error
22819 664:
22820 .previous
22821@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22822 #endif
22823 jmp error_code
22824 CFI_ENDPROC
22825-END(simd_coprocessor_error)
22826+ENDPROC(simd_coprocessor_error)
22827
22828 ENTRY(device_not_available)
22829 RING0_INT_FRAME
22830@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22831 pushl_cfi $do_device_not_available
22832 jmp error_code
22833 CFI_ENDPROC
22834-END(device_not_available)
22835+ENDPROC(device_not_available)
22836
22837 #ifdef CONFIG_PARAVIRT
22838 ENTRY(native_iret)
22839 iret
22840 _ASM_EXTABLE(native_iret, iret_exc)
22841-END(native_iret)
22842+ENDPROC(native_iret)
22843
22844 ENTRY(native_irq_enable_sysexit)
22845 sti
22846 sysexit
22847-END(native_irq_enable_sysexit)
22848+ENDPROC(native_irq_enable_sysexit)
22849 #endif
22850
22851 ENTRY(overflow)
22852@@ -860,7 +1100,7 @@ ENTRY(overflow)
22853 pushl_cfi $do_overflow
22854 jmp error_code
22855 CFI_ENDPROC
22856-END(overflow)
22857+ENDPROC(overflow)
22858
22859 ENTRY(bounds)
22860 RING0_INT_FRAME
22861@@ -869,7 +1109,7 @@ ENTRY(bounds)
22862 pushl_cfi $do_bounds
22863 jmp error_code
22864 CFI_ENDPROC
22865-END(bounds)
22866+ENDPROC(bounds)
22867
22868 ENTRY(invalid_op)
22869 RING0_INT_FRAME
22870@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22871 pushl_cfi $do_invalid_op
22872 jmp error_code
22873 CFI_ENDPROC
22874-END(invalid_op)
22875+ENDPROC(invalid_op)
22876
22877 ENTRY(coprocessor_segment_overrun)
22878 RING0_INT_FRAME
22879@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22880 pushl_cfi $do_coprocessor_segment_overrun
22881 jmp error_code
22882 CFI_ENDPROC
22883-END(coprocessor_segment_overrun)
22884+ENDPROC(coprocessor_segment_overrun)
22885
22886 ENTRY(invalid_TSS)
22887 RING0_EC_FRAME
22888@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22889 pushl_cfi $do_invalid_TSS
22890 jmp error_code
22891 CFI_ENDPROC
22892-END(invalid_TSS)
22893+ENDPROC(invalid_TSS)
22894
22895 ENTRY(segment_not_present)
22896 RING0_EC_FRAME
22897@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22898 pushl_cfi $do_segment_not_present
22899 jmp error_code
22900 CFI_ENDPROC
22901-END(segment_not_present)
22902+ENDPROC(segment_not_present)
22903
22904 ENTRY(stack_segment)
22905 RING0_EC_FRAME
22906@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22907 pushl_cfi $do_stack_segment
22908 jmp error_code
22909 CFI_ENDPROC
22910-END(stack_segment)
22911+ENDPROC(stack_segment)
22912
22913 ENTRY(alignment_check)
22914 RING0_EC_FRAME
22915@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22916 pushl_cfi $do_alignment_check
22917 jmp error_code
22918 CFI_ENDPROC
22919-END(alignment_check)
22920+ENDPROC(alignment_check)
22921
22922 ENTRY(divide_error)
22923 RING0_INT_FRAME
22924@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22925 pushl_cfi $do_divide_error
22926 jmp error_code
22927 CFI_ENDPROC
22928-END(divide_error)
22929+ENDPROC(divide_error)
22930
22931 #ifdef CONFIG_X86_MCE
22932 ENTRY(machine_check)
22933@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22934 pushl_cfi machine_check_vector
22935 jmp error_code
22936 CFI_ENDPROC
22937-END(machine_check)
22938+ENDPROC(machine_check)
22939 #endif
22940
22941 ENTRY(spurious_interrupt_bug)
22942@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22943 pushl_cfi $do_spurious_interrupt_bug
22944 jmp error_code
22945 CFI_ENDPROC
22946-END(spurious_interrupt_bug)
22947+ENDPROC(spurious_interrupt_bug)
22948
22949 #ifdef CONFIG_XEN
22950 /* Xen doesn't set %esp to be precisely what the normal sysenter
22951@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22952
22953 ENTRY(mcount)
22954 ret
22955-END(mcount)
22956+ENDPROC(mcount)
22957
22958 ENTRY(ftrace_caller)
22959 pushl %eax
22960@@ -1084,7 +1324,7 @@ ftrace_graph_call:
22961 .globl ftrace_stub
22962 ftrace_stub:
22963 ret
22964-END(ftrace_caller)
22965+ENDPROC(ftrace_caller)
22966
22967 ENTRY(ftrace_regs_caller)
22968 pushf /* push flags before compare (in cs location) */
22969@@ -1182,7 +1422,7 @@ trace:
22970 popl %ecx
22971 popl %eax
22972 jmp ftrace_stub
22973-END(mcount)
22974+ENDPROC(mcount)
22975 #endif /* CONFIG_DYNAMIC_FTRACE */
22976 #endif /* CONFIG_FUNCTION_TRACER */
22977
22978@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
22979 popl %ecx
22980 popl %eax
22981 ret
22982-END(ftrace_graph_caller)
22983+ENDPROC(ftrace_graph_caller)
22984
22985 .globl return_to_handler
22986 return_to_handler:
22987@@ -1261,15 +1501,18 @@ error_code:
22988 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22989 REG_TO_PTGS %ecx
22990 SET_KERNEL_GS %ecx
22991- movl $(__USER_DS), %ecx
22992+ movl $(__KERNEL_DS), %ecx
22993 movl %ecx, %ds
22994 movl %ecx, %es
22995+
22996+ pax_enter_kernel
22997+
22998 TRACE_IRQS_OFF
22999 movl %esp,%eax # pt_regs pointer
23000 call *%edi
23001 jmp ret_from_exception
23002 CFI_ENDPROC
23003-END(page_fault)
23004+ENDPROC(page_fault)
23005
23006 /*
23007 * Debug traps and NMI can happen at the one SYSENTER instruction
23008@@ -1312,7 +1555,7 @@ debug_stack_correct:
23009 call do_debug
23010 jmp ret_from_exception
23011 CFI_ENDPROC
23012-END(debug)
23013+ENDPROC(debug)
23014
23015 /*
23016 * NMI is doubly nasty. It can happen _while_ we're handling
23017@@ -1352,6 +1595,9 @@ nmi_stack_correct:
23018 xorl %edx,%edx # zero error code
23019 movl %esp,%eax # pt_regs pointer
23020 call do_nmi
23021+
23022+ pax_exit_kernel
23023+
23024 jmp restore_all_notrace
23025 CFI_ENDPROC
23026
23027@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
23028 FIXUP_ESPFIX_STACK # %eax == %esp
23029 xorl %edx,%edx # zero error code
23030 call do_nmi
23031+
23032+ pax_exit_kernel
23033+
23034 RESTORE_REGS
23035 lss 12+4(%esp), %esp # back to espfix stack
23036 CFI_ADJUST_CFA_OFFSET -24
23037 jmp irq_return
23038 #endif
23039 CFI_ENDPROC
23040-END(nmi)
23041+ENDPROC(nmi)
23042
23043 ENTRY(int3)
23044 RING0_INT_FRAME
23045@@ -1408,14 +1657,14 @@ ENTRY(int3)
23046 call do_int3
23047 jmp ret_from_exception
23048 CFI_ENDPROC
23049-END(int3)
23050+ENDPROC(int3)
23051
23052 ENTRY(general_protection)
23053 RING0_EC_FRAME
23054 pushl_cfi $do_general_protection
23055 jmp error_code
23056 CFI_ENDPROC
23057-END(general_protection)
23058+ENDPROC(general_protection)
23059
23060 #ifdef CONFIG_KVM_GUEST
23061 ENTRY(async_page_fault)
23062@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
23063 pushl_cfi $do_async_page_fault
23064 jmp error_code
23065 CFI_ENDPROC
23066-END(async_page_fault)
23067+ENDPROC(async_page_fault)
23068 #endif
23069
23070diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23071index 4ee9a23..c786610 100644
23072--- a/arch/x86/kernel/entry_64.S
23073+++ b/arch/x86/kernel/entry_64.S
23074@@ -59,6 +59,8 @@
23075 #include <asm/smap.h>
23076 #include <asm/pgtable_types.h>
23077 #include <linux/err.h>
23078+#include <asm/pgtable.h>
23079+#include <asm/alternative-asm.h>
23080
23081 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23082 #include <linux/elf-em.h>
23083@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23084 ENDPROC(native_usergs_sysret64)
23085 #endif /* CONFIG_PARAVIRT */
23086
23087+ .macro ljmpq sel, off
23088+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23089+ .byte 0x48; ljmp *1234f(%rip)
23090+ .pushsection .rodata
23091+ .align 16
23092+ 1234: .quad \off; .word \sel
23093+ .popsection
23094+#else
23095+ pushq $\sel
23096+ pushq $\off
23097+ lretq
23098+#endif
23099+ .endm
23100+
23101+ .macro pax_enter_kernel
23102+ pax_set_fptr_mask
23103+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23104+ call pax_enter_kernel
23105+#endif
23106+ .endm
23107+
23108+ .macro pax_exit_kernel
23109+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23110+ call pax_exit_kernel
23111+#endif
23112+
23113+ .endm
23114+
23115+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23116+ENTRY(pax_enter_kernel)
23117+ pushq %rdi
23118+
23119+#ifdef CONFIG_PARAVIRT
23120+ PV_SAVE_REGS(CLBR_RDI)
23121+#endif
23122+
23123+#ifdef CONFIG_PAX_KERNEXEC
23124+ GET_CR0_INTO_RDI
23125+ bts $16,%rdi
23126+ jnc 3f
23127+ mov %cs,%edi
23128+ cmp $__KERNEL_CS,%edi
23129+ jnz 2f
23130+1:
23131+#endif
23132+
23133+#ifdef CONFIG_PAX_MEMORY_UDEREF
23134+ 661: jmp 111f
23135+ .pushsection .altinstr_replacement, "a"
23136+ 662: ASM_NOP2
23137+ .popsection
23138+ .pushsection .altinstructions, "a"
23139+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23140+ .popsection
23141+ GET_CR3_INTO_RDI
23142+ cmp $0,%dil
23143+ jnz 112f
23144+ mov $__KERNEL_DS,%edi
23145+ mov %edi,%ss
23146+ jmp 111f
23147+112: cmp $1,%dil
23148+ jz 113f
23149+ ud2
23150+113: sub $4097,%rdi
23151+ bts $63,%rdi
23152+ SET_RDI_INTO_CR3
23153+ mov $__UDEREF_KERNEL_DS,%edi
23154+ mov %edi,%ss
23155+111:
23156+#endif
23157+
23158+#ifdef CONFIG_PARAVIRT
23159+ PV_RESTORE_REGS(CLBR_RDI)
23160+#endif
23161+
23162+ popq %rdi
23163+ pax_force_retaddr
23164+ retq
23165+
23166+#ifdef CONFIG_PAX_KERNEXEC
23167+2: ljmpq __KERNEL_CS,1b
23168+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23169+4: SET_RDI_INTO_CR0
23170+ jmp 1b
23171+#endif
23172+ENDPROC(pax_enter_kernel)
23173+
23174+ENTRY(pax_exit_kernel)
23175+ pushq %rdi
23176+
23177+#ifdef CONFIG_PARAVIRT
23178+ PV_SAVE_REGS(CLBR_RDI)
23179+#endif
23180+
23181+#ifdef CONFIG_PAX_KERNEXEC
23182+ mov %cs,%rdi
23183+ cmp $__KERNEXEC_KERNEL_CS,%edi
23184+ jz 2f
23185+ GET_CR0_INTO_RDI
23186+ bts $16,%rdi
23187+ jnc 4f
23188+1:
23189+#endif
23190+
23191+#ifdef CONFIG_PAX_MEMORY_UDEREF
23192+ 661: jmp 111f
23193+ .pushsection .altinstr_replacement, "a"
23194+ 662: ASM_NOP2
23195+ .popsection
23196+ .pushsection .altinstructions, "a"
23197+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23198+ .popsection
23199+ mov %ss,%edi
23200+ cmp $__UDEREF_KERNEL_DS,%edi
23201+ jnz 111f
23202+ GET_CR3_INTO_RDI
23203+ cmp $0,%dil
23204+ jz 112f
23205+ ud2
23206+112: add $4097,%rdi
23207+ bts $63,%rdi
23208+ SET_RDI_INTO_CR3
23209+ mov $__KERNEL_DS,%edi
23210+ mov %edi,%ss
23211+111:
23212+#endif
23213+
23214+#ifdef CONFIG_PARAVIRT
23215+ PV_RESTORE_REGS(CLBR_RDI);
23216+#endif
23217+
23218+ popq %rdi
23219+ pax_force_retaddr
23220+ retq
23221+
23222+#ifdef CONFIG_PAX_KERNEXEC
23223+2: GET_CR0_INTO_RDI
23224+ btr $16,%rdi
23225+ jnc 4f
23226+ ljmpq __KERNEL_CS,3f
23227+3: SET_RDI_INTO_CR0
23228+ jmp 1b
23229+4: ud2
23230+ jmp 4b
23231+#endif
23232+ENDPROC(pax_exit_kernel)
23233+#endif
23234+
23235+ .macro pax_enter_kernel_user
23236+ pax_set_fptr_mask
23237+#ifdef CONFIG_PAX_MEMORY_UDEREF
23238+ call pax_enter_kernel_user
23239+#endif
23240+ .endm
23241+
23242+ .macro pax_exit_kernel_user
23243+#ifdef CONFIG_PAX_MEMORY_UDEREF
23244+ call pax_exit_kernel_user
23245+#endif
23246+#ifdef CONFIG_PAX_RANDKSTACK
23247+ pushq %rax
23248+ pushq %r11
23249+ call pax_randomize_kstack
23250+ popq %r11
23251+ popq %rax
23252+#endif
23253+ .endm
23254+
23255+#ifdef CONFIG_PAX_MEMORY_UDEREF
23256+ENTRY(pax_enter_kernel_user)
23257+ pushq %rdi
23258+ pushq %rbx
23259+
23260+#ifdef CONFIG_PARAVIRT
23261+ PV_SAVE_REGS(CLBR_RDI)
23262+#endif
23263+
23264+ 661: jmp 111f
23265+ .pushsection .altinstr_replacement, "a"
23266+ 662: ASM_NOP2
23267+ .popsection
23268+ .pushsection .altinstructions, "a"
23269+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23270+ .popsection
23271+ GET_CR3_INTO_RDI
23272+ cmp $1,%dil
23273+ jnz 4f
23274+ sub $4097,%rdi
23275+ bts $63,%rdi
23276+ SET_RDI_INTO_CR3
23277+ jmp 3f
23278+111:
23279+
23280+ GET_CR3_INTO_RDI
23281+ mov %rdi,%rbx
23282+ add $__START_KERNEL_map,%rbx
23283+ sub phys_base(%rip),%rbx
23284+
23285+#ifdef CONFIG_PARAVIRT
23286+ cmpl $0, pv_info+PARAVIRT_enabled
23287+ jz 1f
23288+ pushq %rdi
23289+ i = 0
23290+ .rept USER_PGD_PTRS
23291+ mov i*8(%rbx),%rsi
23292+ mov $0,%sil
23293+ lea i*8(%rbx),%rdi
23294+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23295+ i = i + 1
23296+ .endr
23297+ popq %rdi
23298+ jmp 2f
23299+1:
23300+#endif
23301+
23302+ i = 0
23303+ .rept USER_PGD_PTRS
23304+ movb $0,i*8(%rbx)
23305+ i = i + 1
23306+ .endr
23307+
23308+2: SET_RDI_INTO_CR3
23309+
23310+#ifdef CONFIG_PAX_KERNEXEC
23311+ GET_CR0_INTO_RDI
23312+ bts $16,%rdi
23313+ SET_RDI_INTO_CR0
23314+#endif
23315+
23316+3:
23317+
23318+#ifdef CONFIG_PARAVIRT
23319+ PV_RESTORE_REGS(CLBR_RDI)
23320+#endif
23321+
23322+ popq %rbx
23323+ popq %rdi
23324+ pax_force_retaddr
23325+ retq
23326+4: ud2
23327+ENDPROC(pax_enter_kernel_user)
23328+
23329+ENTRY(pax_exit_kernel_user)
23330+ pushq %rdi
23331+ pushq %rbx
23332+
23333+#ifdef CONFIG_PARAVIRT
23334+ PV_SAVE_REGS(CLBR_RDI)
23335+#endif
23336+
23337+ GET_CR3_INTO_RDI
23338+ 661: jmp 1f
23339+ .pushsection .altinstr_replacement, "a"
23340+ 662: ASM_NOP2
23341+ .popsection
23342+ .pushsection .altinstructions, "a"
23343+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23344+ .popsection
23345+ cmp $0,%dil
23346+ jnz 3f
23347+ add $4097,%rdi
23348+ bts $63,%rdi
23349+ SET_RDI_INTO_CR3
23350+ jmp 2f
23351+1:
23352+
23353+ mov %rdi,%rbx
23354+
23355+#ifdef CONFIG_PAX_KERNEXEC
23356+ GET_CR0_INTO_RDI
23357+ btr $16,%rdi
23358+ jnc 3f
23359+ SET_RDI_INTO_CR0
23360+#endif
23361+
23362+ add $__START_KERNEL_map,%rbx
23363+ sub phys_base(%rip),%rbx
23364+
23365+#ifdef CONFIG_PARAVIRT
23366+ cmpl $0, pv_info+PARAVIRT_enabled
23367+ jz 1f
23368+ i = 0
23369+ .rept USER_PGD_PTRS
23370+ mov i*8(%rbx),%rsi
23371+ mov $0x67,%sil
23372+ lea i*8(%rbx),%rdi
23373+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23374+ i = i + 1
23375+ .endr
23376+ jmp 2f
23377+1:
23378+#endif
23379+
23380+ i = 0
23381+ .rept USER_PGD_PTRS
23382+ movb $0x67,i*8(%rbx)
23383+ i = i + 1
23384+ .endr
23385+2:
23386+
23387+#ifdef CONFIG_PARAVIRT
23388+ PV_RESTORE_REGS(CLBR_RDI)
23389+#endif
23390+
23391+ popq %rbx
23392+ popq %rdi
23393+ pax_force_retaddr
23394+ retq
23395+3: ud2
23396+ENDPROC(pax_exit_kernel_user)
23397+#endif
23398+
23399+ .macro pax_enter_kernel_nmi
23400+ pax_set_fptr_mask
23401+
23402+#ifdef CONFIG_PAX_KERNEXEC
23403+ GET_CR0_INTO_RDI
23404+ bts $16,%rdi
23405+ jc 110f
23406+ SET_RDI_INTO_CR0
23407+ or $2,%ebx
23408+110:
23409+#endif
23410+
23411+#ifdef CONFIG_PAX_MEMORY_UDEREF
23412+ 661: jmp 111f
23413+ .pushsection .altinstr_replacement, "a"
23414+ 662: ASM_NOP2
23415+ .popsection
23416+ .pushsection .altinstructions, "a"
23417+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23418+ .popsection
23419+ GET_CR3_INTO_RDI
23420+ cmp $0,%dil
23421+ jz 111f
23422+ sub $4097,%rdi
23423+ or $4,%ebx
23424+ bts $63,%rdi
23425+ SET_RDI_INTO_CR3
23426+ mov $__UDEREF_KERNEL_DS,%edi
23427+ mov %edi,%ss
23428+111:
23429+#endif
23430+ .endm
23431+
23432+ .macro pax_exit_kernel_nmi
23433+#ifdef CONFIG_PAX_KERNEXEC
23434+ btr $1,%ebx
23435+ jnc 110f
23436+ GET_CR0_INTO_RDI
23437+ btr $16,%rdi
23438+ SET_RDI_INTO_CR0
23439+110:
23440+#endif
23441+
23442+#ifdef CONFIG_PAX_MEMORY_UDEREF
23443+ btr $2,%ebx
23444+ jnc 111f
23445+ GET_CR3_INTO_RDI
23446+ add $4097,%rdi
23447+ bts $63,%rdi
23448+ SET_RDI_INTO_CR3
23449+ mov $__KERNEL_DS,%edi
23450+ mov %edi,%ss
23451+111:
23452+#endif
23453+ .endm
23454+
23455+ .macro pax_erase_kstack
23456+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23457+ call pax_erase_kstack
23458+#endif
23459+ .endm
23460+
23461+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23462+ENTRY(pax_erase_kstack)
23463+ pushq %rdi
23464+ pushq %rcx
23465+ pushq %rax
23466+ pushq %r11
23467+
23468+ GET_THREAD_INFO(%r11)
23469+ mov TI_lowest_stack(%r11), %rdi
23470+ mov $-0xBEEF, %rax
23471+ std
23472+
23473+1: mov %edi, %ecx
23474+ and $THREAD_SIZE_asm - 1, %ecx
23475+ shr $3, %ecx
23476+ repne scasq
23477+ jecxz 2f
23478+
23479+ cmp $2*8, %ecx
23480+ jc 2f
23481+
23482+ mov $2*8, %ecx
23483+ repe scasq
23484+ jecxz 2f
23485+ jne 1b
23486+
23487+2: cld
23488+ or $2*8, %rdi
23489+ mov %esp, %ecx
23490+ sub %edi, %ecx
23491+
23492+ cmp $THREAD_SIZE_asm, %rcx
23493+ jb 3f
23494+ ud2
23495+3:
23496+
23497+ shr $3, %ecx
23498+ rep stosq
23499+
23500+ mov TI_task_thread_sp0(%r11), %rdi
23501+ sub $256, %rdi
23502+ mov %rdi, TI_lowest_stack(%r11)
23503+
23504+ popq %r11
23505+ popq %rax
23506+ popq %rcx
23507+ popq %rdi
23508+ pax_force_retaddr
23509+ ret
23510+ENDPROC(pax_erase_kstack)
23511+#endif
23512
23513 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23514 #ifdef CONFIG_TRACE_IRQFLAGS
23515@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23516 .endm
23517
23518 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23519- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23520+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23521 jnc 1f
23522 TRACE_IRQS_ON_DEBUG
23523 1:
23524@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
23525 movq \tmp,R11+\offset(%rsp)
23526 .endm
23527
23528- .macro FAKE_STACK_FRAME child_rip
23529- /* push in order ss, rsp, eflags, cs, rip */
23530- xorl %eax, %eax
23531- pushq_cfi $__KERNEL_DS /* ss */
23532- /*CFI_REL_OFFSET ss,0*/
23533- pushq_cfi %rax /* rsp */
23534- CFI_REL_OFFSET rsp,0
23535- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23536- /*CFI_REL_OFFSET rflags,0*/
23537- pushq_cfi $__KERNEL_CS /* cs */
23538- /*CFI_REL_OFFSET cs,0*/
23539- pushq_cfi \child_rip /* rip */
23540- CFI_REL_OFFSET rip,0
23541- pushq_cfi %rax /* orig rax */
23542- .endm
23543-
23544- .macro UNFAKE_STACK_FRAME
23545- addq $8*6, %rsp
23546- CFI_ADJUST_CFA_OFFSET -(6*8)
23547- .endm
23548-
23549 /*
23550 * initial frame state for interrupts (and exceptions without error code)
23551 */
23552@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23553 /* save partial stack frame */
23554 .macro SAVE_ARGS_IRQ
23555 cld
23556- /* start from rbp in pt_regs and jump over */
23557- movq_cfi rdi, (RDI-RBP)
23558- movq_cfi rsi, (RSI-RBP)
23559- movq_cfi rdx, (RDX-RBP)
23560- movq_cfi rcx, (RCX-RBP)
23561- movq_cfi rax, (RAX-RBP)
23562- movq_cfi r8, (R8-RBP)
23563- movq_cfi r9, (R9-RBP)
23564- movq_cfi r10, (R10-RBP)
23565- movq_cfi r11, (R11-RBP)
23566+ /* start from r15 in pt_regs and jump over */
23567+ movq_cfi rdi, RDI
23568+ movq_cfi rsi, RSI
23569+ movq_cfi rdx, RDX
23570+ movq_cfi rcx, RCX
23571+ movq_cfi rax, RAX
23572+ movq_cfi r8, R8
23573+ movq_cfi r9, R9
23574+ movq_cfi r10, R10
23575+ movq_cfi r11, R11
23576+ movq_cfi r12, R12
23577
23578 /* Save rbp so that we can unwind from get_irq_regs() */
23579- movq_cfi rbp, 0
23580+ movq_cfi rbp, RBP
23581
23582 /* Save previous stack value */
23583 movq %rsp, %rsi
23584
23585- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23586- testl $3, CS-RBP(%rsi)
23587+ movq %rsp,%rdi /* arg1 for handler */
23588+ testb $3, CS(%rsi)
23589 je 1f
23590 SWAPGS
23591 /*
23592@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23593 0x06 /* DW_OP_deref */, \
23594 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23595 0x22 /* DW_OP_plus */
23596+
23597+#ifdef CONFIG_PAX_MEMORY_UDEREF
23598+ testb $3, CS(%rdi)
23599+ jnz 1f
23600+ pax_enter_kernel
23601+ jmp 2f
23602+1: pax_enter_kernel_user
23603+2:
23604+#else
23605+ pax_enter_kernel
23606+#endif
23607+
23608 /* We entered an interrupt context - irqs are off: */
23609 TRACE_IRQS_OFF
23610 .endm
23611@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
23612 js 1f /* negative -> in kernel */
23613 SWAPGS
23614 xorl %ebx,%ebx
23615-1: ret
23616+1:
23617+#ifdef CONFIG_PAX_MEMORY_UDEREF
23618+ testb $3, CS+8(%rsp)
23619+ jnz 1f
23620+ pax_enter_kernel
23621+ jmp 2f
23622+1: pax_enter_kernel_user
23623+2:
23624+#else
23625+ pax_enter_kernel
23626+#endif
23627+ pax_force_retaddr
23628+ ret
23629 CFI_ENDPROC
23630-END(save_paranoid)
23631+ENDPROC(save_paranoid)
23632+
23633+ENTRY(save_paranoid_nmi)
23634+ XCPT_FRAME 1 RDI+8
23635+ cld
23636+ movq_cfi rdi, RDI+8
23637+ movq_cfi rsi, RSI+8
23638+ movq_cfi rdx, RDX+8
23639+ movq_cfi rcx, RCX+8
23640+ movq_cfi rax, RAX+8
23641+ movq_cfi r8, R8+8
23642+ movq_cfi r9, R9+8
23643+ movq_cfi r10, R10+8
23644+ movq_cfi r11, R11+8
23645+ movq_cfi rbx, RBX+8
23646+ movq_cfi rbp, RBP+8
23647+ movq_cfi r12, R12+8
23648+ movq_cfi r13, R13+8
23649+ movq_cfi r14, R14+8
23650+ movq_cfi r15, R15+8
23651+ movl $1,%ebx
23652+ movl $MSR_GS_BASE,%ecx
23653+ rdmsr
23654+ testl %edx,%edx
23655+ js 1f /* negative -> in kernel */
23656+ SWAPGS
23657+ xorl %ebx,%ebx
23658+1: pax_enter_kernel_nmi
23659+ pax_force_retaddr
23660+ ret
23661+ CFI_ENDPROC
23662+ENDPROC(save_paranoid_nmi)
23663
23664 /*
23665 * A newly forked process directly context switches into this address.
23666@@ -331,7 +793,7 @@ ENTRY(ret_from_fork)
23667
23668 RESTORE_REST
23669
23670- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23671+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23672 jz 1f
23673
23674 /*
23675@@ -344,15 +806,13 @@ ENTRY(ret_from_fork)
23676 jmp int_ret_from_sys_call
23677
23678 1:
23679- subq $REST_SKIP, %rsp # leave space for volatiles
23680- CFI_ADJUST_CFA_OFFSET REST_SKIP
23681 movq %rbp, %rdi
23682 call *%rbx
23683 movl $0, RAX(%rsp)
23684 RESTORE_REST
23685 jmp int_ret_from_sys_call
23686 CFI_ENDPROC
23687-END(ret_from_fork)
23688+ENDPROC(ret_from_fork)
23689
23690 /*
23691 * System call entry. Up to 6 arguments in registers are supported.
23692@@ -389,7 +849,7 @@ END(ret_from_fork)
23693 ENTRY(system_call)
23694 CFI_STARTPROC simple
23695 CFI_SIGNAL_FRAME
23696- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23697+ CFI_DEF_CFA rsp,0
23698 CFI_REGISTER rip,rcx
23699 /*CFI_REGISTER rflags,r11*/
23700 SWAPGS_UNSAFE_STACK
23701@@ -402,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
23702
23703 movq %rsp,PER_CPU_VAR(old_rsp)
23704 movq PER_CPU_VAR(kernel_stack),%rsp
23705+ SAVE_ARGS 8*6, 0, rax_enosys=1
23706+ pax_enter_kernel_user
23707+
23708+#ifdef CONFIG_PAX_RANDKSTACK
23709+ pax_erase_kstack
23710+#endif
23711+
23712 /*
23713 * No need to follow this irqs off/on section - it's straight
23714 * and short:
23715 */
23716 ENABLE_INTERRUPTS(CLBR_NONE)
23717- SAVE_ARGS 8, 0, rax_enosys=1
23718 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23719 movq %rcx,RIP-ARGOFFSET(%rsp)
23720 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23721- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23722+ GET_THREAD_INFO(%rcx)
23723+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23724 jnz tracesys
23725 system_call_fastpath:
23726 #if __SYSCALL_MASK == ~0
23727@@ -435,10 +902,13 @@ sysret_check:
23728 LOCKDEP_SYS_EXIT
23729 DISABLE_INTERRUPTS(CLBR_NONE)
23730 TRACE_IRQS_OFF
23731- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23732+ GET_THREAD_INFO(%rcx)
23733+ movl TI_flags(%rcx),%edx
23734 andl %edi,%edx
23735 jnz sysret_careful
23736 CFI_REMEMBER_STATE
23737+ pax_exit_kernel_user
23738+ pax_erase_kstack
23739 /*
23740 * sysretq will re-enable interrupts:
23741 */
23742@@ -497,12 +967,15 @@ sysret_audit:
23743
23744 /* Do syscall tracing */
23745 tracesys:
23746- leaq -REST_SKIP(%rsp), %rdi
23747+ movq %rsp, %rdi
23748 movq $AUDIT_ARCH_X86_64, %rsi
23749 call syscall_trace_enter_phase1
23750 test %rax, %rax
23751 jnz tracesys_phase2 /* if needed, run the slow path */
23752- LOAD_ARGS 0 /* else restore clobbered regs */
23753+
23754+ pax_erase_kstack
23755+
23756+ LOAD_ARGS /* else restore clobbered regs */
23757 jmp system_call_fastpath /* and return to the fast path */
23758
23759 tracesys_phase2:
23760@@ -513,12 +986,14 @@ tracesys_phase2:
23761 movq %rax,%rdx
23762 call syscall_trace_enter_phase2
23763
23764+ pax_erase_kstack
23765+
23766 /*
23767 * Reload arg registers from stack in case ptrace changed them.
23768 * We don't reload %rax because syscall_trace_entry_phase2() returned
23769 * the value it wants us to use in the table lookup.
23770 */
23771- LOAD_ARGS ARGOFFSET, 1
23772+ LOAD_ARGS 1
23773 RESTORE_REST
23774 #if __SYSCALL_MASK == ~0
23775 cmpq $__NR_syscall_max,%rax
23776@@ -548,7 +1023,9 @@ GLOBAL(int_with_check)
23777 andl %edi,%edx
23778 jnz int_careful
23779 andl $~TS_COMPAT,TI_status(%rcx)
23780- jmp retint_swapgs
23781+ pax_exit_kernel_user
23782+ pax_erase_kstack
23783+ jmp retint_swapgs_pax
23784
23785 /* Either reschedule or signal or syscall exit tracking needed. */
23786 /* First do a reschedule test. */
23787@@ -594,7 +1071,7 @@ int_restore_rest:
23788 TRACE_IRQS_OFF
23789 jmp int_with_check
23790 CFI_ENDPROC
23791-END(system_call)
23792+ENDPROC(system_call)
23793
23794 .macro FORK_LIKE func
23795 ENTRY(stub_\func)
23796@@ -607,9 +1084,10 @@ ENTRY(stub_\func)
23797 DEFAULT_FRAME 0 8 /* offset 8: return address */
23798 call sys_\func
23799 RESTORE_TOP_OF_STACK %r11, 8
23800- ret $REST_SKIP /* pop extended registers */
23801+ pax_force_retaddr
23802+ ret
23803 CFI_ENDPROC
23804-END(stub_\func)
23805+ENDPROC(stub_\func)
23806 .endm
23807
23808 .macro FIXED_FRAME label,func
23809@@ -619,9 +1097,10 @@ ENTRY(\label)
23810 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23811 call \func
23812 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23813+ pax_force_retaddr
23814 ret
23815 CFI_ENDPROC
23816-END(\label)
23817+ENDPROC(\label)
23818 .endm
23819
23820 FORK_LIKE clone
23821@@ -629,19 +1108,6 @@ END(\label)
23822 FORK_LIKE vfork
23823 FIXED_FRAME stub_iopl, sys_iopl
23824
23825-ENTRY(ptregscall_common)
23826- DEFAULT_FRAME 1 8 /* offset 8: return address */
23827- RESTORE_TOP_OF_STACK %r11, 8
23828- movq_cfi_restore R15+8, r15
23829- movq_cfi_restore R14+8, r14
23830- movq_cfi_restore R13+8, r13
23831- movq_cfi_restore R12+8, r12
23832- movq_cfi_restore RBP+8, rbp
23833- movq_cfi_restore RBX+8, rbx
23834- ret $REST_SKIP /* pop extended registers */
23835- CFI_ENDPROC
23836-END(ptregscall_common)
23837-
23838 ENTRY(stub_execve)
23839 CFI_STARTPROC
23840 addq $8, %rsp
23841@@ -653,7 +1119,7 @@ ENTRY(stub_execve)
23842 RESTORE_REST
23843 jmp int_ret_from_sys_call
23844 CFI_ENDPROC
23845-END(stub_execve)
23846+ENDPROC(stub_execve)
23847
23848 ENTRY(stub_execveat)
23849 CFI_STARTPROC
23850@@ -667,7 +1133,7 @@ ENTRY(stub_execveat)
23851 RESTORE_REST
23852 jmp int_ret_from_sys_call
23853 CFI_ENDPROC
23854-END(stub_execveat)
23855+ENDPROC(stub_execveat)
23856
23857 /*
23858 * sigreturn is special because it needs to restore all registers on return.
23859@@ -684,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
23860 RESTORE_REST
23861 jmp int_ret_from_sys_call
23862 CFI_ENDPROC
23863-END(stub_rt_sigreturn)
23864+ENDPROC(stub_rt_sigreturn)
23865
23866 #ifdef CONFIG_X86_X32_ABI
23867 ENTRY(stub_x32_rt_sigreturn)
23868@@ -698,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
23869 RESTORE_REST
23870 jmp int_ret_from_sys_call
23871 CFI_ENDPROC
23872-END(stub_x32_rt_sigreturn)
23873+ENDPROC(stub_x32_rt_sigreturn)
23874
23875 ENTRY(stub_x32_execve)
23876 CFI_STARTPROC
23877@@ -763,7 +1229,7 @@ vector=vector+1
23878 2: jmp common_interrupt
23879 .endr
23880 CFI_ENDPROC
23881-END(irq_entries_start)
23882+ENDPROC(irq_entries_start)
23883
23884 .previous
23885 END(interrupt)
23886@@ -780,8 +1246,8 @@ END(interrupt)
23887 /* 0(%rsp): ~(interrupt number) */
23888 .macro interrupt func
23889 /* reserve pt_regs for scratch regs and rbp */
23890- subq $ORIG_RAX-RBP, %rsp
23891- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23892+ subq $ORIG_RAX, %rsp
23893+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23894 SAVE_ARGS_IRQ
23895 call \func
23896 .endm
23897@@ -804,14 +1270,14 @@ ret_from_intr:
23898
23899 /* Restore saved previous stack */
23900 popq %rsi
23901- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23902- leaq ARGOFFSET-RBP(%rsi), %rsp
23903+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23904+ movq %rsi, %rsp
23905 CFI_DEF_CFA_REGISTER rsp
23906- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23907+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23908
23909 exit_intr:
23910 GET_THREAD_INFO(%rcx)
23911- testl $3,CS-ARGOFFSET(%rsp)
23912+ testb $3,CS-ARGOFFSET(%rsp)
23913 je retint_kernel
23914
23915 /* Interrupt came from user space */
23916@@ -833,12 +1299,35 @@ retint_swapgs: /* return to user-space */
23917 * The iretq could re-enable interrupts:
23918 */
23919 DISABLE_INTERRUPTS(CLBR_ANY)
23920+ pax_exit_kernel_user
23921+retint_swapgs_pax:
23922 TRACE_IRQS_IRETQ
23923 SWAPGS
23924 jmp restore_args
23925
23926 retint_restore_args: /* return to kernel space */
23927 DISABLE_INTERRUPTS(CLBR_ANY)
23928+ pax_exit_kernel
23929+
23930+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23931+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23932+ * namely calling EFI runtime services with a phys mapping. We're
23933+ * starting off with NOPs and patch in the real instrumentation
23934+ * (BTS/OR) before starting any userland process; even before starting
23935+ * up the APs.
23936+ */
23937+ .pushsection .altinstr_replacement, "a"
23938+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23939+ 602:
23940+ .popsection
23941+ 603: .fill 602b-601b, 1, 0x90
23942+ .pushsection .altinstructions, "a"
23943+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23944+ .popsection
23945+#else
23946+ pax_force_retaddr (RIP-ARGOFFSET)
23947+#endif
23948+
23949 /*
23950 * The iretq could re-enable interrupts:
23951 */
23952@@ -876,15 +1365,15 @@ native_irq_return_ldt:
23953 SWAPGS
23954 movq PER_CPU_VAR(espfix_waddr),%rdi
23955 movq %rax,(0*8)(%rdi) /* RAX */
23956- movq (2*8)(%rsp),%rax /* RIP */
23957+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23958 movq %rax,(1*8)(%rdi)
23959- movq (3*8)(%rsp),%rax /* CS */
23960+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23961 movq %rax,(2*8)(%rdi)
23962- movq (4*8)(%rsp),%rax /* RFLAGS */
23963+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23964 movq %rax,(3*8)(%rdi)
23965- movq (6*8)(%rsp),%rax /* SS */
23966+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23967 movq %rax,(5*8)(%rdi)
23968- movq (5*8)(%rsp),%rax /* RSP */
23969+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23970 movq %rax,(4*8)(%rdi)
23971 andl $0xffff0000,%eax
23972 popq_cfi %rdi
23973@@ -938,7 +1427,7 @@ ENTRY(retint_kernel)
23974 jmp exit_intr
23975 #endif
23976 CFI_ENDPROC
23977-END(common_interrupt)
23978+ENDPROC(common_interrupt)
23979
23980 /*
23981 * APIC interrupts.
23982@@ -952,7 +1441,7 @@ ENTRY(\sym)
23983 interrupt \do_sym
23984 jmp ret_from_intr
23985 CFI_ENDPROC
23986-END(\sym)
23987+ENDPROC(\sym)
23988 .endm
23989
23990 #ifdef CONFIG_TRACING
23991@@ -1025,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23992 /*
23993 * Exception entry points.
23994 */
23995-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23996+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23997
23998 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23999 ENTRY(\sym)
24000@@ -1076,6 +1565,12 @@ ENTRY(\sym)
24001 .endif
24002
24003 .if \shift_ist != -1
24004+#ifdef CONFIG_SMP
24005+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24006+ lea init_tss(%r13), %r13
24007+#else
24008+ lea init_tss(%rip), %r13
24009+#endif
24010 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24011 .endif
24012
24013@@ -1092,7 +1587,7 @@ ENTRY(\sym)
24014 .endif
24015
24016 CFI_ENDPROC
24017-END(\sym)
24018+ENDPROC(\sym)
24019 .endm
24020
24021 #ifdef CONFIG_TRACING
24022@@ -1133,9 +1628,10 @@ gs_change:
24023 2: mfence /* workaround */
24024 SWAPGS
24025 popfq_cfi
24026+ pax_force_retaddr
24027 ret
24028 CFI_ENDPROC
24029-END(native_load_gs_index)
24030+ENDPROC(native_load_gs_index)
24031
24032 _ASM_EXTABLE(gs_change,bad_gs)
24033 .section .fixup,"ax"
24034@@ -1163,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
24035 CFI_DEF_CFA_REGISTER rsp
24036 CFI_ADJUST_CFA_OFFSET -8
24037 decl PER_CPU_VAR(irq_count)
24038+ pax_force_retaddr
24039 ret
24040 CFI_ENDPROC
24041-END(do_softirq_own_stack)
24042+ENDPROC(do_softirq_own_stack)
24043
24044 #ifdef CONFIG_XEN
24045 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24046@@ -1203,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24047 decl PER_CPU_VAR(irq_count)
24048 jmp error_exit
24049 CFI_ENDPROC
24050-END(xen_do_hypervisor_callback)
24051+ENDPROC(xen_do_hypervisor_callback)
24052
24053 /*
24054 * Hypervisor uses this for application faults while it executes.
24055@@ -1262,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
24056 SAVE_ALL
24057 jmp error_exit
24058 CFI_ENDPROC
24059-END(xen_failsafe_callback)
24060+ENDPROC(xen_failsafe_callback)
24061
24062 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24063 xen_hvm_callback_vector xen_evtchn_do_upcall
24064@@ -1309,18 +1806,33 @@ ENTRY(paranoid_exit)
24065 DEFAULT_FRAME
24066 DISABLE_INTERRUPTS(CLBR_NONE)
24067 TRACE_IRQS_OFF_DEBUG
24068- testl %ebx,%ebx /* swapgs needed? */
24069+ testl $1,%ebx /* swapgs needed? */
24070 jnz paranoid_restore
24071- testl $3,CS(%rsp)
24072+ testb $3,CS(%rsp)
24073 jnz paranoid_userspace
24074+#ifdef CONFIG_PAX_MEMORY_UDEREF
24075+ pax_exit_kernel
24076+ TRACE_IRQS_IRETQ 0
24077+ SWAPGS_UNSAFE_STACK
24078+ RESTORE_ALL 8
24079+ pax_force_retaddr_bts
24080+ jmp irq_return
24081+#endif
24082 paranoid_swapgs:
24083+#ifdef CONFIG_PAX_MEMORY_UDEREF
24084+ pax_exit_kernel_user
24085+#else
24086+ pax_exit_kernel
24087+#endif
24088 TRACE_IRQS_IRETQ 0
24089 SWAPGS_UNSAFE_STACK
24090 RESTORE_ALL 8
24091 jmp irq_return
24092 paranoid_restore:
24093+ pax_exit_kernel
24094 TRACE_IRQS_IRETQ_DEBUG 0
24095 RESTORE_ALL 8
24096+ pax_force_retaddr_bts
24097 jmp irq_return
24098 paranoid_userspace:
24099 GET_THREAD_INFO(%rcx)
24100@@ -1349,7 +1861,7 @@ paranoid_schedule:
24101 TRACE_IRQS_OFF
24102 jmp paranoid_userspace
24103 CFI_ENDPROC
24104-END(paranoid_exit)
24105+ENDPROC(paranoid_exit)
24106
24107 /*
24108 * Exception entry point. This expects an error code/orig_rax on the stack.
24109@@ -1376,12 +1888,23 @@ ENTRY(error_entry)
24110 movq %r14, R14+8(%rsp)
24111 movq %r15, R15+8(%rsp)
24112 xorl %ebx,%ebx
24113- testl $3,CS+8(%rsp)
24114+ testb $3,CS+8(%rsp)
24115 je error_kernelspace
24116 error_swapgs:
24117 SWAPGS
24118 error_sti:
24119+#ifdef CONFIG_PAX_MEMORY_UDEREF
24120+ testb $3, CS+8(%rsp)
24121+ jnz 1f
24122+ pax_enter_kernel
24123+ jmp 2f
24124+1: pax_enter_kernel_user
24125+2:
24126+#else
24127+ pax_enter_kernel
24128+#endif
24129 TRACE_IRQS_OFF
24130+ pax_force_retaddr
24131 ret
24132
24133 /*
24134@@ -1416,7 +1939,7 @@ error_bad_iret:
24135 decl %ebx /* Return to usergs */
24136 jmp error_sti
24137 CFI_ENDPROC
24138-END(error_entry)
24139+ENDPROC(error_entry)
24140
24141
24142 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24143@@ -1427,7 +1950,7 @@ ENTRY(error_exit)
24144 DISABLE_INTERRUPTS(CLBR_NONE)
24145 TRACE_IRQS_OFF
24146 GET_THREAD_INFO(%rcx)
24147- testl %eax,%eax
24148+ testl $1,%eax
24149 jne retint_kernel
24150 LOCKDEP_SYS_EXIT_IRQ
24151 movl TI_flags(%rcx),%edx
24152@@ -1436,7 +1959,7 @@ ENTRY(error_exit)
24153 jnz retint_careful
24154 jmp retint_swapgs
24155 CFI_ENDPROC
24156-END(error_exit)
24157+ENDPROC(error_exit)
24158
24159 /*
24160 * Test if a given stack is an NMI stack or not.
24161@@ -1494,9 +2017,11 @@ ENTRY(nmi)
24162 * If %cs was not the kernel segment, then the NMI triggered in user
24163 * space, which means it is definitely not nested.
24164 */
24165+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24166+ je 1f
24167 cmpl $__KERNEL_CS, 16(%rsp)
24168 jne first_nmi
24169-
24170+1:
24171 /*
24172 * Check the special variable on the stack to see if NMIs are
24173 * executing.
24174@@ -1530,8 +2055,7 @@ nested_nmi:
24175
24176 1:
24177 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24178- leaq -1*8(%rsp), %rdx
24179- movq %rdx, %rsp
24180+ subq $8, %rsp
24181 CFI_ADJUST_CFA_OFFSET 1*8
24182 leaq -10*8(%rsp), %rdx
24183 pushq_cfi $__KERNEL_DS
24184@@ -1549,6 +2073,7 @@ nested_nmi_out:
24185 CFI_RESTORE rdx
24186
24187 /* No need to check faults here */
24188+# pax_force_retaddr_bts
24189 INTERRUPT_RETURN
24190
24191 CFI_RESTORE_STATE
24192@@ -1645,13 +2170,13 @@ end_repeat_nmi:
24193 subq $ORIG_RAX-R15, %rsp
24194 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24195 /*
24196- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24197+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24198 * as we should not be calling schedule in NMI context.
24199 * Even with normal interrupts enabled. An NMI should not be
24200 * setting NEED_RESCHED or anything that normal interrupts and
24201 * exceptions might do.
24202 */
24203- call save_paranoid
24204+ call save_paranoid_nmi
24205 DEFAULT_FRAME 0
24206
24207 /*
24208@@ -1661,9 +2186,9 @@ end_repeat_nmi:
24209 * NMI itself takes a page fault, the page fault that was preempted
24210 * will read the information from the NMI page fault and not the
24211 * origin fault. Save it off and restore it if it changes.
24212- * Use the r12 callee-saved register.
24213+ * Use the r13 callee-saved register.
24214 */
24215- movq %cr2, %r12
24216+ movq %cr2, %r13
24217
24218 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24219 movq %rsp,%rdi
24220@@ -1672,29 +2197,34 @@ end_repeat_nmi:
24221
24222 /* Did the NMI take a page fault? Restore cr2 if it did */
24223 movq %cr2, %rcx
24224- cmpq %rcx, %r12
24225+ cmpq %rcx, %r13
24226 je 1f
24227- movq %r12, %cr2
24228+ movq %r13, %cr2
24229 1:
24230
24231- testl %ebx,%ebx /* swapgs needed? */
24232+ testl $1,%ebx /* swapgs needed? */
24233 jnz nmi_restore
24234 nmi_swapgs:
24235 SWAPGS_UNSAFE_STACK
24236 nmi_restore:
24237+ pax_exit_kernel_nmi
24238 /* Pop the extra iret frame at once */
24239 RESTORE_ALL 6*8
24240+ testb $3, 8(%rsp)
24241+ jnz 1f
24242+ pax_force_retaddr_bts
24243+1:
24244
24245 /* Clear the NMI executing stack variable */
24246 movq $0, 5*8(%rsp)
24247 jmp irq_return
24248 CFI_ENDPROC
24249-END(nmi)
24250+ENDPROC(nmi)
24251
24252 ENTRY(ignore_sysret)
24253 CFI_STARTPROC
24254 mov $-ENOSYS,%eax
24255 sysret
24256 CFI_ENDPROC
24257-END(ignore_sysret)
24258+ENDPROC(ignore_sysret)
24259
24260diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24261index f5d0730..5bce89c 100644
24262--- a/arch/x86/kernel/espfix_64.c
24263+++ b/arch/x86/kernel/espfix_64.c
24264@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24265 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24266 static void *espfix_pages[ESPFIX_MAX_PAGES];
24267
24268-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24269- __aligned(PAGE_SIZE);
24270+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24271
24272 static unsigned int page_random, slot_random;
24273
24274@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24275 void __init init_espfix_bsp(void)
24276 {
24277 pgd_t *pgd_p;
24278+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24279
24280 /* Install the espfix pud into the kernel page directory */
24281- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24282+ pgd_p = &init_level4_pgt[index];
24283 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24284
24285+#ifdef CONFIG_PAX_PER_CPU_PGD
24286+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24287+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24288+#endif
24289+
24290 /* Randomize the locations */
24291 init_espfix_random();
24292
24293@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24294 set_pte(&pte_p[n*PTE_STRIDE], pte);
24295
24296 /* Job is done for this CPU and any CPU which shares this page */
24297- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24298+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24299
24300 unlock_done:
24301 mutex_unlock(&espfix_init_mutex);
24302diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24303index 8b7b0a5..2395f29 100644
24304--- a/arch/x86/kernel/ftrace.c
24305+++ b/arch/x86/kernel/ftrace.c
24306@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24307 * kernel identity mapping to modify code.
24308 */
24309 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24310- ip = (unsigned long)__va(__pa_symbol(ip));
24311+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24312
24313 return ip;
24314 }
24315@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24316 {
24317 unsigned char replaced[MCOUNT_INSN_SIZE];
24318
24319+ ip = ktla_ktva(ip);
24320+
24321 /*
24322 * Note: Due to modules and __init, code can
24323 * disappear and change, we need to protect against faulting
24324@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24325 unsigned char old[MCOUNT_INSN_SIZE];
24326 int ret;
24327
24328- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24329+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24330
24331 ftrace_update_func = ip;
24332 /* Make sure the breakpoints see the ftrace_update_func update */
24333@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24334 unsigned char replaced[MCOUNT_INSN_SIZE];
24335 unsigned char brk = BREAKPOINT_INSTRUCTION;
24336
24337- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24338+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24339 return -EFAULT;
24340
24341 /* Make sure it is what we expect it to be */
24342diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24343index eda1a86..8f6df48 100644
24344--- a/arch/x86/kernel/head64.c
24345+++ b/arch/x86/kernel/head64.c
24346@@ -67,12 +67,12 @@ again:
24347 pgd = *pgd_p;
24348
24349 /*
24350- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24351- * critical -- __PAGE_OFFSET would point us back into the dynamic
24352+ * The use of __early_va rather than __va here is critical:
24353+ * __va would point us back into the dynamic
24354 * range and we might end up looping forever...
24355 */
24356 if (pgd)
24357- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24358+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24359 else {
24360 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24361 reset_early_page_tables();
24362@@ -82,13 +82,13 @@ again:
24363 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24364 for (i = 0; i < PTRS_PER_PUD; i++)
24365 pud_p[i] = 0;
24366- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24367+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24368 }
24369 pud_p += pud_index(address);
24370 pud = *pud_p;
24371
24372 if (pud)
24373- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24374+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24375 else {
24376 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24377 reset_early_page_tables();
24378@@ -98,7 +98,7 @@ again:
24379 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24380 for (i = 0; i < PTRS_PER_PMD; i++)
24381 pmd_p[i] = 0;
24382- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24383+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24384 }
24385 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24386 pmd_p[pmd_index(address)] = pmd;
24387@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24388 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24389 early_printk("Kernel alive\n");
24390
24391- clear_page(init_level4_pgt);
24392 /* set init_level4_pgt kernel high mapping*/
24393 init_level4_pgt[511] = early_level4_pgt[511];
24394
24395diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24396index f36bd42..0ab4474 100644
24397--- a/arch/x86/kernel/head_32.S
24398+++ b/arch/x86/kernel/head_32.S
24399@@ -26,6 +26,12 @@
24400 /* Physical address */
24401 #define pa(X) ((X) - __PAGE_OFFSET)
24402
24403+#ifdef CONFIG_PAX_KERNEXEC
24404+#define ta(X) (X)
24405+#else
24406+#define ta(X) ((X) - __PAGE_OFFSET)
24407+#endif
24408+
24409 /*
24410 * References to members of the new_cpu_data structure.
24411 */
24412@@ -55,11 +61,7 @@
24413 * and small than max_low_pfn, otherwise will waste some page table entries
24414 */
24415
24416-#if PTRS_PER_PMD > 1
24417-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24418-#else
24419-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24420-#endif
24421+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24422
24423 /* Number of possible pages in the lowmem region */
24424 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24425@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24426 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24427
24428 /*
24429+ * Real beginning of normal "text" segment
24430+ */
24431+ENTRY(stext)
24432+ENTRY(_stext)
24433+
24434+/*
24435 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24436 * %esi points to the real-mode code as a 32-bit pointer.
24437 * CS and DS must be 4 GB flat segments, but we don't depend on
24438@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24439 * can.
24440 */
24441 __HEAD
24442+
24443+#ifdef CONFIG_PAX_KERNEXEC
24444+ jmp startup_32
24445+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24446+.fill PAGE_SIZE-5,1,0xcc
24447+#endif
24448+
24449 ENTRY(startup_32)
24450 movl pa(stack_start),%ecx
24451
24452@@ -106,6 +121,59 @@ ENTRY(startup_32)
24453 2:
24454 leal -__PAGE_OFFSET(%ecx),%esp
24455
24456+#ifdef CONFIG_SMP
24457+ movl $pa(cpu_gdt_table),%edi
24458+ movl $__per_cpu_load,%eax
24459+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24460+ rorl $16,%eax
24461+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24462+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24463+ movl $__per_cpu_end - 1,%eax
24464+ subl $__per_cpu_start,%eax
24465+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24466+#endif
24467+
24468+#ifdef CONFIG_PAX_MEMORY_UDEREF
24469+ movl $NR_CPUS,%ecx
24470+ movl $pa(cpu_gdt_table),%edi
24471+1:
24472+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24473+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24474+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24475+ addl $PAGE_SIZE_asm,%edi
24476+ loop 1b
24477+#endif
24478+
24479+#ifdef CONFIG_PAX_KERNEXEC
24480+ movl $pa(boot_gdt),%edi
24481+ movl $__LOAD_PHYSICAL_ADDR,%eax
24482+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24483+ rorl $16,%eax
24484+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24485+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24486+ rorl $16,%eax
24487+
24488+ ljmp $(__BOOT_CS),$1f
24489+1:
24490+
24491+ movl $NR_CPUS,%ecx
24492+ movl $pa(cpu_gdt_table),%edi
24493+ addl $__PAGE_OFFSET,%eax
24494+1:
24495+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24496+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24497+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24498+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24499+ rorl $16,%eax
24500+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24501+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24502+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24503+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24504+ rorl $16,%eax
24505+ addl $PAGE_SIZE_asm,%edi
24506+ loop 1b
24507+#endif
24508+
24509 /*
24510 * Clear BSS first so that there are no surprises...
24511 */
24512@@ -201,8 +269,11 @@ ENTRY(startup_32)
24513 movl %eax, pa(max_pfn_mapped)
24514
24515 /* Do early initialization of the fixmap area */
24516- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24517- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24518+#ifdef CONFIG_COMPAT_VDSO
24519+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24520+#else
24521+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24522+#endif
24523 #else /* Not PAE */
24524
24525 page_pde_offset = (__PAGE_OFFSET >> 20);
24526@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24527 movl %eax, pa(max_pfn_mapped)
24528
24529 /* Do early initialization of the fixmap area */
24530- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24531- movl %eax,pa(initial_page_table+0xffc)
24532+#ifdef CONFIG_COMPAT_VDSO
24533+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24534+#else
24535+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24536+#endif
24537 #endif
24538
24539 #ifdef CONFIG_PARAVIRT
24540@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24541 cmpl $num_subarch_entries, %eax
24542 jae bad_subarch
24543
24544- movl pa(subarch_entries)(,%eax,4), %eax
24545- subl $__PAGE_OFFSET, %eax
24546- jmp *%eax
24547+ jmp *pa(subarch_entries)(,%eax,4)
24548
24549 bad_subarch:
24550 WEAK(lguest_entry)
24551@@ -261,10 +333,10 @@ WEAK(xen_entry)
24552 __INITDATA
24553
24554 subarch_entries:
24555- .long default_entry /* normal x86/PC */
24556- .long lguest_entry /* lguest hypervisor */
24557- .long xen_entry /* Xen hypervisor */
24558- .long default_entry /* Moorestown MID */
24559+ .long ta(default_entry) /* normal x86/PC */
24560+ .long ta(lguest_entry) /* lguest hypervisor */
24561+ .long ta(xen_entry) /* Xen hypervisor */
24562+ .long ta(default_entry) /* Moorestown MID */
24563 num_subarch_entries = (. - subarch_entries) / 4
24564 .previous
24565 #else
24566@@ -354,6 +426,7 @@ default_entry:
24567 movl pa(mmu_cr4_features),%eax
24568 movl %eax,%cr4
24569
24570+#ifdef CONFIG_X86_PAE
24571 testb $X86_CR4_PAE, %al # check if PAE is enabled
24572 jz enable_paging
24573
24574@@ -382,6 +455,9 @@ default_entry:
24575 /* Make changes effective */
24576 wrmsr
24577
24578+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24579+#endif
24580+
24581 enable_paging:
24582
24583 /*
24584@@ -449,14 +525,20 @@ is486:
24585 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24586 movl %eax,%ss # after changing gdt.
24587
24588- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24589+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24590 movl %eax,%ds
24591 movl %eax,%es
24592
24593 movl $(__KERNEL_PERCPU), %eax
24594 movl %eax,%fs # set this cpu's percpu
24595
24596+#ifdef CONFIG_CC_STACKPROTECTOR
24597 movl $(__KERNEL_STACK_CANARY),%eax
24598+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24599+ movl $(__USER_DS),%eax
24600+#else
24601+ xorl %eax,%eax
24602+#endif
24603 movl %eax,%gs
24604
24605 xorl %eax,%eax # Clear LDT
24606@@ -512,8 +594,11 @@ setup_once:
24607 * relocation. Manually set base address in stack canary
24608 * segment descriptor.
24609 */
24610- movl $gdt_page,%eax
24611+ movl $cpu_gdt_table,%eax
24612 movl $stack_canary,%ecx
24613+#ifdef CONFIG_SMP
24614+ addl $__per_cpu_load,%ecx
24615+#endif
24616 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24617 shrl $16, %ecx
24618 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24619@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24620 cmpl $2,(%esp) # X86_TRAP_NMI
24621 je is_nmi # Ignore NMI
24622
24623- cmpl $2,%ss:early_recursion_flag
24624+ cmpl $1,%ss:early_recursion_flag
24625 je hlt_loop
24626 incl %ss:early_recursion_flag
24627
24628@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24629 pushl (20+6*4)(%esp) /* trapno */
24630 pushl $fault_msg
24631 call printk
24632-#endif
24633 call dump_stack
24634+#endif
24635 hlt_loop:
24636 hlt
24637 jmp hlt_loop
24638@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24639 /* This is the default interrupt "handler" :-) */
24640 ALIGN
24641 ignore_int:
24642- cld
24643 #ifdef CONFIG_PRINTK
24644+ cmpl $2,%ss:early_recursion_flag
24645+ je hlt_loop
24646+ incl %ss:early_recursion_flag
24647+ cld
24648 pushl %eax
24649 pushl %ecx
24650 pushl %edx
24651@@ -617,9 +705,6 @@ ignore_int:
24652 movl $(__KERNEL_DS),%eax
24653 movl %eax,%ds
24654 movl %eax,%es
24655- cmpl $2,early_recursion_flag
24656- je hlt_loop
24657- incl early_recursion_flag
24658 pushl 16(%esp)
24659 pushl 24(%esp)
24660 pushl 32(%esp)
24661@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24662 /*
24663 * BSS section
24664 */
24665-__PAGE_ALIGNED_BSS
24666- .align PAGE_SIZE
24667 #ifdef CONFIG_X86_PAE
24668+.section .initial_pg_pmd,"a",@progbits
24669 initial_pg_pmd:
24670 .fill 1024*KPMDS,4,0
24671 #else
24672+.section .initial_page_table,"a",@progbits
24673 ENTRY(initial_page_table)
24674 .fill 1024,4,0
24675 #endif
24676+.section .initial_pg_fixmap,"a",@progbits
24677 initial_pg_fixmap:
24678 .fill 1024,4,0
24679+.section .empty_zero_page,"a",@progbits
24680 ENTRY(empty_zero_page)
24681 .fill 4096,1,0
24682+.section .swapper_pg_dir,"a",@progbits
24683 ENTRY(swapper_pg_dir)
24684+#ifdef CONFIG_X86_PAE
24685+ .fill 4,8,0
24686+#else
24687 .fill 1024,4,0
24688+#endif
24689
24690 /*
24691 * This starts the data section.
24692 */
24693 #ifdef CONFIG_X86_PAE
24694-__PAGE_ALIGNED_DATA
24695- /* Page-aligned for the benefit of paravirt? */
24696- .align PAGE_SIZE
24697+.section .initial_page_table,"a",@progbits
24698 ENTRY(initial_page_table)
24699 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24700 # if KPMDS == 3
24701@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24702 # error "Kernel PMDs should be 1, 2 or 3"
24703 # endif
24704 .align PAGE_SIZE /* needs to be page-sized too */
24705+
24706+#ifdef CONFIG_PAX_PER_CPU_PGD
24707+ENTRY(cpu_pgd)
24708+ .rept 2*NR_CPUS
24709+ .fill 4,8,0
24710+ .endr
24711+#endif
24712+
24713 #endif
24714
24715 .data
24716 .balign 4
24717 ENTRY(stack_start)
24718- .long init_thread_union+THREAD_SIZE
24719+ .long init_thread_union+THREAD_SIZE-8
24720
24721 __INITRODATA
24722 int_msg:
24723@@ -727,7 +825,7 @@ fault_msg:
24724 * segment size, and 32-bit linear address value:
24725 */
24726
24727- .data
24728+.section .rodata,"a",@progbits
24729 .globl boot_gdt_descr
24730 .globl idt_descr
24731
24732@@ -736,7 +834,7 @@ fault_msg:
24733 .word 0 # 32 bit align gdt_desc.address
24734 boot_gdt_descr:
24735 .word __BOOT_DS+7
24736- .long boot_gdt - __PAGE_OFFSET
24737+ .long pa(boot_gdt)
24738
24739 .word 0 # 32-bit align idt_desc.address
24740 idt_descr:
24741@@ -747,7 +845,7 @@ idt_descr:
24742 .word 0 # 32 bit align gdt_desc.address
24743 ENTRY(early_gdt_descr)
24744 .word GDT_ENTRIES*8-1
24745- .long gdt_page /* Overwritten for secondary CPUs */
24746+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24747
24748 /*
24749 * The boot_gdt must mirror the equivalent in setup.S and is
24750@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24751 .align L1_CACHE_BYTES
24752 ENTRY(boot_gdt)
24753 .fill GDT_ENTRY_BOOT_CS,8,0
24754- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24755- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24756+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24757+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24758+
24759+ .align PAGE_SIZE_asm
24760+ENTRY(cpu_gdt_table)
24761+ .rept NR_CPUS
24762+ .quad 0x0000000000000000 /* NULL descriptor */
24763+ .quad 0x0000000000000000 /* 0x0b reserved */
24764+ .quad 0x0000000000000000 /* 0x13 reserved */
24765+ .quad 0x0000000000000000 /* 0x1b reserved */
24766+
24767+#ifdef CONFIG_PAX_KERNEXEC
24768+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24769+#else
24770+ .quad 0x0000000000000000 /* 0x20 unused */
24771+#endif
24772+
24773+ .quad 0x0000000000000000 /* 0x28 unused */
24774+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24775+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24776+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24777+ .quad 0x0000000000000000 /* 0x4b reserved */
24778+ .quad 0x0000000000000000 /* 0x53 reserved */
24779+ .quad 0x0000000000000000 /* 0x5b reserved */
24780+
24781+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24782+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24783+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24784+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24785+
24786+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24787+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24788+
24789+ /*
24790+ * Segments used for calling PnP BIOS have byte granularity.
24791+ * The code segments and data segments have fixed 64k limits,
24792+ * the transfer segment sizes are set at run time.
24793+ */
24794+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24795+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24796+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24797+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24798+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24799+
24800+ /*
24801+ * The APM segments have byte granularity and their bases
24802+ * are set at run time. All have 64k limits.
24803+ */
24804+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24805+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24806+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24807+
24808+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24809+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24810+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24811+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24812+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24813+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24814+
24815+ /* Be sure this is zeroed to avoid false validations in Xen */
24816+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24817+ .endr
24818diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24819index a468c0a..8b5a879 100644
24820--- a/arch/x86/kernel/head_64.S
24821+++ b/arch/x86/kernel/head_64.S
24822@@ -20,6 +20,8 @@
24823 #include <asm/processor-flags.h>
24824 #include <asm/percpu.h>
24825 #include <asm/nops.h>
24826+#include <asm/cpufeature.h>
24827+#include <asm/alternative-asm.h>
24828
24829 #ifdef CONFIG_PARAVIRT
24830 #include <asm/asm-offsets.h>
24831@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24832 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24833 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24834 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24835+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24836+L3_VMALLOC_START = pud_index(VMALLOC_START)
24837+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24838+L3_VMALLOC_END = pud_index(VMALLOC_END)
24839+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24840+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24841
24842 .text
24843 __HEAD
24844@@ -89,11 +97,24 @@ startup_64:
24845 * Fixup the physical addresses in the page table
24846 */
24847 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24848+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24849+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24850+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24851+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24852+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24853
24854- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24855- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24856+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24857+#ifndef CONFIG_XEN
24858+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24859+#endif
24860+
24861+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24862+
24863+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24864+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24865
24866 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24867+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24868
24869 /*
24870 * Set up the identity mapping for the switchover. These
24871@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24872 * after the boot processor executes this code.
24873 */
24874
24875+ orq $-1, %rbp
24876 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24877 1:
24878
24879- /* Enable PAE mode and PGE */
24880- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24881+ /* Enable PAE mode and PSE/PGE */
24882+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24883 movq %rcx, %cr4
24884
24885 /* Setup early boot stage 4 level pagetables. */
24886@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24887 movl $MSR_EFER, %ecx
24888 rdmsr
24889 btsl $_EFER_SCE, %eax /* Enable System Call */
24890- btl $20,%edi /* No Execute supported? */
24891+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24892 jnc 1f
24893 btsl $_EFER_NX, %eax
24894+ cmpq $-1, %rbp
24895+ je 1f
24896 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24897+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24898+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24899+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24900+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24901+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24902+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24903+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24904 1: wrmsr /* Make changes effective */
24905
24906 /* Setup cr0 */
24907@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
24908 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24909 * address given in m16:64.
24910 */
24911+ pax_set_fptr_mask
24912 movq initial_code(%rip),%rax
24913 pushq $0 # fake return address to stop unwinder
24914 pushq $__KERNEL_CS # set correct cs
24915@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
24916 .quad INIT_PER_CPU_VAR(irq_stack_union)
24917
24918 GLOBAL(stack_start)
24919- .quad init_thread_union+THREAD_SIZE-8
24920+ .quad init_thread_union+THREAD_SIZE-16
24921 .word 0
24922 __FINITDATA
24923
24924@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
24925 call dump_stack
24926 #ifdef CONFIG_KALLSYMS
24927 leaq early_idt_ripmsg(%rip),%rdi
24928- movq 40(%rsp),%rsi # %rip again
24929+ movq 88(%rsp),%rsi # %rip again
24930 call __print_symbol
24931 #endif
24932 #endif /* EARLY_PRINTK */
24933@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
24934 early_recursion_flag:
24935 .long 0
24936
24937+ .section .rodata,"a",@progbits
24938 #ifdef CONFIG_EARLY_PRINTK
24939 early_idt_msg:
24940 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24941@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
24942 NEXT_PAGE(early_dynamic_pgts)
24943 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24944
24945- .data
24946+ .section .rodata,"a",@progbits
24947
24948-#ifndef CONFIG_XEN
24949 NEXT_PAGE(init_level4_pgt)
24950- .fill 512,8,0
24951-#else
24952-NEXT_PAGE(init_level4_pgt)
24953- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24954 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24955 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24956+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24957+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24958+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24959+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24960+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24961+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24962 .org init_level4_pgt + L4_START_KERNEL*8, 0
24963 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24964 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24965
24966+#ifdef CONFIG_PAX_PER_CPU_PGD
24967+NEXT_PAGE(cpu_pgd)
24968+ .rept 2*NR_CPUS
24969+ .fill 512,8,0
24970+ .endr
24971+#endif
24972+
24973 NEXT_PAGE(level3_ident_pgt)
24974 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24975+#ifdef CONFIG_XEN
24976 .fill 511, 8, 0
24977+#else
24978+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24979+ .fill 510,8,0
24980+#endif
24981+
24982+NEXT_PAGE(level3_vmalloc_start_pgt)
24983+ .fill 512,8,0
24984+
24985+NEXT_PAGE(level3_vmalloc_end_pgt)
24986+ .fill 512,8,0
24987+
24988+NEXT_PAGE(level3_vmemmap_pgt)
24989+ .fill L3_VMEMMAP_START,8,0
24990+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24991+
24992 NEXT_PAGE(level2_ident_pgt)
24993- /* Since I easily can, map the first 1G.
24994+ /* Since I easily can, map the first 2G.
24995 * Don't set NX because code runs from these pages.
24996 */
24997- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24998-#endif
24999+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25000
25001 NEXT_PAGE(level3_kernel_pgt)
25002 .fill L3_START_KERNEL,8,0
25003@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25004 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25005 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25006
25007+NEXT_PAGE(level2_vmemmap_pgt)
25008+ .fill 512,8,0
25009+
25010 NEXT_PAGE(level2_kernel_pgt)
25011 /*
25012 * 512 MB kernel mapping. We spend a full page on this pagetable
25013@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25014 NEXT_PAGE(level2_fixmap_pgt)
25015 .fill 506,8,0
25016 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25017- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25018- .fill 5,8,0
25019+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25020+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25021+ .fill 4,8,0
25022
25023 NEXT_PAGE(level1_fixmap_pgt)
25024 .fill 512,8,0
25025
25026+NEXT_PAGE(level1_vsyscall_pgt)
25027+ .fill 512,8,0
25028+
25029 #undef PMDS
25030
25031- .data
25032+ .align PAGE_SIZE
25033+ENTRY(cpu_gdt_table)
25034+ .rept NR_CPUS
25035+ .quad 0x0000000000000000 /* NULL descriptor */
25036+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25037+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25038+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25039+ .quad 0x00cffb000000ffff /* __USER32_CS */
25040+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25041+ .quad 0x00affb000000ffff /* __USER_CS */
25042+
25043+#ifdef CONFIG_PAX_KERNEXEC
25044+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25045+#else
25046+ .quad 0x0 /* unused */
25047+#endif
25048+
25049+ .quad 0,0 /* TSS */
25050+ .quad 0,0 /* LDT */
25051+ .quad 0,0,0 /* three TLS descriptors */
25052+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25053+ /* asm/segment.h:GDT_ENTRIES must match this */
25054+
25055+#ifdef CONFIG_PAX_MEMORY_UDEREF
25056+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25057+#else
25058+ .quad 0x0 /* unused */
25059+#endif
25060+
25061+ /* zero the remaining page */
25062+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25063+ .endr
25064+
25065 .align 16
25066 .globl early_gdt_descr
25067 early_gdt_descr:
25068 .word GDT_ENTRIES*8-1
25069 early_gdt_descr_base:
25070- .quad INIT_PER_CPU_VAR(gdt_page)
25071+ .quad cpu_gdt_table
25072
25073 ENTRY(phys_base)
25074 /* This must match the first entry in level2_kernel_pgt */
25075 .quad 0x0000000000000000
25076
25077 #include "../../x86/xen/xen-head.S"
25078-
25079- __PAGE_ALIGNED_BSS
25080+
25081+ .section .rodata,"a",@progbits
25082 NEXT_PAGE(empty_zero_page)
25083 .skip PAGE_SIZE
25084diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25085index 05fd74f..c3548b1 100644
25086--- a/arch/x86/kernel/i386_ksyms_32.c
25087+++ b/arch/x86/kernel/i386_ksyms_32.c
25088@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25089 EXPORT_SYMBOL(cmpxchg8b_emu);
25090 #endif
25091
25092+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25093+
25094 /* Networking helper routines. */
25095 EXPORT_SYMBOL(csum_partial_copy_generic);
25096+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25097+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25098
25099 EXPORT_SYMBOL(__get_user_1);
25100 EXPORT_SYMBOL(__get_user_2);
25101@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25102 EXPORT_SYMBOL(___preempt_schedule_context);
25103 #endif
25104 #endif
25105+
25106+#ifdef CONFIG_PAX_KERNEXEC
25107+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25108+#endif
25109+
25110+#ifdef CONFIG_PAX_PER_CPU_PGD
25111+EXPORT_SYMBOL(cpu_pgd);
25112+#endif
25113diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25114index a9a4229..6f4d476 100644
25115--- a/arch/x86/kernel/i387.c
25116+++ b/arch/x86/kernel/i387.c
25117@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25118 static inline bool interrupted_user_mode(void)
25119 {
25120 struct pt_regs *regs = get_irq_regs();
25121- return regs && user_mode_vm(regs);
25122+ return regs && user_mode(regs);
25123 }
25124
25125 /*
25126diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25127index e7cc537..67d7372 100644
25128--- a/arch/x86/kernel/i8259.c
25129+++ b/arch/x86/kernel/i8259.c
25130@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25131 static void make_8259A_irq(unsigned int irq)
25132 {
25133 disable_irq_nosync(irq);
25134- io_apic_irqs &= ~(1<<irq);
25135+ io_apic_irqs &= ~(1UL<<irq);
25136 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25137 enable_irq(irq);
25138 }
25139@@ -208,7 +208,7 @@ spurious_8259A_irq:
25140 "spurious 8259A interrupt: IRQ%d.\n", irq);
25141 spurious_irq_mask |= irqmask;
25142 }
25143- atomic_inc(&irq_err_count);
25144+ atomic_inc_unchecked(&irq_err_count);
25145 /*
25146 * Theoretically we do not have to handle this IRQ,
25147 * but in Linux this does not cause problems and is
25148@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25149 /* (slave's support for AEOI in flat mode is to be investigated) */
25150 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25151
25152+ pax_open_kernel();
25153 if (auto_eoi)
25154 /*
25155 * In AEOI mode we just have to mask the interrupt
25156 * when acking.
25157 */
25158- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25159+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25160 else
25161- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25162+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25163+ pax_close_kernel();
25164
25165 udelay(100); /* wait for 8259A to initialize */
25166
25167diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25168index a979b5b..1d6db75 100644
25169--- a/arch/x86/kernel/io_delay.c
25170+++ b/arch/x86/kernel/io_delay.c
25171@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25172 * Quirk table for systems that misbehave (lock up, etc.) if port
25173 * 0x80 is used:
25174 */
25175-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25176+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25177 {
25178 .callback = dmi_io_delay_0xed_port,
25179 .ident = "Compaq Presario V6000",
25180diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25181index 4ddaf66..49d5c18 100644
25182--- a/arch/x86/kernel/ioport.c
25183+++ b/arch/x86/kernel/ioport.c
25184@@ -6,6 +6,7 @@
25185 #include <linux/sched.h>
25186 #include <linux/kernel.h>
25187 #include <linux/capability.h>
25188+#include <linux/security.h>
25189 #include <linux/errno.h>
25190 #include <linux/types.h>
25191 #include <linux/ioport.h>
25192@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25193 return -EINVAL;
25194 if (turn_on && !capable(CAP_SYS_RAWIO))
25195 return -EPERM;
25196+#ifdef CONFIG_GRKERNSEC_IO
25197+ if (turn_on && grsec_disable_privio) {
25198+ gr_handle_ioperm();
25199+ return -ENODEV;
25200+ }
25201+#endif
25202
25203 /*
25204 * If it's the first ioperm() call in this thread's lifetime, set the
25205@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25206 * because the ->io_bitmap_max value must match the bitmap
25207 * contents:
25208 */
25209- tss = &per_cpu(init_tss, get_cpu());
25210+ tss = init_tss + get_cpu();
25211
25212 if (turn_on)
25213 bitmap_clear(t->io_bitmap_ptr, from, num);
25214@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25215 if (level > old) {
25216 if (!capable(CAP_SYS_RAWIO))
25217 return -EPERM;
25218+#ifdef CONFIG_GRKERNSEC_IO
25219+ if (grsec_disable_privio) {
25220+ gr_handle_iopl();
25221+ return -ENODEV;
25222+ }
25223+#endif
25224 }
25225 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25226 t->iopl = level << 12;
25227diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25228index 705ef8d..8672c9d 100644
25229--- a/arch/x86/kernel/irq.c
25230+++ b/arch/x86/kernel/irq.c
25231@@ -22,7 +22,7 @@
25232 #define CREATE_TRACE_POINTS
25233 #include <asm/trace/irq_vectors.h>
25234
25235-atomic_t irq_err_count;
25236+atomic_unchecked_t irq_err_count;
25237
25238 /* Function pointer for generic interrupt vector handling */
25239 void (*x86_platform_ipi_callback)(void) = NULL;
25240@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25241 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25242 seq_puts(p, " Hypervisor callback interrupts\n");
25243 #endif
25244- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25245+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25246 #if defined(CONFIG_X86_IO_APIC)
25247- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25248+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25249 #endif
25250 return 0;
25251 }
25252@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25253
25254 u64 arch_irq_stat(void)
25255 {
25256- u64 sum = atomic_read(&irq_err_count);
25257+ u64 sum = atomic_read_unchecked(&irq_err_count);
25258 return sum;
25259 }
25260
25261diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25262index 63ce838..2ea3e06 100644
25263--- a/arch/x86/kernel/irq_32.c
25264+++ b/arch/x86/kernel/irq_32.c
25265@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25266
25267 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25268
25269+extern void gr_handle_kernel_exploit(void);
25270+
25271 int sysctl_panic_on_stackoverflow __read_mostly;
25272
25273 /* Debugging check for stack overflow: is there less than 1KB free? */
25274@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25275 __asm__ __volatile__("andl %%esp,%0" :
25276 "=r" (sp) : "0" (THREAD_SIZE - 1));
25277
25278- return sp < (sizeof(struct thread_info) + STACK_WARN);
25279+ return sp < STACK_WARN;
25280 }
25281
25282 static void print_stack_overflow(void)
25283 {
25284 printk(KERN_WARNING "low stack detected by irq handler\n");
25285 dump_stack();
25286+ gr_handle_kernel_exploit();
25287 if (sysctl_panic_on_stackoverflow)
25288 panic("low stack detected by irq handler - check messages\n");
25289 }
25290@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25291 static inline int
25292 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25293 {
25294- struct irq_stack *curstk, *irqstk;
25295+ struct irq_stack *irqstk;
25296 u32 *isp, *prev_esp, arg1, arg2;
25297
25298- curstk = (struct irq_stack *) current_stack();
25299 irqstk = __this_cpu_read(hardirq_stack);
25300
25301 /*
25302@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25303 * handler) we can't do that and just have to keep using the
25304 * current stack (which is the irq stack already after all)
25305 */
25306- if (unlikely(curstk == irqstk))
25307+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25308 return 0;
25309
25310- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25311+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25312
25313 /* Save the next esp at the bottom of the stack */
25314 prev_esp = (u32 *)irqstk;
25315 *prev_esp = current_stack_pointer;
25316
25317+#ifdef CONFIG_PAX_MEMORY_UDEREF
25318+ __set_fs(MAKE_MM_SEG(0));
25319+#endif
25320+
25321 if (unlikely(overflow))
25322 call_on_stack(print_stack_overflow, isp);
25323
25324@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25325 : "0" (irq), "1" (desc), "2" (isp),
25326 "D" (desc->handle_irq)
25327 : "memory", "cc", "ecx");
25328+
25329+#ifdef CONFIG_PAX_MEMORY_UDEREF
25330+ __set_fs(current_thread_info()->addr_limit);
25331+#endif
25332+
25333 return 1;
25334 }
25335
25336@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25337 */
25338 void irq_ctx_init(int cpu)
25339 {
25340- struct irq_stack *irqstk;
25341-
25342 if (per_cpu(hardirq_stack, cpu))
25343 return;
25344
25345- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25346- THREADINFO_GFP,
25347- THREAD_SIZE_ORDER));
25348- per_cpu(hardirq_stack, cpu) = irqstk;
25349-
25350- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25351- THREADINFO_GFP,
25352- THREAD_SIZE_ORDER));
25353- per_cpu(softirq_stack, cpu) = irqstk;
25354-
25355- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25356- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25357+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25358+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25359 }
25360
25361 void do_softirq_own_stack(void)
25362 {
25363- struct thread_info *curstk;
25364 struct irq_stack *irqstk;
25365 u32 *isp, *prev_esp;
25366
25367- curstk = current_stack();
25368 irqstk = __this_cpu_read(softirq_stack);
25369
25370 /* build the stack frame on the softirq stack */
25371@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25372 prev_esp = (u32 *)irqstk;
25373 *prev_esp = current_stack_pointer;
25374
25375+#ifdef CONFIG_PAX_MEMORY_UDEREF
25376+ __set_fs(MAKE_MM_SEG(0));
25377+#endif
25378+
25379 call_on_stack(__do_softirq, isp);
25380+
25381+#ifdef CONFIG_PAX_MEMORY_UDEREF
25382+ __set_fs(current_thread_info()->addr_limit);
25383+#endif
25384+
25385 }
25386
25387 bool handle_irq(unsigned irq, struct pt_regs *regs)
25388@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25389 if (unlikely(!desc))
25390 return false;
25391
25392- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25393+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25394 if (unlikely(overflow))
25395 print_stack_overflow();
25396 desc->handle_irq(irq, desc);
25397diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25398index e4b503d..824fce8 100644
25399--- a/arch/x86/kernel/irq_64.c
25400+++ b/arch/x86/kernel/irq_64.c
25401@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25402 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25403 EXPORT_PER_CPU_SYMBOL(irq_regs);
25404
25405+extern void gr_handle_kernel_exploit(void);
25406+
25407 int sysctl_panic_on_stackoverflow;
25408
25409 /*
25410@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25411 u64 estack_top, estack_bottom;
25412 u64 curbase = (u64)task_stack_page(current);
25413
25414- if (user_mode_vm(regs))
25415+ if (user_mode(regs))
25416 return;
25417
25418 if (regs->sp >= curbase + sizeof(struct thread_info) +
25419@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25420 irq_stack_top, irq_stack_bottom,
25421 estack_top, estack_bottom);
25422
25423+ gr_handle_kernel_exploit();
25424+
25425 if (sysctl_panic_on_stackoverflow)
25426 panic("low stack detected by irq handler - check messages\n");
25427 #endif
25428diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25429index 26d5a55..a01160a 100644
25430--- a/arch/x86/kernel/jump_label.c
25431+++ b/arch/x86/kernel/jump_label.c
25432@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25433 * Jump label is enabled for the first time.
25434 * So we expect a default_nop...
25435 */
25436- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25437+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25438 != 0))
25439 bug_at((void *)entry->code, __LINE__);
25440 } else {
25441@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25442 * ...otherwise expect an ideal_nop. Otherwise
25443 * something went horribly wrong.
25444 */
25445- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25446+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25447 != 0))
25448 bug_at((void *)entry->code, __LINE__);
25449 }
25450@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25451 * are converting the default nop to the ideal nop.
25452 */
25453 if (init) {
25454- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25455+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25456 bug_at((void *)entry->code, __LINE__);
25457 } else {
25458 code.jump = 0xe9;
25459 code.offset = entry->target -
25460 (entry->code + JUMP_LABEL_NOP_SIZE);
25461- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25462+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25463 bug_at((void *)entry->code, __LINE__);
25464 }
25465 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25466diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25467index 7ec1d5f..5a7d130 100644
25468--- a/arch/x86/kernel/kgdb.c
25469+++ b/arch/x86/kernel/kgdb.c
25470@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25471 #ifdef CONFIG_X86_32
25472 switch (regno) {
25473 case GDB_SS:
25474- if (!user_mode_vm(regs))
25475+ if (!user_mode(regs))
25476 *(unsigned long *)mem = __KERNEL_DS;
25477 break;
25478 case GDB_SP:
25479- if (!user_mode_vm(regs))
25480+ if (!user_mode(regs))
25481 *(unsigned long *)mem = kernel_stack_pointer(regs);
25482 break;
25483 case GDB_GS:
25484@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25485 bp->attr.bp_addr = breakinfo[breakno].addr;
25486 bp->attr.bp_len = breakinfo[breakno].len;
25487 bp->attr.bp_type = breakinfo[breakno].type;
25488- info->address = breakinfo[breakno].addr;
25489+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25490+ info->address = ktla_ktva(breakinfo[breakno].addr);
25491+ else
25492+ info->address = breakinfo[breakno].addr;
25493 info->len = breakinfo[breakno].len;
25494 info->type = breakinfo[breakno].type;
25495 val = arch_install_hw_breakpoint(bp);
25496@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25497 case 'k':
25498 /* clear the trace bit */
25499 linux_regs->flags &= ~X86_EFLAGS_TF;
25500- atomic_set(&kgdb_cpu_doing_single_step, -1);
25501+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25502
25503 /* set the trace bit if we're stepping */
25504 if (remcomInBuffer[0] == 's') {
25505 linux_regs->flags |= X86_EFLAGS_TF;
25506- atomic_set(&kgdb_cpu_doing_single_step,
25507+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25508 raw_smp_processor_id());
25509 }
25510
25511@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25512
25513 switch (cmd) {
25514 case DIE_DEBUG:
25515- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25516+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25517 if (user_mode(regs))
25518 return single_step_cont(regs, args);
25519 break;
25520@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25521 #endif /* CONFIG_DEBUG_RODATA */
25522
25523 bpt->type = BP_BREAKPOINT;
25524- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25525+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25526 BREAK_INSTR_SIZE);
25527 if (err)
25528 return err;
25529- err = probe_kernel_write((char *)bpt->bpt_addr,
25530+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25531 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25532 #ifdef CONFIG_DEBUG_RODATA
25533 if (!err)
25534@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25535 return -EBUSY;
25536 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25537 BREAK_INSTR_SIZE);
25538- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25539+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25540 if (err)
25541 return err;
25542 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25543@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25544 if (mutex_is_locked(&text_mutex))
25545 goto knl_write;
25546 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25547- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25548+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25549 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25550 goto knl_write;
25551 return err;
25552 knl_write:
25553 #endif /* CONFIG_DEBUG_RODATA */
25554- return probe_kernel_write((char *)bpt->bpt_addr,
25555+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25556 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25557 }
25558
25559diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25560index 98f654d..ac04352 100644
25561--- a/arch/x86/kernel/kprobes/core.c
25562+++ b/arch/x86/kernel/kprobes/core.c
25563@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25564 s32 raddr;
25565 } __packed *insn;
25566
25567- insn = (struct __arch_relative_insn *)from;
25568+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25569+
25570+ pax_open_kernel();
25571 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25572 insn->op = op;
25573+ pax_close_kernel();
25574 }
25575
25576 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25577@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25578 kprobe_opcode_t opcode;
25579 kprobe_opcode_t *orig_opcodes = opcodes;
25580
25581- if (search_exception_tables((unsigned long)opcodes))
25582+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25583 return 0; /* Page fault may occur on this address. */
25584
25585 retry:
25586@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25587 * for the first byte, we can recover the original instruction
25588 * from it and kp->opcode.
25589 */
25590- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25591+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25592 buf[0] = kp->opcode;
25593- return (unsigned long)buf;
25594+ return ktva_ktla((unsigned long)buf);
25595 }
25596
25597 /*
25598@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25599 /* Another subsystem puts a breakpoint, failed to recover */
25600 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25601 return 0;
25602+ pax_open_kernel();
25603 memcpy(dest, insn.kaddr, insn.length);
25604+ pax_close_kernel();
25605
25606 #ifdef CONFIG_X86_64
25607 if (insn_rip_relative(&insn)) {
25608@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25609 return 0;
25610 }
25611 disp = (u8 *) dest + insn_offset_displacement(&insn);
25612+ pax_open_kernel();
25613 *(s32 *) disp = (s32) newdisp;
25614+ pax_close_kernel();
25615 }
25616 #endif
25617 return insn.length;
25618@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25619 * nor set current_kprobe, because it doesn't use single
25620 * stepping.
25621 */
25622- regs->ip = (unsigned long)p->ainsn.insn;
25623+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25624 preempt_enable_no_resched();
25625 return;
25626 }
25627@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25628 regs->flags &= ~X86_EFLAGS_IF;
25629 /* single step inline if the instruction is an int3 */
25630 if (p->opcode == BREAKPOINT_INSTRUCTION)
25631- regs->ip = (unsigned long)p->addr;
25632+ regs->ip = ktla_ktva((unsigned long)p->addr);
25633 else
25634- regs->ip = (unsigned long)p->ainsn.insn;
25635+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25636 }
25637 NOKPROBE_SYMBOL(setup_singlestep);
25638
25639@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25640 struct kprobe *p;
25641 struct kprobe_ctlblk *kcb;
25642
25643- if (user_mode_vm(regs))
25644+ if (user_mode(regs))
25645 return 0;
25646
25647 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25648@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25649 setup_singlestep(p, regs, kcb, 0);
25650 return 1;
25651 }
25652- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25653+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25654 /*
25655 * The breakpoint instruction was removed right
25656 * after we hit it. Another cpu has removed
25657@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
25658 " movq %rax, 152(%rsp)\n"
25659 RESTORE_REGS_STRING
25660 " popfq\n"
25661+#ifdef KERNEXEC_PLUGIN
25662+ " btsq $63,(%rsp)\n"
25663+#endif
25664 #else
25665 " pushf\n"
25666 SAVE_REGS_STRING
25667@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25668 struct kprobe_ctlblk *kcb)
25669 {
25670 unsigned long *tos = stack_addr(regs);
25671- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25672+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25673 unsigned long orig_ip = (unsigned long)p->addr;
25674 kprobe_opcode_t *insn = p->ainsn.insn;
25675
25676@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25677 struct die_args *args = data;
25678 int ret = NOTIFY_DONE;
25679
25680- if (args->regs && user_mode_vm(args->regs))
25681+ if (args->regs && user_mode(args->regs))
25682 return ret;
25683
25684 if (val == DIE_GPF) {
25685diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25686index 7c523bb..01b051b 100644
25687--- a/arch/x86/kernel/kprobes/opt.c
25688+++ b/arch/x86/kernel/kprobes/opt.c
25689@@ -79,6 +79,7 @@ found:
25690 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25691 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25692 {
25693+ pax_open_kernel();
25694 #ifdef CONFIG_X86_64
25695 *addr++ = 0x48;
25696 *addr++ = 0xbf;
25697@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25698 *addr++ = 0xb8;
25699 #endif
25700 *(unsigned long *)addr = val;
25701+ pax_close_kernel();
25702 }
25703
25704 asm (
25705@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25706 * Verify if the address gap is in 2GB range, because this uses
25707 * a relative jump.
25708 */
25709- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25710+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25711 if (abs(rel) > 0x7fffffff) {
25712 __arch_remove_optimized_kprobe(op, 0);
25713 return -ERANGE;
25714@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25715 op->optinsn.size = ret;
25716
25717 /* Copy arch-dep-instance from template */
25718- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25719+ pax_open_kernel();
25720+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25721+ pax_close_kernel();
25722
25723 /* Set probe information */
25724 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25725
25726 /* Set probe function call */
25727- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25728+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25729
25730 /* Set returning jmp instruction at the tail of out-of-line buffer */
25731- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25732+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25733 (u8 *)op->kp.addr + op->optinsn.size);
25734
25735 flush_icache_range((unsigned long) buf,
25736@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25737 WARN_ON(kprobe_disabled(&op->kp));
25738
25739 /* Backup instructions which will be replaced by jump address */
25740- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25741+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25742 RELATIVE_ADDR_SIZE);
25743
25744 insn_buf[0] = RELATIVEJUMP_OPCODE;
25745@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25746 /* This kprobe is really able to run optimized path. */
25747 op = container_of(p, struct optimized_kprobe, kp);
25748 /* Detour through copied instructions */
25749- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25750+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25751 if (!reenter)
25752 reset_current_kprobe();
25753 preempt_enable_no_resched();
25754diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25755index c2bedae..25e7ab60 100644
25756--- a/arch/x86/kernel/ksysfs.c
25757+++ b/arch/x86/kernel/ksysfs.c
25758@@ -184,7 +184,7 @@ out:
25759
25760 static struct kobj_attribute type_attr = __ATTR_RO(type);
25761
25762-static struct bin_attribute data_attr = {
25763+static bin_attribute_no_const data_attr __read_only = {
25764 .attr = {
25765 .name = "data",
25766 .mode = S_IRUGO,
25767diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25768index c37886d..d851d32 100644
25769--- a/arch/x86/kernel/ldt.c
25770+++ b/arch/x86/kernel/ldt.c
25771@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25772 if (reload) {
25773 #ifdef CONFIG_SMP
25774 preempt_disable();
25775- load_LDT(pc);
25776+ load_LDT_nolock(pc);
25777 if (!cpumask_equal(mm_cpumask(current->mm),
25778 cpumask_of(smp_processor_id())))
25779 smp_call_function(flush_ldt, current->mm, 1);
25780 preempt_enable();
25781 #else
25782- load_LDT(pc);
25783+ load_LDT_nolock(pc);
25784 #endif
25785 }
25786 if (oldsize) {
25787@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25788 return err;
25789
25790 for (i = 0; i < old->size; i++)
25791- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25792+ write_ldt_entry(new->ldt, i, old->ldt + i);
25793 return 0;
25794 }
25795
25796@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25797 retval = copy_ldt(&mm->context, &old_mm->context);
25798 mutex_unlock(&old_mm->context.lock);
25799 }
25800+
25801+ if (tsk == current) {
25802+ mm->context.vdso = 0;
25803+
25804+#ifdef CONFIG_X86_32
25805+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25806+ mm->context.user_cs_base = 0UL;
25807+ mm->context.user_cs_limit = ~0UL;
25808+
25809+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25810+ cpus_clear(mm->context.cpu_user_cs_mask);
25811+#endif
25812+
25813+#endif
25814+#endif
25815+
25816+ }
25817+
25818 return retval;
25819 }
25820
25821@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25822 }
25823 }
25824
25825+#ifdef CONFIG_PAX_SEGMEXEC
25826+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25827+ error = -EINVAL;
25828+ goto out_unlock;
25829+ }
25830+#endif
25831+
25832 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25833 error = -EINVAL;
25834 goto out_unlock;
25835diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25836index 469b23d..5449cfe 100644
25837--- a/arch/x86/kernel/machine_kexec_32.c
25838+++ b/arch/x86/kernel/machine_kexec_32.c
25839@@ -26,7 +26,7 @@
25840 #include <asm/cacheflush.h>
25841 #include <asm/debugreg.h>
25842
25843-static void set_idt(void *newidt, __u16 limit)
25844+static void set_idt(struct desc_struct *newidt, __u16 limit)
25845 {
25846 struct desc_ptr curidt;
25847
25848@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25849 }
25850
25851
25852-static void set_gdt(void *newgdt, __u16 limit)
25853+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25854 {
25855 struct desc_ptr curgdt;
25856
25857@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25858 }
25859
25860 control_page = page_address(image->control_code_page);
25861- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25862+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25863
25864 relocate_kernel_ptr = control_page;
25865 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25866diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25867index 94ea120..4154cea 100644
25868--- a/arch/x86/kernel/mcount_64.S
25869+++ b/arch/x86/kernel/mcount_64.S
25870@@ -7,7 +7,7 @@
25871 #include <linux/linkage.h>
25872 #include <asm/ptrace.h>
25873 #include <asm/ftrace.h>
25874-
25875+#include <asm/alternative-asm.h>
25876
25877 .code64
25878 .section .entry.text, "ax"
25879@@ -148,8 +148,9 @@
25880 #ifdef CONFIG_DYNAMIC_FTRACE
25881
25882 ENTRY(function_hook)
25883+ pax_force_retaddr
25884 retq
25885-END(function_hook)
25886+ENDPROC(function_hook)
25887
25888 ENTRY(ftrace_caller)
25889 /* save_mcount_regs fills in first two parameters */
25890@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25891 #endif
25892
25893 GLOBAL(ftrace_stub)
25894+ pax_force_retaddr
25895 retq
25896-END(ftrace_caller)
25897+ENDPROC(ftrace_caller)
25898
25899 ENTRY(ftrace_regs_caller)
25900 /* Save the current flags before any operations that can change them */
25901@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25902
25903 jmp ftrace_return
25904
25905-END(ftrace_regs_caller)
25906+ENDPROC(ftrace_regs_caller)
25907
25908
25909 #else /* ! CONFIG_DYNAMIC_FTRACE */
25910@@ -272,18 +274,20 @@ fgraph_trace:
25911 #endif
25912
25913 GLOBAL(ftrace_stub)
25914+ pax_force_retaddr
25915 retq
25916
25917 trace:
25918 /* save_mcount_regs fills in first two parameters */
25919 save_mcount_regs
25920
25921+ pax_force_fptr ftrace_trace_function
25922 call *ftrace_trace_function
25923
25924 restore_mcount_regs
25925
25926 jmp fgraph_trace
25927-END(function_hook)
25928+ENDPROC(function_hook)
25929 #endif /* CONFIG_DYNAMIC_FTRACE */
25930 #endif /* CONFIG_FUNCTION_TRACER */
25931
25932@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25933
25934 restore_mcount_regs
25935
25936+ pax_force_retaddr
25937 retq
25938-END(ftrace_graph_caller)
25939+ENDPROC(ftrace_graph_caller)
25940
25941 GLOBAL(return_to_handler)
25942 subq $24, %rsp
25943@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25944 movq 8(%rsp), %rdx
25945 movq (%rsp), %rax
25946 addq $24, %rsp
25947+ pax_force_fptr %rdi
25948 jmp *%rdi
25949+ENDPROC(return_to_handler)
25950 #endif
25951diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25952index e69f988..72902b7 100644
25953--- a/arch/x86/kernel/module.c
25954+++ b/arch/x86/kernel/module.c
25955@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
25956 }
25957 #endif
25958
25959-void *module_alloc(unsigned long size)
25960+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25961 {
25962- if (PAGE_ALIGN(size) > MODULES_LEN)
25963+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25964 return NULL;
25965 return __vmalloc_node_range(size, 1,
25966 MODULES_VADDR + get_module_load_offset(),
25967- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25968- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
25969+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25970+ prot, NUMA_NO_NODE,
25971 __builtin_return_address(0));
25972 }
25973
25974+void *module_alloc(unsigned long size)
25975+{
25976+
25977+#ifdef CONFIG_PAX_KERNEXEC
25978+ return __module_alloc(size, PAGE_KERNEL);
25979+#else
25980+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25981+#endif
25982+
25983+}
25984+
25985+#ifdef CONFIG_PAX_KERNEXEC
25986+#ifdef CONFIG_X86_32
25987+void *module_alloc_exec(unsigned long size)
25988+{
25989+ struct vm_struct *area;
25990+
25991+ if (size == 0)
25992+ return NULL;
25993+
25994+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25995+return area ? area->addr : NULL;
25996+}
25997+EXPORT_SYMBOL(module_alloc_exec);
25998+
25999+void module_memfree_exec(void *module_region)
26000+{
26001+ vunmap(module_region);
26002+}
26003+EXPORT_SYMBOL(module_memfree_exec);
26004+#else
26005+void module_memfree_exec(void *module_region)
26006+{
26007+ module_memfree(module_region);
26008+}
26009+EXPORT_SYMBOL(module_memfree_exec);
26010+
26011+void *module_alloc_exec(unsigned long size)
26012+{
26013+ return __module_alloc(size, PAGE_KERNEL_RX);
26014+}
26015+EXPORT_SYMBOL(module_alloc_exec);
26016+#endif
26017+#endif
26018+
26019 #ifdef CONFIG_X86_32
26020 int apply_relocate(Elf32_Shdr *sechdrs,
26021 const char *strtab,
26022@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26023 unsigned int i;
26024 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26025 Elf32_Sym *sym;
26026- uint32_t *location;
26027+ uint32_t *plocation, location;
26028
26029 DEBUGP("Applying relocate section %u to %u\n",
26030 relsec, sechdrs[relsec].sh_info);
26031 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26032 /* This is where to make the change */
26033- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26034- + rel[i].r_offset;
26035+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26036+ location = (uint32_t)plocation;
26037+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26038+ plocation = ktla_ktva((void *)plocation);
26039 /* This is the symbol it is referring to. Note that all
26040 undefined symbols have been resolved. */
26041 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26042@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26043 switch (ELF32_R_TYPE(rel[i].r_info)) {
26044 case R_386_32:
26045 /* We add the value into the location given */
26046- *location += sym->st_value;
26047+ pax_open_kernel();
26048+ *plocation += sym->st_value;
26049+ pax_close_kernel();
26050 break;
26051 case R_386_PC32:
26052 /* Add the value, subtract its position */
26053- *location += sym->st_value - (uint32_t)location;
26054+ pax_open_kernel();
26055+ *plocation += sym->st_value - location;
26056+ pax_close_kernel();
26057 break;
26058 default:
26059 pr_err("%s: Unknown relocation: %u\n",
26060@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26061 case R_X86_64_NONE:
26062 break;
26063 case R_X86_64_64:
26064+ pax_open_kernel();
26065 *(u64 *)loc = val;
26066+ pax_close_kernel();
26067 break;
26068 case R_X86_64_32:
26069+ pax_open_kernel();
26070 *(u32 *)loc = val;
26071+ pax_close_kernel();
26072 if (val != *(u32 *)loc)
26073 goto overflow;
26074 break;
26075 case R_X86_64_32S:
26076+ pax_open_kernel();
26077 *(s32 *)loc = val;
26078+ pax_close_kernel();
26079 if ((s64)val != *(s32 *)loc)
26080 goto overflow;
26081 break;
26082 case R_X86_64_PC32:
26083 val -= (u64)loc;
26084+ pax_open_kernel();
26085 *(u32 *)loc = val;
26086+ pax_close_kernel();
26087+
26088 #if 0
26089 if ((s64)val != *(s32 *)loc)
26090 goto overflow;
26091diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26092index 113e707..0a690e1 100644
26093--- a/arch/x86/kernel/msr.c
26094+++ b/arch/x86/kernel/msr.c
26095@@ -39,6 +39,7 @@
26096 #include <linux/notifier.h>
26097 #include <linux/uaccess.h>
26098 #include <linux/gfp.h>
26099+#include <linux/grsecurity.h>
26100
26101 #include <asm/processor.h>
26102 #include <asm/msr.h>
26103@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26104 int err = 0;
26105 ssize_t bytes = 0;
26106
26107+#ifdef CONFIG_GRKERNSEC_KMEM
26108+ gr_handle_msr_write();
26109+ return -EPERM;
26110+#endif
26111+
26112 if (count % 8)
26113 return -EINVAL; /* Invalid chunk size */
26114
26115@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26116 err = -EBADF;
26117 break;
26118 }
26119+#ifdef CONFIG_GRKERNSEC_KMEM
26120+ gr_handle_msr_write();
26121+ return -EPERM;
26122+#endif
26123 if (copy_from_user(&regs, uregs, sizeof regs)) {
26124 err = -EFAULT;
26125 break;
26126@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26127 return notifier_from_errno(err);
26128 }
26129
26130-static struct notifier_block __refdata msr_class_cpu_notifier = {
26131+static struct notifier_block msr_class_cpu_notifier = {
26132 .notifier_call = msr_class_cpu_callback,
26133 };
26134
26135diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26136index c3e985d..110a36a 100644
26137--- a/arch/x86/kernel/nmi.c
26138+++ b/arch/x86/kernel/nmi.c
26139@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26140
26141 static void nmi_max_handler(struct irq_work *w)
26142 {
26143- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26144+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26145 int remainder_ns, decimal_msecs;
26146- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26147+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26148
26149 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26150 decimal_msecs = remainder_ns / 1000;
26151
26152 printk_ratelimited(KERN_INFO
26153 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26154- a->handler, whole_msecs, decimal_msecs);
26155+ n->action->handler, whole_msecs, decimal_msecs);
26156 }
26157
26158 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26159@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26160 delta = sched_clock() - delta;
26161 trace_nmi_handler(a->handler, (int)delta, thishandled);
26162
26163- if (delta < nmi_longest_ns || delta < a->max_duration)
26164+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26165 continue;
26166
26167- a->max_duration = delta;
26168- irq_work_queue(&a->irq_work);
26169+ a->work->max_duration = delta;
26170+ irq_work_queue(&a->work->irq_work);
26171 }
26172
26173 rcu_read_unlock();
26174@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26175 }
26176 NOKPROBE_SYMBOL(nmi_handle);
26177
26178-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26179+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26180 {
26181 struct nmi_desc *desc = nmi_to_desc(type);
26182 unsigned long flags;
26183@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26184 if (!action->handler)
26185 return -EINVAL;
26186
26187- init_irq_work(&action->irq_work, nmi_max_handler);
26188+ action->work->action = action;
26189+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26190
26191 spin_lock_irqsave(&desc->lock, flags);
26192
26193@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26194 * event confuses some handlers (kdump uses this flag)
26195 */
26196 if (action->flags & NMI_FLAG_FIRST)
26197- list_add_rcu(&action->list, &desc->head);
26198+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26199 else
26200- list_add_tail_rcu(&action->list, &desc->head);
26201+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26202
26203 spin_unlock_irqrestore(&desc->lock, flags);
26204 return 0;
26205@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26206 if (!strcmp(n->name, name)) {
26207 WARN(in_nmi(),
26208 "Trying to free NMI (%s) from NMI context!\n", n->name);
26209- list_del_rcu(&n->list);
26210+ pax_list_del_rcu((struct list_head *)&n->list);
26211 break;
26212 }
26213 }
26214@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26215 dotraplinkage notrace void
26216 do_nmi(struct pt_regs *regs, long error_code)
26217 {
26218+
26219+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26220+ if (!user_mode(regs)) {
26221+ unsigned long cs = regs->cs & 0xFFFF;
26222+ unsigned long ip = ktva_ktla(regs->ip);
26223+
26224+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26225+ regs->ip = ip;
26226+ }
26227+#endif
26228+
26229 nmi_nesting_preprocess(regs);
26230
26231 nmi_enter();
26232diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26233index 6d9582e..f746287 100644
26234--- a/arch/x86/kernel/nmi_selftest.c
26235+++ b/arch/x86/kernel/nmi_selftest.c
26236@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26237 {
26238 /* trap all the unknown NMIs we may generate */
26239 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26240- __initdata);
26241+ __initconst);
26242 }
26243
26244 static void __init cleanup_nmi_testsuite(void)
26245@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26246 unsigned long timeout;
26247
26248 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26249- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26250+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26251 nmi_fail = FAILURE;
26252 return;
26253 }
26254diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26255index bbb6c73..24a58ef 100644
26256--- a/arch/x86/kernel/paravirt-spinlocks.c
26257+++ b/arch/x86/kernel/paravirt-spinlocks.c
26258@@ -8,7 +8,7 @@
26259
26260 #include <asm/paravirt.h>
26261
26262-struct pv_lock_ops pv_lock_ops = {
26263+struct pv_lock_ops pv_lock_ops __read_only = {
26264 #ifdef CONFIG_SMP
26265 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26266 .unlock_kick = paravirt_nop,
26267diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26268index 548d25f..f8fb99c 100644
26269--- a/arch/x86/kernel/paravirt.c
26270+++ b/arch/x86/kernel/paravirt.c
26271@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26272 {
26273 return x;
26274 }
26275+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26276+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26277+#endif
26278
26279 void __init default_banner(void)
26280 {
26281@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26282
26283 if (opfunc == NULL)
26284 /* If there's no function, patch it with a ud2a (BUG) */
26285- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26286- else if (opfunc == _paravirt_nop)
26287+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26288+ else if (opfunc == (void *)_paravirt_nop)
26289 /* If the operation is a nop, then nop the callsite */
26290 ret = paravirt_patch_nop();
26291
26292 /* identity functions just return their single argument */
26293- else if (opfunc == _paravirt_ident_32)
26294+ else if (opfunc == (void *)_paravirt_ident_32)
26295 ret = paravirt_patch_ident_32(insnbuf, len);
26296- else if (opfunc == _paravirt_ident_64)
26297+ else if (opfunc == (void *)_paravirt_ident_64)
26298 ret = paravirt_patch_ident_64(insnbuf, len);
26299+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26300+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26301+ ret = paravirt_patch_ident_64(insnbuf, len);
26302+#endif
26303
26304 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26305 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26306@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26307 if (insn_len > len || start == NULL)
26308 insn_len = len;
26309 else
26310- memcpy(insnbuf, start, insn_len);
26311+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26312
26313 return insn_len;
26314 }
26315@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26316 return this_cpu_read(paravirt_lazy_mode);
26317 }
26318
26319-struct pv_info pv_info = {
26320+struct pv_info pv_info __read_only = {
26321 .name = "bare hardware",
26322 .paravirt_enabled = 0,
26323 .kernel_rpl = 0,
26324@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26325 #endif
26326 };
26327
26328-struct pv_init_ops pv_init_ops = {
26329+struct pv_init_ops pv_init_ops __read_only = {
26330 .patch = native_patch,
26331 };
26332
26333-struct pv_time_ops pv_time_ops = {
26334+struct pv_time_ops pv_time_ops __read_only = {
26335 .sched_clock = native_sched_clock,
26336 .steal_clock = native_steal_clock,
26337 };
26338
26339-__visible struct pv_irq_ops pv_irq_ops = {
26340+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26341 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26342 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26343 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26344@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26345 #endif
26346 };
26347
26348-__visible struct pv_cpu_ops pv_cpu_ops = {
26349+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26350 .cpuid = native_cpuid,
26351 .get_debugreg = native_get_debugreg,
26352 .set_debugreg = native_set_debugreg,
26353@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26354 NOKPROBE_SYMBOL(native_set_debugreg);
26355 NOKPROBE_SYMBOL(native_load_idt);
26356
26357-struct pv_apic_ops pv_apic_ops = {
26358+struct pv_apic_ops pv_apic_ops __read_only= {
26359 #ifdef CONFIG_X86_LOCAL_APIC
26360 .startup_ipi_hook = paravirt_nop,
26361 #endif
26362 };
26363
26364-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26365+#ifdef CONFIG_X86_32
26366+#ifdef CONFIG_X86_PAE
26367+/* 64-bit pagetable entries */
26368+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26369+#else
26370 /* 32-bit pagetable entries */
26371 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26372+#endif
26373 #else
26374 /* 64-bit pagetable entries */
26375 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26376 #endif
26377
26378-struct pv_mmu_ops pv_mmu_ops = {
26379+struct pv_mmu_ops pv_mmu_ops __read_only = {
26380
26381 .read_cr2 = native_read_cr2,
26382 .write_cr2 = native_write_cr2,
26383@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26384 .make_pud = PTE_IDENT,
26385
26386 .set_pgd = native_set_pgd,
26387+ .set_pgd_batched = native_set_pgd_batched,
26388 #endif
26389 #endif /* PAGETABLE_LEVELS >= 3 */
26390
26391@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26392 },
26393
26394 .set_fixmap = native_set_fixmap,
26395+
26396+#ifdef CONFIG_PAX_KERNEXEC
26397+ .pax_open_kernel = native_pax_open_kernel,
26398+ .pax_close_kernel = native_pax_close_kernel,
26399+#endif
26400+
26401 };
26402
26403 EXPORT_SYMBOL_GPL(pv_time_ops);
26404diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26405index a1da673..b6f5831 100644
26406--- a/arch/x86/kernel/paravirt_patch_64.c
26407+++ b/arch/x86/kernel/paravirt_patch_64.c
26408@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26409 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26410 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26411 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26412+
26413+#ifndef CONFIG_PAX_MEMORY_UDEREF
26414 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26415+#endif
26416+
26417 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26418 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26419
26420@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26421 PATCH_SITE(pv_mmu_ops, read_cr3);
26422 PATCH_SITE(pv_mmu_ops, write_cr3);
26423 PATCH_SITE(pv_cpu_ops, clts);
26424+
26425+#ifndef CONFIG_PAX_MEMORY_UDEREF
26426 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26427+#endif
26428+
26429 PATCH_SITE(pv_cpu_ops, wbinvd);
26430
26431 patch_site:
26432diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26433index 0497f71..7186c0d 100644
26434--- a/arch/x86/kernel/pci-calgary_64.c
26435+++ b/arch/x86/kernel/pci-calgary_64.c
26436@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26437 tce_space = be64_to_cpu(readq(target));
26438 tce_space = tce_space & TAR_SW_BITS;
26439
26440- tce_space = tce_space & (~specified_table_size);
26441+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26442 info->tce_space = (u64 *)__va(tce_space);
26443 }
26444 }
26445diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26446index 35ccf75..7a15747 100644
26447--- a/arch/x86/kernel/pci-iommu_table.c
26448+++ b/arch/x86/kernel/pci-iommu_table.c
26449@@ -2,7 +2,7 @@
26450 #include <asm/iommu_table.h>
26451 #include <linux/string.h>
26452 #include <linux/kallsyms.h>
26453-
26454+#include <linux/sched.h>
26455
26456 #define DEBUG 1
26457
26458diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26459index 77dd0ad..9ec4723 100644
26460--- a/arch/x86/kernel/pci-swiotlb.c
26461+++ b/arch/x86/kernel/pci-swiotlb.c
26462@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26463 struct dma_attrs *attrs)
26464 {
26465 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26466- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26467+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26468 else
26469 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26470 }
26471diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26472index e127dda..94e384d 100644
26473--- a/arch/x86/kernel/process.c
26474+++ b/arch/x86/kernel/process.c
26475@@ -36,7 +36,8 @@
26476 * section. Since TSS's are completely CPU-local, we want them
26477 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26478 */
26479-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26480+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26481+EXPORT_SYMBOL(init_tss);
26482
26483 #ifdef CONFIG_X86_64
26484 static DEFINE_PER_CPU(unsigned char, is_idle);
26485@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
26486 task_xstate_cachep =
26487 kmem_cache_create("task_xstate", xstate_size,
26488 __alignof__(union thread_xstate),
26489- SLAB_PANIC | SLAB_NOTRACK, NULL);
26490+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26491 setup_xstate_comp();
26492 }
26493
26494@@ -108,7 +109,7 @@ void exit_thread(void)
26495 unsigned long *bp = t->io_bitmap_ptr;
26496
26497 if (bp) {
26498- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26499+ struct tss_struct *tss = init_tss + get_cpu();
26500
26501 t->io_bitmap_ptr = NULL;
26502 clear_thread_flag(TIF_IO_BITMAP);
26503@@ -128,6 +129,9 @@ void flush_thread(void)
26504 {
26505 struct task_struct *tsk = current;
26506
26507+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26508+ loadsegment(gs, 0);
26509+#endif
26510 flush_ptrace_hw_breakpoint(tsk);
26511 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26512 drop_init_fpu(tsk);
26513@@ -274,7 +278,7 @@ static void __exit_idle(void)
26514 void exit_idle(void)
26515 {
26516 /* idle loop has pid 0 */
26517- if (current->pid)
26518+ if (task_pid_nr(current))
26519 return;
26520 __exit_idle();
26521 }
26522@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26523 return ret;
26524 }
26525 #endif
26526-void stop_this_cpu(void *dummy)
26527+__noreturn void stop_this_cpu(void *dummy)
26528 {
26529 local_irq_disable();
26530 /*
26531@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26532 }
26533 early_param("idle", idle_setup);
26534
26535-unsigned long arch_align_stack(unsigned long sp)
26536+#ifdef CONFIG_PAX_RANDKSTACK
26537+void pax_randomize_kstack(struct pt_regs *regs)
26538 {
26539- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26540- sp -= get_random_int() % 8192;
26541- return sp & ~0xf;
26542-}
26543+ struct thread_struct *thread = &current->thread;
26544+ unsigned long time;
26545
26546-unsigned long arch_randomize_brk(struct mm_struct *mm)
26547-{
26548- unsigned long range_end = mm->brk + 0x02000000;
26549- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26550-}
26551+ if (!randomize_va_space)
26552+ return;
26553+
26554+ if (v8086_mode(regs))
26555+ return;
26556
26557+ rdtscl(time);
26558+
26559+ /* P4 seems to return a 0 LSB, ignore it */
26560+#ifdef CONFIG_MPENTIUM4
26561+ time &= 0x3EUL;
26562+ time <<= 2;
26563+#elif defined(CONFIG_X86_64)
26564+ time &= 0xFUL;
26565+ time <<= 4;
26566+#else
26567+ time &= 0x1FUL;
26568+ time <<= 3;
26569+#endif
26570+
26571+ thread->sp0 ^= time;
26572+ load_sp0(init_tss + smp_processor_id(), thread);
26573+
26574+#ifdef CONFIG_X86_64
26575+ this_cpu_write(kernel_stack, thread->sp0);
26576+#endif
26577+}
26578+#endif
26579diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26580index 8f3ebfe..cbc731b 100644
26581--- a/arch/x86/kernel/process_32.c
26582+++ b/arch/x86/kernel/process_32.c
26583@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26584 unsigned long thread_saved_pc(struct task_struct *tsk)
26585 {
26586 return ((unsigned long *)tsk->thread.sp)[3];
26587+//XXX return tsk->thread.eip;
26588 }
26589
26590 void __show_regs(struct pt_regs *regs, int all)
26591@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26592 unsigned long sp;
26593 unsigned short ss, gs;
26594
26595- if (user_mode_vm(regs)) {
26596+ if (user_mode(regs)) {
26597 sp = regs->sp;
26598 ss = regs->ss & 0xffff;
26599- gs = get_user_gs(regs);
26600 } else {
26601 sp = kernel_stack_pointer(regs);
26602 savesegment(ss, ss);
26603- savesegment(gs, gs);
26604 }
26605+ gs = get_user_gs(regs);
26606
26607 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26608 (u16)regs->cs, regs->ip, regs->flags,
26609- smp_processor_id());
26610+ raw_smp_processor_id());
26611 print_symbol("EIP is at %s\n", regs->ip);
26612
26613 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26614@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26615 int copy_thread(unsigned long clone_flags, unsigned long sp,
26616 unsigned long arg, struct task_struct *p)
26617 {
26618- struct pt_regs *childregs = task_pt_regs(p);
26619+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26620 struct task_struct *tsk;
26621 int err;
26622
26623 p->thread.sp = (unsigned long) childregs;
26624 p->thread.sp0 = (unsigned long) (childregs+1);
26625+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26626 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26627
26628 if (unlikely(p->flags & PF_KTHREAD)) {
26629 /* kernel thread */
26630 memset(childregs, 0, sizeof(struct pt_regs));
26631 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26632- task_user_gs(p) = __KERNEL_STACK_CANARY;
26633- childregs->ds = __USER_DS;
26634- childregs->es = __USER_DS;
26635+ savesegment(gs, childregs->gs);
26636+ childregs->ds = __KERNEL_DS;
26637+ childregs->es = __KERNEL_DS;
26638 childregs->fs = __KERNEL_PERCPU;
26639 childregs->bx = sp; /* function */
26640 childregs->bp = arg;
26641@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26642 struct thread_struct *prev = &prev_p->thread,
26643 *next = &next_p->thread;
26644 int cpu = smp_processor_id();
26645- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26646+ struct tss_struct *tss = init_tss + cpu;
26647 fpu_switch_t fpu;
26648
26649 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26650@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26651 */
26652 lazy_save_gs(prev->gs);
26653
26654+#ifdef CONFIG_PAX_MEMORY_UDEREF
26655+ __set_fs(task_thread_info(next_p)->addr_limit);
26656+#endif
26657+
26658 /*
26659 * Load the per-thread Thread-Local Storage descriptor.
26660 */
26661@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26662 */
26663 arch_end_context_switch(next_p);
26664
26665- this_cpu_write(kernel_stack,
26666- (unsigned long)task_stack_page(next_p) +
26667- THREAD_SIZE - KERNEL_STACK_OFFSET);
26668+ this_cpu_write(current_task, next_p);
26669+ this_cpu_write(current_tinfo, &next_p->tinfo);
26670+ this_cpu_write(kernel_stack, next->sp0);
26671
26672 /*
26673 * Restore %gs if needed (which is common)
26674@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26675
26676 switch_fpu_finish(next_p, fpu);
26677
26678- this_cpu_write(current_task, next_p);
26679-
26680 return prev_p;
26681 }
26682
26683@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26684 } while (count++ < 16);
26685 return 0;
26686 }
26687-
26688diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26689index 5a2c029..ec8611d 100644
26690--- a/arch/x86/kernel/process_64.c
26691+++ b/arch/x86/kernel/process_64.c
26692@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26693 struct pt_regs *childregs;
26694 struct task_struct *me = current;
26695
26696- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26697+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26698 childregs = task_pt_regs(p);
26699 p->thread.sp = (unsigned long) childregs;
26700 p->thread.usersp = me->thread.usersp;
26701+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26702 set_tsk_thread_flag(p, TIF_FORK);
26703 p->thread.io_bitmap_ptr = NULL;
26704
26705@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26706 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26707 savesegment(es, p->thread.es);
26708 savesegment(ds, p->thread.ds);
26709+ savesegment(ss, p->thread.ss);
26710+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26711 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26712
26713 if (unlikely(p->flags & PF_KTHREAD)) {
26714@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26715 struct thread_struct *prev = &prev_p->thread;
26716 struct thread_struct *next = &next_p->thread;
26717 int cpu = smp_processor_id();
26718- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26719+ struct tss_struct *tss = init_tss + cpu;
26720 unsigned fsindex, gsindex;
26721 fpu_switch_t fpu;
26722
26723@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26724 if (unlikely(next->ds | prev->ds))
26725 loadsegment(ds, next->ds);
26726
26727+ savesegment(ss, prev->ss);
26728+ if (unlikely(next->ss != prev->ss))
26729+ loadsegment(ss, next->ss);
26730+
26731 /*
26732 * Switch FS and GS.
26733 *
26734@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26735 prev->usersp = this_cpu_read(old_rsp);
26736 this_cpu_write(old_rsp, next->usersp);
26737 this_cpu_write(current_task, next_p);
26738+ this_cpu_write(current_tinfo, &next_p->tinfo);
26739
26740 /*
26741 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26742@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26743 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26744 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26745
26746- this_cpu_write(kernel_stack,
26747- (unsigned long)task_stack_page(next_p) +
26748- THREAD_SIZE - KERNEL_STACK_OFFSET);
26749+ this_cpu_write(kernel_stack, next->sp0);
26750
26751 /*
26752 * Now maybe reload the debug registers and handle I/O bitmaps
26753@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26754 if (!p || p == current || p->state == TASK_RUNNING)
26755 return 0;
26756 stack = (unsigned long)task_stack_page(p);
26757- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26758+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26759 return 0;
26760 fp = *(u64 *)(p->thread.sp);
26761 do {
26762- if (fp < (unsigned long)stack ||
26763- fp >= (unsigned long)stack+THREAD_SIZE)
26764+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26765 return 0;
26766 ip = *(u64 *)(fp+8);
26767 if (!in_sched_functions(ip))
26768diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26769index e510618..5165ac0 100644
26770--- a/arch/x86/kernel/ptrace.c
26771+++ b/arch/x86/kernel/ptrace.c
26772@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26773 unsigned long sp = (unsigned long)&regs->sp;
26774 u32 *prev_esp;
26775
26776- if (context == (sp & ~(THREAD_SIZE - 1)))
26777+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26778 return sp;
26779
26780- prev_esp = (u32 *)(context);
26781+ prev_esp = *(u32 **)(context);
26782 if (prev_esp)
26783 return (unsigned long)prev_esp;
26784
26785@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26786 if (child->thread.gs != value)
26787 return do_arch_prctl(child, ARCH_SET_GS, value);
26788 return 0;
26789+
26790+ case offsetof(struct user_regs_struct,ip):
26791+ /*
26792+ * Protect against any attempt to set ip to an
26793+ * impossible address. There are dragons lurking if the
26794+ * address is noncanonical. (This explicitly allows
26795+ * setting ip to TASK_SIZE_MAX, because user code can do
26796+ * that all by itself by running off the end of its
26797+ * address space.
26798+ */
26799+ if (value > TASK_SIZE_MAX)
26800+ return -EIO;
26801+ break;
26802+
26803 #endif
26804 }
26805
26806@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26807 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26808 {
26809 int i;
26810- int dr7 = 0;
26811+ unsigned long dr7 = 0;
26812 struct arch_hw_breakpoint *info;
26813
26814 for (i = 0; i < HBP_NUM; i++) {
26815@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26816 unsigned long addr, unsigned long data)
26817 {
26818 int ret;
26819- unsigned long __user *datap = (unsigned long __user *)data;
26820+ unsigned long __user *datap = (__force unsigned long __user *)data;
26821
26822 switch (request) {
26823 /* read the word at location addr in the USER area. */
26824@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26825 if ((int) addr < 0)
26826 return -EIO;
26827 ret = do_get_thread_area(child, addr,
26828- (struct user_desc __user *)data);
26829+ (__force struct user_desc __user *) data);
26830 break;
26831
26832 case PTRACE_SET_THREAD_AREA:
26833 if ((int) addr < 0)
26834 return -EIO;
26835 ret = do_set_thread_area(child, addr,
26836- (struct user_desc __user *)data, 0);
26837+ (__force struct user_desc __user *) data, 0);
26838 break;
26839 #endif
26840
26841@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26842
26843 #ifdef CONFIG_X86_64
26844
26845-static struct user_regset x86_64_regsets[] __read_mostly = {
26846+static user_regset_no_const x86_64_regsets[] __read_only = {
26847 [REGSET_GENERAL] = {
26848 .core_note_type = NT_PRSTATUS,
26849 .n = sizeof(struct user_regs_struct) / sizeof(long),
26850@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26851 #endif /* CONFIG_X86_64 */
26852
26853 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26854-static struct user_regset x86_32_regsets[] __read_mostly = {
26855+static user_regset_no_const x86_32_regsets[] __read_only = {
26856 [REGSET_GENERAL] = {
26857 .core_note_type = NT_PRSTATUS,
26858 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26859@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26860 */
26861 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26862
26863-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26864+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26865 {
26866 #ifdef CONFIG_X86_64
26867 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26868@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26869 memset(info, 0, sizeof(*info));
26870 info->si_signo = SIGTRAP;
26871 info->si_code = si_code;
26872- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26873+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26874 }
26875
26876 void user_single_step_siginfo(struct task_struct *tsk,
26877@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26878 }
26879 }
26880
26881+#ifdef CONFIG_GRKERNSEC_SETXID
26882+extern void gr_delayed_cred_worker(void);
26883+#endif
26884+
26885 /*
26886 * We can return 0 to resume the syscall or anything else to go to phase
26887 * 2. If we resume the syscall, we need to put something appropriate in
26888@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26889
26890 BUG_ON(regs != task_pt_regs(current));
26891
26892+#ifdef CONFIG_GRKERNSEC_SETXID
26893+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26894+ gr_delayed_cred_worker();
26895+#endif
26896+
26897 /*
26898 * If we stepped into a sysenter/syscall insn, it trapped in
26899 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26900@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26901 */
26902 user_exit();
26903
26904+#ifdef CONFIG_GRKERNSEC_SETXID
26905+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26906+ gr_delayed_cred_worker();
26907+#endif
26908+
26909 audit_syscall_exit(regs);
26910
26911 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26912diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26913index 2f355d2..e75ed0a 100644
26914--- a/arch/x86/kernel/pvclock.c
26915+++ b/arch/x86/kernel/pvclock.c
26916@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26917 reset_hung_task_detector();
26918 }
26919
26920-static atomic64_t last_value = ATOMIC64_INIT(0);
26921+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26922
26923 void pvclock_resume(void)
26924 {
26925- atomic64_set(&last_value, 0);
26926+ atomic64_set_unchecked(&last_value, 0);
26927 }
26928
26929 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26930@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26931 * updating at the same time, and one of them could be slightly behind,
26932 * making the assumption that last_value always go forward fail to hold.
26933 */
26934- last = atomic64_read(&last_value);
26935+ last = atomic64_read_unchecked(&last_value);
26936 do {
26937 if (ret < last)
26938 return last;
26939- last = atomic64_cmpxchg(&last_value, last, ret);
26940+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26941 } while (unlikely(last != ret));
26942
26943 return ret;
26944diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26945index bae6c60..b438619 100644
26946--- a/arch/x86/kernel/reboot.c
26947+++ b/arch/x86/kernel/reboot.c
26948@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26949
26950 void __noreturn machine_real_restart(unsigned int type)
26951 {
26952+
26953+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26954+ struct desc_struct *gdt;
26955+#endif
26956+
26957 local_irq_disable();
26958
26959 /*
26960@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26961
26962 /* Jump to the identity-mapped low memory code */
26963 #ifdef CONFIG_X86_32
26964- asm volatile("jmpl *%0" : :
26965+
26966+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26967+ gdt = get_cpu_gdt_table(smp_processor_id());
26968+ pax_open_kernel();
26969+#ifdef CONFIG_PAX_MEMORY_UDEREF
26970+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26971+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26972+ loadsegment(ds, __KERNEL_DS);
26973+ loadsegment(es, __KERNEL_DS);
26974+ loadsegment(ss, __KERNEL_DS);
26975+#endif
26976+#ifdef CONFIG_PAX_KERNEXEC
26977+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26978+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26979+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26980+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26981+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26982+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26983+#endif
26984+ pax_close_kernel();
26985+#endif
26986+
26987+ asm volatile("ljmpl *%0" : :
26988 "rm" (real_mode_header->machine_real_restart_asm),
26989 "a" (type));
26990 #else
26991@@ -501,7 +528,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26992 * This means that this function can never return, it can misbehave
26993 * by not rebooting properly and hanging.
26994 */
26995-static void native_machine_emergency_restart(void)
26996+static void __noreturn native_machine_emergency_restart(void)
26997 {
26998 int i;
26999 int attempt = 0;
27000@@ -621,13 +648,13 @@ void native_machine_shutdown(void)
27001 #endif
27002 }
27003
27004-static void __machine_emergency_restart(int emergency)
27005+static void __noreturn __machine_emergency_restart(int emergency)
27006 {
27007 reboot_emergency = emergency;
27008 machine_ops.emergency_restart();
27009 }
27010
27011-static void native_machine_restart(char *__unused)
27012+static void __noreturn native_machine_restart(char *__unused)
27013 {
27014 pr_notice("machine restart\n");
27015
27016@@ -636,7 +663,7 @@ static void native_machine_restart(char *__unused)
27017 __machine_emergency_restart(0);
27018 }
27019
27020-static void native_machine_halt(void)
27021+static void __noreturn native_machine_halt(void)
27022 {
27023 /* Stop other cpus and apics */
27024 machine_shutdown();
27025@@ -646,7 +673,7 @@ static void native_machine_halt(void)
27026 stop_this_cpu(NULL);
27027 }
27028
27029-static void native_machine_power_off(void)
27030+static void __noreturn native_machine_power_off(void)
27031 {
27032 if (pm_power_off) {
27033 if (!reboot_force)
27034@@ -655,9 +682,10 @@ static void native_machine_power_off(void)
27035 }
27036 /* A fallback in case there is no PM info available */
27037 tboot_shutdown(TB_SHUTDOWN_HALT);
27038+ unreachable();
27039 }
27040
27041-struct machine_ops machine_ops = {
27042+struct machine_ops machine_ops __read_only = {
27043 .power_off = native_machine_power_off,
27044 .shutdown = native_machine_shutdown,
27045 .emergency_restart = native_machine_emergency_restart,
27046diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27047index c8e41e9..64049ef 100644
27048--- a/arch/x86/kernel/reboot_fixups_32.c
27049+++ b/arch/x86/kernel/reboot_fixups_32.c
27050@@ -57,7 +57,7 @@ struct device_fixup {
27051 unsigned int vendor;
27052 unsigned int device;
27053 void (*reboot_fixup)(struct pci_dev *);
27054-};
27055+} __do_const;
27056
27057 /*
27058 * PCI ids solely used for fixups_table go here
27059diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27060index 3fd2c69..a444264 100644
27061--- a/arch/x86/kernel/relocate_kernel_64.S
27062+++ b/arch/x86/kernel/relocate_kernel_64.S
27063@@ -96,8 +96,7 @@ relocate_kernel:
27064
27065 /* jump to identity mapped page */
27066 addq $(identity_mapped - relocate_kernel), %r8
27067- pushq %r8
27068- ret
27069+ jmp *%r8
27070
27071 identity_mapped:
27072 /* set return address to 0 if not preserving context */
27073diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27074index ab4734e..c4ca0eb 100644
27075--- a/arch/x86/kernel/setup.c
27076+++ b/arch/x86/kernel/setup.c
27077@@ -110,6 +110,7 @@
27078 #include <asm/mce.h>
27079 #include <asm/alternative.h>
27080 #include <asm/prom.h>
27081+#include <asm/boot.h>
27082
27083 /*
27084 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27085@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27086 #endif
27087
27088
27089-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27090-__visible unsigned long mmu_cr4_features;
27091+#ifdef CONFIG_X86_64
27092+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27093+#elif defined(CONFIG_X86_PAE)
27094+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27095 #else
27096-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27097+__visible unsigned long mmu_cr4_features __read_only;
27098 #endif
27099
27100+void set_in_cr4(unsigned long mask)
27101+{
27102+ unsigned long cr4 = read_cr4();
27103+
27104+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27105+ return;
27106+
27107+ pax_open_kernel();
27108+ mmu_cr4_features |= mask;
27109+ pax_close_kernel();
27110+
27111+ if (trampoline_cr4_features)
27112+ *trampoline_cr4_features = mmu_cr4_features;
27113+ cr4 |= mask;
27114+ write_cr4(cr4);
27115+}
27116+EXPORT_SYMBOL(set_in_cr4);
27117+
27118+void clear_in_cr4(unsigned long mask)
27119+{
27120+ unsigned long cr4 = read_cr4();
27121+
27122+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27123+ return;
27124+
27125+ pax_open_kernel();
27126+ mmu_cr4_features &= ~mask;
27127+ pax_close_kernel();
27128+
27129+ if (trampoline_cr4_features)
27130+ *trampoline_cr4_features = mmu_cr4_features;
27131+ cr4 &= ~mask;
27132+ write_cr4(cr4);
27133+}
27134+EXPORT_SYMBOL(clear_in_cr4);
27135+
27136 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27137 int bootloader_type, bootloader_version;
27138
27139@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27140 * area (640->1Mb) as ram even though it is not.
27141 * take them out.
27142 */
27143- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27144+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27145
27146 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27147 }
27148@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27149 /* called before trim_bios_range() to spare extra sanitize */
27150 static void __init e820_add_kernel_range(void)
27151 {
27152- u64 start = __pa_symbol(_text);
27153+ u64 start = __pa_symbol(ktla_ktva(_text));
27154 u64 size = __pa_symbol(_end) - start;
27155
27156 /*
27157@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27158
27159 void __init setup_arch(char **cmdline_p)
27160 {
27161+#ifdef CONFIG_X86_32
27162+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27163+#else
27164 memblock_reserve(__pa_symbol(_text),
27165 (unsigned long)__bss_stop - (unsigned long)_text);
27166+#endif
27167
27168 early_reserve_initrd();
27169
27170@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
27171
27172 if (!boot_params.hdr.root_flags)
27173 root_mountflags &= ~MS_RDONLY;
27174- init_mm.start_code = (unsigned long) _text;
27175- init_mm.end_code = (unsigned long) _etext;
27176+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27177+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27178 init_mm.end_data = (unsigned long) _edata;
27179 init_mm.brk = _brk_end;
27180
27181 mpx_mm_init(&init_mm);
27182
27183- code_resource.start = __pa_symbol(_text);
27184- code_resource.end = __pa_symbol(_etext)-1;
27185- data_resource.start = __pa_symbol(_etext);
27186+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27187+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27188+ data_resource.start = __pa_symbol(_sdata);
27189 data_resource.end = __pa_symbol(_edata)-1;
27190 bss_resource.start = __pa_symbol(__bss_start);
27191 bss_resource.end = __pa_symbol(__bss_stop)-1;
27192diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27193index e4fcb87..9c06c55 100644
27194--- a/arch/x86/kernel/setup_percpu.c
27195+++ b/arch/x86/kernel/setup_percpu.c
27196@@ -21,19 +21,17 @@
27197 #include <asm/cpu.h>
27198 #include <asm/stackprotector.h>
27199
27200-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27201+#ifdef CONFIG_SMP
27202+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27203 EXPORT_PER_CPU_SYMBOL(cpu_number);
27204+#endif
27205
27206-#ifdef CONFIG_X86_64
27207 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27208-#else
27209-#define BOOT_PERCPU_OFFSET 0
27210-#endif
27211
27212 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27213 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27214
27215-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27216+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27217 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27218 };
27219 EXPORT_SYMBOL(__per_cpu_offset);
27220@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27221 {
27222 #ifdef CONFIG_NEED_MULTIPLE_NODES
27223 pg_data_t *last = NULL;
27224- unsigned int cpu;
27225+ int cpu;
27226
27227 for_each_possible_cpu(cpu) {
27228 int node = early_cpu_to_node(cpu);
27229@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27230 {
27231 #ifdef CONFIG_X86_32
27232 struct desc_struct gdt;
27233+ unsigned long base = per_cpu_offset(cpu);
27234
27235- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27236- 0x2 | DESCTYPE_S, 0x8);
27237- gdt.s = 1;
27238+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27239+ 0x83 | DESCTYPE_S, 0xC);
27240 write_gdt_entry(get_cpu_gdt_table(cpu),
27241 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27242 #endif
27243@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27244 /* alrighty, percpu areas up and running */
27245 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27246 for_each_possible_cpu(cpu) {
27247+#ifdef CONFIG_CC_STACKPROTECTOR
27248+#ifdef CONFIG_X86_32
27249+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27250+#endif
27251+#endif
27252 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27253 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27254 per_cpu(cpu_number, cpu) = cpu;
27255@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27256 */
27257 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27258 #endif
27259+#ifdef CONFIG_CC_STACKPROTECTOR
27260+#ifdef CONFIG_X86_32
27261+ if (!cpu)
27262+ per_cpu(stack_canary.canary, cpu) = canary;
27263+#endif
27264+#endif
27265 /*
27266 * Up to this point, the boot CPU has been using .init.data
27267 * area. Reload any changed state for the boot CPU.
27268diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27269index ed37a76..39f936e 100644
27270--- a/arch/x86/kernel/signal.c
27271+++ b/arch/x86/kernel/signal.c
27272@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27273 * Align the stack pointer according to the i386 ABI,
27274 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27275 */
27276- sp = ((sp + 4) & -16ul) - 4;
27277+ sp = ((sp - 12) & -16ul) - 4;
27278 #else /* !CONFIG_X86_32 */
27279 sp = round_down(sp, 16) - 8;
27280 #endif
27281@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27282 }
27283
27284 if (current->mm->context.vdso)
27285- restorer = current->mm->context.vdso +
27286- selected_vdso32->sym___kernel_sigreturn;
27287+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27288 else
27289- restorer = &frame->retcode;
27290+ restorer = (void __user *)&frame->retcode;
27291 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27292 restorer = ksig->ka.sa.sa_restorer;
27293
27294@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27295 * reasons and because gdb uses it as a signature to notice
27296 * signal handler stack frames.
27297 */
27298- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27299+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27300
27301 if (err)
27302 return -EFAULT;
27303@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27304 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27305
27306 /* Set up to return from userspace. */
27307- restorer = current->mm->context.vdso +
27308- selected_vdso32->sym___kernel_rt_sigreturn;
27309+ if (current->mm->context.vdso)
27310+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27311+ else
27312+ restorer = (void __user *)&frame->retcode;
27313 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27314 restorer = ksig->ka.sa.sa_restorer;
27315 put_user_ex(restorer, &frame->pretcode);
27316@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27317 * reasons and because gdb uses it as a signature to notice
27318 * signal handler stack frames.
27319 */
27320- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27321+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27322 } put_user_catch(err);
27323
27324 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27325@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27326 {
27327 int usig = signr_convert(ksig->sig);
27328 sigset_t *set = sigmask_to_save();
27329- compat_sigset_t *cset = (compat_sigset_t *) set;
27330+ sigset_t sigcopy;
27331+ compat_sigset_t *cset;
27332+
27333+ sigcopy = *set;
27334+
27335+ cset = (compat_sigset_t *) &sigcopy;
27336
27337 /* Set up the stack frame */
27338 if (is_ia32_frame()) {
27339@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27340 } else if (is_x32_frame()) {
27341 return x32_setup_rt_frame(ksig, cset, regs);
27342 } else {
27343- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27344+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27345 }
27346 }
27347
27348diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27349index be8e1bd..a3d93fa 100644
27350--- a/arch/x86/kernel/smp.c
27351+++ b/arch/x86/kernel/smp.c
27352@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27353
27354 __setup("nonmi_ipi", nonmi_ipi_setup);
27355
27356-struct smp_ops smp_ops = {
27357+struct smp_ops smp_ops __read_only = {
27358 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27359 .smp_prepare_cpus = native_smp_prepare_cpus,
27360 .smp_cpus_done = native_smp_cpus_done,
27361diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27362index 6d7022c..4feb6be 100644
27363--- a/arch/x86/kernel/smpboot.c
27364+++ b/arch/x86/kernel/smpboot.c
27365@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
27366
27367 enable_start_cpu0 = 0;
27368
27369-#ifdef CONFIG_X86_32
27370+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27371+ barrier();
27372+
27373 /* switch away from the initial page table */
27374+#ifdef CONFIG_PAX_PER_CPU_PGD
27375+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27376+#else
27377 load_cr3(swapper_pg_dir);
27378+#endif
27379 __flush_tlb_all();
27380-#endif
27381
27382- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27383- barrier();
27384 /*
27385 * Check TSC synchronization with the BP:
27386 */
27387@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27388 alternatives_enable_smp();
27389
27390 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27391- (THREAD_SIZE + task_stack_page(idle))) - 1);
27392+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27393 per_cpu(current_task, cpu) = idle;
27394+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27395
27396 #ifdef CONFIG_X86_32
27397 /* Stack for startup_32 can be just as for start_secondary onwards */
27398@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27399 clear_tsk_thread_flag(idle, TIF_FORK);
27400 initial_gs = per_cpu_offset(cpu);
27401 #endif
27402- per_cpu(kernel_stack, cpu) =
27403- (unsigned long)task_stack_page(idle) -
27404- KERNEL_STACK_OFFSET + THREAD_SIZE;
27405+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27406+ pax_open_kernel();
27407 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27408+ pax_close_kernel();
27409 initial_code = (unsigned long)start_secondary;
27410 stack_start = idle->thread.sp;
27411
27412@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27413 /* the FPU context is blank, nobody can own it */
27414 __cpu_disable_lazy_restore(cpu);
27415
27416+#ifdef CONFIG_PAX_PER_CPU_PGD
27417+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27418+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27419+ KERNEL_PGD_PTRS);
27420+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27421+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27422+ KERNEL_PGD_PTRS);
27423+#endif
27424+
27425 err = do_boot_cpu(apicid, cpu, tidle);
27426 if (err) {
27427 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27428diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27429index 9b4d51d..5d28b58 100644
27430--- a/arch/x86/kernel/step.c
27431+++ b/arch/x86/kernel/step.c
27432@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27433 struct desc_struct *desc;
27434 unsigned long base;
27435
27436- seg &= ~7UL;
27437+ seg >>= 3;
27438
27439 mutex_lock(&child->mm->context.lock);
27440- if (unlikely((seg >> 3) >= child->mm->context.size))
27441+ if (unlikely(seg >= child->mm->context.size))
27442 addr = -1L; /* bogus selector, access would fault */
27443 else {
27444 desc = child->mm->context.ldt + seg;
27445@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27446 addr += base;
27447 }
27448 mutex_unlock(&child->mm->context.lock);
27449- }
27450+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27451+ addr = ktla_ktva(addr);
27452
27453 return addr;
27454 }
27455@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27456 unsigned char opcode[15];
27457 unsigned long addr = convert_ip_to_linear(child, regs);
27458
27459+ if (addr == -EINVAL)
27460+ return 0;
27461+
27462 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27463 for (i = 0; i < copied; i++) {
27464 switch (opcode[i]) {
27465diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27466new file mode 100644
27467index 0000000..5877189
27468--- /dev/null
27469+++ b/arch/x86/kernel/sys_i386_32.c
27470@@ -0,0 +1,189 @@
27471+/*
27472+ * This file contains various random system calls that
27473+ * have a non-standard calling sequence on the Linux/i386
27474+ * platform.
27475+ */
27476+
27477+#include <linux/errno.h>
27478+#include <linux/sched.h>
27479+#include <linux/mm.h>
27480+#include <linux/fs.h>
27481+#include <linux/smp.h>
27482+#include <linux/sem.h>
27483+#include <linux/msg.h>
27484+#include <linux/shm.h>
27485+#include <linux/stat.h>
27486+#include <linux/syscalls.h>
27487+#include <linux/mman.h>
27488+#include <linux/file.h>
27489+#include <linux/utsname.h>
27490+#include <linux/ipc.h>
27491+#include <linux/elf.h>
27492+
27493+#include <linux/uaccess.h>
27494+#include <linux/unistd.h>
27495+
27496+#include <asm/syscalls.h>
27497+
27498+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27499+{
27500+ unsigned long pax_task_size = TASK_SIZE;
27501+
27502+#ifdef CONFIG_PAX_SEGMEXEC
27503+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27504+ pax_task_size = SEGMEXEC_TASK_SIZE;
27505+#endif
27506+
27507+ if (flags & MAP_FIXED)
27508+ if (len > pax_task_size || addr > pax_task_size - len)
27509+ return -EINVAL;
27510+
27511+ return 0;
27512+}
27513+
27514+/*
27515+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27516+ */
27517+static unsigned long get_align_mask(void)
27518+{
27519+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27520+ return 0;
27521+
27522+ if (!(current->flags & PF_RANDOMIZE))
27523+ return 0;
27524+
27525+ return va_align.mask;
27526+}
27527+
27528+unsigned long
27529+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27530+ unsigned long len, unsigned long pgoff, unsigned long flags)
27531+{
27532+ struct mm_struct *mm = current->mm;
27533+ struct vm_area_struct *vma;
27534+ unsigned long pax_task_size = TASK_SIZE;
27535+ struct vm_unmapped_area_info info;
27536+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27537+
27538+#ifdef CONFIG_PAX_SEGMEXEC
27539+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27540+ pax_task_size = SEGMEXEC_TASK_SIZE;
27541+#endif
27542+
27543+ pax_task_size -= PAGE_SIZE;
27544+
27545+ if (len > pax_task_size)
27546+ return -ENOMEM;
27547+
27548+ if (flags & MAP_FIXED)
27549+ return addr;
27550+
27551+#ifdef CONFIG_PAX_RANDMMAP
27552+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27553+#endif
27554+
27555+ if (addr) {
27556+ addr = PAGE_ALIGN(addr);
27557+ if (pax_task_size - len >= addr) {
27558+ vma = find_vma(mm, addr);
27559+ if (check_heap_stack_gap(vma, addr, len, offset))
27560+ return addr;
27561+ }
27562+ }
27563+
27564+ info.flags = 0;
27565+ info.length = len;
27566+ info.align_mask = filp ? get_align_mask() : 0;
27567+ info.align_offset = pgoff << PAGE_SHIFT;
27568+ info.threadstack_offset = offset;
27569+
27570+#ifdef CONFIG_PAX_PAGEEXEC
27571+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27572+ info.low_limit = 0x00110000UL;
27573+ info.high_limit = mm->start_code;
27574+
27575+#ifdef CONFIG_PAX_RANDMMAP
27576+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27577+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27578+#endif
27579+
27580+ if (info.low_limit < info.high_limit) {
27581+ addr = vm_unmapped_area(&info);
27582+ if (!IS_ERR_VALUE(addr))
27583+ return addr;
27584+ }
27585+ } else
27586+#endif
27587+
27588+ info.low_limit = mm->mmap_base;
27589+ info.high_limit = pax_task_size;
27590+
27591+ return vm_unmapped_area(&info);
27592+}
27593+
27594+unsigned long
27595+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27596+ const unsigned long len, const unsigned long pgoff,
27597+ const unsigned long flags)
27598+{
27599+ struct vm_area_struct *vma;
27600+ struct mm_struct *mm = current->mm;
27601+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27602+ struct vm_unmapped_area_info info;
27603+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27604+
27605+#ifdef CONFIG_PAX_SEGMEXEC
27606+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27607+ pax_task_size = SEGMEXEC_TASK_SIZE;
27608+#endif
27609+
27610+ pax_task_size -= PAGE_SIZE;
27611+
27612+ /* requested length too big for entire address space */
27613+ if (len > pax_task_size)
27614+ return -ENOMEM;
27615+
27616+ if (flags & MAP_FIXED)
27617+ return addr;
27618+
27619+#ifdef CONFIG_PAX_PAGEEXEC
27620+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27621+ goto bottomup;
27622+#endif
27623+
27624+#ifdef CONFIG_PAX_RANDMMAP
27625+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27626+#endif
27627+
27628+ /* requesting a specific address */
27629+ if (addr) {
27630+ addr = PAGE_ALIGN(addr);
27631+ if (pax_task_size - len >= addr) {
27632+ vma = find_vma(mm, addr);
27633+ if (check_heap_stack_gap(vma, addr, len, offset))
27634+ return addr;
27635+ }
27636+ }
27637+
27638+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27639+ info.length = len;
27640+ info.low_limit = PAGE_SIZE;
27641+ info.high_limit = mm->mmap_base;
27642+ info.align_mask = filp ? get_align_mask() : 0;
27643+ info.align_offset = pgoff << PAGE_SHIFT;
27644+ info.threadstack_offset = offset;
27645+
27646+ addr = vm_unmapped_area(&info);
27647+ if (!(addr & ~PAGE_MASK))
27648+ return addr;
27649+ VM_BUG_ON(addr != -ENOMEM);
27650+
27651+bottomup:
27652+ /*
27653+ * A failed mmap() very likely causes application failure,
27654+ * so fall back to the bottom-up function here. This scenario
27655+ * can happen with large stack limits and large mmap()
27656+ * allocations.
27657+ */
27658+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27659+}
27660diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27661index 30277e2..5664a29 100644
27662--- a/arch/x86/kernel/sys_x86_64.c
27663+++ b/arch/x86/kernel/sys_x86_64.c
27664@@ -81,8 +81,8 @@ out:
27665 return error;
27666 }
27667
27668-static void find_start_end(unsigned long flags, unsigned long *begin,
27669- unsigned long *end)
27670+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27671+ unsigned long *begin, unsigned long *end)
27672 {
27673 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27674 unsigned long new_begin;
27675@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27676 *begin = new_begin;
27677 }
27678 } else {
27679- *begin = current->mm->mmap_legacy_base;
27680+ *begin = mm->mmap_legacy_base;
27681 *end = TASK_SIZE;
27682 }
27683 }
27684@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27685 struct vm_area_struct *vma;
27686 struct vm_unmapped_area_info info;
27687 unsigned long begin, end;
27688+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27689
27690 if (flags & MAP_FIXED)
27691 return addr;
27692
27693- find_start_end(flags, &begin, &end);
27694+ find_start_end(mm, flags, &begin, &end);
27695
27696 if (len > end)
27697 return -ENOMEM;
27698
27699+#ifdef CONFIG_PAX_RANDMMAP
27700+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27701+#endif
27702+
27703 if (addr) {
27704 addr = PAGE_ALIGN(addr);
27705 vma = find_vma(mm, addr);
27706- if (end - len >= addr &&
27707- (!vma || addr + len <= vma->vm_start))
27708+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27709 return addr;
27710 }
27711
27712@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27713 info.high_limit = end;
27714 info.align_mask = filp ? get_align_mask() : 0;
27715 info.align_offset = pgoff << PAGE_SHIFT;
27716+ info.threadstack_offset = offset;
27717 return vm_unmapped_area(&info);
27718 }
27719
27720@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27721 struct mm_struct *mm = current->mm;
27722 unsigned long addr = addr0;
27723 struct vm_unmapped_area_info info;
27724+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27725
27726 /* requested length too big for entire address space */
27727 if (len > TASK_SIZE)
27728@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27729 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27730 goto bottomup;
27731
27732+#ifdef CONFIG_PAX_RANDMMAP
27733+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27734+#endif
27735+
27736 /* requesting a specific address */
27737 if (addr) {
27738 addr = PAGE_ALIGN(addr);
27739 vma = find_vma(mm, addr);
27740- if (TASK_SIZE - len >= addr &&
27741- (!vma || addr + len <= vma->vm_start))
27742+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27743 return addr;
27744 }
27745
27746@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27747 info.high_limit = mm->mmap_base;
27748 info.align_mask = filp ? get_align_mask() : 0;
27749 info.align_offset = pgoff << PAGE_SHIFT;
27750+ info.threadstack_offset = offset;
27751 addr = vm_unmapped_area(&info);
27752 if (!(addr & ~PAGE_MASK))
27753 return addr;
27754diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27755index 91a4496..bb87552 100644
27756--- a/arch/x86/kernel/tboot.c
27757+++ b/arch/x86/kernel/tboot.c
27758@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27759
27760 void tboot_shutdown(u32 shutdown_type)
27761 {
27762- void (*shutdown)(void);
27763+ void (* __noreturn shutdown)(void);
27764
27765 if (!tboot_enabled())
27766 return;
27767@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27768
27769 switch_to_tboot_pt();
27770
27771- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27772+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27773 shutdown();
27774
27775 /* should not reach here */
27776@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27777 return -ENODEV;
27778 }
27779
27780-static atomic_t ap_wfs_count;
27781+static atomic_unchecked_t ap_wfs_count;
27782
27783 static int tboot_wait_for_aps(int num_aps)
27784 {
27785@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27786 {
27787 switch (action) {
27788 case CPU_DYING:
27789- atomic_inc(&ap_wfs_count);
27790+ atomic_inc_unchecked(&ap_wfs_count);
27791 if (num_online_cpus() == 1)
27792- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27793+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27794 return NOTIFY_BAD;
27795 break;
27796 }
27797@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27798
27799 tboot_create_trampoline();
27800
27801- atomic_set(&ap_wfs_count, 0);
27802+ atomic_set_unchecked(&ap_wfs_count, 0);
27803 register_hotcpu_notifier(&tboot_cpu_notifier);
27804
27805 #ifdef CONFIG_DEBUG_FS
27806diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27807index 25adc0e..1df4349 100644
27808--- a/arch/x86/kernel/time.c
27809+++ b/arch/x86/kernel/time.c
27810@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27811 {
27812 unsigned long pc = instruction_pointer(regs);
27813
27814- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27815+ if (!user_mode(regs) && in_lock_functions(pc)) {
27816 #ifdef CONFIG_FRAME_POINTER
27817- return *(unsigned long *)(regs->bp + sizeof(long));
27818+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27819 #else
27820 unsigned long *sp =
27821 (unsigned long *)kernel_stack_pointer(regs);
27822@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27823 * or above a saved flags. Eflags has bits 22-31 zero,
27824 * kernel addresses don't.
27825 */
27826+
27827+#ifdef CONFIG_PAX_KERNEXEC
27828+ return ktla_ktva(sp[0]);
27829+#else
27830 if (sp[0] >> 22)
27831 return sp[0];
27832 if (sp[1] >> 22)
27833 return sp[1];
27834 #endif
27835+
27836+#endif
27837 }
27838 return pc;
27839 }
27840diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27841index 7fc5e84..c6e445a 100644
27842--- a/arch/x86/kernel/tls.c
27843+++ b/arch/x86/kernel/tls.c
27844@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27845 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27846 return -EINVAL;
27847
27848+#ifdef CONFIG_PAX_SEGMEXEC
27849+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27850+ return -EINVAL;
27851+#endif
27852+
27853 set_tls_desc(p, idx, &info, 1);
27854
27855 return 0;
27856@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27857
27858 if (kbuf)
27859 info = kbuf;
27860- else if (__copy_from_user(infobuf, ubuf, count))
27861+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27862 return -EFAULT;
27863 else
27864 info = infobuf;
27865diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27866index 1c113db..287b42e 100644
27867--- a/arch/x86/kernel/tracepoint.c
27868+++ b/arch/x86/kernel/tracepoint.c
27869@@ -9,11 +9,11 @@
27870 #include <linux/atomic.h>
27871
27872 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27873-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27874+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27875 (unsigned long) trace_idt_table };
27876
27877 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27878-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27879+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27880
27881 static int trace_irq_vector_refcount;
27882 static DEFINE_MUTEX(irq_vector_mutex);
27883diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27884index 88900e2..aa4149d 100644
27885--- a/arch/x86/kernel/traps.c
27886+++ b/arch/x86/kernel/traps.c
27887@@ -68,7 +68,7 @@
27888 #include <asm/proto.h>
27889
27890 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27891-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27892+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27893 #else
27894 #include <asm/processor-flags.h>
27895 #include <asm/setup.h>
27896@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27897 #endif
27898
27899 /* Must be page-aligned because the real IDT is used in a fixmap. */
27900-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27901+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27902
27903 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27904 EXPORT_SYMBOL_GPL(used_vectors);
27905@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27906 }
27907
27908 static nokprobe_inline int
27909-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27910+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27911 struct pt_regs *regs, long error_code)
27912 {
27913 #ifdef CONFIG_X86_32
27914- if (regs->flags & X86_VM_MASK) {
27915+ if (v8086_mode(regs)) {
27916 /*
27917 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27918 * On nmi (interrupt 2), do_trap should not be called.
27919@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27920 return -1;
27921 }
27922 #endif
27923- if (!user_mode(regs)) {
27924+ if (!user_mode_novm(regs)) {
27925 if (!fixup_exception(regs)) {
27926 tsk->thread.error_code = error_code;
27927 tsk->thread.trap_nr = trapnr;
27928+
27929+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27930+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27931+ str = "PAX: suspicious stack segment fault";
27932+#endif
27933+
27934 die(str, regs, error_code);
27935 }
27936+
27937+#ifdef CONFIG_PAX_REFCOUNT
27938+ if (trapnr == X86_TRAP_OF)
27939+ pax_report_refcount_overflow(regs);
27940+#endif
27941+
27942 return 0;
27943 }
27944
27945@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27946 }
27947
27948 static void
27949-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27950+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27951 long error_code, siginfo_t *info)
27952 {
27953 struct task_struct *tsk = current;
27954@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27955 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27956 printk_ratelimit()) {
27957 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27958- tsk->comm, tsk->pid, str,
27959+ tsk->comm, task_pid_nr(tsk), str,
27960 regs->ip, regs->sp, error_code);
27961 print_vma_addr(" in ", regs->ip);
27962 pr_cont("\n");
27963@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27964 tsk->thread.error_code = error_code;
27965 tsk->thread.trap_nr = X86_TRAP_DF;
27966
27967+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27968+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27969+ die("grsec: kernel stack overflow detected", regs, error_code);
27970+#endif
27971+
27972 #ifdef CONFIG_DOUBLEFAULT
27973 df_debug(regs, error_code);
27974 #endif
27975@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27976 conditional_sti(regs);
27977
27978 #ifdef CONFIG_X86_32
27979- if (regs->flags & X86_VM_MASK) {
27980+ if (v8086_mode(regs)) {
27981 local_irq_enable();
27982 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27983 goto exit;
27984@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27985 #endif
27986
27987 tsk = current;
27988- if (!user_mode(regs)) {
27989+ if (!user_mode_novm(regs)) {
27990 if (fixup_exception(regs))
27991 goto exit;
27992
27993 tsk->thread.error_code = error_code;
27994 tsk->thread.trap_nr = X86_TRAP_GP;
27995 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
27996- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
27997+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
27998+
27999+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28000+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28001+ die("PAX: suspicious general protection fault", regs, error_code);
28002+ else
28003+#endif
28004+
28005 die("general protection fault", regs, error_code);
28006+ }
28007 goto exit;
28008 }
28009
28010+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28011+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28012+ struct mm_struct *mm = tsk->mm;
28013+ unsigned long limit;
28014+
28015+ down_write(&mm->mmap_sem);
28016+ limit = mm->context.user_cs_limit;
28017+ if (limit < TASK_SIZE) {
28018+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28019+ up_write(&mm->mmap_sem);
28020+ return;
28021+ }
28022+ up_write(&mm->mmap_sem);
28023+ }
28024+#endif
28025+
28026 tsk->thread.error_code = error_code;
28027 tsk->thread.trap_nr = X86_TRAP_GP;
28028
28029@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28030 container_of(task_pt_regs(current),
28031 struct bad_iret_stack, regs);
28032
28033+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28034+ new_stack = s;
28035+
28036 /* Copy the IRET target to the new stack. */
28037 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28038
28039 /* Copy the remainder of the stack from the current stack. */
28040 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28041
28042- BUG_ON(!user_mode_vm(&new_stack->regs));
28043+ BUG_ON(!user_mode(&new_stack->regs));
28044 return new_stack;
28045 }
28046 NOKPROBE_SYMBOL(fixup_bad_iret);
28047@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28048 /* It's safe to allow irq's after DR6 has been saved */
28049 preempt_conditional_sti(regs);
28050
28051- if (regs->flags & X86_VM_MASK) {
28052+ if (v8086_mode(regs)) {
28053 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28054 X86_TRAP_DB);
28055 preempt_conditional_cli(regs);
28056@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28057 * We already checked v86 mode above, so we can check for kernel mode
28058 * by just checking the CPL of CS.
28059 */
28060- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28061+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28062 tsk->thread.debugreg6 &= ~DR_STEP;
28063 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28064 regs->flags &= ~X86_EFLAGS_TF;
28065@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28066 return;
28067 conditional_sti(regs);
28068
28069- if (!user_mode_vm(regs))
28070+ if (!user_mode(regs))
28071 {
28072 if (!fixup_exception(regs)) {
28073 task->thread.error_code = error_code;
28074diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28075index 5054497..139f8f8 100644
28076--- a/arch/x86/kernel/tsc.c
28077+++ b/arch/x86/kernel/tsc.c
28078@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28079 */
28080 smp_wmb();
28081
28082- ACCESS_ONCE(c2n->head) = data;
28083+ ACCESS_ONCE_RW(c2n->head) = data;
28084 }
28085
28086 /*
28087diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28088index 8b96a94..792b410 100644
28089--- a/arch/x86/kernel/uprobes.c
28090+++ b/arch/x86/kernel/uprobes.c
28091@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28092 int ret = NOTIFY_DONE;
28093
28094 /* We are only interested in userspace traps */
28095- if (regs && !user_mode_vm(regs))
28096+ if (regs && !user_mode(regs))
28097 return NOTIFY_DONE;
28098
28099 switch (val) {
28100@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28101
28102 if (nleft != rasize) {
28103 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28104- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28105+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28106
28107 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28108 }
28109diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28110index b9242ba..50c5edd 100644
28111--- a/arch/x86/kernel/verify_cpu.S
28112+++ b/arch/x86/kernel/verify_cpu.S
28113@@ -20,6 +20,7 @@
28114 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28115 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28116 * arch/x86/kernel/head_32.S: processor startup
28117+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28118 *
28119 * verify_cpu, returns the status of longmode and SSE in register %eax.
28120 * 0: Success 1: Failure
28121diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28122index e8edcf5..27f9344 100644
28123--- a/arch/x86/kernel/vm86_32.c
28124+++ b/arch/x86/kernel/vm86_32.c
28125@@ -44,6 +44,7 @@
28126 #include <linux/ptrace.h>
28127 #include <linux/audit.h>
28128 #include <linux/stddef.h>
28129+#include <linux/grsecurity.h>
28130
28131 #include <asm/uaccess.h>
28132 #include <asm/io.h>
28133@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28134 do_exit(SIGSEGV);
28135 }
28136
28137- tss = &per_cpu(init_tss, get_cpu());
28138+ tss = init_tss + get_cpu();
28139 current->thread.sp0 = current->thread.saved_sp0;
28140 current->thread.sysenter_cs = __KERNEL_CS;
28141 load_sp0(tss, &current->thread);
28142@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28143
28144 if (tsk->thread.saved_sp0)
28145 return -EPERM;
28146+
28147+#ifdef CONFIG_GRKERNSEC_VM86
28148+ if (!capable(CAP_SYS_RAWIO)) {
28149+ gr_handle_vm86();
28150+ return -EPERM;
28151+ }
28152+#endif
28153+
28154 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28155 offsetof(struct kernel_vm86_struct, vm86plus) -
28156 sizeof(info.regs));
28157@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28158 int tmp;
28159 struct vm86plus_struct __user *v86;
28160
28161+#ifdef CONFIG_GRKERNSEC_VM86
28162+ if (!capable(CAP_SYS_RAWIO)) {
28163+ gr_handle_vm86();
28164+ return -EPERM;
28165+ }
28166+#endif
28167+
28168 tsk = current;
28169 switch (cmd) {
28170 case VM86_REQUEST_IRQ:
28171@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28172 tsk->thread.saved_fs = info->regs32->fs;
28173 tsk->thread.saved_gs = get_user_gs(info->regs32);
28174
28175- tss = &per_cpu(init_tss, get_cpu());
28176+ tss = init_tss + get_cpu();
28177 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28178 if (cpu_has_sep)
28179 tsk->thread.sysenter_cs = 0;
28180@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28181 goto cannot_handle;
28182 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28183 goto cannot_handle;
28184- intr_ptr = (unsigned long __user *) (i << 2);
28185+ intr_ptr = (__force unsigned long __user *) (i << 2);
28186 if (get_user(segoffs, intr_ptr))
28187 goto cannot_handle;
28188 if ((segoffs >> 16) == BIOSSEG)
28189diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28190index 00bf300..129df8e 100644
28191--- a/arch/x86/kernel/vmlinux.lds.S
28192+++ b/arch/x86/kernel/vmlinux.lds.S
28193@@ -26,6 +26,13 @@
28194 #include <asm/page_types.h>
28195 #include <asm/cache.h>
28196 #include <asm/boot.h>
28197+#include <asm/segment.h>
28198+
28199+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28200+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28201+#else
28202+#define __KERNEL_TEXT_OFFSET 0
28203+#endif
28204
28205 #undef i386 /* in case the preprocessor is a 32bit one */
28206
28207@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28208
28209 PHDRS {
28210 text PT_LOAD FLAGS(5); /* R_E */
28211+#ifdef CONFIG_X86_32
28212+ module PT_LOAD FLAGS(5); /* R_E */
28213+#endif
28214+#ifdef CONFIG_XEN
28215+ rodata PT_LOAD FLAGS(5); /* R_E */
28216+#else
28217+ rodata PT_LOAD FLAGS(4); /* R__ */
28218+#endif
28219 data PT_LOAD FLAGS(6); /* RW_ */
28220-#ifdef CONFIG_X86_64
28221+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28222 #ifdef CONFIG_SMP
28223 percpu PT_LOAD FLAGS(6); /* RW_ */
28224 #endif
28225+ text.init PT_LOAD FLAGS(5); /* R_E */
28226+ text.exit PT_LOAD FLAGS(5); /* R_E */
28227 init PT_LOAD FLAGS(7); /* RWE */
28228-#endif
28229 note PT_NOTE FLAGS(0); /* ___ */
28230 }
28231
28232 SECTIONS
28233 {
28234 #ifdef CONFIG_X86_32
28235- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28236- phys_startup_32 = startup_32 - LOAD_OFFSET;
28237+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28238 #else
28239- . = __START_KERNEL;
28240- phys_startup_64 = startup_64 - LOAD_OFFSET;
28241+ . = __START_KERNEL;
28242 #endif
28243
28244 /* Text and read-only data */
28245- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28246- _text = .;
28247+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28248 /* bootstrapping code */
28249+#ifdef CONFIG_X86_32
28250+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28251+#else
28252+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28253+#endif
28254+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28255+ _text = .;
28256 HEAD_TEXT
28257 . = ALIGN(8);
28258 _stext = .;
28259@@ -104,13 +124,47 @@ SECTIONS
28260 IRQENTRY_TEXT
28261 *(.fixup)
28262 *(.gnu.warning)
28263- /* End of text section */
28264- _etext = .;
28265 } :text = 0x9090
28266
28267- NOTES :text :note
28268+ . += __KERNEL_TEXT_OFFSET;
28269
28270- EXCEPTION_TABLE(16) :text = 0x9090
28271+#ifdef CONFIG_X86_32
28272+ . = ALIGN(PAGE_SIZE);
28273+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28274+
28275+#ifdef CONFIG_PAX_KERNEXEC
28276+ MODULES_EXEC_VADDR = .;
28277+ BYTE(0)
28278+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28279+ . = ALIGN(HPAGE_SIZE) - 1;
28280+ MODULES_EXEC_END = .;
28281+#endif
28282+
28283+ } :module
28284+#endif
28285+
28286+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28287+ /* End of text section */
28288+ BYTE(0)
28289+ _etext = . - __KERNEL_TEXT_OFFSET;
28290+ }
28291+
28292+#ifdef CONFIG_X86_32
28293+ . = ALIGN(PAGE_SIZE);
28294+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28295+ . = ALIGN(PAGE_SIZE);
28296+ *(.empty_zero_page)
28297+ *(.initial_pg_fixmap)
28298+ *(.initial_pg_pmd)
28299+ *(.initial_page_table)
28300+ *(.swapper_pg_dir)
28301+ } :rodata
28302+#endif
28303+
28304+ . = ALIGN(PAGE_SIZE);
28305+ NOTES :rodata :note
28306+
28307+ EXCEPTION_TABLE(16) :rodata
28308
28309 #if defined(CONFIG_DEBUG_RODATA)
28310 /* .text should occupy whole number of pages */
28311@@ -122,16 +176,20 @@ SECTIONS
28312
28313 /* Data */
28314 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28315+
28316+#ifdef CONFIG_PAX_KERNEXEC
28317+ . = ALIGN(HPAGE_SIZE);
28318+#else
28319+ . = ALIGN(PAGE_SIZE);
28320+#endif
28321+
28322 /* Start of data section */
28323 _sdata = .;
28324
28325 /* init_task */
28326 INIT_TASK_DATA(THREAD_SIZE)
28327
28328-#ifdef CONFIG_X86_32
28329- /* 32 bit has nosave before _edata */
28330 NOSAVE_DATA
28331-#endif
28332
28333 PAGE_ALIGNED_DATA(PAGE_SIZE)
28334
28335@@ -174,12 +232,19 @@ SECTIONS
28336 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28337
28338 /* Init code and data - will be freed after init */
28339- . = ALIGN(PAGE_SIZE);
28340 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28341+ BYTE(0)
28342+
28343+#ifdef CONFIG_PAX_KERNEXEC
28344+ . = ALIGN(HPAGE_SIZE);
28345+#else
28346+ . = ALIGN(PAGE_SIZE);
28347+#endif
28348+
28349 __init_begin = .; /* paired with __init_end */
28350- }
28351+ } :init.begin
28352
28353-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28354+#ifdef CONFIG_SMP
28355 /*
28356 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28357 * output PHDR, so the next output section - .init.text - should
28358@@ -190,12 +255,27 @@ SECTIONS
28359 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28360 #endif
28361
28362- INIT_TEXT_SECTION(PAGE_SIZE)
28363-#ifdef CONFIG_X86_64
28364- :init
28365-#endif
28366+ . = ALIGN(PAGE_SIZE);
28367+ init_begin = .;
28368+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28369+ VMLINUX_SYMBOL(_sinittext) = .;
28370+ INIT_TEXT
28371+ . = ALIGN(PAGE_SIZE);
28372+ } :text.init
28373
28374- INIT_DATA_SECTION(16)
28375+ /*
28376+ * .exit.text is discard at runtime, not link time, to deal with
28377+ * references from .altinstructions and .eh_frame
28378+ */
28379+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28380+ EXIT_TEXT
28381+ VMLINUX_SYMBOL(_einittext) = .;
28382+ . = ALIGN(16);
28383+ } :text.exit
28384+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28385+
28386+ . = ALIGN(PAGE_SIZE);
28387+ INIT_DATA_SECTION(16) :init
28388
28389 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28390 __x86_cpu_dev_start = .;
28391@@ -266,19 +346,12 @@ SECTIONS
28392 }
28393
28394 . = ALIGN(8);
28395- /*
28396- * .exit.text is discard at runtime, not link time, to deal with
28397- * references from .altinstructions and .eh_frame
28398- */
28399- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28400- EXIT_TEXT
28401- }
28402
28403 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28404 EXIT_DATA
28405 }
28406
28407-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28408+#ifndef CONFIG_SMP
28409 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28410 #endif
28411
28412@@ -297,16 +370,10 @@ SECTIONS
28413 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28414 __smp_locks = .;
28415 *(.smp_locks)
28416- . = ALIGN(PAGE_SIZE);
28417 __smp_locks_end = .;
28418+ . = ALIGN(PAGE_SIZE);
28419 }
28420
28421-#ifdef CONFIG_X86_64
28422- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28423- NOSAVE_DATA
28424- }
28425-#endif
28426-
28427 /* BSS */
28428 . = ALIGN(PAGE_SIZE);
28429 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28430@@ -322,6 +389,7 @@ SECTIONS
28431 __brk_base = .;
28432 . += 64 * 1024; /* 64k alignment slop space */
28433 *(.brk_reservation) /* areas brk users have reserved */
28434+ . = ALIGN(HPAGE_SIZE);
28435 __brk_limit = .;
28436 }
28437
28438@@ -348,13 +416,12 @@ SECTIONS
28439 * for the boot processor.
28440 */
28441 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28442-INIT_PER_CPU(gdt_page);
28443 INIT_PER_CPU(irq_stack_union);
28444
28445 /*
28446 * Build-time check on the image size:
28447 */
28448-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28449+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28450 "kernel image bigger than KERNEL_IMAGE_SIZE");
28451
28452 #ifdef CONFIG_SMP
28453diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28454index 2dcc6ff..082dc7a 100644
28455--- a/arch/x86/kernel/vsyscall_64.c
28456+++ b/arch/x86/kernel/vsyscall_64.c
28457@@ -38,15 +38,13 @@
28458 #define CREATE_TRACE_POINTS
28459 #include "vsyscall_trace.h"
28460
28461-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28462+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28463
28464 static int __init vsyscall_setup(char *str)
28465 {
28466 if (str) {
28467 if (!strcmp("emulate", str))
28468 vsyscall_mode = EMULATE;
28469- else if (!strcmp("native", str))
28470- vsyscall_mode = NATIVE;
28471 else if (!strcmp("none", str))
28472 vsyscall_mode = NONE;
28473 else
28474@@ -264,8 +262,7 @@ do_ret:
28475 return true;
28476
28477 sigsegv:
28478- force_sig(SIGSEGV, current);
28479- return true;
28480+ do_group_exit(SIGKILL);
28481 }
28482
28483 /*
28484@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28485 static struct vm_area_struct gate_vma = {
28486 .vm_start = VSYSCALL_ADDR,
28487 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28488- .vm_page_prot = PAGE_READONLY_EXEC,
28489- .vm_flags = VM_READ | VM_EXEC,
28490+ .vm_page_prot = PAGE_READONLY,
28491+ .vm_flags = VM_READ,
28492 .vm_ops = &gate_vma_ops,
28493 };
28494
28495@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28496 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28497
28498 if (vsyscall_mode != NONE)
28499- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28500- vsyscall_mode == NATIVE
28501- ? PAGE_KERNEL_VSYSCALL
28502- : PAGE_KERNEL_VVAR);
28503+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28504
28505 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28506 (unsigned long)VSYSCALL_ADDR);
28507diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28508index 04068192..4d75aa6 100644
28509--- a/arch/x86/kernel/x8664_ksyms_64.c
28510+++ b/arch/x86/kernel/x8664_ksyms_64.c
28511@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28512 EXPORT_SYMBOL(copy_user_generic_unrolled);
28513 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28514 EXPORT_SYMBOL(__copy_user_nocache);
28515-EXPORT_SYMBOL(_copy_from_user);
28516-EXPORT_SYMBOL(_copy_to_user);
28517
28518 EXPORT_SYMBOL(copy_page);
28519 EXPORT_SYMBOL(clear_page);
28520@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28521 EXPORT_SYMBOL(___preempt_schedule_context);
28522 #endif
28523 #endif
28524+
28525+#ifdef CONFIG_PAX_PER_CPU_PGD
28526+EXPORT_SYMBOL(cpu_pgd);
28527+#endif
28528diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28529index 234b072..b7ab191 100644
28530--- a/arch/x86/kernel/x86_init.c
28531+++ b/arch/x86/kernel/x86_init.c
28532@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28533 static void default_nmi_init(void) { };
28534 static int default_i8042_detect(void) { return 1; };
28535
28536-struct x86_platform_ops x86_platform = {
28537+struct x86_platform_ops x86_platform __read_only = {
28538 .calibrate_tsc = native_calibrate_tsc,
28539 .get_wallclock = mach_get_cmos_time,
28540 .set_wallclock = mach_set_rtc_mmss,
28541@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28542 EXPORT_SYMBOL_GPL(x86_platform);
28543
28544 #if defined(CONFIG_PCI_MSI)
28545-struct x86_msi_ops x86_msi = {
28546+struct x86_msi_ops x86_msi __read_only = {
28547 .setup_msi_irqs = native_setup_msi_irqs,
28548 .compose_msi_msg = native_compose_msi_msg,
28549 .teardown_msi_irq = native_teardown_msi_irq,
28550@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28551 }
28552 #endif
28553
28554-struct x86_io_apic_ops x86_io_apic_ops = {
28555+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28556 .init = native_io_apic_init_mappings,
28557 .read = native_io_apic_read,
28558 .write = native_io_apic_write,
28559diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28560index 0de1fae..298d037 100644
28561--- a/arch/x86/kernel/xsave.c
28562+++ b/arch/x86/kernel/xsave.c
28563@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28564
28565 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28566 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28567- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28568+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28569
28570 if (!use_xsave())
28571 return err;
28572
28573- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28574+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28575
28576 /*
28577 * Read the xstate_bv which we copied (directly from the cpu or
28578 * from the state in task struct) to the user buffers.
28579 */
28580- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28581+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28582
28583 /*
28584 * For legacy compatible, we always set FP/SSE bits in the bit
28585@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28586 */
28587 xstate_bv |= XSTATE_FPSSE;
28588
28589- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28590+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28591
28592 return err;
28593 }
28594@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28595 {
28596 int err;
28597
28598+ buf = (struct xsave_struct __user *)____m(buf);
28599 if (use_xsave())
28600 err = xsave_user(buf);
28601 else if (use_fxsr())
28602@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28603 */
28604 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28605 {
28606+ buf = (void __user *)____m(buf);
28607 if (use_xsave()) {
28608 if ((unsigned long)buf % 64 || fx_only) {
28609 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28610diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28611index 8a80737..bac4961 100644
28612--- a/arch/x86/kvm/cpuid.c
28613+++ b/arch/x86/kvm/cpuid.c
28614@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28615 struct kvm_cpuid2 *cpuid,
28616 struct kvm_cpuid_entry2 __user *entries)
28617 {
28618- int r;
28619+ int r, i;
28620
28621 r = -E2BIG;
28622 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28623 goto out;
28624 r = -EFAULT;
28625- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28626- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28627+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28628 goto out;
28629+ for (i = 0; i < cpuid->nent; ++i) {
28630+ struct kvm_cpuid_entry2 cpuid_entry;
28631+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28632+ goto out;
28633+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28634+ }
28635 vcpu->arch.cpuid_nent = cpuid->nent;
28636 kvm_apic_set_version(vcpu);
28637 kvm_x86_ops->cpuid_update(vcpu);
28638@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28639 struct kvm_cpuid2 *cpuid,
28640 struct kvm_cpuid_entry2 __user *entries)
28641 {
28642- int r;
28643+ int r, i;
28644
28645 r = -E2BIG;
28646 if (cpuid->nent < vcpu->arch.cpuid_nent)
28647 goto out;
28648 r = -EFAULT;
28649- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28650- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28651+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28652 goto out;
28653+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28654+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28655+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28656+ goto out;
28657+ }
28658 return 0;
28659
28660 out:
28661diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28662index b24c2d8..e1e4e25 100644
28663--- a/arch/x86/kvm/emulate.c
28664+++ b/arch/x86/kvm/emulate.c
28665@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28666 int cr = ctxt->modrm_reg;
28667 u64 efer = 0;
28668
28669- static u64 cr_reserved_bits[] = {
28670+ static const u64 cr_reserved_bits[] = {
28671 0xffffffff00000000ULL,
28672 0, 0, 0, /* CR3 checked later */
28673 CR4_RESERVED_BITS,
28674diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28675index d52dcf0..cec7e84 100644
28676--- a/arch/x86/kvm/lapic.c
28677+++ b/arch/x86/kvm/lapic.c
28678@@ -55,7 +55,7 @@
28679 #define APIC_BUS_CYCLE_NS 1
28680
28681 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28682-#define apic_debug(fmt, arg...)
28683+#define apic_debug(fmt, arg...) do {} while (0)
28684
28685 #define APIC_LVT_NUM 6
28686 /* 14 is the version for Xeon and Pentium 8.4.8*/
28687diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28688index fd49c86..77e1aa0 100644
28689--- a/arch/x86/kvm/paging_tmpl.h
28690+++ b/arch/x86/kvm/paging_tmpl.h
28691@@ -343,7 +343,7 @@ retry_walk:
28692 if (unlikely(kvm_is_error_hva(host_addr)))
28693 goto error;
28694
28695- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28696+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28697 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28698 goto error;
28699 walker->ptep_user[walker->level - 1] = ptep_user;
28700diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28701index 41dd038..de331cf 100644
28702--- a/arch/x86/kvm/svm.c
28703+++ b/arch/x86/kvm/svm.c
28704@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28705 int cpu = raw_smp_processor_id();
28706
28707 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28708+
28709+ pax_open_kernel();
28710 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28711+ pax_close_kernel();
28712+
28713 load_TR_desc();
28714 }
28715
28716@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28717 #endif
28718 #endif
28719
28720+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28721+ __set_fs(current_thread_info()->addr_limit);
28722+#endif
28723+
28724 reload_tss(vcpu);
28725
28726 local_irq_disable();
28727diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28728index d4c58d8..eaf2568 100644
28729--- a/arch/x86/kvm/vmx.c
28730+++ b/arch/x86/kvm/vmx.c
28731@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28732 #endif
28733 }
28734
28735-static void vmcs_clear_bits(unsigned long field, u32 mask)
28736+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28737 {
28738 vmcs_writel(field, vmcs_readl(field) & ~mask);
28739 }
28740
28741-static void vmcs_set_bits(unsigned long field, u32 mask)
28742+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28743 {
28744 vmcs_writel(field, vmcs_readl(field) | mask);
28745 }
28746@@ -1645,7 +1645,11 @@ static void reload_tss(void)
28747 struct desc_struct *descs;
28748
28749 descs = (void *)gdt->address;
28750+
28751+ pax_open_kernel();
28752 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28753+ pax_close_kernel();
28754+
28755 load_TR_desc();
28756 }
28757
28758@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28759 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28760 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28761
28762+#ifdef CONFIG_PAX_PER_CPU_PGD
28763+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28764+#endif
28765+
28766 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28767 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28768 vmx->loaded_vmcs->cpu = cpu;
28769@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28770 * reads and returns guest's timestamp counter "register"
28771 * guest_tsc = host_tsc + tsc_offset -- 21.3
28772 */
28773-static u64 guest_read_tsc(void)
28774+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28775 {
28776 u64 host_tsc, tsc_offset;
28777
28778@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28779 unsigned long cr4;
28780
28781 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28782+
28783+#ifndef CONFIG_PAX_PER_CPU_PGD
28784 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28785+#endif
28786
28787 /* Save the most likely value for this task's CR4 in the VMCS. */
28788 cr4 = read_cr4();
28789@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28790 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28791 vmx->host_idt_base = dt.address;
28792
28793- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28794+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28795
28796 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28797 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28798@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
28799 * page upon invalidation. No need to do anything if the
28800 * processor does not have the APIC_ACCESS_ADDR VMCS field.
28801 */
28802- kvm_x86_ops->set_apic_access_page_addr = NULL;
28803+ pax_open_kernel();
28804+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28805+ pax_close_kernel();
28806 }
28807
28808- if (!cpu_has_vmx_tpr_shadow())
28809- kvm_x86_ops->update_cr8_intercept = NULL;
28810+ if (!cpu_has_vmx_tpr_shadow()) {
28811+ pax_open_kernel();
28812+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28813+ pax_close_kernel();
28814+ }
28815
28816 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28817 kvm_disable_largepages();
28818@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
28819 if (!cpu_has_vmx_apicv())
28820 enable_apicv = 0;
28821
28822+ pax_open_kernel();
28823 if (enable_apicv)
28824- kvm_x86_ops->update_cr8_intercept = NULL;
28825+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28826 else {
28827- kvm_x86_ops->hwapic_irr_update = NULL;
28828- kvm_x86_ops->deliver_posted_interrupt = NULL;
28829- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28830+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28831+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28832+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28833 }
28834+ pax_close_kernel();
28835
28836 if (nested)
28837 nested_vmx_setup_ctls_msrs();
28838@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28839 "jmp 2f \n\t"
28840 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28841 "2: "
28842+
28843+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28844+ "ljmp %[cs],$3f\n\t"
28845+ "3: "
28846+#endif
28847+
28848 /* Save guest registers, load host registers, keep flags */
28849 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28850 "pop %0 \n\t"
28851@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28852 #endif
28853 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28854 [wordsize]"i"(sizeof(ulong))
28855+
28856+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28857+ ,[cs]"i"(__KERNEL_CS)
28858+#endif
28859+
28860 : "cc", "memory"
28861 #ifdef CONFIG_X86_64
28862 , "rax", "rbx", "rdi", "rsi"
28863@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28864 if (debugctlmsr)
28865 update_debugctlmsr(debugctlmsr);
28866
28867-#ifndef CONFIG_X86_64
28868+#ifdef CONFIG_X86_32
28869 /*
28870 * The sysexit path does not restore ds/es, so we must set them to
28871 * a reasonable value ourselves.
28872@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28873 * may be executed in interrupt context, which saves and restore segments
28874 * around it, nullifying its effect.
28875 */
28876- loadsegment(ds, __USER_DS);
28877- loadsegment(es, __USER_DS);
28878+ loadsegment(ds, __KERNEL_DS);
28879+ loadsegment(es, __KERNEL_DS);
28880+ loadsegment(ss, __KERNEL_DS);
28881+
28882+#ifdef CONFIG_PAX_KERNEXEC
28883+ loadsegment(fs, __KERNEL_PERCPU);
28884+#endif
28885+
28886+#ifdef CONFIG_PAX_MEMORY_UDEREF
28887+ __set_fs(current_thread_info()->addr_limit);
28888+#endif
28889+
28890 #endif
28891
28892 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
28893diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28894index c259814..9a0345b 100644
28895--- a/arch/x86/kvm/x86.c
28896+++ b/arch/x86/kvm/x86.c
28897@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28898 {
28899 struct kvm *kvm = vcpu->kvm;
28900 int lm = is_long_mode(vcpu);
28901- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28902- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28903+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28904+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28905 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28906 : kvm->arch.xen_hvm_config.blob_size_32;
28907 u32 page_num = data & ~PAGE_MASK;
28908@@ -2810,6 +2810,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28909 if (n < msr_list.nmsrs)
28910 goto out;
28911 r = -EFAULT;
28912+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28913+ goto out;
28914 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28915 num_msrs_to_save * sizeof(u32)))
28916 goto out;
28917@@ -5746,7 +5748,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28918 };
28919 #endif
28920
28921-int kvm_arch_init(void *opaque)
28922+int kvm_arch_init(const void *opaque)
28923 {
28924 int r;
28925 struct kvm_x86_ops *ops = opaque;
28926diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28927index c1c1544..f90c9d5 100644
28928--- a/arch/x86/lguest/boot.c
28929+++ b/arch/x86/lguest/boot.c
28930@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28931 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28932 * Launcher to reboot us.
28933 */
28934-static void lguest_restart(char *reason)
28935+static __noreturn void lguest_restart(char *reason)
28936 {
28937 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28938+ BUG();
28939 }
28940
28941 /*G:050
28942diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28943index 00933d5..3a64af9 100644
28944--- a/arch/x86/lib/atomic64_386_32.S
28945+++ b/arch/x86/lib/atomic64_386_32.S
28946@@ -48,6 +48,10 @@ BEGIN(read)
28947 movl (v), %eax
28948 movl 4(v), %edx
28949 RET_ENDP
28950+BEGIN(read_unchecked)
28951+ movl (v), %eax
28952+ movl 4(v), %edx
28953+RET_ENDP
28954 #undef v
28955
28956 #define v %esi
28957@@ -55,6 +59,10 @@ BEGIN(set)
28958 movl %ebx, (v)
28959 movl %ecx, 4(v)
28960 RET_ENDP
28961+BEGIN(set_unchecked)
28962+ movl %ebx, (v)
28963+ movl %ecx, 4(v)
28964+RET_ENDP
28965 #undef v
28966
28967 #define v %esi
28968@@ -70,6 +78,20 @@ RET_ENDP
28969 BEGIN(add)
28970 addl %eax, (v)
28971 adcl %edx, 4(v)
28972+
28973+#ifdef CONFIG_PAX_REFCOUNT
28974+ jno 0f
28975+ subl %eax, (v)
28976+ sbbl %edx, 4(v)
28977+ int $4
28978+0:
28979+ _ASM_EXTABLE(0b, 0b)
28980+#endif
28981+
28982+RET_ENDP
28983+BEGIN(add_unchecked)
28984+ addl %eax, (v)
28985+ adcl %edx, 4(v)
28986 RET_ENDP
28987 #undef v
28988
28989@@ -77,6 +99,24 @@ RET_ENDP
28990 BEGIN(add_return)
28991 addl (v), %eax
28992 adcl 4(v), %edx
28993+
28994+#ifdef CONFIG_PAX_REFCOUNT
28995+ into
28996+1234:
28997+ _ASM_EXTABLE(1234b, 2f)
28998+#endif
28999+
29000+ movl %eax, (v)
29001+ movl %edx, 4(v)
29002+
29003+#ifdef CONFIG_PAX_REFCOUNT
29004+2:
29005+#endif
29006+
29007+RET_ENDP
29008+BEGIN(add_return_unchecked)
29009+ addl (v), %eax
29010+ adcl 4(v), %edx
29011 movl %eax, (v)
29012 movl %edx, 4(v)
29013 RET_ENDP
29014@@ -86,6 +126,20 @@ RET_ENDP
29015 BEGIN(sub)
29016 subl %eax, (v)
29017 sbbl %edx, 4(v)
29018+
29019+#ifdef CONFIG_PAX_REFCOUNT
29020+ jno 0f
29021+ addl %eax, (v)
29022+ adcl %edx, 4(v)
29023+ int $4
29024+0:
29025+ _ASM_EXTABLE(0b, 0b)
29026+#endif
29027+
29028+RET_ENDP
29029+BEGIN(sub_unchecked)
29030+ subl %eax, (v)
29031+ sbbl %edx, 4(v)
29032 RET_ENDP
29033 #undef v
29034
29035@@ -96,6 +150,27 @@ BEGIN(sub_return)
29036 sbbl $0, %edx
29037 addl (v), %eax
29038 adcl 4(v), %edx
29039+
29040+#ifdef CONFIG_PAX_REFCOUNT
29041+ into
29042+1234:
29043+ _ASM_EXTABLE(1234b, 2f)
29044+#endif
29045+
29046+ movl %eax, (v)
29047+ movl %edx, 4(v)
29048+
29049+#ifdef CONFIG_PAX_REFCOUNT
29050+2:
29051+#endif
29052+
29053+RET_ENDP
29054+BEGIN(sub_return_unchecked)
29055+ negl %edx
29056+ negl %eax
29057+ sbbl $0, %edx
29058+ addl (v), %eax
29059+ adcl 4(v), %edx
29060 movl %eax, (v)
29061 movl %edx, 4(v)
29062 RET_ENDP
29063@@ -105,6 +180,20 @@ RET_ENDP
29064 BEGIN(inc)
29065 addl $1, (v)
29066 adcl $0, 4(v)
29067+
29068+#ifdef CONFIG_PAX_REFCOUNT
29069+ jno 0f
29070+ subl $1, (v)
29071+ sbbl $0, 4(v)
29072+ int $4
29073+0:
29074+ _ASM_EXTABLE(0b, 0b)
29075+#endif
29076+
29077+RET_ENDP
29078+BEGIN(inc_unchecked)
29079+ addl $1, (v)
29080+ adcl $0, 4(v)
29081 RET_ENDP
29082 #undef v
29083
29084@@ -114,6 +203,26 @@ BEGIN(inc_return)
29085 movl 4(v), %edx
29086 addl $1, %eax
29087 adcl $0, %edx
29088+
29089+#ifdef CONFIG_PAX_REFCOUNT
29090+ into
29091+1234:
29092+ _ASM_EXTABLE(1234b, 2f)
29093+#endif
29094+
29095+ movl %eax, (v)
29096+ movl %edx, 4(v)
29097+
29098+#ifdef CONFIG_PAX_REFCOUNT
29099+2:
29100+#endif
29101+
29102+RET_ENDP
29103+BEGIN(inc_return_unchecked)
29104+ movl (v), %eax
29105+ movl 4(v), %edx
29106+ addl $1, %eax
29107+ adcl $0, %edx
29108 movl %eax, (v)
29109 movl %edx, 4(v)
29110 RET_ENDP
29111@@ -123,6 +232,20 @@ RET_ENDP
29112 BEGIN(dec)
29113 subl $1, (v)
29114 sbbl $0, 4(v)
29115+
29116+#ifdef CONFIG_PAX_REFCOUNT
29117+ jno 0f
29118+ addl $1, (v)
29119+ adcl $0, 4(v)
29120+ int $4
29121+0:
29122+ _ASM_EXTABLE(0b, 0b)
29123+#endif
29124+
29125+RET_ENDP
29126+BEGIN(dec_unchecked)
29127+ subl $1, (v)
29128+ sbbl $0, 4(v)
29129 RET_ENDP
29130 #undef v
29131
29132@@ -132,6 +255,26 @@ BEGIN(dec_return)
29133 movl 4(v), %edx
29134 subl $1, %eax
29135 sbbl $0, %edx
29136+
29137+#ifdef CONFIG_PAX_REFCOUNT
29138+ into
29139+1234:
29140+ _ASM_EXTABLE(1234b, 2f)
29141+#endif
29142+
29143+ movl %eax, (v)
29144+ movl %edx, 4(v)
29145+
29146+#ifdef CONFIG_PAX_REFCOUNT
29147+2:
29148+#endif
29149+
29150+RET_ENDP
29151+BEGIN(dec_return_unchecked)
29152+ movl (v), %eax
29153+ movl 4(v), %edx
29154+ subl $1, %eax
29155+ sbbl $0, %edx
29156 movl %eax, (v)
29157 movl %edx, 4(v)
29158 RET_ENDP
29159@@ -143,6 +286,13 @@ BEGIN(add_unless)
29160 adcl %edx, %edi
29161 addl (v), %eax
29162 adcl 4(v), %edx
29163+
29164+#ifdef CONFIG_PAX_REFCOUNT
29165+ into
29166+1234:
29167+ _ASM_EXTABLE(1234b, 2f)
29168+#endif
29169+
29170 cmpl %eax, %ecx
29171 je 3f
29172 1:
29173@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29174 1:
29175 addl $1, %eax
29176 adcl $0, %edx
29177+
29178+#ifdef CONFIG_PAX_REFCOUNT
29179+ into
29180+1234:
29181+ _ASM_EXTABLE(1234b, 2f)
29182+#endif
29183+
29184 movl %eax, (v)
29185 movl %edx, 4(v)
29186 movl $1, %eax
29187@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29188 movl 4(v), %edx
29189 subl $1, %eax
29190 sbbl $0, %edx
29191+
29192+#ifdef CONFIG_PAX_REFCOUNT
29193+ into
29194+1234:
29195+ _ASM_EXTABLE(1234b, 1f)
29196+#endif
29197+
29198 js 1f
29199 movl %eax, (v)
29200 movl %edx, 4(v)
29201diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29202index f5cc9eb..51fa319 100644
29203--- a/arch/x86/lib/atomic64_cx8_32.S
29204+++ b/arch/x86/lib/atomic64_cx8_32.S
29205@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29206 CFI_STARTPROC
29207
29208 read64 %ecx
29209+ pax_force_retaddr
29210 ret
29211 CFI_ENDPROC
29212 ENDPROC(atomic64_read_cx8)
29213
29214+ENTRY(atomic64_read_unchecked_cx8)
29215+ CFI_STARTPROC
29216+
29217+ read64 %ecx
29218+ pax_force_retaddr
29219+ ret
29220+ CFI_ENDPROC
29221+ENDPROC(atomic64_read_unchecked_cx8)
29222+
29223 ENTRY(atomic64_set_cx8)
29224 CFI_STARTPROC
29225
29226@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29227 cmpxchg8b (%esi)
29228 jne 1b
29229
29230+ pax_force_retaddr
29231 ret
29232 CFI_ENDPROC
29233 ENDPROC(atomic64_set_cx8)
29234
29235+ENTRY(atomic64_set_unchecked_cx8)
29236+ CFI_STARTPROC
29237+
29238+1:
29239+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29240+ * are atomic on 586 and newer */
29241+ cmpxchg8b (%esi)
29242+ jne 1b
29243+
29244+ pax_force_retaddr
29245+ ret
29246+ CFI_ENDPROC
29247+ENDPROC(atomic64_set_unchecked_cx8)
29248+
29249 ENTRY(atomic64_xchg_cx8)
29250 CFI_STARTPROC
29251
29252@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29253 cmpxchg8b (%esi)
29254 jne 1b
29255
29256+ pax_force_retaddr
29257 ret
29258 CFI_ENDPROC
29259 ENDPROC(atomic64_xchg_cx8)
29260
29261-.macro addsub_return func ins insc
29262-ENTRY(atomic64_\func\()_return_cx8)
29263+.macro addsub_return func ins insc unchecked=""
29264+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29265 CFI_STARTPROC
29266 SAVE ebp
29267 SAVE ebx
29268@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29269 movl %edx, %ecx
29270 \ins\()l %esi, %ebx
29271 \insc\()l %edi, %ecx
29272+
29273+.ifb \unchecked
29274+#ifdef CONFIG_PAX_REFCOUNT
29275+ into
29276+2:
29277+ _ASM_EXTABLE(2b, 3f)
29278+#endif
29279+.endif
29280+
29281 LOCK_PREFIX
29282 cmpxchg8b (%ebp)
29283 jne 1b
29284-
29285-10:
29286 movl %ebx, %eax
29287 movl %ecx, %edx
29288+
29289+.ifb \unchecked
29290+#ifdef CONFIG_PAX_REFCOUNT
29291+3:
29292+#endif
29293+.endif
29294+
29295 RESTORE edi
29296 RESTORE esi
29297 RESTORE ebx
29298 RESTORE ebp
29299+ pax_force_retaddr
29300 ret
29301 CFI_ENDPROC
29302-ENDPROC(atomic64_\func\()_return_cx8)
29303+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29304 .endm
29305
29306 addsub_return add add adc
29307 addsub_return sub sub sbb
29308+addsub_return add add adc _unchecked
29309+addsub_return sub sub sbb _unchecked
29310
29311-.macro incdec_return func ins insc
29312-ENTRY(atomic64_\func\()_return_cx8)
29313+.macro incdec_return func ins insc unchecked=""
29314+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29315 CFI_STARTPROC
29316 SAVE ebx
29317
29318@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29319 movl %edx, %ecx
29320 \ins\()l $1, %ebx
29321 \insc\()l $0, %ecx
29322+
29323+.ifb \unchecked
29324+#ifdef CONFIG_PAX_REFCOUNT
29325+ into
29326+2:
29327+ _ASM_EXTABLE(2b, 3f)
29328+#endif
29329+.endif
29330+
29331 LOCK_PREFIX
29332 cmpxchg8b (%esi)
29333 jne 1b
29334
29335-10:
29336 movl %ebx, %eax
29337 movl %ecx, %edx
29338+
29339+.ifb \unchecked
29340+#ifdef CONFIG_PAX_REFCOUNT
29341+3:
29342+#endif
29343+.endif
29344+
29345 RESTORE ebx
29346+ pax_force_retaddr
29347 ret
29348 CFI_ENDPROC
29349-ENDPROC(atomic64_\func\()_return_cx8)
29350+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29351 .endm
29352
29353 incdec_return inc add adc
29354 incdec_return dec sub sbb
29355+incdec_return inc add adc _unchecked
29356+incdec_return dec sub sbb _unchecked
29357
29358 ENTRY(atomic64_dec_if_positive_cx8)
29359 CFI_STARTPROC
29360@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29361 movl %edx, %ecx
29362 subl $1, %ebx
29363 sbb $0, %ecx
29364+
29365+#ifdef CONFIG_PAX_REFCOUNT
29366+ into
29367+1234:
29368+ _ASM_EXTABLE(1234b, 2f)
29369+#endif
29370+
29371 js 2f
29372 LOCK_PREFIX
29373 cmpxchg8b (%esi)
29374@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29375 movl %ebx, %eax
29376 movl %ecx, %edx
29377 RESTORE ebx
29378+ pax_force_retaddr
29379 ret
29380 CFI_ENDPROC
29381 ENDPROC(atomic64_dec_if_positive_cx8)
29382@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29383 movl %edx, %ecx
29384 addl %ebp, %ebx
29385 adcl %edi, %ecx
29386+
29387+#ifdef CONFIG_PAX_REFCOUNT
29388+ into
29389+1234:
29390+ _ASM_EXTABLE(1234b, 3f)
29391+#endif
29392+
29393 LOCK_PREFIX
29394 cmpxchg8b (%esi)
29395 jne 1b
29396@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29397 CFI_ADJUST_CFA_OFFSET -8
29398 RESTORE ebx
29399 RESTORE ebp
29400+ pax_force_retaddr
29401 ret
29402 4:
29403 cmpl %edx, 4(%esp)
29404@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29405 xorl %ecx, %ecx
29406 addl $1, %ebx
29407 adcl %edx, %ecx
29408+
29409+#ifdef CONFIG_PAX_REFCOUNT
29410+ into
29411+1234:
29412+ _ASM_EXTABLE(1234b, 3f)
29413+#endif
29414+
29415 LOCK_PREFIX
29416 cmpxchg8b (%esi)
29417 jne 1b
29418@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29419 movl $1, %eax
29420 3:
29421 RESTORE ebx
29422+ pax_force_retaddr
29423 ret
29424 CFI_ENDPROC
29425 ENDPROC(atomic64_inc_not_zero_cx8)
29426diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29427index e78b8eee..7e173a8 100644
29428--- a/arch/x86/lib/checksum_32.S
29429+++ b/arch/x86/lib/checksum_32.S
29430@@ -29,7 +29,8 @@
29431 #include <asm/dwarf2.h>
29432 #include <asm/errno.h>
29433 #include <asm/asm.h>
29434-
29435+#include <asm/segment.h>
29436+
29437 /*
29438 * computes a partial checksum, e.g. for TCP/UDP fragments
29439 */
29440@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29441
29442 #define ARGBASE 16
29443 #define FP 12
29444-
29445-ENTRY(csum_partial_copy_generic)
29446+
29447+ENTRY(csum_partial_copy_generic_to_user)
29448 CFI_STARTPROC
29449+
29450+#ifdef CONFIG_PAX_MEMORY_UDEREF
29451+ pushl_cfi %gs
29452+ popl_cfi %es
29453+ jmp csum_partial_copy_generic
29454+#endif
29455+
29456+ENTRY(csum_partial_copy_generic_from_user)
29457+
29458+#ifdef CONFIG_PAX_MEMORY_UDEREF
29459+ pushl_cfi %gs
29460+ popl_cfi %ds
29461+#endif
29462+
29463+ENTRY(csum_partial_copy_generic)
29464 subl $4,%esp
29465 CFI_ADJUST_CFA_OFFSET 4
29466 pushl_cfi %edi
29467@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29468 jmp 4f
29469 SRC(1: movw (%esi), %bx )
29470 addl $2, %esi
29471-DST( movw %bx, (%edi) )
29472+DST( movw %bx, %es:(%edi) )
29473 addl $2, %edi
29474 addw %bx, %ax
29475 adcl $0, %eax
29476@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29477 SRC(1: movl (%esi), %ebx )
29478 SRC( movl 4(%esi), %edx )
29479 adcl %ebx, %eax
29480-DST( movl %ebx, (%edi) )
29481+DST( movl %ebx, %es:(%edi) )
29482 adcl %edx, %eax
29483-DST( movl %edx, 4(%edi) )
29484+DST( movl %edx, %es:4(%edi) )
29485
29486 SRC( movl 8(%esi), %ebx )
29487 SRC( movl 12(%esi), %edx )
29488 adcl %ebx, %eax
29489-DST( movl %ebx, 8(%edi) )
29490+DST( movl %ebx, %es:8(%edi) )
29491 adcl %edx, %eax
29492-DST( movl %edx, 12(%edi) )
29493+DST( movl %edx, %es:12(%edi) )
29494
29495 SRC( movl 16(%esi), %ebx )
29496 SRC( movl 20(%esi), %edx )
29497 adcl %ebx, %eax
29498-DST( movl %ebx, 16(%edi) )
29499+DST( movl %ebx, %es:16(%edi) )
29500 adcl %edx, %eax
29501-DST( movl %edx, 20(%edi) )
29502+DST( movl %edx, %es:20(%edi) )
29503
29504 SRC( movl 24(%esi), %ebx )
29505 SRC( movl 28(%esi), %edx )
29506 adcl %ebx, %eax
29507-DST( movl %ebx, 24(%edi) )
29508+DST( movl %ebx, %es:24(%edi) )
29509 adcl %edx, %eax
29510-DST( movl %edx, 28(%edi) )
29511+DST( movl %edx, %es:28(%edi) )
29512
29513 lea 32(%esi), %esi
29514 lea 32(%edi), %edi
29515@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29516 shrl $2, %edx # This clears CF
29517 SRC(3: movl (%esi), %ebx )
29518 adcl %ebx, %eax
29519-DST( movl %ebx, (%edi) )
29520+DST( movl %ebx, %es:(%edi) )
29521 lea 4(%esi), %esi
29522 lea 4(%edi), %edi
29523 dec %edx
29524@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29525 jb 5f
29526 SRC( movw (%esi), %cx )
29527 leal 2(%esi), %esi
29528-DST( movw %cx, (%edi) )
29529+DST( movw %cx, %es:(%edi) )
29530 leal 2(%edi), %edi
29531 je 6f
29532 shll $16,%ecx
29533 SRC(5: movb (%esi), %cl )
29534-DST( movb %cl, (%edi) )
29535+DST( movb %cl, %es:(%edi) )
29536 6: addl %ecx, %eax
29537 adcl $0, %eax
29538 7:
29539@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29540
29541 6001:
29542 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29543- movl $-EFAULT, (%ebx)
29544+ movl $-EFAULT, %ss:(%ebx)
29545
29546 # zero the complete destination - computing the rest
29547 # is too much work
29548@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29549
29550 6002:
29551 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29552- movl $-EFAULT,(%ebx)
29553+ movl $-EFAULT,%ss:(%ebx)
29554 jmp 5000b
29555
29556 .previous
29557
29558+ pushl_cfi %ss
29559+ popl_cfi %ds
29560+ pushl_cfi %ss
29561+ popl_cfi %es
29562 popl_cfi %ebx
29563 CFI_RESTORE ebx
29564 popl_cfi %esi
29565@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29566 popl_cfi %ecx # equivalent to addl $4,%esp
29567 ret
29568 CFI_ENDPROC
29569-ENDPROC(csum_partial_copy_generic)
29570+ENDPROC(csum_partial_copy_generic_to_user)
29571
29572 #else
29573
29574 /* Version for PentiumII/PPro */
29575
29576 #define ROUND1(x) \
29577+ nop; nop; nop; \
29578 SRC(movl x(%esi), %ebx ) ; \
29579 addl %ebx, %eax ; \
29580- DST(movl %ebx, x(%edi) ) ;
29581+ DST(movl %ebx, %es:x(%edi)) ;
29582
29583 #define ROUND(x) \
29584+ nop; nop; nop; \
29585 SRC(movl x(%esi), %ebx ) ; \
29586 adcl %ebx, %eax ; \
29587- DST(movl %ebx, x(%edi) ) ;
29588+ DST(movl %ebx, %es:x(%edi)) ;
29589
29590 #define ARGBASE 12
29591-
29592-ENTRY(csum_partial_copy_generic)
29593+
29594+ENTRY(csum_partial_copy_generic_to_user)
29595 CFI_STARTPROC
29596+
29597+#ifdef CONFIG_PAX_MEMORY_UDEREF
29598+ pushl_cfi %gs
29599+ popl_cfi %es
29600+ jmp csum_partial_copy_generic
29601+#endif
29602+
29603+ENTRY(csum_partial_copy_generic_from_user)
29604+
29605+#ifdef CONFIG_PAX_MEMORY_UDEREF
29606+ pushl_cfi %gs
29607+ popl_cfi %ds
29608+#endif
29609+
29610+ENTRY(csum_partial_copy_generic)
29611 pushl_cfi %ebx
29612 CFI_REL_OFFSET ebx, 0
29613 pushl_cfi %edi
29614@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29615 subl %ebx, %edi
29616 lea -1(%esi),%edx
29617 andl $-32,%edx
29618- lea 3f(%ebx,%ebx), %ebx
29619+ lea 3f(%ebx,%ebx,2), %ebx
29620 testl %esi, %esi
29621 jmp *%ebx
29622 1: addl $64,%esi
29623@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29624 jb 5f
29625 SRC( movw (%esi), %dx )
29626 leal 2(%esi), %esi
29627-DST( movw %dx, (%edi) )
29628+DST( movw %dx, %es:(%edi) )
29629 leal 2(%edi), %edi
29630 je 6f
29631 shll $16,%edx
29632 5:
29633 SRC( movb (%esi), %dl )
29634-DST( movb %dl, (%edi) )
29635+DST( movb %dl, %es:(%edi) )
29636 6: addl %edx, %eax
29637 adcl $0, %eax
29638 7:
29639 .section .fixup, "ax"
29640 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29641- movl $-EFAULT, (%ebx)
29642+ movl $-EFAULT, %ss:(%ebx)
29643 # zero the complete destination (computing the rest is too much work)
29644 movl ARGBASE+8(%esp),%edi # dst
29645 movl ARGBASE+12(%esp),%ecx # len
29646@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29647 rep; stosb
29648 jmp 7b
29649 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29650- movl $-EFAULT, (%ebx)
29651+ movl $-EFAULT, %ss:(%ebx)
29652 jmp 7b
29653 .previous
29654
29655+#ifdef CONFIG_PAX_MEMORY_UDEREF
29656+ pushl_cfi %ss
29657+ popl_cfi %ds
29658+ pushl_cfi %ss
29659+ popl_cfi %es
29660+#endif
29661+
29662 popl_cfi %esi
29663 CFI_RESTORE esi
29664 popl_cfi %edi
29665@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29666 CFI_RESTORE ebx
29667 ret
29668 CFI_ENDPROC
29669-ENDPROC(csum_partial_copy_generic)
29670+ENDPROC(csum_partial_copy_generic_to_user)
29671
29672 #undef ROUND
29673 #undef ROUND1
29674diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29675index f2145cf..cea889d 100644
29676--- a/arch/x86/lib/clear_page_64.S
29677+++ b/arch/x86/lib/clear_page_64.S
29678@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29679 movl $4096/8,%ecx
29680 xorl %eax,%eax
29681 rep stosq
29682+ pax_force_retaddr
29683 ret
29684 CFI_ENDPROC
29685 ENDPROC(clear_page_c)
29686@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29687 movl $4096,%ecx
29688 xorl %eax,%eax
29689 rep stosb
29690+ pax_force_retaddr
29691 ret
29692 CFI_ENDPROC
29693 ENDPROC(clear_page_c_e)
29694@@ -43,6 +45,7 @@ ENTRY(clear_page)
29695 leaq 64(%rdi),%rdi
29696 jnz .Lloop
29697 nop
29698+ pax_force_retaddr
29699 ret
29700 CFI_ENDPROC
29701 .Lclear_page_end:
29702@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29703
29704 #include <asm/cpufeature.h>
29705
29706- .section .altinstr_replacement,"ax"
29707+ .section .altinstr_replacement,"a"
29708 1: .byte 0xeb /* jmp <disp8> */
29709 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29710 2: .byte 0xeb /* jmp <disp8> */
29711diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29712index 40a1725..5d12ac4 100644
29713--- a/arch/x86/lib/cmpxchg16b_emu.S
29714+++ b/arch/x86/lib/cmpxchg16b_emu.S
29715@@ -8,6 +8,7 @@
29716 #include <linux/linkage.h>
29717 #include <asm/dwarf2.h>
29718 #include <asm/percpu.h>
29719+#include <asm/alternative-asm.h>
29720
29721 .text
29722
29723@@ -46,12 +47,14 @@ CFI_STARTPROC
29724 CFI_REMEMBER_STATE
29725 popfq_cfi
29726 mov $1, %al
29727+ pax_force_retaddr
29728 ret
29729
29730 CFI_RESTORE_STATE
29731 .Lnot_same:
29732 popfq_cfi
29733 xor %al,%al
29734+ pax_force_retaddr
29735 ret
29736
29737 CFI_ENDPROC
29738diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29739index 176cca6..e0d658e 100644
29740--- a/arch/x86/lib/copy_page_64.S
29741+++ b/arch/x86/lib/copy_page_64.S
29742@@ -9,6 +9,7 @@ copy_page_rep:
29743 CFI_STARTPROC
29744 movl $4096/8, %ecx
29745 rep movsq
29746+ pax_force_retaddr
29747 ret
29748 CFI_ENDPROC
29749 ENDPROC(copy_page_rep)
29750@@ -24,8 +25,8 @@ ENTRY(copy_page)
29751 CFI_ADJUST_CFA_OFFSET 2*8
29752 movq %rbx, (%rsp)
29753 CFI_REL_OFFSET rbx, 0
29754- movq %r12, 1*8(%rsp)
29755- CFI_REL_OFFSET r12, 1*8
29756+ movq %r13, 1*8(%rsp)
29757+ CFI_REL_OFFSET r13, 1*8
29758
29759 movl $(4096/64)-5, %ecx
29760 .p2align 4
29761@@ -38,7 +39,7 @@ ENTRY(copy_page)
29762 movq 0x8*4(%rsi), %r9
29763 movq 0x8*5(%rsi), %r10
29764 movq 0x8*6(%rsi), %r11
29765- movq 0x8*7(%rsi), %r12
29766+ movq 0x8*7(%rsi), %r13
29767
29768 prefetcht0 5*64(%rsi)
29769
29770@@ -49,7 +50,7 @@ ENTRY(copy_page)
29771 movq %r9, 0x8*4(%rdi)
29772 movq %r10, 0x8*5(%rdi)
29773 movq %r11, 0x8*6(%rdi)
29774- movq %r12, 0x8*7(%rdi)
29775+ movq %r13, 0x8*7(%rdi)
29776
29777 leaq 64 (%rsi), %rsi
29778 leaq 64 (%rdi), %rdi
29779@@ -68,7 +69,7 @@ ENTRY(copy_page)
29780 movq 0x8*4(%rsi), %r9
29781 movq 0x8*5(%rsi), %r10
29782 movq 0x8*6(%rsi), %r11
29783- movq 0x8*7(%rsi), %r12
29784+ movq 0x8*7(%rsi), %r13
29785
29786 movq %rax, 0x8*0(%rdi)
29787 movq %rbx, 0x8*1(%rdi)
29788@@ -77,7 +78,7 @@ ENTRY(copy_page)
29789 movq %r9, 0x8*4(%rdi)
29790 movq %r10, 0x8*5(%rdi)
29791 movq %r11, 0x8*6(%rdi)
29792- movq %r12, 0x8*7(%rdi)
29793+ movq %r13, 0x8*7(%rdi)
29794
29795 leaq 64(%rdi), %rdi
29796 leaq 64(%rsi), %rsi
29797@@ -85,10 +86,11 @@ ENTRY(copy_page)
29798
29799 movq (%rsp), %rbx
29800 CFI_RESTORE rbx
29801- movq 1*8(%rsp), %r12
29802- CFI_RESTORE r12
29803+ movq 1*8(%rsp), %r13
29804+ CFI_RESTORE r13
29805 addq $2*8, %rsp
29806 CFI_ADJUST_CFA_OFFSET -2*8
29807+ pax_force_retaddr
29808 ret
29809 .Lcopy_page_end:
29810 CFI_ENDPROC
29811@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29812
29813 #include <asm/cpufeature.h>
29814
29815- .section .altinstr_replacement,"ax"
29816+ .section .altinstr_replacement,"a"
29817 1: .byte 0xeb /* jmp <disp8> */
29818 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29819 2:
29820diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29821index dee945d..a84067b 100644
29822--- a/arch/x86/lib/copy_user_64.S
29823+++ b/arch/x86/lib/copy_user_64.S
29824@@ -18,31 +18,7 @@
29825 #include <asm/alternative-asm.h>
29826 #include <asm/asm.h>
29827 #include <asm/smap.h>
29828-
29829-/*
29830- * By placing feature2 after feature1 in altinstructions section, we logically
29831- * implement:
29832- * If CPU has feature2, jmp to alt2 is used
29833- * else if CPU has feature1, jmp to alt1 is used
29834- * else jmp to orig is used.
29835- */
29836- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29837-0:
29838- .byte 0xe9 /* 32bit jump */
29839- .long \orig-1f /* by default jump to orig */
29840-1:
29841- .section .altinstr_replacement,"ax"
29842-2: .byte 0xe9 /* near jump with 32bit immediate */
29843- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29844-3: .byte 0xe9 /* near jump with 32bit immediate */
29845- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29846- .previous
29847-
29848- .section .altinstructions,"a"
29849- altinstruction_entry 0b,2b,\feature1,5,5
29850- altinstruction_entry 0b,3b,\feature2,5,5
29851- .previous
29852- .endm
29853+#include <asm/pgtable.h>
29854
29855 .macro ALIGN_DESTINATION
29856 #ifdef FIX_ALIGNMENT
29857@@ -70,52 +46,6 @@
29858 #endif
29859 .endm
29860
29861-/* Standard copy_to_user with segment limit checking */
29862-ENTRY(_copy_to_user)
29863- CFI_STARTPROC
29864- GET_THREAD_INFO(%rax)
29865- movq %rdi,%rcx
29866- addq %rdx,%rcx
29867- jc bad_to_user
29868- cmpq TI_addr_limit(%rax),%rcx
29869- ja bad_to_user
29870- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29871- copy_user_generic_unrolled,copy_user_generic_string, \
29872- copy_user_enhanced_fast_string
29873- CFI_ENDPROC
29874-ENDPROC(_copy_to_user)
29875-
29876-/* Standard copy_from_user with segment limit checking */
29877-ENTRY(_copy_from_user)
29878- CFI_STARTPROC
29879- GET_THREAD_INFO(%rax)
29880- movq %rsi,%rcx
29881- addq %rdx,%rcx
29882- jc bad_from_user
29883- cmpq TI_addr_limit(%rax),%rcx
29884- ja bad_from_user
29885- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29886- copy_user_generic_unrolled,copy_user_generic_string, \
29887- copy_user_enhanced_fast_string
29888- CFI_ENDPROC
29889-ENDPROC(_copy_from_user)
29890-
29891- .section .fixup,"ax"
29892- /* must zero dest */
29893-ENTRY(bad_from_user)
29894-bad_from_user:
29895- CFI_STARTPROC
29896- movl %edx,%ecx
29897- xorl %eax,%eax
29898- rep
29899- stosb
29900-bad_to_user:
29901- movl %edx,%eax
29902- ret
29903- CFI_ENDPROC
29904-ENDPROC(bad_from_user)
29905- .previous
29906-
29907 /*
29908 * copy_user_generic_unrolled - memory copy with exception handling.
29909 * This version is for CPUs like P4 that don't have efficient micro
29910@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29911 */
29912 ENTRY(copy_user_generic_unrolled)
29913 CFI_STARTPROC
29914+ ASM_PAX_OPEN_USERLAND
29915 ASM_STAC
29916 cmpl $8,%edx
29917 jb 20f /* less then 8 bytes, go to byte copy loop */
29918@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29919 jnz 21b
29920 23: xor %eax,%eax
29921 ASM_CLAC
29922+ ASM_PAX_CLOSE_USERLAND
29923+ pax_force_retaddr
29924 ret
29925
29926 .section .fixup,"ax"
29927@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29928 */
29929 ENTRY(copy_user_generic_string)
29930 CFI_STARTPROC
29931+ ASM_PAX_OPEN_USERLAND
29932 ASM_STAC
29933 cmpl $8,%edx
29934 jb 2f /* less than 8 bytes, go to byte copy loop */
29935@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29936 movsb
29937 xorl %eax,%eax
29938 ASM_CLAC
29939+ ASM_PAX_CLOSE_USERLAND
29940+ pax_force_retaddr
29941 ret
29942
29943 .section .fixup,"ax"
29944@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29945 */
29946 ENTRY(copy_user_enhanced_fast_string)
29947 CFI_STARTPROC
29948+ ASM_PAX_OPEN_USERLAND
29949 ASM_STAC
29950 movl %edx,%ecx
29951 1: rep
29952 movsb
29953 xorl %eax,%eax
29954 ASM_CLAC
29955+ ASM_PAX_CLOSE_USERLAND
29956+ pax_force_retaddr
29957 ret
29958
29959 .section .fixup,"ax"
29960diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29961index 6a4f43c..c70fb52 100644
29962--- a/arch/x86/lib/copy_user_nocache_64.S
29963+++ b/arch/x86/lib/copy_user_nocache_64.S
29964@@ -8,6 +8,7 @@
29965
29966 #include <linux/linkage.h>
29967 #include <asm/dwarf2.h>
29968+#include <asm/alternative-asm.h>
29969
29970 #define FIX_ALIGNMENT 1
29971
29972@@ -16,6 +17,7 @@
29973 #include <asm/thread_info.h>
29974 #include <asm/asm.h>
29975 #include <asm/smap.h>
29976+#include <asm/pgtable.h>
29977
29978 .macro ALIGN_DESTINATION
29979 #ifdef FIX_ALIGNMENT
29980@@ -49,6 +51,16 @@
29981 */
29982 ENTRY(__copy_user_nocache)
29983 CFI_STARTPROC
29984+
29985+#ifdef CONFIG_PAX_MEMORY_UDEREF
29986+ mov pax_user_shadow_base,%rcx
29987+ cmp %rcx,%rsi
29988+ jae 1f
29989+ add %rcx,%rsi
29990+1:
29991+#endif
29992+
29993+ ASM_PAX_OPEN_USERLAND
29994 ASM_STAC
29995 cmpl $8,%edx
29996 jb 20f /* less then 8 bytes, go to byte copy loop */
29997@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
29998 jnz 21b
29999 23: xorl %eax,%eax
30000 ASM_CLAC
30001+ ASM_PAX_CLOSE_USERLAND
30002 sfence
30003+ pax_force_retaddr
30004 ret
30005
30006 .section .fixup,"ax"
30007diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30008index 2419d5f..fe52d0e 100644
30009--- a/arch/x86/lib/csum-copy_64.S
30010+++ b/arch/x86/lib/csum-copy_64.S
30011@@ -9,6 +9,7 @@
30012 #include <asm/dwarf2.h>
30013 #include <asm/errno.h>
30014 #include <asm/asm.h>
30015+#include <asm/alternative-asm.h>
30016
30017 /*
30018 * Checksum copy with exception handling.
30019@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30020 CFI_ADJUST_CFA_OFFSET 7*8
30021 movq %rbx, 2*8(%rsp)
30022 CFI_REL_OFFSET rbx, 2*8
30023- movq %r12, 3*8(%rsp)
30024- CFI_REL_OFFSET r12, 3*8
30025+ movq %r15, 3*8(%rsp)
30026+ CFI_REL_OFFSET r15, 3*8
30027 movq %r14, 4*8(%rsp)
30028 CFI_REL_OFFSET r14, 4*8
30029 movq %r13, 5*8(%rsp)
30030@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30031 movl %edx, %ecx
30032
30033 xorl %r9d, %r9d
30034- movq %rcx, %r12
30035+ movq %rcx, %r15
30036
30037- shrq $6, %r12
30038+ shrq $6, %r15
30039 jz .Lhandle_tail /* < 64 */
30040
30041 clc
30042
30043 /* main loop. clear in 64 byte blocks */
30044 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30045- /* r11: temp3, rdx: temp4, r12 loopcnt */
30046+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30047 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30048 .p2align 4
30049 .Lloop:
30050@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30051 adcq %r14, %rax
30052 adcq %r13, %rax
30053
30054- decl %r12d
30055+ decl %r15d
30056
30057 dest
30058 movq %rbx, (%rsi)
30059@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30060 .Lende:
30061 movq 2*8(%rsp), %rbx
30062 CFI_RESTORE rbx
30063- movq 3*8(%rsp), %r12
30064- CFI_RESTORE r12
30065+ movq 3*8(%rsp), %r15
30066+ CFI_RESTORE r15
30067 movq 4*8(%rsp), %r14
30068 CFI_RESTORE r14
30069 movq 5*8(%rsp), %r13
30070@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30071 CFI_RESTORE rbp
30072 addq $7*8, %rsp
30073 CFI_ADJUST_CFA_OFFSET -7*8
30074+ pax_force_retaddr
30075 ret
30076 CFI_RESTORE_STATE
30077
30078diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30079index 1318f75..44c30fd 100644
30080--- a/arch/x86/lib/csum-wrappers_64.c
30081+++ b/arch/x86/lib/csum-wrappers_64.c
30082@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30083 len -= 2;
30084 }
30085 }
30086+ pax_open_userland();
30087 stac();
30088- isum = csum_partial_copy_generic((__force const void *)src,
30089+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30090 dst, len, isum, errp, NULL);
30091 clac();
30092+ pax_close_userland();
30093 if (unlikely(*errp))
30094 goto out_err;
30095
30096@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30097 }
30098
30099 *errp = 0;
30100+ pax_open_userland();
30101 stac();
30102- ret = csum_partial_copy_generic(src, (void __force *)dst,
30103+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30104 len, isum, NULL, errp);
30105 clac();
30106+ pax_close_userland();
30107 return ret;
30108 }
30109 EXPORT_SYMBOL(csum_partial_copy_to_user);
30110diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30111index a451235..1daa956 100644
30112--- a/arch/x86/lib/getuser.S
30113+++ b/arch/x86/lib/getuser.S
30114@@ -33,17 +33,40 @@
30115 #include <asm/thread_info.h>
30116 #include <asm/asm.h>
30117 #include <asm/smap.h>
30118+#include <asm/segment.h>
30119+#include <asm/pgtable.h>
30120+#include <asm/alternative-asm.h>
30121+
30122+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30123+#define __copyuser_seg gs;
30124+#else
30125+#define __copyuser_seg
30126+#endif
30127
30128 .text
30129 ENTRY(__get_user_1)
30130 CFI_STARTPROC
30131+
30132+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30133 GET_THREAD_INFO(%_ASM_DX)
30134 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30135 jae bad_get_user
30136 ASM_STAC
30137-1: movzbl (%_ASM_AX),%edx
30138+
30139+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30140+ mov pax_user_shadow_base,%_ASM_DX
30141+ cmp %_ASM_DX,%_ASM_AX
30142+ jae 1234f
30143+ add %_ASM_DX,%_ASM_AX
30144+1234:
30145+#endif
30146+
30147+#endif
30148+
30149+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30150 xor %eax,%eax
30151 ASM_CLAC
30152+ pax_force_retaddr
30153 ret
30154 CFI_ENDPROC
30155 ENDPROC(__get_user_1)
30156@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30157 ENTRY(__get_user_2)
30158 CFI_STARTPROC
30159 add $1,%_ASM_AX
30160+
30161+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30162 jc bad_get_user
30163 GET_THREAD_INFO(%_ASM_DX)
30164 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30165 jae bad_get_user
30166 ASM_STAC
30167-2: movzwl -1(%_ASM_AX),%edx
30168+
30169+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30170+ mov pax_user_shadow_base,%_ASM_DX
30171+ cmp %_ASM_DX,%_ASM_AX
30172+ jae 1234f
30173+ add %_ASM_DX,%_ASM_AX
30174+1234:
30175+#endif
30176+
30177+#endif
30178+
30179+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30180 xor %eax,%eax
30181 ASM_CLAC
30182+ pax_force_retaddr
30183 ret
30184 CFI_ENDPROC
30185 ENDPROC(__get_user_2)
30186@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30187 ENTRY(__get_user_4)
30188 CFI_STARTPROC
30189 add $3,%_ASM_AX
30190+
30191+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30192 jc bad_get_user
30193 GET_THREAD_INFO(%_ASM_DX)
30194 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30195 jae bad_get_user
30196 ASM_STAC
30197-3: movl -3(%_ASM_AX),%edx
30198+
30199+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30200+ mov pax_user_shadow_base,%_ASM_DX
30201+ cmp %_ASM_DX,%_ASM_AX
30202+ jae 1234f
30203+ add %_ASM_DX,%_ASM_AX
30204+1234:
30205+#endif
30206+
30207+#endif
30208+
30209+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30210 xor %eax,%eax
30211 ASM_CLAC
30212+ pax_force_retaddr
30213 ret
30214 CFI_ENDPROC
30215 ENDPROC(__get_user_4)
30216@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30217 GET_THREAD_INFO(%_ASM_DX)
30218 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30219 jae bad_get_user
30220+
30221+#ifdef CONFIG_PAX_MEMORY_UDEREF
30222+ mov pax_user_shadow_base,%_ASM_DX
30223+ cmp %_ASM_DX,%_ASM_AX
30224+ jae 1234f
30225+ add %_ASM_DX,%_ASM_AX
30226+1234:
30227+#endif
30228+
30229 ASM_STAC
30230 4: movq -7(%_ASM_AX),%rdx
30231 xor %eax,%eax
30232 ASM_CLAC
30233+ pax_force_retaddr
30234 ret
30235 #else
30236 add $7,%_ASM_AX
30237@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30238 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30239 jae bad_get_user_8
30240 ASM_STAC
30241-4: movl -7(%_ASM_AX),%edx
30242-5: movl -3(%_ASM_AX),%ecx
30243+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30244+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30245 xor %eax,%eax
30246 ASM_CLAC
30247+ pax_force_retaddr
30248 ret
30249 #endif
30250 CFI_ENDPROC
30251@@ -113,6 +175,7 @@ bad_get_user:
30252 xor %edx,%edx
30253 mov $(-EFAULT),%_ASM_AX
30254 ASM_CLAC
30255+ pax_force_retaddr
30256 ret
30257 CFI_ENDPROC
30258 END(bad_get_user)
30259@@ -124,6 +187,7 @@ bad_get_user_8:
30260 xor %ecx,%ecx
30261 mov $(-EFAULT),%_ASM_AX
30262 ASM_CLAC
30263+ pax_force_retaddr
30264 ret
30265 CFI_ENDPROC
30266 END(bad_get_user_8)
30267diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30268index 1313ae6..84f25ea 100644
30269--- a/arch/x86/lib/insn.c
30270+++ b/arch/x86/lib/insn.c
30271@@ -20,8 +20,10 @@
30272
30273 #ifdef __KERNEL__
30274 #include <linux/string.h>
30275+#include <asm/pgtable_types.h>
30276 #else
30277 #include <string.h>
30278+#define ktla_ktva(addr) addr
30279 #endif
30280 #include <asm/inat.h>
30281 #include <asm/insn.h>
30282@@ -53,9 +55,9 @@
30283 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30284 {
30285 memset(insn, 0, sizeof(*insn));
30286- insn->kaddr = kaddr;
30287- insn->end_kaddr = kaddr + buf_len;
30288- insn->next_byte = kaddr;
30289+ insn->kaddr = ktla_ktva(kaddr);
30290+ insn->end_kaddr = insn->kaddr + buf_len;
30291+ insn->next_byte = insn->kaddr;
30292 insn->x86_64 = x86_64 ? 1 : 0;
30293 insn->opnd_bytes = 4;
30294 if (x86_64)
30295diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30296index 05a95e7..326f2fa 100644
30297--- a/arch/x86/lib/iomap_copy_64.S
30298+++ b/arch/x86/lib/iomap_copy_64.S
30299@@ -17,6 +17,7 @@
30300
30301 #include <linux/linkage.h>
30302 #include <asm/dwarf2.h>
30303+#include <asm/alternative-asm.h>
30304
30305 /*
30306 * override generic version in lib/iomap_copy.c
30307@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30308 CFI_STARTPROC
30309 movl %edx,%ecx
30310 rep movsd
30311+ pax_force_retaddr
30312 ret
30313 CFI_ENDPROC
30314 ENDPROC(__iowrite32_copy)
30315diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30316index 56313a3..0db417e 100644
30317--- a/arch/x86/lib/memcpy_64.S
30318+++ b/arch/x86/lib/memcpy_64.S
30319@@ -24,7 +24,7 @@
30320 * This gets patched over the unrolled variant (below) via the
30321 * alternative instructions framework:
30322 */
30323- .section .altinstr_replacement, "ax", @progbits
30324+ .section .altinstr_replacement, "a", @progbits
30325 .Lmemcpy_c:
30326 movq %rdi, %rax
30327 movq %rdx, %rcx
30328@@ -33,6 +33,7 @@
30329 rep movsq
30330 movl %edx, %ecx
30331 rep movsb
30332+ pax_force_retaddr
30333 ret
30334 .Lmemcpy_e:
30335 .previous
30336@@ -44,11 +45,12 @@
30337 * This gets patched over the unrolled variant (below) via the
30338 * alternative instructions framework:
30339 */
30340- .section .altinstr_replacement, "ax", @progbits
30341+ .section .altinstr_replacement, "a", @progbits
30342 .Lmemcpy_c_e:
30343 movq %rdi, %rax
30344 movq %rdx, %rcx
30345 rep movsb
30346+ pax_force_retaddr
30347 ret
30348 .Lmemcpy_e_e:
30349 .previous
30350@@ -136,6 +138,7 @@ ENTRY(memcpy)
30351 movq %r9, 1*8(%rdi)
30352 movq %r10, -2*8(%rdi, %rdx)
30353 movq %r11, -1*8(%rdi, %rdx)
30354+ pax_force_retaddr
30355 retq
30356 .p2align 4
30357 .Lless_16bytes:
30358@@ -148,6 +151,7 @@ ENTRY(memcpy)
30359 movq -1*8(%rsi, %rdx), %r9
30360 movq %r8, 0*8(%rdi)
30361 movq %r9, -1*8(%rdi, %rdx)
30362+ pax_force_retaddr
30363 retq
30364 .p2align 4
30365 .Lless_8bytes:
30366@@ -161,6 +165,7 @@ ENTRY(memcpy)
30367 movl -4(%rsi, %rdx), %r8d
30368 movl %ecx, (%rdi)
30369 movl %r8d, -4(%rdi, %rdx)
30370+ pax_force_retaddr
30371 retq
30372 .p2align 4
30373 .Lless_3bytes:
30374@@ -179,6 +184,7 @@ ENTRY(memcpy)
30375 movb %cl, (%rdi)
30376
30377 .Lend:
30378+ pax_force_retaddr
30379 retq
30380 CFI_ENDPROC
30381 ENDPROC(memcpy)
30382diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30383index 65268a6..dd1de11 100644
30384--- a/arch/x86/lib/memmove_64.S
30385+++ b/arch/x86/lib/memmove_64.S
30386@@ -202,14 +202,16 @@ ENTRY(memmove)
30387 movb (%rsi), %r11b
30388 movb %r11b, (%rdi)
30389 13:
30390+ pax_force_retaddr
30391 retq
30392 CFI_ENDPROC
30393
30394- .section .altinstr_replacement,"ax"
30395+ .section .altinstr_replacement,"a"
30396 .Lmemmove_begin_forward_efs:
30397 /* Forward moving data. */
30398 movq %rdx, %rcx
30399 rep movsb
30400+ pax_force_retaddr
30401 retq
30402 .Lmemmove_end_forward_efs:
30403 .previous
30404diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30405index 2dcb380..2eb79fe 100644
30406--- a/arch/x86/lib/memset_64.S
30407+++ b/arch/x86/lib/memset_64.S
30408@@ -16,7 +16,7 @@
30409 *
30410 * rax original destination
30411 */
30412- .section .altinstr_replacement, "ax", @progbits
30413+ .section .altinstr_replacement, "a", @progbits
30414 .Lmemset_c:
30415 movq %rdi,%r9
30416 movq %rdx,%rcx
30417@@ -30,6 +30,7 @@
30418 movl %edx,%ecx
30419 rep stosb
30420 movq %r9,%rax
30421+ pax_force_retaddr
30422 ret
30423 .Lmemset_e:
30424 .previous
30425@@ -45,13 +46,14 @@
30426 *
30427 * rax original destination
30428 */
30429- .section .altinstr_replacement, "ax", @progbits
30430+ .section .altinstr_replacement, "a", @progbits
30431 .Lmemset_c_e:
30432 movq %rdi,%r9
30433 movb %sil,%al
30434 movq %rdx,%rcx
30435 rep stosb
30436 movq %r9,%rax
30437+ pax_force_retaddr
30438 ret
30439 .Lmemset_e_e:
30440 .previous
30441@@ -118,6 +120,7 @@ ENTRY(__memset)
30442
30443 .Lende:
30444 movq %r10,%rax
30445+ pax_force_retaddr
30446 ret
30447
30448 CFI_RESTORE_STATE
30449diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30450index c9f2d9b..e7fd2c0 100644
30451--- a/arch/x86/lib/mmx_32.c
30452+++ b/arch/x86/lib/mmx_32.c
30453@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30454 {
30455 void *p;
30456 int i;
30457+ unsigned long cr0;
30458
30459 if (unlikely(in_interrupt()))
30460 return __memcpy(to, from, len);
30461@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30462 kernel_fpu_begin();
30463
30464 __asm__ __volatile__ (
30465- "1: prefetch (%0)\n" /* This set is 28 bytes */
30466- " prefetch 64(%0)\n"
30467- " prefetch 128(%0)\n"
30468- " prefetch 192(%0)\n"
30469- " prefetch 256(%0)\n"
30470+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30471+ " prefetch 64(%1)\n"
30472+ " prefetch 128(%1)\n"
30473+ " prefetch 192(%1)\n"
30474+ " prefetch 256(%1)\n"
30475 "2: \n"
30476 ".section .fixup, \"ax\"\n"
30477- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30478+ "3: \n"
30479+
30480+#ifdef CONFIG_PAX_KERNEXEC
30481+ " movl %%cr0, %0\n"
30482+ " movl %0, %%eax\n"
30483+ " andl $0xFFFEFFFF, %%eax\n"
30484+ " movl %%eax, %%cr0\n"
30485+#endif
30486+
30487+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30488+
30489+#ifdef CONFIG_PAX_KERNEXEC
30490+ " movl %0, %%cr0\n"
30491+#endif
30492+
30493 " jmp 2b\n"
30494 ".previous\n"
30495 _ASM_EXTABLE(1b, 3b)
30496- : : "r" (from));
30497+ : "=&r" (cr0) : "r" (from) : "ax");
30498
30499 for ( ; i > 5; i--) {
30500 __asm__ __volatile__ (
30501- "1: prefetch 320(%0)\n"
30502- "2: movq (%0), %%mm0\n"
30503- " movq 8(%0), %%mm1\n"
30504- " movq 16(%0), %%mm2\n"
30505- " movq 24(%0), %%mm3\n"
30506- " movq %%mm0, (%1)\n"
30507- " movq %%mm1, 8(%1)\n"
30508- " movq %%mm2, 16(%1)\n"
30509- " movq %%mm3, 24(%1)\n"
30510- " movq 32(%0), %%mm0\n"
30511- " movq 40(%0), %%mm1\n"
30512- " movq 48(%0), %%mm2\n"
30513- " movq 56(%0), %%mm3\n"
30514- " movq %%mm0, 32(%1)\n"
30515- " movq %%mm1, 40(%1)\n"
30516- " movq %%mm2, 48(%1)\n"
30517- " movq %%mm3, 56(%1)\n"
30518+ "1: prefetch 320(%1)\n"
30519+ "2: movq (%1), %%mm0\n"
30520+ " movq 8(%1), %%mm1\n"
30521+ " movq 16(%1), %%mm2\n"
30522+ " movq 24(%1), %%mm3\n"
30523+ " movq %%mm0, (%2)\n"
30524+ " movq %%mm1, 8(%2)\n"
30525+ " movq %%mm2, 16(%2)\n"
30526+ " movq %%mm3, 24(%2)\n"
30527+ " movq 32(%1), %%mm0\n"
30528+ " movq 40(%1), %%mm1\n"
30529+ " movq 48(%1), %%mm2\n"
30530+ " movq 56(%1), %%mm3\n"
30531+ " movq %%mm0, 32(%2)\n"
30532+ " movq %%mm1, 40(%2)\n"
30533+ " movq %%mm2, 48(%2)\n"
30534+ " movq %%mm3, 56(%2)\n"
30535 ".section .fixup, \"ax\"\n"
30536- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30537+ "3:\n"
30538+
30539+#ifdef CONFIG_PAX_KERNEXEC
30540+ " movl %%cr0, %0\n"
30541+ " movl %0, %%eax\n"
30542+ " andl $0xFFFEFFFF, %%eax\n"
30543+ " movl %%eax, %%cr0\n"
30544+#endif
30545+
30546+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30547+
30548+#ifdef CONFIG_PAX_KERNEXEC
30549+ " movl %0, %%cr0\n"
30550+#endif
30551+
30552 " jmp 2b\n"
30553 ".previous\n"
30554 _ASM_EXTABLE(1b, 3b)
30555- : : "r" (from), "r" (to) : "memory");
30556+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30557
30558 from += 64;
30559 to += 64;
30560@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30561 static void fast_copy_page(void *to, void *from)
30562 {
30563 int i;
30564+ unsigned long cr0;
30565
30566 kernel_fpu_begin();
30567
30568@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30569 * but that is for later. -AV
30570 */
30571 __asm__ __volatile__(
30572- "1: prefetch (%0)\n"
30573- " prefetch 64(%0)\n"
30574- " prefetch 128(%0)\n"
30575- " prefetch 192(%0)\n"
30576- " prefetch 256(%0)\n"
30577+ "1: prefetch (%1)\n"
30578+ " prefetch 64(%1)\n"
30579+ " prefetch 128(%1)\n"
30580+ " prefetch 192(%1)\n"
30581+ " prefetch 256(%1)\n"
30582 "2: \n"
30583 ".section .fixup, \"ax\"\n"
30584- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30585+ "3: \n"
30586+
30587+#ifdef CONFIG_PAX_KERNEXEC
30588+ " movl %%cr0, %0\n"
30589+ " movl %0, %%eax\n"
30590+ " andl $0xFFFEFFFF, %%eax\n"
30591+ " movl %%eax, %%cr0\n"
30592+#endif
30593+
30594+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30595+
30596+#ifdef CONFIG_PAX_KERNEXEC
30597+ " movl %0, %%cr0\n"
30598+#endif
30599+
30600 " jmp 2b\n"
30601 ".previous\n"
30602- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30603+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30604
30605 for (i = 0; i < (4096-320)/64; i++) {
30606 __asm__ __volatile__ (
30607- "1: prefetch 320(%0)\n"
30608- "2: movq (%0), %%mm0\n"
30609- " movntq %%mm0, (%1)\n"
30610- " movq 8(%0), %%mm1\n"
30611- " movntq %%mm1, 8(%1)\n"
30612- " movq 16(%0), %%mm2\n"
30613- " movntq %%mm2, 16(%1)\n"
30614- " movq 24(%0), %%mm3\n"
30615- " movntq %%mm3, 24(%1)\n"
30616- " movq 32(%0), %%mm4\n"
30617- " movntq %%mm4, 32(%1)\n"
30618- " movq 40(%0), %%mm5\n"
30619- " movntq %%mm5, 40(%1)\n"
30620- " movq 48(%0), %%mm6\n"
30621- " movntq %%mm6, 48(%1)\n"
30622- " movq 56(%0), %%mm7\n"
30623- " movntq %%mm7, 56(%1)\n"
30624+ "1: prefetch 320(%1)\n"
30625+ "2: movq (%1), %%mm0\n"
30626+ " movntq %%mm0, (%2)\n"
30627+ " movq 8(%1), %%mm1\n"
30628+ " movntq %%mm1, 8(%2)\n"
30629+ " movq 16(%1), %%mm2\n"
30630+ " movntq %%mm2, 16(%2)\n"
30631+ " movq 24(%1), %%mm3\n"
30632+ " movntq %%mm3, 24(%2)\n"
30633+ " movq 32(%1), %%mm4\n"
30634+ " movntq %%mm4, 32(%2)\n"
30635+ " movq 40(%1), %%mm5\n"
30636+ " movntq %%mm5, 40(%2)\n"
30637+ " movq 48(%1), %%mm6\n"
30638+ " movntq %%mm6, 48(%2)\n"
30639+ " movq 56(%1), %%mm7\n"
30640+ " movntq %%mm7, 56(%2)\n"
30641 ".section .fixup, \"ax\"\n"
30642- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30643+ "3:\n"
30644+
30645+#ifdef CONFIG_PAX_KERNEXEC
30646+ " movl %%cr0, %0\n"
30647+ " movl %0, %%eax\n"
30648+ " andl $0xFFFEFFFF, %%eax\n"
30649+ " movl %%eax, %%cr0\n"
30650+#endif
30651+
30652+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30653+
30654+#ifdef CONFIG_PAX_KERNEXEC
30655+ " movl %0, %%cr0\n"
30656+#endif
30657+
30658 " jmp 2b\n"
30659 ".previous\n"
30660- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30661+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30662
30663 from += 64;
30664 to += 64;
30665@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30666 static void fast_copy_page(void *to, void *from)
30667 {
30668 int i;
30669+ unsigned long cr0;
30670
30671 kernel_fpu_begin();
30672
30673 __asm__ __volatile__ (
30674- "1: prefetch (%0)\n"
30675- " prefetch 64(%0)\n"
30676- " prefetch 128(%0)\n"
30677- " prefetch 192(%0)\n"
30678- " prefetch 256(%0)\n"
30679+ "1: prefetch (%1)\n"
30680+ " prefetch 64(%1)\n"
30681+ " prefetch 128(%1)\n"
30682+ " prefetch 192(%1)\n"
30683+ " prefetch 256(%1)\n"
30684 "2: \n"
30685 ".section .fixup, \"ax\"\n"
30686- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30687+ "3: \n"
30688+
30689+#ifdef CONFIG_PAX_KERNEXEC
30690+ " movl %%cr0, %0\n"
30691+ " movl %0, %%eax\n"
30692+ " andl $0xFFFEFFFF, %%eax\n"
30693+ " movl %%eax, %%cr0\n"
30694+#endif
30695+
30696+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30697+
30698+#ifdef CONFIG_PAX_KERNEXEC
30699+ " movl %0, %%cr0\n"
30700+#endif
30701+
30702 " jmp 2b\n"
30703 ".previous\n"
30704- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30705+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30706
30707 for (i = 0; i < 4096/64; i++) {
30708 __asm__ __volatile__ (
30709- "1: prefetch 320(%0)\n"
30710- "2: movq (%0), %%mm0\n"
30711- " movq 8(%0), %%mm1\n"
30712- " movq 16(%0), %%mm2\n"
30713- " movq 24(%0), %%mm3\n"
30714- " movq %%mm0, (%1)\n"
30715- " movq %%mm1, 8(%1)\n"
30716- " movq %%mm2, 16(%1)\n"
30717- " movq %%mm3, 24(%1)\n"
30718- " movq 32(%0), %%mm0\n"
30719- " movq 40(%0), %%mm1\n"
30720- " movq 48(%0), %%mm2\n"
30721- " movq 56(%0), %%mm3\n"
30722- " movq %%mm0, 32(%1)\n"
30723- " movq %%mm1, 40(%1)\n"
30724- " movq %%mm2, 48(%1)\n"
30725- " movq %%mm3, 56(%1)\n"
30726+ "1: prefetch 320(%1)\n"
30727+ "2: movq (%1), %%mm0\n"
30728+ " movq 8(%1), %%mm1\n"
30729+ " movq 16(%1), %%mm2\n"
30730+ " movq 24(%1), %%mm3\n"
30731+ " movq %%mm0, (%2)\n"
30732+ " movq %%mm1, 8(%2)\n"
30733+ " movq %%mm2, 16(%2)\n"
30734+ " movq %%mm3, 24(%2)\n"
30735+ " movq 32(%1), %%mm0\n"
30736+ " movq 40(%1), %%mm1\n"
30737+ " movq 48(%1), %%mm2\n"
30738+ " movq 56(%1), %%mm3\n"
30739+ " movq %%mm0, 32(%2)\n"
30740+ " movq %%mm1, 40(%2)\n"
30741+ " movq %%mm2, 48(%2)\n"
30742+ " movq %%mm3, 56(%2)\n"
30743 ".section .fixup, \"ax\"\n"
30744- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30745+ "3:\n"
30746+
30747+#ifdef CONFIG_PAX_KERNEXEC
30748+ " movl %%cr0, %0\n"
30749+ " movl %0, %%eax\n"
30750+ " andl $0xFFFEFFFF, %%eax\n"
30751+ " movl %%eax, %%cr0\n"
30752+#endif
30753+
30754+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30755+
30756+#ifdef CONFIG_PAX_KERNEXEC
30757+ " movl %0, %%cr0\n"
30758+#endif
30759+
30760 " jmp 2b\n"
30761 ".previous\n"
30762 _ASM_EXTABLE(1b, 3b)
30763- : : "r" (from), "r" (to) : "memory");
30764+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30765
30766 from += 64;
30767 to += 64;
30768diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30769index f6d13ee..d789440 100644
30770--- a/arch/x86/lib/msr-reg.S
30771+++ b/arch/x86/lib/msr-reg.S
30772@@ -3,6 +3,7 @@
30773 #include <asm/dwarf2.h>
30774 #include <asm/asm.h>
30775 #include <asm/msr.h>
30776+#include <asm/alternative-asm.h>
30777
30778 #ifdef CONFIG_X86_64
30779 /*
30780@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30781 movl %edi, 28(%r10)
30782 popq_cfi %rbp
30783 popq_cfi %rbx
30784+ pax_force_retaddr
30785 ret
30786 3:
30787 CFI_RESTORE_STATE
30788diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30789index fc6ba17..d4d989d 100644
30790--- a/arch/x86/lib/putuser.S
30791+++ b/arch/x86/lib/putuser.S
30792@@ -16,7 +16,9 @@
30793 #include <asm/errno.h>
30794 #include <asm/asm.h>
30795 #include <asm/smap.h>
30796-
30797+#include <asm/segment.h>
30798+#include <asm/pgtable.h>
30799+#include <asm/alternative-asm.h>
30800
30801 /*
30802 * __put_user_X
30803@@ -30,57 +32,125 @@
30804 * as they get called from within inline assembly.
30805 */
30806
30807-#define ENTER CFI_STARTPROC ; \
30808- GET_THREAD_INFO(%_ASM_BX)
30809-#define EXIT ASM_CLAC ; \
30810- ret ; \
30811+#define ENTER CFI_STARTPROC
30812+#define EXIT ASM_CLAC ; \
30813+ pax_force_retaddr ; \
30814+ ret ; \
30815 CFI_ENDPROC
30816
30817+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30818+#define _DEST %_ASM_CX,%_ASM_BX
30819+#else
30820+#define _DEST %_ASM_CX
30821+#endif
30822+
30823+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30824+#define __copyuser_seg gs;
30825+#else
30826+#define __copyuser_seg
30827+#endif
30828+
30829 .text
30830 ENTRY(__put_user_1)
30831 ENTER
30832+
30833+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30834+ GET_THREAD_INFO(%_ASM_BX)
30835 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30836 jae bad_put_user
30837 ASM_STAC
30838-1: movb %al,(%_ASM_CX)
30839+
30840+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30841+ mov pax_user_shadow_base,%_ASM_BX
30842+ cmp %_ASM_BX,%_ASM_CX
30843+ jb 1234f
30844+ xor %ebx,%ebx
30845+1234:
30846+#endif
30847+
30848+#endif
30849+
30850+1: __copyuser_seg movb %al,(_DEST)
30851 xor %eax,%eax
30852 EXIT
30853 ENDPROC(__put_user_1)
30854
30855 ENTRY(__put_user_2)
30856 ENTER
30857+
30858+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30859+ GET_THREAD_INFO(%_ASM_BX)
30860 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30861 sub $1,%_ASM_BX
30862 cmp %_ASM_BX,%_ASM_CX
30863 jae bad_put_user
30864 ASM_STAC
30865-2: movw %ax,(%_ASM_CX)
30866+
30867+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30868+ mov pax_user_shadow_base,%_ASM_BX
30869+ cmp %_ASM_BX,%_ASM_CX
30870+ jb 1234f
30871+ xor %ebx,%ebx
30872+1234:
30873+#endif
30874+
30875+#endif
30876+
30877+2: __copyuser_seg movw %ax,(_DEST)
30878 xor %eax,%eax
30879 EXIT
30880 ENDPROC(__put_user_2)
30881
30882 ENTRY(__put_user_4)
30883 ENTER
30884+
30885+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30886+ GET_THREAD_INFO(%_ASM_BX)
30887 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30888 sub $3,%_ASM_BX
30889 cmp %_ASM_BX,%_ASM_CX
30890 jae bad_put_user
30891 ASM_STAC
30892-3: movl %eax,(%_ASM_CX)
30893+
30894+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30895+ mov pax_user_shadow_base,%_ASM_BX
30896+ cmp %_ASM_BX,%_ASM_CX
30897+ jb 1234f
30898+ xor %ebx,%ebx
30899+1234:
30900+#endif
30901+
30902+#endif
30903+
30904+3: __copyuser_seg movl %eax,(_DEST)
30905 xor %eax,%eax
30906 EXIT
30907 ENDPROC(__put_user_4)
30908
30909 ENTRY(__put_user_8)
30910 ENTER
30911+
30912+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30913+ GET_THREAD_INFO(%_ASM_BX)
30914 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30915 sub $7,%_ASM_BX
30916 cmp %_ASM_BX,%_ASM_CX
30917 jae bad_put_user
30918 ASM_STAC
30919-4: mov %_ASM_AX,(%_ASM_CX)
30920+
30921+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30922+ mov pax_user_shadow_base,%_ASM_BX
30923+ cmp %_ASM_BX,%_ASM_CX
30924+ jb 1234f
30925+ xor %ebx,%ebx
30926+1234:
30927+#endif
30928+
30929+#endif
30930+
30931+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30932 #ifdef CONFIG_X86_32
30933-5: movl %edx,4(%_ASM_CX)
30934+5: __copyuser_seg movl %edx,4(_DEST)
30935 #endif
30936 xor %eax,%eax
30937 EXIT
30938diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30939index 5dff5f0..cadebf4 100644
30940--- a/arch/x86/lib/rwsem.S
30941+++ b/arch/x86/lib/rwsem.S
30942@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30943 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30944 CFI_RESTORE __ASM_REG(dx)
30945 restore_common_regs
30946+ pax_force_retaddr
30947 ret
30948 CFI_ENDPROC
30949 ENDPROC(call_rwsem_down_read_failed)
30950@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30951 movq %rax,%rdi
30952 call rwsem_down_write_failed
30953 restore_common_regs
30954+ pax_force_retaddr
30955 ret
30956 CFI_ENDPROC
30957 ENDPROC(call_rwsem_down_write_failed)
30958@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30959 movq %rax,%rdi
30960 call rwsem_wake
30961 restore_common_regs
30962-1: ret
30963+1: pax_force_retaddr
30964+ ret
30965 CFI_ENDPROC
30966 ENDPROC(call_rwsem_wake)
30967
30968@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30969 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30970 CFI_RESTORE __ASM_REG(dx)
30971 restore_common_regs
30972+ pax_force_retaddr
30973 ret
30974 CFI_ENDPROC
30975 ENDPROC(call_rwsem_downgrade_wake)
30976diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
30977index b30b5eb..2b57052 100644
30978--- a/arch/x86/lib/thunk_64.S
30979+++ b/arch/x86/lib/thunk_64.S
30980@@ -9,6 +9,7 @@
30981 #include <asm/dwarf2.h>
30982 #include <asm/calling.h>
30983 #include <asm/asm.h>
30984+#include <asm/alternative-asm.h>
30985
30986 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
30987 .macro THUNK name, func, put_ret_addr_in_rdi=0
30988@@ -16,11 +17,11 @@
30989 \name:
30990 CFI_STARTPROC
30991
30992- /* this one pushes 9 elems, the next one would be %rIP */
30993- SAVE_ARGS
30994+ /* this one pushes 15+1 elems, the next one would be %rIP */
30995+ SAVE_ARGS 8
30996
30997 .if \put_ret_addr_in_rdi
30998- movq_cfi_restore 9*8, rdi
30999+ movq_cfi_restore RIP, rdi
31000 .endif
31001
31002 call \func
31003@@ -47,9 +48,10 @@
31004
31005 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31006 CFI_STARTPROC
31007- SAVE_ARGS
31008+ SAVE_ARGS 8
31009 restore:
31010- RESTORE_ARGS
31011+ RESTORE_ARGS 1,8
31012+ pax_force_retaddr
31013 ret
31014 CFI_ENDPROC
31015 _ASM_NOKPROBE(restore)
31016diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31017index e2f5e21..4b22130 100644
31018--- a/arch/x86/lib/usercopy_32.c
31019+++ b/arch/x86/lib/usercopy_32.c
31020@@ -42,11 +42,13 @@ do { \
31021 int __d0; \
31022 might_fault(); \
31023 __asm__ __volatile__( \
31024+ __COPYUSER_SET_ES \
31025 ASM_STAC "\n" \
31026 "0: rep; stosl\n" \
31027 " movl %2,%0\n" \
31028 "1: rep; stosb\n" \
31029 "2: " ASM_CLAC "\n" \
31030+ __COPYUSER_RESTORE_ES \
31031 ".section .fixup,\"ax\"\n" \
31032 "3: lea 0(%2,%0,4),%0\n" \
31033 " jmp 2b\n" \
31034@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31035
31036 #ifdef CONFIG_X86_INTEL_USERCOPY
31037 static unsigned long
31038-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31039+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31040 {
31041 int d0, d1;
31042 __asm__ __volatile__(
31043@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31044 " .align 2,0x90\n"
31045 "3: movl 0(%4), %%eax\n"
31046 "4: movl 4(%4), %%edx\n"
31047- "5: movl %%eax, 0(%3)\n"
31048- "6: movl %%edx, 4(%3)\n"
31049+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31050+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31051 "7: movl 8(%4), %%eax\n"
31052 "8: movl 12(%4),%%edx\n"
31053- "9: movl %%eax, 8(%3)\n"
31054- "10: movl %%edx, 12(%3)\n"
31055+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31056+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31057 "11: movl 16(%4), %%eax\n"
31058 "12: movl 20(%4), %%edx\n"
31059- "13: movl %%eax, 16(%3)\n"
31060- "14: movl %%edx, 20(%3)\n"
31061+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31062+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31063 "15: movl 24(%4), %%eax\n"
31064 "16: movl 28(%4), %%edx\n"
31065- "17: movl %%eax, 24(%3)\n"
31066- "18: movl %%edx, 28(%3)\n"
31067+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31068+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31069 "19: movl 32(%4), %%eax\n"
31070 "20: movl 36(%4), %%edx\n"
31071- "21: movl %%eax, 32(%3)\n"
31072- "22: movl %%edx, 36(%3)\n"
31073+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31074+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31075 "23: movl 40(%4), %%eax\n"
31076 "24: movl 44(%4), %%edx\n"
31077- "25: movl %%eax, 40(%3)\n"
31078- "26: movl %%edx, 44(%3)\n"
31079+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31080+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31081 "27: movl 48(%4), %%eax\n"
31082 "28: movl 52(%4), %%edx\n"
31083- "29: movl %%eax, 48(%3)\n"
31084- "30: movl %%edx, 52(%3)\n"
31085+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31086+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31087 "31: movl 56(%4), %%eax\n"
31088 "32: movl 60(%4), %%edx\n"
31089- "33: movl %%eax, 56(%3)\n"
31090- "34: movl %%edx, 60(%3)\n"
31091+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31092+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31093 " addl $-64, %0\n"
31094 " addl $64, %4\n"
31095 " addl $64, %3\n"
31096@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31097 " shrl $2, %0\n"
31098 " andl $3, %%eax\n"
31099 " cld\n"
31100+ __COPYUSER_SET_ES
31101 "99: rep; movsl\n"
31102 "36: movl %%eax, %0\n"
31103 "37: rep; movsb\n"
31104 "100:\n"
31105+ __COPYUSER_RESTORE_ES
31106+ ".section .fixup,\"ax\"\n"
31107+ "101: lea 0(%%eax,%0,4),%0\n"
31108+ " jmp 100b\n"
31109+ ".previous\n"
31110+ _ASM_EXTABLE(1b,100b)
31111+ _ASM_EXTABLE(2b,100b)
31112+ _ASM_EXTABLE(3b,100b)
31113+ _ASM_EXTABLE(4b,100b)
31114+ _ASM_EXTABLE(5b,100b)
31115+ _ASM_EXTABLE(6b,100b)
31116+ _ASM_EXTABLE(7b,100b)
31117+ _ASM_EXTABLE(8b,100b)
31118+ _ASM_EXTABLE(9b,100b)
31119+ _ASM_EXTABLE(10b,100b)
31120+ _ASM_EXTABLE(11b,100b)
31121+ _ASM_EXTABLE(12b,100b)
31122+ _ASM_EXTABLE(13b,100b)
31123+ _ASM_EXTABLE(14b,100b)
31124+ _ASM_EXTABLE(15b,100b)
31125+ _ASM_EXTABLE(16b,100b)
31126+ _ASM_EXTABLE(17b,100b)
31127+ _ASM_EXTABLE(18b,100b)
31128+ _ASM_EXTABLE(19b,100b)
31129+ _ASM_EXTABLE(20b,100b)
31130+ _ASM_EXTABLE(21b,100b)
31131+ _ASM_EXTABLE(22b,100b)
31132+ _ASM_EXTABLE(23b,100b)
31133+ _ASM_EXTABLE(24b,100b)
31134+ _ASM_EXTABLE(25b,100b)
31135+ _ASM_EXTABLE(26b,100b)
31136+ _ASM_EXTABLE(27b,100b)
31137+ _ASM_EXTABLE(28b,100b)
31138+ _ASM_EXTABLE(29b,100b)
31139+ _ASM_EXTABLE(30b,100b)
31140+ _ASM_EXTABLE(31b,100b)
31141+ _ASM_EXTABLE(32b,100b)
31142+ _ASM_EXTABLE(33b,100b)
31143+ _ASM_EXTABLE(34b,100b)
31144+ _ASM_EXTABLE(35b,100b)
31145+ _ASM_EXTABLE(36b,100b)
31146+ _ASM_EXTABLE(37b,100b)
31147+ _ASM_EXTABLE(99b,101b)
31148+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31149+ : "1"(to), "2"(from), "0"(size)
31150+ : "eax", "edx", "memory");
31151+ return size;
31152+}
31153+
31154+static unsigned long
31155+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31156+{
31157+ int d0, d1;
31158+ __asm__ __volatile__(
31159+ " .align 2,0x90\n"
31160+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31161+ " cmpl $67, %0\n"
31162+ " jbe 3f\n"
31163+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31164+ " .align 2,0x90\n"
31165+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31166+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31167+ "5: movl %%eax, 0(%3)\n"
31168+ "6: movl %%edx, 4(%3)\n"
31169+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31170+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31171+ "9: movl %%eax, 8(%3)\n"
31172+ "10: movl %%edx, 12(%3)\n"
31173+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31174+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31175+ "13: movl %%eax, 16(%3)\n"
31176+ "14: movl %%edx, 20(%3)\n"
31177+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31178+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31179+ "17: movl %%eax, 24(%3)\n"
31180+ "18: movl %%edx, 28(%3)\n"
31181+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31182+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31183+ "21: movl %%eax, 32(%3)\n"
31184+ "22: movl %%edx, 36(%3)\n"
31185+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31186+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31187+ "25: movl %%eax, 40(%3)\n"
31188+ "26: movl %%edx, 44(%3)\n"
31189+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31190+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31191+ "29: movl %%eax, 48(%3)\n"
31192+ "30: movl %%edx, 52(%3)\n"
31193+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31194+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31195+ "33: movl %%eax, 56(%3)\n"
31196+ "34: movl %%edx, 60(%3)\n"
31197+ " addl $-64, %0\n"
31198+ " addl $64, %4\n"
31199+ " addl $64, %3\n"
31200+ " cmpl $63, %0\n"
31201+ " ja 1b\n"
31202+ "35: movl %0, %%eax\n"
31203+ " shrl $2, %0\n"
31204+ " andl $3, %%eax\n"
31205+ " cld\n"
31206+ "99: rep; "__copyuser_seg" movsl\n"
31207+ "36: movl %%eax, %0\n"
31208+ "37: rep; "__copyuser_seg" movsb\n"
31209+ "100:\n"
31210 ".section .fixup,\"ax\"\n"
31211 "101: lea 0(%%eax,%0,4),%0\n"
31212 " jmp 100b\n"
31213@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31214 int d0, d1;
31215 __asm__ __volatile__(
31216 " .align 2,0x90\n"
31217- "0: movl 32(%4), %%eax\n"
31218+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31219 " cmpl $67, %0\n"
31220 " jbe 2f\n"
31221- "1: movl 64(%4), %%eax\n"
31222+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31223 " .align 2,0x90\n"
31224- "2: movl 0(%4), %%eax\n"
31225- "21: movl 4(%4), %%edx\n"
31226+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31227+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31228 " movl %%eax, 0(%3)\n"
31229 " movl %%edx, 4(%3)\n"
31230- "3: movl 8(%4), %%eax\n"
31231- "31: movl 12(%4),%%edx\n"
31232+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31233+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31234 " movl %%eax, 8(%3)\n"
31235 " movl %%edx, 12(%3)\n"
31236- "4: movl 16(%4), %%eax\n"
31237- "41: movl 20(%4), %%edx\n"
31238+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31239+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31240 " movl %%eax, 16(%3)\n"
31241 " movl %%edx, 20(%3)\n"
31242- "10: movl 24(%4), %%eax\n"
31243- "51: movl 28(%4), %%edx\n"
31244+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31245+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31246 " movl %%eax, 24(%3)\n"
31247 " movl %%edx, 28(%3)\n"
31248- "11: movl 32(%4), %%eax\n"
31249- "61: movl 36(%4), %%edx\n"
31250+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31251+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31252 " movl %%eax, 32(%3)\n"
31253 " movl %%edx, 36(%3)\n"
31254- "12: movl 40(%4), %%eax\n"
31255- "71: movl 44(%4), %%edx\n"
31256+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31257+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31258 " movl %%eax, 40(%3)\n"
31259 " movl %%edx, 44(%3)\n"
31260- "13: movl 48(%4), %%eax\n"
31261- "81: movl 52(%4), %%edx\n"
31262+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31263+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31264 " movl %%eax, 48(%3)\n"
31265 " movl %%edx, 52(%3)\n"
31266- "14: movl 56(%4), %%eax\n"
31267- "91: movl 60(%4), %%edx\n"
31268+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31269+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31270 " movl %%eax, 56(%3)\n"
31271 " movl %%edx, 60(%3)\n"
31272 " addl $-64, %0\n"
31273@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31274 " shrl $2, %0\n"
31275 " andl $3, %%eax\n"
31276 " cld\n"
31277- "6: rep; movsl\n"
31278+ "6: rep; "__copyuser_seg" movsl\n"
31279 " movl %%eax,%0\n"
31280- "7: rep; movsb\n"
31281+ "7: rep; "__copyuser_seg" movsb\n"
31282 "8:\n"
31283 ".section .fixup,\"ax\"\n"
31284 "9: lea 0(%%eax,%0,4),%0\n"
31285@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31286
31287 __asm__ __volatile__(
31288 " .align 2,0x90\n"
31289- "0: movl 32(%4), %%eax\n"
31290+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31291 " cmpl $67, %0\n"
31292 " jbe 2f\n"
31293- "1: movl 64(%4), %%eax\n"
31294+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31295 " .align 2,0x90\n"
31296- "2: movl 0(%4), %%eax\n"
31297- "21: movl 4(%4), %%edx\n"
31298+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31299+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31300 " movnti %%eax, 0(%3)\n"
31301 " movnti %%edx, 4(%3)\n"
31302- "3: movl 8(%4), %%eax\n"
31303- "31: movl 12(%4),%%edx\n"
31304+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31305+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31306 " movnti %%eax, 8(%3)\n"
31307 " movnti %%edx, 12(%3)\n"
31308- "4: movl 16(%4), %%eax\n"
31309- "41: movl 20(%4), %%edx\n"
31310+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31311+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31312 " movnti %%eax, 16(%3)\n"
31313 " movnti %%edx, 20(%3)\n"
31314- "10: movl 24(%4), %%eax\n"
31315- "51: movl 28(%4), %%edx\n"
31316+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31317+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31318 " movnti %%eax, 24(%3)\n"
31319 " movnti %%edx, 28(%3)\n"
31320- "11: movl 32(%4), %%eax\n"
31321- "61: movl 36(%4), %%edx\n"
31322+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31323+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31324 " movnti %%eax, 32(%3)\n"
31325 " movnti %%edx, 36(%3)\n"
31326- "12: movl 40(%4), %%eax\n"
31327- "71: movl 44(%4), %%edx\n"
31328+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31329+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31330 " movnti %%eax, 40(%3)\n"
31331 " movnti %%edx, 44(%3)\n"
31332- "13: movl 48(%4), %%eax\n"
31333- "81: movl 52(%4), %%edx\n"
31334+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31335+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31336 " movnti %%eax, 48(%3)\n"
31337 " movnti %%edx, 52(%3)\n"
31338- "14: movl 56(%4), %%eax\n"
31339- "91: movl 60(%4), %%edx\n"
31340+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31341+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31342 " movnti %%eax, 56(%3)\n"
31343 " movnti %%edx, 60(%3)\n"
31344 " addl $-64, %0\n"
31345@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31346 " shrl $2, %0\n"
31347 " andl $3, %%eax\n"
31348 " cld\n"
31349- "6: rep; movsl\n"
31350+ "6: rep; "__copyuser_seg" movsl\n"
31351 " movl %%eax,%0\n"
31352- "7: rep; movsb\n"
31353+ "7: rep; "__copyuser_seg" movsb\n"
31354 "8:\n"
31355 ".section .fixup,\"ax\"\n"
31356 "9: lea 0(%%eax,%0,4),%0\n"
31357@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31358
31359 __asm__ __volatile__(
31360 " .align 2,0x90\n"
31361- "0: movl 32(%4), %%eax\n"
31362+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31363 " cmpl $67, %0\n"
31364 " jbe 2f\n"
31365- "1: movl 64(%4), %%eax\n"
31366+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31367 " .align 2,0x90\n"
31368- "2: movl 0(%4), %%eax\n"
31369- "21: movl 4(%4), %%edx\n"
31370+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31371+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31372 " movnti %%eax, 0(%3)\n"
31373 " movnti %%edx, 4(%3)\n"
31374- "3: movl 8(%4), %%eax\n"
31375- "31: movl 12(%4),%%edx\n"
31376+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31377+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31378 " movnti %%eax, 8(%3)\n"
31379 " movnti %%edx, 12(%3)\n"
31380- "4: movl 16(%4), %%eax\n"
31381- "41: movl 20(%4), %%edx\n"
31382+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31383+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31384 " movnti %%eax, 16(%3)\n"
31385 " movnti %%edx, 20(%3)\n"
31386- "10: movl 24(%4), %%eax\n"
31387- "51: movl 28(%4), %%edx\n"
31388+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31389+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31390 " movnti %%eax, 24(%3)\n"
31391 " movnti %%edx, 28(%3)\n"
31392- "11: movl 32(%4), %%eax\n"
31393- "61: movl 36(%4), %%edx\n"
31394+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31395+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31396 " movnti %%eax, 32(%3)\n"
31397 " movnti %%edx, 36(%3)\n"
31398- "12: movl 40(%4), %%eax\n"
31399- "71: movl 44(%4), %%edx\n"
31400+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31401+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31402 " movnti %%eax, 40(%3)\n"
31403 " movnti %%edx, 44(%3)\n"
31404- "13: movl 48(%4), %%eax\n"
31405- "81: movl 52(%4), %%edx\n"
31406+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31407+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31408 " movnti %%eax, 48(%3)\n"
31409 " movnti %%edx, 52(%3)\n"
31410- "14: movl 56(%4), %%eax\n"
31411- "91: movl 60(%4), %%edx\n"
31412+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31413+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31414 " movnti %%eax, 56(%3)\n"
31415 " movnti %%edx, 60(%3)\n"
31416 " addl $-64, %0\n"
31417@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31418 " shrl $2, %0\n"
31419 " andl $3, %%eax\n"
31420 " cld\n"
31421- "6: rep; movsl\n"
31422+ "6: rep; "__copyuser_seg" movsl\n"
31423 " movl %%eax,%0\n"
31424- "7: rep; movsb\n"
31425+ "7: rep; "__copyuser_seg" movsb\n"
31426 "8:\n"
31427 ".section .fixup,\"ax\"\n"
31428 "9: lea 0(%%eax,%0,4),%0\n"
31429@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31430 */
31431 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31432 unsigned long size);
31433-unsigned long __copy_user_intel(void __user *to, const void *from,
31434+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31435+ unsigned long size);
31436+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31437 unsigned long size);
31438 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31439 const void __user *from, unsigned long size);
31440 #endif /* CONFIG_X86_INTEL_USERCOPY */
31441
31442 /* Generic arbitrary sized copy. */
31443-#define __copy_user(to, from, size) \
31444+#define __copy_user(to, from, size, prefix, set, restore) \
31445 do { \
31446 int __d0, __d1, __d2; \
31447 __asm__ __volatile__( \
31448+ set \
31449 " cmp $7,%0\n" \
31450 " jbe 1f\n" \
31451 " movl %1,%0\n" \
31452 " negl %0\n" \
31453 " andl $7,%0\n" \
31454 " subl %0,%3\n" \
31455- "4: rep; movsb\n" \
31456+ "4: rep; "prefix"movsb\n" \
31457 " movl %3,%0\n" \
31458 " shrl $2,%0\n" \
31459 " andl $3,%3\n" \
31460 " .align 2,0x90\n" \
31461- "0: rep; movsl\n" \
31462+ "0: rep; "prefix"movsl\n" \
31463 " movl %3,%0\n" \
31464- "1: rep; movsb\n" \
31465+ "1: rep; "prefix"movsb\n" \
31466 "2:\n" \
31467+ restore \
31468 ".section .fixup,\"ax\"\n" \
31469 "5: addl %3,%0\n" \
31470 " jmp 2b\n" \
31471@@ -538,14 +650,14 @@ do { \
31472 " negl %0\n" \
31473 " andl $7,%0\n" \
31474 " subl %0,%3\n" \
31475- "4: rep; movsb\n" \
31476+ "4: rep; "__copyuser_seg"movsb\n" \
31477 " movl %3,%0\n" \
31478 " shrl $2,%0\n" \
31479 " andl $3,%3\n" \
31480 " .align 2,0x90\n" \
31481- "0: rep; movsl\n" \
31482+ "0: rep; "__copyuser_seg"movsl\n" \
31483 " movl %3,%0\n" \
31484- "1: rep; movsb\n" \
31485+ "1: rep; "__copyuser_seg"movsb\n" \
31486 "2:\n" \
31487 ".section .fixup,\"ax\"\n" \
31488 "5: addl %3,%0\n" \
31489@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31490 {
31491 stac();
31492 if (movsl_is_ok(to, from, n))
31493- __copy_user(to, from, n);
31494+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31495 else
31496- n = __copy_user_intel(to, from, n);
31497+ n = __generic_copy_to_user_intel(to, from, n);
31498 clac();
31499 return n;
31500 }
31501@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31502 {
31503 stac();
31504 if (movsl_is_ok(to, from, n))
31505- __copy_user(to, from, n);
31506+ __copy_user(to, from, n, __copyuser_seg, "", "");
31507 else
31508- n = __copy_user_intel((void __user *)to,
31509- (const void *)from, n);
31510+ n = __generic_copy_from_user_intel(to, from, n);
31511 clac();
31512 return n;
31513 }
31514@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31515 if (n > 64 && cpu_has_xmm2)
31516 n = __copy_user_intel_nocache(to, from, n);
31517 else
31518- __copy_user(to, from, n);
31519+ __copy_user(to, from, n, __copyuser_seg, "", "");
31520 #else
31521- __copy_user(to, from, n);
31522+ __copy_user(to, from, n, __copyuser_seg, "", "");
31523 #endif
31524 clac();
31525 return n;
31526 }
31527 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31528
31529-/**
31530- * copy_to_user: - Copy a block of data into user space.
31531- * @to: Destination address, in user space.
31532- * @from: Source address, in kernel space.
31533- * @n: Number of bytes to copy.
31534- *
31535- * Context: User context only. This function may sleep.
31536- *
31537- * Copy data from kernel space to user space.
31538- *
31539- * Returns number of bytes that could not be copied.
31540- * On success, this will be zero.
31541- */
31542-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31543+#ifdef CONFIG_PAX_MEMORY_UDEREF
31544+void __set_fs(mm_segment_t x)
31545 {
31546- if (access_ok(VERIFY_WRITE, to, n))
31547- n = __copy_to_user(to, from, n);
31548- return n;
31549+ switch (x.seg) {
31550+ case 0:
31551+ loadsegment(gs, 0);
31552+ break;
31553+ case TASK_SIZE_MAX:
31554+ loadsegment(gs, __USER_DS);
31555+ break;
31556+ case -1UL:
31557+ loadsegment(gs, __KERNEL_DS);
31558+ break;
31559+ default:
31560+ BUG();
31561+ }
31562 }
31563-EXPORT_SYMBOL(_copy_to_user);
31564+EXPORT_SYMBOL(__set_fs);
31565
31566-/**
31567- * copy_from_user: - Copy a block of data from user space.
31568- * @to: Destination address, in kernel space.
31569- * @from: Source address, in user space.
31570- * @n: Number of bytes to copy.
31571- *
31572- * Context: User context only. This function may sleep.
31573- *
31574- * Copy data from user space to kernel space.
31575- *
31576- * Returns number of bytes that could not be copied.
31577- * On success, this will be zero.
31578- *
31579- * If some data could not be copied, this function will pad the copied
31580- * data to the requested size using zero bytes.
31581- */
31582-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31583+void set_fs(mm_segment_t x)
31584 {
31585- if (access_ok(VERIFY_READ, from, n))
31586- n = __copy_from_user(to, from, n);
31587- else
31588- memset(to, 0, n);
31589- return n;
31590+ current_thread_info()->addr_limit = x;
31591+ __set_fs(x);
31592 }
31593-EXPORT_SYMBOL(_copy_from_user);
31594+EXPORT_SYMBOL(set_fs);
31595+#endif
31596diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31597index c905e89..01ab928 100644
31598--- a/arch/x86/lib/usercopy_64.c
31599+++ b/arch/x86/lib/usercopy_64.c
31600@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31601 might_fault();
31602 /* no memory constraint because it doesn't change any memory gcc knows
31603 about */
31604+ pax_open_userland();
31605 stac();
31606 asm volatile(
31607 " testq %[size8],%[size8]\n"
31608@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31609 _ASM_EXTABLE(0b,3b)
31610 _ASM_EXTABLE(1b,2b)
31611 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31612- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31613+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31614 [zero] "r" (0UL), [eight] "r" (8UL));
31615 clac();
31616+ pax_close_userland();
31617 return size;
31618 }
31619 EXPORT_SYMBOL(__clear_user);
31620@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31621 }
31622 EXPORT_SYMBOL(clear_user);
31623
31624-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31625+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31626 {
31627- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31628- return copy_user_generic((__force void *)to, (__force void *)from, len);
31629- }
31630- return len;
31631+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31632+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31633+ return len;
31634 }
31635 EXPORT_SYMBOL(copy_in_user);
31636
31637@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31638 * it is not necessary to optimize tail handling.
31639 */
31640 __visible unsigned long
31641-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31642+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31643 {
31644 char c;
31645 unsigned zero_len;
31646
31647+ clac();
31648+ pax_close_userland();
31649 for (; len; --len, to++) {
31650 if (__get_user_nocheck(c, from++, sizeof(char)))
31651 break;
31652@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31653 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31654 if (__put_user_nocheck(c, to++, sizeof(char)))
31655 break;
31656- clac();
31657 return len;
31658 }
31659diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31660index ecfdc46..55b9309 100644
31661--- a/arch/x86/mm/Makefile
31662+++ b/arch/x86/mm/Makefile
31663@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31664 obj-$(CONFIG_MEMTEST) += memtest.o
31665
31666 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31667+
31668+quote:="
31669+obj-$(CONFIG_X86_64) += uderef_64.o
31670+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31671diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31672index 903ec1e..c4166b2 100644
31673--- a/arch/x86/mm/extable.c
31674+++ b/arch/x86/mm/extable.c
31675@@ -6,12 +6,24 @@
31676 static inline unsigned long
31677 ex_insn_addr(const struct exception_table_entry *x)
31678 {
31679- return (unsigned long)&x->insn + x->insn;
31680+ unsigned long reloc = 0;
31681+
31682+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31683+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31684+#endif
31685+
31686+ return (unsigned long)&x->insn + x->insn + reloc;
31687 }
31688 static inline unsigned long
31689 ex_fixup_addr(const struct exception_table_entry *x)
31690 {
31691- return (unsigned long)&x->fixup + x->fixup;
31692+ unsigned long reloc = 0;
31693+
31694+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31695+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31696+#endif
31697+
31698+ return (unsigned long)&x->fixup + x->fixup + reloc;
31699 }
31700
31701 int fixup_exception(struct pt_regs *regs)
31702@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31703 unsigned long new_ip;
31704
31705 #ifdef CONFIG_PNPBIOS
31706- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31707+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31708 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31709 extern u32 pnp_bios_is_utter_crap;
31710 pnp_bios_is_utter_crap = 1;
31711@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31712 i += 4;
31713 p->fixup -= i;
31714 i += 4;
31715+
31716+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31717+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31718+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31719+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31720+#endif
31721+
31722 }
31723 }
31724
31725diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31726index e3ff27a..f38f7c0 100644
31727--- a/arch/x86/mm/fault.c
31728+++ b/arch/x86/mm/fault.c
31729@@ -13,12 +13,19 @@
31730 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31731 #include <linux/prefetch.h> /* prefetchw */
31732 #include <linux/context_tracking.h> /* exception_enter(), ... */
31733+#include <linux/unistd.h>
31734+#include <linux/compiler.h>
31735
31736 #include <asm/traps.h> /* dotraplinkage, ... */
31737 #include <asm/pgalloc.h> /* pgd_*(), ... */
31738 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31739 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31740 #include <asm/vsyscall.h> /* emulate_vsyscall */
31741+#include <asm/tlbflush.h>
31742+
31743+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31744+#include <asm/stacktrace.h>
31745+#endif
31746
31747 #define CREATE_TRACE_POINTS
31748 #include <asm/trace/exceptions.h>
31749@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31750 int ret = 0;
31751
31752 /* kprobe_running() needs smp_processor_id() */
31753- if (kprobes_built_in() && !user_mode_vm(regs)) {
31754+ if (kprobes_built_in() && !user_mode(regs)) {
31755 preempt_disable();
31756 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31757 ret = 1;
31758@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31759 return !instr_lo || (instr_lo>>1) == 1;
31760 case 0x00:
31761 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31762- if (probe_kernel_address(instr, opcode))
31763+ if (user_mode(regs)) {
31764+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31765+ return 0;
31766+ } else if (probe_kernel_address(instr, opcode))
31767 return 0;
31768
31769 *prefetch = (instr_lo == 0xF) &&
31770@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31771 while (instr < max_instr) {
31772 unsigned char opcode;
31773
31774- if (probe_kernel_address(instr, opcode))
31775+ if (user_mode(regs)) {
31776+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31777+ break;
31778+ } else if (probe_kernel_address(instr, opcode))
31779 break;
31780
31781 instr++;
31782@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31783 force_sig_info(si_signo, &info, tsk);
31784 }
31785
31786+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31787+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31788+#endif
31789+
31790+#ifdef CONFIG_PAX_EMUTRAMP
31791+static int pax_handle_fetch_fault(struct pt_regs *regs);
31792+#endif
31793+
31794+#ifdef CONFIG_PAX_PAGEEXEC
31795+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31796+{
31797+ pgd_t *pgd;
31798+ pud_t *pud;
31799+ pmd_t *pmd;
31800+
31801+ pgd = pgd_offset(mm, address);
31802+ if (!pgd_present(*pgd))
31803+ return NULL;
31804+ pud = pud_offset(pgd, address);
31805+ if (!pud_present(*pud))
31806+ return NULL;
31807+ pmd = pmd_offset(pud, address);
31808+ if (!pmd_present(*pmd))
31809+ return NULL;
31810+ return pmd;
31811+}
31812+#endif
31813+
31814 DEFINE_SPINLOCK(pgd_lock);
31815 LIST_HEAD(pgd_list);
31816
31817@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31818 for (address = VMALLOC_START & PMD_MASK;
31819 address >= TASK_SIZE && address < FIXADDR_TOP;
31820 address += PMD_SIZE) {
31821+
31822+#ifdef CONFIG_PAX_PER_CPU_PGD
31823+ unsigned long cpu;
31824+#else
31825 struct page *page;
31826+#endif
31827
31828 spin_lock(&pgd_lock);
31829+
31830+#ifdef CONFIG_PAX_PER_CPU_PGD
31831+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31832+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31833+ pmd_t *ret;
31834+
31835+ ret = vmalloc_sync_one(pgd, address);
31836+ if (!ret)
31837+ break;
31838+ pgd = get_cpu_pgd(cpu, kernel);
31839+#else
31840 list_for_each_entry(page, &pgd_list, lru) {
31841+ pgd_t *pgd;
31842 spinlock_t *pgt_lock;
31843 pmd_t *ret;
31844
31845@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31846 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31847
31848 spin_lock(pgt_lock);
31849- ret = vmalloc_sync_one(page_address(page), address);
31850+ pgd = page_address(page);
31851+#endif
31852+
31853+ ret = vmalloc_sync_one(pgd, address);
31854+
31855+#ifndef CONFIG_PAX_PER_CPU_PGD
31856 spin_unlock(pgt_lock);
31857+#endif
31858
31859 if (!ret)
31860 break;
31861@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31862 * an interrupt in the middle of a task switch..
31863 */
31864 pgd_paddr = read_cr3();
31865+
31866+#ifdef CONFIG_PAX_PER_CPU_PGD
31867+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31868+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31869+#endif
31870+
31871 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31872 if (!pmd_k)
31873 return -1;
31874@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31875 * happen within a race in page table update. In the later
31876 * case just flush:
31877 */
31878- pgd = pgd_offset(current->active_mm, address);
31879+
31880 pgd_ref = pgd_offset_k(address);
31881 if (pgd_none(*pgd_ref))
31882 return -1;
31883
31884+#ifdef CONFIG_PAX_PER_CPU_PGD
31885+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31886+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31887+ if (pgd_none(*pgd)) {
31888+ set_pgd(pgd, *pgd_ref);
31889+ arch_flush_lazy_mmu_mode();
31890+ } else {
31891+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31892+ }
31893+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31894+#else
31895+ pgd = pgd_offset(current->active_mm, address);
31896+#endif
31897+
31898 if (pgd_none(*pgd)) {
31899 set_pgd(pgd, *pgd_ref);
31900 arch_flush_lazy_mmu_mode();
31901@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31902 static int is_errata100(struct pt_regs *regs, unsigned long address)
31903 {
31904 #ifdef CONFIG_X86_64
31905- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31906+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31907 return 1;
31908 #endif
31909 return 0;
31910@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31911 }
31912
31913 static const char nx_warning[] = KERN_CRIT
31914-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31915+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31916 static const char smep_warning[] = KERN_CRIT
31917-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31918+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31919
31920 static void
31921 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31922@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31923 if (!oops_may_print())
31924 return;
31925
31926- if (error_code & PF_INSTR) {
31927+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31928 unsigned int level;
31929 pgd_t *pgd;
31930 pte_t *pte;
31931@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31932 pte = lookup_address_in_pgd(pgd, address, &level);
31933
31934 if (pte && pte_present(*pte) && !pte_exec(*pte))
31935- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31936+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31937 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31938 (pgd_flags(*pgd) & _PAGE_USER) &&
31939 (read_cr4() & X86_CR4_SMEP))
31940- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31941+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31942 }
31943
31944+#ifdef CONFIG_PAX_KERNEXEC
31945+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31946+ if (current->signal->curr_ip)
31947+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31948+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31949+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31950+ else
31951+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31952+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31953+ }
31954+#endif
31955+
31956 printk(KERN_ALERT "BUG: unable to handle kernel ");
31957 if (address < PAGE_SIZE)
31958 printk(KERN_CONT "NULL pointer dereference");
31959@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31960 return;
31961 }
31962 #endif
31963+
31964+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31965+ if (pax_is_fetch_fault(regs, error_code, address)) {
31966+
31967+#ifdef CONFIG_PAX_EMUTRAMP
31968+ switch (pax_handle_fetch_fault(regs)) {
31969+ case 2:
31970+ return;
31971+ }
31972+#endif
31973+
31974+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31975+ do_group_exit(SIGKILL);
31976+ }
31977+#endif
31978+
31979 /* Kernel addresses are always protection faults: */
31980 if (address >= TASK_SIZE)
31981 error_code |= PF_PROT;
31982@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
31983 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
31984 printk(KERN_ERR
31985 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
31986- tsk->comm, tsk->pid, address);
31987+ tsk->comm, task_pid_nr(tsk), address);
31988 code = BUS_MCEERR_AR;
31989 }
31990 #endif
31991@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
31992 return 1;
31993 }
31994
31995+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
31996+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
31997+{
31998+ pte_t *pte;
31999+ pmd_t *pmd;
32000+ spinlock_t *ptl;
32001+ unsigned char pte_mask;
32002+
32003+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32004+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32005+ return 0;
32006+
32007+ /* PaX: it's our fault, let's handle it if we can */
32008+
32009+ /* PaX: take a look at read faults before acquiring any locks */
32010+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32011+ /* instruction fetch attempt from a protected page in user mode */
32012+ up_read(&mm->mmap_sem);
32013+
32014+#ifdef CONFIG_PAX_EMUTRAMP
32015+ switch (pax_handle_fetch_fault(regs)) {
32016+ case 2:
32017+ return 1;
32018+ }
32019+#endif
32020+
32021+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32022+ do_group_exit(SIGKILL);
32023+ }
32024+
32025+ pmd = pax_get_pmd(mm, address);
32026+ if (unlikely(!pmd))
32027+ return 0;
32028+
32029+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32030+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32031+ pte_unmap_unlock(pte, ptl);
32032+ return 0;
32033+ }
32034+
32035+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32036+ /* write attempt to a protected page in user mode */
32037+ pte_unmap_unlock(pte, ptl);
32038+ return 0;
32039+ }
32040+
32041+#ifdef CONFIG_SMP
32042+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32043+#else
32044+ if (likely(address > get_limit(regs->cs)))
32045+#endif
32046+ {
32047+ set_pte(pte, pte_mkread(*pte));
32048+ __flush_tlb_one(address);
32049+ pte_unmap_unlock(pte, ptl);
32050+ up_read(&mm->mmap_sem);
32051+ return 1;
32052+ }
32053+
32054+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32055+
32056+ /*
32057+ * PaX: fill DTLB with user rights and retry
32058+ */
32059+ __asm__ __volatile__ (
32060+ "orb %2,(%1)\n"
32061+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32062+/*
32063+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32064+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32065+ * page fault when examined during a TLB load attempt. this is true not only
32066+ * for PTEs holding a non-present entry but also present entries that will
32067+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32068+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32069+ * for our target pages since their PTEs are simply not in the TLBs at all.
32070+
32071+ * the best thing in omitting it is that we gain around 15-20% speed in the
32072+ * fast path of the page fault handler and can get rid of tracing since we
32073+ * can no longer flush unintended entries.
32074+ */
32075+ "invlpg (%0)\n"
32076+#endif
32077+ __copyuser_seg"testb $0,(%0)\n"
32078+ "xorb %3,(%1)\n"
32079+ :
32080+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32081+ : "memory", "cc");
32082+ pte_unmap_unlock(pte, ptl);
32083+ up_read(&mm->mmap_sem);
32084+ return 1;
32085+}
32086+#endif
32087+
32088 /*
32089 * Handle a spurious fault caused by a stale TLB entry.
32090 *
32091@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32092 static inline int
32093 access_error(unsigned long error_code, struct vm_area_struct *vma)
32094 {
32095+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32096+ return 1;
32097+
32098 if (error_code & PF_WRITE) {
32099 /* write, present and write, not present: */
32100 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32101@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32102 if (error_code & PF_USER)
32103 return false;
32104
32105- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32106+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32107 return false;
32108
32109 return true;
32110@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32111 tsk = current;
32112 mm = tsk->mm;
32113
32114+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32115+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32116+ if (!search_exception_tables(regs->ip)) {
32117+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32118+ bad_area_nosemaphore(regs, error_code, address);
32119+ return;
32120+ }
32121+ if (address < pax_user_shadow_base) {
32122+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32123+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32124+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32125+ } else
32126+ address -= pax_user_shadow_base;
32127+ }
32128+#endif
32129+
32130 /*
32131 * Detect and handle instructions that would cause a page fault for
32132 * both a tracked kernel page and a userspace page.
32133@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32134 * User-mode registers count as a user access even for any
32135 * potential system fault or CPU buglet:
32136 */
32137- if (user_mode_vm(regs)) {
32138+ if (user_mode(regs)) {
32139 local_irq_enable();
32140 error_code |= PF_USER;
32141 flags |= FAULT_FLAG_USER;
32142@@ -1187,6 +1411,11 @@ retry:
32143 might_sleep();
32144 }
32145
32146+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32147+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32148+ return;
32149+#endif
32150+
32151 vma = find_vma(mm, address);
32152 if (unlikely(!vma)) {
32153 bad_area(regs, error_code, address);
32154@@ -1198,18 +1427,24 @@ retry:
32155 bad_area(regs, error_code, address);
32156 return;
32157 }
32158- if (error_code & PF_USER) {
32159- /*
32160- * Accessing the stack below %sp is always a bug.
32161- * The large cushion allows instructions like enter
32162- * and pusha to work. ("enter $65535, $31" pushes
32163- * 32 pointers and then decrements %sp by 65535.)
32164- */
32165- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32166- bad_area(regs, error_code, address);
32167- return;
32168- }
32169+ /*
32170+ * Accessing the stack below %sp is always a bug.
32171+ * The large cushion allows instructions like enter
32172+ * and pusha to work. ("enter $65535, $31" pushes
32173+ * 32 pointers and then decrements %sp by 65535.)
32174+ */
32175+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32176+ bad_area(regs, error_code, address);
32177+ return;
32178 }
32179+
32180+#ifdef CONFIG_PAX_SEGMEXEC
32181+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32182+ bad_area(regs, error_code, address);
32183+ return;
32184+ }
32185+#endif
32186+
32187 if (unlikely(expand_stack(vma, address))) {
32188 bad_area(regs, error_code, address);
32189 return;
32190@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32191 }
32192 NOKPROBE_SYMBOL(trace_do_page_fault);
32193 #endif /* CONFIG_TRACING */
32194+
32195+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32196+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32197+{
32198+ struct mm_struct *mm = current->mm;
32199+ unsigned long ip = regs->ip;
32200+
32201+ if (v8086_mode(regs))
32202+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32203+
32204+#ifdef CONFIG_PAX_PAGEEXEC
32205+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32206+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32207+ return true;
32208+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32209+ return true;
32210+ return false;
32211+ }
32212+#endif
32213+
32214+#ifdef CONFIG_PAX_SEGMEXEC
32215+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32216+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32217+ return true;
32218+ return false;
32219+ }
32220+#endif
32221+
32222+ return false;
32223+}
32224+#endif
32225+
32226+#ifdef CONFIG_PAX_EMUTRAMP
32227+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32228+{
32229+ int err;
32230+
32231+ do { /* PaX: libffi trampoline emulation */
32232+ unsigned char mov, jmp;
32233+ unsigned int addr1, addr2;
32234+
32235+#ifdef CONFIG_X86_64
32236+ if ((regs->ip + 9) >> 32)
32237+ break;
32238+#endif
32239+
32240+ err = get_user(mov, (unsigned char __user *)regs->ip);
32241+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32242+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32243+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32244+
32245+ if (err)
32246+ break;
32247+
32248+ if (mov == 0xB8 && jmp == 0xE9) {
32249+ regs->ax = addr1;
32250+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32251+ return 2;
32252+ }
32253+ } while (0);
32254+
32255+ do { /* PaX: gcc trampoline emulation #1 */
32256+ unsigned char mov1, mov2;
32257+ unsigned short jmp;
32258+ unsigned int addr1, addr2;
32259+
32260+#ifdef CONFIG_X86_64
32261+ if ((regs->ip + 11) >> 32)
32262+ break;
32263+#endif
32264+
32265+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32266+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32267+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32268+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32269+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32270+
32271+ if (err)
32272+ break;
32273+
32274+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32275+ regs->cx = addr1;
32276+ regs->ax = addr2;
32277+ regs->ip = addr2;
32278+ return 2;
32279+ }
32280+ } while (0);
32281+
32282+ do { /* PaX: gcc trampoline emulation #2 */
32283+ unsigned char mov, jmp;
32284+ unsigned int addr1, addr2;
32285+
32286+#ifdef CONFIG_X86_64
32287+ if ((regs->ip + 9) >> 32)
32288+ break;
32289+#endif
32290+
32291+ err = get_user(mov, (unsigned char __user *)regs->ip);
32292+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32293+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32294+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32295+
32296+ if (err)
32297+ break;
32298+
32299+ if (mov == 0xB9 && jmp == 0xE9) {
32300+ regs->cx = addr1;
32301+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32302+ return 2;
32303+ }
32304+ } while (0);
32305+
32306+ return 1; /* PaX in action */
32307+}
32308+
32309+#ifdef CONFIG_X86_64
32310+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32311+{
32312+ int err;
32313+
32314+ do { /* PaX: libffi trampoline emulation */
32315+ unsigned short mov1, mov2, jmp1;
32316+ unsigned char stcclc, jmp2;
32317+ unsigned long addr1, addr2;
32318+
32319+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32320+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32321+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32322+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32323+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32324+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32325+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32326+
32327+ if (err)
32328+ break;
32329+
32330+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32331+ regs->r11 = addr1;
32332+ regs->r10 = addr2;
32333+ if (stcclc == 0xF8)
32334+ regs->flags &= ~X86_EFLAGS_CF;
32335+ else
32336+ regs->flags |= X86_EFLAGS_CF;
32337+ regs->ip = addr1;
32338+ return 2;
32339+ }
32340+ } while (0);
32341+
32342+ do { /* PaX: gcc trampoline emulation #1 */
32343+ unsigned short mov1, mov2, jmp1;
32344+ unsigned char jmp2;
32345+ unsigned int addr1;
32346+ unsigned long addr2;
32347+
32348+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32349+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32350+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32351+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32352+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32353+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32354+
32355+ if (err)
32356+ break;
32357+
32358+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32359+ regs->r11 = addr1;
32360+ regs->r10 = addr2;
32361+ regs->ip = addr1;
32362+ return 2;
32363+ }
32364+ } while (0);
32365+
32366+ do { /* PaX: gcc trampoline emulation #2 */
32367+ unsigned short mov1, mov2, jmp1;
32368+ unsigned char jmp2;
32369+ unsigned long addr1, addr2;
32370+
32371+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32372+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32373+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32374+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32375+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32376+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32377+
32378+ if (err)
32379+ break;
32380+
32381+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32382+ regs->r11 = addr1;
32383+ regs->r10 = addr2;
32384+ regs->ip = addr1;
32385+ return 2;
32386+ }
32387+ } while (0);
32388+
32389+ return 1; /* PaX in action */
32390+}
32391+#endif
32392+
32393+/*
32394+ * PaX: decide what to do with offenders (regs->ip = fault address)
32395+ *
32396+ * returns 1 when task should be killed
32397+ * 2 when gcc trampoline was detected
32398+ */
32399+static int pax_handle_fetch_fault(struct pt_regs *regs)
32400+{
32401+ if (v8086_mode(regs))
32402+ return 1;
32403+
32404+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32405+ return 1;
32406+
32407+#ifdef CONFIG_X86_32
32408+ return pax_handle_fetch_fault_32(regs);
32409+#else
32410+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32411+ return pax_handle_fetch_fault_32(regs);
32412+ else
32413+ return pax_handle_fetch_fault_64(regs);
32414+#endif
32415+}
32416+#endif
32417+
32418+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32419+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32420+{
32421+ long i;
32422+
32423+ printk(KERN_ERR "PAX: bytes at PC: ");
32424+ for (i = 0; i < 20; i++) {
32425+ unsigned char c;
32426+ if (get_user(c, (unsigned char __force_user *)pc+i))
32427+ printk(KERN_CONT "?? ");
32428+ else
32429+ printk(KERN_CONT "%02x ", c);
32430+ }
32431+ printk("\n");
32432+
32433+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32434+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32435+ unsigned long c;
32436+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32437+#ifdef CONFIG_X86_32
32438+ printk(KERN_CONT "???????? ");
32439+#else
32440+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32441+ printk(KERN_CONT "???????? ???????? ");
32442+ else
32443+ printk(KERN_CONT "???????????????? ");
32444+#endif
32445+ } else {
32446+#ifdef CONFIG_X86_64
32447+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32448+ printk(KERN_CONT "%08x ", (unsigned int)c);
32449+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32450+ } else
32451+#endif
32452+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32453+ }
32454+ }
32455+ printk("\n");
32456+}
32457+#endif
32458+
32459+/**
32460+ * probe_kernel_write(): safely attempt to write to a location
32461+ * @dst: address to write to
32462+ * @src: pointer to the data that shall be written
32463+ * @size: size of the data chunk
32464+ *
32465+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32466+ * happens, handle that and return -EFAULT.
32467+ */
32468+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32469+{
32470+ long ret;
32471+ mm_segment_t old_fs = get_fs();
32472+
32473+ set_fs(KERNEL_DS);
32474+ pagefault_disable();
32475+ pax_open_kernel();
32476+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32477+ pax_close_kernel();
32478+ pagefault_enable();
32479+ set_fs(old_fs);
32480+
32481+ return ret ? -EFAULT : 0;
32482+}
32483diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32484index 224b142..c2c9423 100644
32485--- a/arch/x86/mm/gup.c
32486+++ b/arch/x86/mm/gup.c
32487@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32488 addr = start;
32489 len = (unsigned long) nr_pages << PAGE_SHIFT;
32490 end = start + len;
32491- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32492+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32493 (void __user *)start, len)))
32494 return 0;
32495
32496@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32497 goto slow_irqon;
32498 #endif
32499
32500+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32501+ (void __user *)start, len)))
32502+ return 0;
32503+
32504 /*
32505 * XXX: batch / limit 'nr', to avoid large irq off latency
32506 * needs some instrumenting to determine the common sizes used by
32507diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32508index 4500142..53a363c 100644
32509--- a/arch/x86/mm/highmem_32.c
32510+++ b/arch/x86/mm/highmem_32.c
32511@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32512 idx = type + KM_TYPE_NR*smp_processor_id();
32513 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32514 BUG_ON(!pte_none(*(kmap_pte-idx)));
32515+
32516+ pax_open_kernel();
32517 set_pte(kmap_pte-idx, mk_pte(page, prot));
32518+ pax_close_kernel();
32519+
32520 arch_flush_lazy_mmu_mode();
32521
32522 return (void *)vaddr;
32523diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32524index 006cc91..bf05a83 100644
32525--- a/arch/x86/mm/hugetlbpage.c
32526+++ b/arch/x86/mm/hugetlbpage.c
32527@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
32528 #ifdef CONFIG_HUGETLB_PAGE
32529 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32530 unsigned long addr, unsigned long len,
32531- unsigned long pgoff, unsigned long flags)
32532+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32533 {
32534 struct hstate *h = hstate_file(file);
32535 struct vm_unmapped_area_info info;
32536-
32537+
32538 info.flags = 0;
32539 info.length = len;
32540 info.low_limit = current->mm->mmap_legacy_base;
32541 info.high_limit = TASK_SIZE;
32542 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32543 info.align_offset = 0;
32544+ info.threadstack_offset = offset;
32545 return vm_unmapped_area(&info);
32546 }
32547
32548 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32549 unsigned long addr0, unsigned long len,
32550- unsigned long pgoff, unsigned long flags)
32551+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32552 {
32553 struct hstate *h = hstate_file(file);
32554 struct vm_unmapped_area_info info;
32555@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32556 info.high_limit = current->mm->mmap_base;
32557 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32558 info.align_offset = 0;
32559+ info.threadstack_offset = offset;
32560 addr = vm_unmapped_area(&info);
32561
32562 /*
32563@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32564 VM_BUG_ON(addr != -ENOMEM);
32565 info.flags = 0;
32566 info.low_limit = TASK_UNMAPPED_BASE;
32567+
32568+#ifdef CONFIG_PAX_RANDMMAP
32569+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32570+ info.low_limit += current->mm->delta_mmap;
32571+#endif
32572+
32573 info.high_limit = TASK_SIZE;
32574 addr = vm_unmapped_area(&info);
32575 }
32576@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32577 struct hstate *h = hstate_file(file);
32578 struct mm_struct *mm = current->mm;
32579 struct vm_area_struct *vma;
32580+ unsigned long pax_task_size = TASK_SIZE;
32581+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32582
32583 if (len & ~huge_page_mask(h))
32584 return -EINVAL;
32585- if (len > TASK_SIZE)
32586+
32587+#ifdef CONFIG_PAX_SEGMEXEC
32588+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32589+ pax_task_size = SEGMEXEC_TASK_SIZE;
32590+#endif
32591+
32592+ pax_task_size -= PAGE_SIZE;
32593+
32594+ if (len > pax_task_size)
32595 return -ENOMEM;
32596
32597 if (flags & MAP_FIXED) {
32598@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32599 return addr;
32600 }
32601
32602+#ifdef CONFIG_PAX_RANDMMAP
32603+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32604+#endif
32605+
32606 if (addr) {
32607 addr = ALIGN(addr, huge_page_size(h));
32608 vma = find_vma(mm, addr);
32609- if (TASK_SIZE - len >= addr &&
32610- (!vma || addr + len <= vma->vm_start))
32611+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32612 return addr;
32613 }
32614 if (mm->get_unmapped_area == arch_get_unmapped_area)
32615 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32616- pgoff, flags);
32617+ pgoff, flags, offset);
32618 else
32619 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32620- pgoff, flags);
32621+ pgoff, flags, offset);
32622 }
32623 #endif /* CONFIG_HUGETLB_PAGE */
32624
32625diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32626index 079c3b6..7069023 100644
32627--- a/arch/x86/mm/init.c
32628+++ b/arch/x86/mm/init.c
32629@@ -4,6 +4,7 @@
32630 #include <linux/swap.h>
32631 #include <linux/memblock.h>
32632 #include <linux/bootmem.h> /* for max_low_pfn */
32633+#include <linux/tboot.h>
32634
32635 #include <asm/cacheflush.h>
32636 #include <asm/e820.h>
32637@@ -17,6 +18,8 @@
32638 #include <asm/proto.h>
32639 #include <asm/dma.h> /* for MAX_DMA_PFN */
32640 #include <asm/microcode.h>
32641+#include <asm/desc.h>
32642+#include <asm/bios_ebda.h>
32643
32644 /*
32645 * We need to define the tracepoints somewhere, and tlb.c
32646@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
32647 early_ioremap_page_table_range_init();
32648 #endif
32649
32650+#ifdef CONFIG_PAX_PER_CPU_PGD
32651+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32652+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32653+ KERNEL_PGD_PTRS);
32654+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32655+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32656+ KERNEL_PGD_PTRS);
32657+ load_cr3(get_cpu_pgd(0, kernel));
32658+#else
32659 load_cr3(swapper_pg_dir);
32660+#endif
32661+
32662 __flush_tlb_all();
32663
32664 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32665@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
32666 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32667 * mmio resources as well as potential bios/acpi data regions.
32668 */
32669+
32670+#ifdef CONFIG_GRKERNSEC_KMEM
32671+static unsigned int ebda_start __read_only;
32672+static unsigned int ebda_end __read_only;
32673+#endif
32674+
32675 int devmem_is_allowed(unsigned long pagenr)
32676 {
32677- if (pagenr < 256)
32678+#ifdef CONFIG_GRKERNSEC_KMEM
32679+ /* allow BDA */
32680+ if (!pagenr)
32681 return 1;
32682+ /* allow EBDA */
32683+ if (pagenr >= ebda_start && pagenr < ebda_end)
32684+ return 1;
32685+ /* if tboot is in use, allow access to its hardcoded serial log range */
32686+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32687+ return 1;
32688+#else
32689+ if (!pagenr)
32690+ return 1;
32691+#ifdef CONFIG_VM86
32692+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32693+ return 1;
32694+#endif
32695+#endif
32696+
32697+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32698+ return 1;
32699+#ifdef CONFIG_GRKERNSEC_KMEM
32700+ /* throw out everything else below 1MB */
32701+ if (pagenr <= 256)
32702+ return 0;
32703+#endif
32704 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32705 return 0;
32706 if (!page_is_ram(pagenr))
32707@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32708 #endif
32709 }
32710
32711+#ifdef CONFIG_GRKERNSEC_KMEM
32712+static inline void gr_init_ebda(void)
32713+{
32714+ unsigned int ebda_addr;
32715+ unsigned int ebda_size = 0;
32716+
32717+ ebda_addr = get_bios_ebda();
32718+ if (ebda_addr) {
32719+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32720+ ebda_size <<= 10;
32721+ }
32722+ if (ebda_addr && ebda_size) {
32723+ ebda_start = ebda_addr >> PAGE_SHIFT;
32724+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32725+ } else {
32726+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32727+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32728+ }
32729+}
32730+#else
32731+static inline void gr_init_ebda(void) { }
32732+#endif
32733+
32734 void free_initmem(void)
32735 {
32736+#ifdef CONFIG_PAX_KERNEXEC
32737+#ifdef CONFIG_X86_32
32738+ /* PaX: limit KERNEL_CS to actual size */
32739+ unsigned long addr, limit;
32740+ struct desc_struct d;
32741+ int cpu;
32742+#else
32743+ pgd_t *pgd;
32744+ pud_t *pud;
32745+ pmd_t *pmd;
32746+ unsigned long addr, end;
32747+#endif
32748+#endif
32749+
32750+ gr_init_ebda();
32751+
32752+#ifdef CONFIG_PAX_KERNEXEC
32753+#ifdef CONFIG_X86_32
32754+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32755+ limit = (limit - 1UL) >> PAGE_SHIFT;
32756+
32757+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32758+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32759+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32760+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32761+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32762+ }
32763+
32764+ /* PaX: make KERNEL_CS read-only */
32765+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32766+ if (!paravirt_enabled())
32767+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32768+/*
32769+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32770+ pgd = pgd_offset_k(addr);
32771+ pud = pud_offset(pgd, addr);
32772+ pmd = pmd_offset(pud, addr);
32773+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32774+ }
32775+*/
32776+#ifdef CONFIG_X86_PAE
32777+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32778+/*
32779+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32780+ pgd = pgd_offset_k(addr);
32781+ pud = pud_offset(pgd, addr);
32782+ pmd = pmd_offset(pud, addr);
32783+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32784+ }
32785+*/
32786+#endif
32787+
32788+#ifdef CONFIG_MODULES
32789+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32790+#endif
32791+
32792+#else
32793+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32794+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32795+ pgd = pgd_offset_k(addr);
32796+ pud = pud_offset(pgd, addr);
32797+ pmd = pmd_offset(pud, addr);
32798+ if (!pmd_present(*pmd))
32799+ continue;
32800+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32801+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32802+ else
32803+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32804+ }
32805+
32806+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32807+ end = addr + KERNEL_IMAGE_SIZE;
32808+ for (; addr < end; addr += PMD_SIZE) {
32809+ pgd = pgd_offset_k(addr);
32810+ pud = pud_offset(pgd, addr);
32811+ pmd = pmd_offset(pud, addr);
32812+ if (!pmd_present(*pmd))
32813+ continue;
32814+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32815+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32816+ }
32817+#endif
32818+
32819+ flush_tlb_all();
32820+#endif
32821+
32822 free_init_pages("unused kernel",
32823 (unsigned long)(&__init_begin),
32824 (unsigned long)(&__init_end));
32825diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32826index c8140e1..59257fc 100644
32827--- a/arch/x86/mm/init_32.c
32828+++ b/arch/x86/mm/init_32.c
32829@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32830 bool __read_mostly __vmalloc_start_set = false;
32831
32832 /*
32833- * Creates a middle page table and puts a pointer to it in the
32834- * given global directory entry. This only returns the gd entry
32835- * in non-PAE compilation mode, since the middle layer is folded.
32836- */
32837-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32838-{
32839- pud_t *pud;
32840- pmd_t *pmd_table;
32841-
32842-#ifdef CONFIG_X86_PAE
32843- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32844- pmd_table = (pmd_t *)alloc_low_page();
32845- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32846- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32847- pud = pud_offset(pgd, 0);
32848- BUG_ON(pmd_table != pmd_offset(pud, 0));
32849-
32850- return pmd_table;
32851- }
32852-#endif
32853- pud = pud_offset(pgd, 0);
32854- pmd_table = pmd_offset(pud, 0);
32855-
32856- return pmd_table;
32857-}
32858-
32859-/*
32860 * Create a page table and place a pointer to it in a middle page
32861 * directory entry:
32862 */
32863@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32864 pte_t *page_table = (pte_t *)alloc_low_page();
32865
32866 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32867+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32868+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32869+#else
32870 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32871+#endif
32872 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32873 }
32874
32875 return pte_offset_kernel(pmd, 0);
32876 }
32877
32878+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32879+{
32880+ pud_t *pud;
32881+ pmd_t *pmd_table;
32882+
32883+ pud = pud_offset(pgd, 0);
32884+ pmd_table = pmd_offset(pud, 0);
32885+
32886+ return pmd_table;
32887+}
32888+
32889 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32890 {
32891 int pgd_idx = pgd_index(vaddr);
32892@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32893 int pgd_idx, pmd_idx;
32894 unsigned long vaddr;
32895 pgd_t *pgd;
32896+ pud_t *pud;
32897 pmd_t *pmd;
32898 pte_t *pte = NULL;
32899 unsigned long count = page_table_range_init_count(start, end);
32900@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32901 pgd = pgd_base + pgd_idx;
32902
32903 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32904- pmd = one_md_table_init(pgd);
32905- pmd = pmd + pmd_index(vaddr);
32906+ pud = pud_offset(pgd, vaddr);
32907+ pmd = pmd_offset(pud, vaddr);
32908+
32909+#ifdef CONFIG_X86_PAE
32910+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32911+#endif
32912+
32913 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32914 pmd++, pmd_idx++) {
32915 pte = page_table_kmap_check(one_page_table_init(pmd),
32916@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32917 }
32918 }
32919
32920-static inline int is_kernel_text(unsigned long addr)
32921+static inline int is_kernel_text(unsigned long start, unsigned long end)
32922 {
32923- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32924- return 1;
32925- return 0;
32926+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32927+ end <= ktla_ktva((unsigned long)_stext)) &&
32928+ (start >= ktla_ktva((unsigned long)_einittext) ||
32929+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32930+
32931+#ifdef CONFIG_ACPI_SLEEP
32932+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32933+#endif
32934+
32935+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32936+ return 0;
32937+ return 1;
32938 }
32939
32940 /*
32941@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32942 unsigned long last_map_addr = end;
32943 unsigned long start_pfn, end_pfn;
32944 pgd_t *pgd_base = swapper_pg_dir;
32945- int pgd_idx, pmd_idx, pte_ofs;
32946+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32947 unsigned long pfn;
32948 pgd_t *pgd;
32949+ pud_t *pud;
32950 pmd_t *pmd;
32951 pte_t *pte;
32952 unsigned pages_2m, pages_4k;
32953@@ -291,8 +295,13 @@ repeat:
32954 pfn = start_pfn;
32955 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32956 pgd = pgd_base + pgd_idx;
32957- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32958- pmd = one_md_table_init(pgd);
32959+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32960+ pud = pud_offset(pgd, 0);
32961+ pmd = pmd_offset(pud, 0);
32962+
32963+#ifdef CONFIG_X86_PAE
32964+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32965+#endif
32966
32967 if (pfn >= end_pfn)
32968 continue;
32969@@ -304,14 +313,13 @@ repeat:
32970 #endif
32971 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32972 pmd++, pmd_idx++) {
32973- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32974+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32975
32976 /*
32977 * Map with big pages if possible, otherwise
32978 * create normal page tables:
32979 */
32980 if (use_pse) {
32981- unsigned int addr2;
32982 pgprot_t prot = PAGE_KERNEL_LARGE;
32983 /*
32984 * first pass will use the same initial
32985@@ -322,11 +330,7 @@ repeat:
32986 _PAGE_PSE);
32987
32988 pfn &= PMD_MASK >> PAGE_SHIFT;
32989- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
32990- PAGE_OFFSET + PAGE_SIZE-1;
32991-
32992- if (is_kernel_text(addr) ||
32993- is_kernel_text(addr2))
32994+ if (is_kernel_text(address, address + PMD_SIZE))
32995 prot = PAGE_KERNEL_LARGE_EXEC;
32996
32997 pages_2m++;
32998@@ -343,7 +347,7 @@ repeat:
32999 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33000 pte += pte_ofs;
33001 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33002- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33003+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33004 pgprot_t prot = PAGE_KERNEL;
33005 /*
33006 * first pass will use the same initial
33007@@ -351,7 +355,7 @@ repeat:
33008 */
33009 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33010
33011- if (is_kernel_text(addr))
33012+ if (is_kernel_text(address, address + PAGE_SIZE))
33013 prot = PAGE_KERNEL_EXEC;
33014
33015 pages_4k++;
33016@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33017
33018 pud = pud_offset(pgd, va);
33019 pmd = pmd_offset(pud, va);
33020- if (!pmd_present(*pmd))
33021+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33022 break;
33023
33024 /* should not be large page here */
33025@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33026
33027 static void __init pagetable_init(void)
33028 {
33029- pgd_t *pgd_base = swapper_pg_dir;
33030-
33031- permanent_kmaps_init(pgd_base);
33032+ permanent_kmaps_init(swapper_pg_dir);
33033 }
33034
33035-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33036+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33037 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33038
33039 /* user-defined highmem size */
33040@@ -787,10 +789,10 @@ void __init mem_init(void)
33041 ((unsigned long)&__init_end -
33042 (unsigned long)&__init_begin) >> 10,
33043
33044- (unsigned long)&_etext, (unsigned long)&_edata,
33045- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33046+ (unsigned long)&_sdata, (unsigned long)&_edata,
33047+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33048
33049- (unsigned long)&_text, (unsigned long)&_etext,
33050+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33051 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33052
33053 /*
33054@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33055 if (!kernel_set_to_readonly)
33056 return;
33057
33058+ start = ktla_ktva(start);
33059 pr_debug("Set kernel text: %lx - %lx for read write\n",
33060 start, start+size);
33061
33062@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33063 if (!kernel_set_to_readonly)
33064 return;
33065
33066+ start = ktla_ktva(start);
33067 pr_debug("Set kernel text: %lx - %lx for read only\n",
33068 start, start+size);
33069
33070@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33071 unsigned long start = PFN_ALIGN(_text);
33072 unsigned long size = PFN_ALIGN(_etext) - start;
33073
33074+ start = ktla_ktva(start);
33075 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33076 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33077 size >> 10);
33078diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33079index 30eb05a..ae671ac 100644
33080--- a/arch/x86/mm/init_64.c
33081+++ b/arch/x86/mm/init_64.c
33082@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33083 * around without checking the pgd every time.
33084 */
33085
33086-pteval_t __supported_pte_mask __read_mostly = ~0;
33087+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33088 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33089
33090 int force_personality32;
33091@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33092
33093 for (address = start; address <= end; address += PGDIR_SIZE) {
33094 const pgd_t *pgd_ref = pgd_offset_k(address);
33095+
33096+#ifdef CONFIG_PAX_PER_CPU_PGD
33097+ unsigned long cpu;
33098+#else
33099 struct page *page;
33100+#endif
33101
33102 /*
33103 * When it is called after memory hot remove, pgd_none()
33104@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33105 continue;
33106
33107 spin_lock(&pgd_lock);
33108+
33109+#ifdef CONFIG_PAX_PER_CPU_PGD
33110+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33111+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33112+
33113+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33114+ BUG_ON(pgd_page_vaddr(*pgd)
33115+ != pgd_page_vaddr(*pgd_ref));
33116+
33117+ if (removed) {
33118+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33119+ pgd_clear(pgd);
33120+ } else {
33121+ if (pgd_none(*pgd))
33122+ set_pgd(pgd, *pgd_ref);
33123+ }
33124+
33125+ pgd = pgd_offset_cpu(cpu, kernel, address);
33126+#else
33127 list_for_each_entry(page, &pgd_list, lru) {
33128 pgd_t *pgd;
33129 spinlock_t *pgt_lock;
33130@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33131 /* the pgt_lock only for Xen */
33132 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33133 spin_lock(pgt_lock);
33134+#endif
33135
33136 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33137 BUG_ON(pgd_page_vaddr(*pgd)
33138@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33139 set_pgd(pgd, *pgd_ref);
33140 }
33141
33142+#ifndef CONFIG_PAX_PER_CPU_PGD
33143 spin_unlock(pgt_lock);
33144+#endif
33145+
33146 }
33147 spin_unlock(&pgd_lock);
33148 }
33149@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33150 {
33151 if (pgd_none(*pgd)) {
33152 pud_t *pud = (pud_t *)spp_getpage();
33153- pgd_populate(&init_mm, pgd, pud);
33154+ pgd_populate_kernel(&init_mm, pgd, pud);
33155 if (pud != pud_offset(pgd, 0))
33156 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33157 pud, pud_offset(pgd, 0));
33158@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33159 {
33160 if (pud_none(*pud)) {
33161 pmd_t *pmd = (pmd_t *) spp_getpage();
33162- pud_populate(&init_mm, pud, pmd);
33163+ pud_populate_kernel(&init_mm, pud, pmd);
33164 if (pmd != pmd_offset(pud, 0))
33165 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33166 pmd, pmd_offset(pud, 0));
33167@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33168 pmd = fill_pmd(pud, vaddr);
33169 pte = fill_pte(pmd, vaddr);
33170
33171+ pax_open_kernel();
33172 set_pte(pte, new_pte);
33173+ pax_close_kernel();
33174
33175 /*
33176 * It's enough to flush this one mapping.
33177@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33178 pgd = pgd_offset_k((unsigned long)__va(phys));
33179 if (pgd_none(*pgd)) {
33180 pud = (pud_t *) spp_getpage();
33181- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33182- _PAGE_USER));
33183+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33184 }
33185 pud = pud_offset(pgd, (unsigned long)__va(phys));
33186 if (pud_none(*pud)) {
33187 pmd = (pmd_t *) spp_getpage();
33188- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33189- _PAGE_USER));
33190+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33191 }
33192 pmd = pmd_offset(pud, phys);
33193 BUG_ON(!pmd_none(*pmd));
33194@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33195 prot);
33196
33197 spin_lock(&init_mm.page_table_lock);
33198- pud_populate(&init_mm, pud, pmd);
33199+ pud_populate_kernel(&init_mm, pud, pmd);
33200 spin_unlock(&init_mm.page_table_lock);
33201 }
33202 __flush_tlb_all();
33203@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33204 page_size_mask);
33205
33206 spin_lock(&init_mm.page_table_lock);
33207- pgd_populate(&init_mm, pgd, pud);
33208+ pgd_populate_kernel(&init_mm, pgd, pud);
33209 spin_unlock(&init_mm.page_table_lock);
33210 pgd_changed = true;
33211 }
33212diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33213index 9ca35fc..4b2b7b7 100644
33214--- a/arch/x86/mm/iomap_32.c
33215+++ b/arch/x86/mm/iomap_32.c
33216@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33217 type = kmap_atomic_idx_push();
33218 idx = type + KM_TYPE_NR * smp_processor_id();
33219 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33220+
33221+ pax_open_kernel();
33222 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33223+ pax_close_kernel();
33224+
33225 arch_flush_lazy_mmu_mode();
33226
33227 return (void *)vaddr;
33228diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33229index fdf617c..b9e85bc 100644
33230--- a/arch/x86/mm/ioremap.c
33231+++ b/arch/x86/mm/ioremap.c
33232@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33233 unsigned long i;
33234
33235 for (i = 0; i < nr_pages; ++i)
33236- if (pfn_valid(start_pfn + i) &&
33237- !PageReserved(pfn_to_page(start_pfn + i)))
33238+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33239+ !PageReserved(pfn_to_page(start_pfn + i))))
33240 return 1;
33241
33242 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33243@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33244 *
33245 * Caller must ensure there is only one unmapping for the same pointer.
33246 */
33247-void iounmap(volatile void __iomem *addr)
33248+void iounmap(const volatile void __iomem *addr)
33249 {
33250 struct vm_struct *p, *o;
33251
33252@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33253 */
33254 void *xlate_dev_mem_ptr(phys_addr_t phys)
33255 {
33256- void *addr;
33257- unsigned long start = phys & PAGE_MASK;
33258-
33259 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33260- if (page_is_ram(start >> PAGE_SHIFT))
33261+ if (page_is_ram(phys >> PAGE_SHIFT))
33262+#ifdef CONFIG_HIGHMEM
33263+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33264+#endif
33265 return __va(phys);
33266
33267- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33268- if (addr)
33269- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33270-
33271- return addr;
33272+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33273 }
33274
33275 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33276 {
33277 if (page_is_ram(phys >> PAGE_SHIFT))
33278+#ifdef CONFIG_HIGHMEM
33279+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33280+#endif
33281 return;
33282
33283 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33284 return;
33285 }
33286
33287-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33288+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33289
33290 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33291 {
33292@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33293 early_ioremap_setup();
33294
33295 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33296- memset(bm_pte, 0, sizeof(bm_pte));
33297- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33298+ pmd_populate_user(&init_mm, pmd, bm_pte);
33299
33300 /*
33301 * The boot-ioremap range spans multiple pmds, for which
33302diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33303index b4f2e7e..96c9c3e 100644
33304--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33305+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33306@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33307 * memory (e.g. tracked pages)? For now, we need this to avoid
33308 * invoking kmemcheck for PnP BIOS calls.
33309 */
33310- if (regs->flags & X86_VM_MASK)
33311+ if (v8086_mode(regs))
33312 return false;
33313- if (regs->cs != __KERNEL_CS)
33314+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33315 return false;
33316
33317 pte = kmemcheck_pte_lookup(address);
33318diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33319index df4552b..12c129c 100644
33320--- a/arch/x86/mm/mmap.c
33321+++ b/arch/x86/mm/mmap.c
33322@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33323 * Leave an at least ~128 MB hole with possible stack randomization.
33324 */
33325 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33326-#define MAX_GAP (TASK_SIZE/6*5)
33327+#define MAX_GAP (pax_task_size/6*5)
33328
33329 static int mmap_is_legacy(void)
33330 {
33331@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33332 return rnd << PAGE_SHIFT;
33333 }
33334
33335-static unsigned long mmap_base(void)
33336+static unsigned long mmap_base(struct mm_struct *mm)
33337 {
33338 unsigned long gap = rlimit(RLIMIT_STACK);
33339+ unsigned long pax_task_size = TASK_SIZE;
33340+
33341+#ifdef CONFIG_PAX_SEGMEXEC
33342+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33343+ pax_task_size = SEGMEXEC_TASK_SIZE;
33344+#endif
33345
33346 if (gap < MIN_GAP)
33347 gap = MIN_GAP;
33348 else if (gap > MAX_GAP)
33349 gap = MAX_GAP;
33350
33351- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33352+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33353 }
33354
33355 /*
33356 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33357 * does, but not when emulating X86_32
33358 */
33359-static unsigned long mmap_legacy_base(void)
33360+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33361 {
33362- if (mmap_is_ia32())
33363+ if (mmap_is_ia32()) {
33364+
33365+#ifdef CONFIG_PAX_SEGMEXEC
33366+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33367+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33368+ else
33369+#endif
33370+
33371 return TASK_UNMAPPED_BASE;
33372- else
33373+ } else
33374 return TASK_UNMAPPED_BASE + mmap_rnd();
33375 }
33376
33377@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33378 */
33379 void arch_pick_mmap_layout(struct mm_struct *mm)
33380 {
33381- mm->mmap_legacy_base = mmap_legacy_base();
33382- mm->mmap_base = mmap_base();
33383+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33384+ mm->mmap_base = mmap_base(mm);
33385+
33386+#ifdef CONFIG_PAX_RANDMMAP
33387+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33388+ mm->mmap_legacy_base += mm->delta_mmap;
33389+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33390+ }
33391+#endif
33392
33393 if (mmap_is_legacy()) {
33394 mm->mmap_base = mm->mmap_legacy_base;
33395diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33396index 0057a7a..95c7edd 100644
33397--- a/arch/x86/mm/mmio-mod.c
33398+++ b/arch/x86/mm/mmio-mod.c
33399@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33400 break;
33401 default:
33402 {
33403- unsigned char *ip = (unsigned char *)instptr;
33404+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33405 my_trace->opcode = MMIO_UNKNOWN_OP;
33406 my_trace->width = 0;
33407 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33408@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33409 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33410 void __iomem *addr)
33411 {
33412- static atomic_t next_id;
33413+ static atomic_unchecked_t next_id;
33414 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33415 /* These are page-unaligned. */
33416 struct mmiotrace_map map = {
33417@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33418 .private = trace
33419 },
33420 .phys = offset,
33421- .id = atomic_inc_return(&next_id)
33422+ .id = atomic_inc_return_unchecked(&next_id)
33423 };
33424 map.map_id = trace->id;
33425
33426@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33427 ioremap_trace_core(offset, size, addr);
33428 }
33429
33430-static void iounmap_trace_core(volatile void __iomem *addr)
33431+static void iounmap_trace_core(const volatile void __iomem *addr)
33432 {
33433 struct mmiotrace_map map = {
33434 .phys = 0,
33435@@ -328,7 +328,7 @@ not_enabled:
33436 }
33437 }
33438
33439-void mmiotrace_iounmap(volatile void __iomem *addr)
33440+void mmiotrace_iounmap(const volatile void __iomem *addr)
33441 {
33442 might_sleep();
33443 if (is_enabled()) /* recheck and proper locking in *_core() */
33444diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33445index 1a88370..3f598b5 100644
33446--- a/arch/x86/mm/numa.c
33447+++ b/arch/x86/mm/numa.c
33448@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33449 }
33450 }
33451
33452-static int __init numa_register_memblks(struct numa_meminfo *mi)
33453+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33454 {
33455 unsigned long uninitialized_var(pfn_align);
33456 int i, nid;
33457diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33458index 536ea2f..f42c293 100644
33459--- a/arch/x86/mm/pageattr.c
33460+++ b/arch/x86/mm/pageattr.c
33461@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33462 */
33463 #ifdef CONFIG_PCI_BIOS
33464 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33465- pgprot_val(forbidden) |= _PAGE_NX;
33466+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33467 #endif
33468
33469 /*
33470@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33471 * Does not cover __inittext since that is gone later on. On
33472 * 64bit we do not enforce !NX on the low mapping
33473 */
33474- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33475- pgprot_val(forbidden) |= _PAGE_NX;
33476+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33477+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33478
33479+#ifdef CONFIG_DEBUG_RODATA
33480 /*
33481 * The .rodata section needs to be read-only. Using the pfn
33482 * catches all aliases.
33483@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33484 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33485 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33486 pgprot_val(forbidden) |= _PAGE_RW;
33487+#endif
33488
33489 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33490 /*
33491@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33492 }
33493 #endif
33494
33495+#ifdef CONFIG_PAX_KERNEXEC
33496+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33497+ pgprot_val(forbidden) |= _PAGE_RW;
33498+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33499+ }
33500+#endif
33501+
33502 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33503
33504 return prot;
33505@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33506 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33507 {
33508 /* change init_mm */
33509+ pax_open_kernel();
33510 set_pte_atomic(kpte, pte);
33511+
33512 #ifdef CONFIG_X86_32
33513 if (!SHARED_KERNEL_PMD) {
33514+
33515+#ifdef CONFIG_PAX_PER_CPU_PGD
33516+ unsigned long cpu;
33517+#else
33518 struct page *page;
33519+#endif
33520
33521+#ifdef CONFIG_PAX_PER_CPU_PGD
33522+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33523+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33524+#else
33525 list_for_each_entry(page, &pgd_list, lru) {
33526- pgd_t *pgd;
33527+ pgd_t *pgd = (pgd_t *)page_address(page);
33528+#endif
33529+
33530 pud_t *pud;
33531 pmd_t *pmd;
33532
33533- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33534+ pgd += pgd_index(address);
33535 pud = pud_offset(pgd, address);
33536 pmd = pmd_offset(pud, address);
33537 set_pte_atomic((pte_t *)pmd, pte);
33538 }
33539 }
33540 #endif
33541+ pax_close_kernel();
33542 }
33543
33544 static int
33545diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33546index 7ac6869..c0ba541 100644
33547--- a/arch/x86/mm/pat.c
33548+++ b/arch/x86/mm/pat.c
33549@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33550 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33551
33552 if (pg_flags == _PGMT_DEFAULT)
33553- return -1;
33554+ return _PAGE_CACHE_MODE_NUM;
33555 else if (pg_flags == _PGMT_WC)
33556 return _PAGE_CACHE_MODE_WC;
33557 else if (pg_flags == _PGMT_UC_MINUS)
33558@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33559
33560 page = pfn_to_page(pfn);
33561 type = get_page_memtype(page);
33562- if (type != -1) {
33563+ if (type != _PAGE_CACHE_MODE_NUM) {
33564 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33565 start, end - 1, type, req_type);
33566 if (new_type)
33567@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33568
33569 if (!entry) {
33570 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33571- current->comm, current->pid, start, end - 1);
33572+ current->comm, task_pid_nr(current), start, end - 1);
33573 return -EINVAL;
33574 }
33575
33576@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33577 page = pfn_to_page(paddr >> PAGE_SHIFT);
33578 rettype = get_page_memtype(page);
33579 /*
33580- * -1 from get_page_memtype() implies RAM page is in its
33581+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33582 * default state and not reserved, and hence of type WB
33583 */
33584- if (rettype == -1)
33585+ if (rettype == _PAGE_CACHE_MODE_NUM)
33586 rettype = _PAGE_CACHE_MODE_WB;
33587
33588 return rettype;
33589@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33590
33591 while (cursor < to) {
33592 if (!devmem_is_allowed(pfn)) {
33593- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33594- current->comm, from, to - 1);
33595+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33596+ current->comm, from, to - 1, cursor);
33597 return 0;
33598 }
33599 cursor += PAGE_SIZE;
33600@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33601 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33602 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33603 "for [mem %#010Lx-%#010Lx]\n",
33604- current->comm, current->pid,
33605+ current->comm, task_pid_nr(current),
33606 cattr_name(pcm),
33607 base, (unsigned long long)(base + size-1));
33608 return -EINVAL;
33609@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33610 pcm = lookup_memtype(paddr);
33611 if (want_pcm != pcm) {
33612 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33613- current->comm, current->pid,
33614+ current->comm, task_pid_nr(current),
33615 cattr_name(want_pcm),
33616 (unsigned long long)paddr,
33617 (unsigned long long)(paddr + size - 1),
33618@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33619 free_memtype(paddr, paddr + size);
33620 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33621 " for [mem %#010Lx-%#010Lx], got %s\n",
33622- current->comm, current->pid,
33623+ current->comm, task_pid_nr(current),
33624 cattr_name(want_pcm),
33625 (unsigned long long)paddr,
33626 (unsigned long long)(paddr + size - 1),
33627diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33628index 6582adc..fcc5d0b 100644
33629--- a/arch/x86/mm/pat_rbtree.c
33630+++ b/arch/x86/mm/pat_rbtree.c
33631@@ -161,7 +161,7 @@ success:
33632
33633 failure:
33634 printk(KERN_INFO "%s:%d conflicting memory types "
33635- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33636+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33637 end, cattr_name(found_type), cattr_name(match->type));
33638 return -EBUSY;
33639 }
33640diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33641index 9f0614d..92ae64a 100644
33642--- a/arch/x86/mm/pf_in.c
33643+++ b/arch/x86/mm/pf_in.c
33644@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33645 int i;
33646 enum reason_type rv = OTHERS;
33647
33648- p = (unsigned char *)ins_addr;
33649+ p = (unsigned char *)ktla_ktva(ins_addr);
33650 p += skip_prefix(p, &prf);
33651 p += get_opcode(p, &opcode);
33652
33653@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33654 struct prefix_bits prf;
33655 int i;
33656
33657- p = (unsigned char *)ins_addr;
33658+ p = (unsigned char *)ktla_ktva(ins_addr);
33659 p += skip_prefix(p, &prf);
33660 p += get_opcode(p, &opcode);
33661
33662@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33663 struct prefix_bits prf;
33664 int i;
33665
33666- p = (unsigned char *)ins_addr;
33667+ p = (unsigned char *)ktla_ktva(ins_addr);
33668 p += skip_prefix(p, &prf);
33669 p += get_opcode(p, &opcode);
33670
33671@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33672 struct prefix_bits prf;
33673 int i;
33674
33675- p = (unsigned char *)ins_addr;
33676+ p = (unsigned char *)ktla_ktva(ins_addr);
33677 p += skip_prefix(p, &prf);
33678 p += get_opcode(p, &opcode);
33679 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33680@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33681 struct prefix_bits prf;
33682 int i;
33683
33684- p = (unsigned char *)ins_addr;
33685+ p = (unsigned char *)ktla_ktva(ins_addr);
33686 p += skip_prefix(p, &prf);
33687 p += get_opcode(p, &opcode);
33688 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33689diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33690index 6fb6927..4fc13c0 100644
33691--- a/arch/x86/mm/pgtable.c
33692+++ b/arch/x86/mm/pgtable.c
33693@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33694 list_del(&page->lru);
33695 }
33696
33697-#define UNSHARED_PTRS_PER_PGD \
33698- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33699+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33700+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33701
33702+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33703+{
33704+ unsigned int count = USER_PGD_PTRS;
33705
33706+ if (!pax_user_shadow_base)
33707+ return;
33708+
33709+ while (count--)
33710+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33711+}
33712+#endif
33713+
33714+#ifdef CONFIG_PAX_PER_CPU_PGD
33715+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33716+{
33717+ unsigned int count = USER_PGD_PTRS;
33718+
33719+ while (count--) {
33720+ pgd_t pgd;
33721+
33722+#ifdef CONFIG_X86_64
33723+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33724+#else
33725+ pgd = *src++;
33726+#endif
33727+
33728+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33729+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33730+#endif
33731+
33732+ *dst++ = pgd;
33733+ }
33734+
33735+}
33736+#endif
33737+
33738+#ifdef CONFIG_X86_64
33739+#define pxd_t pud_t
33740+#define pyd_t pgd_t
33741+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33742+#define pgtable_pxd_page_ctor(page) true
33743+#define pgtable_pxd_page_dtor(page)
33744+#define pxd_free(mm, pud) pud_free((mm), (pud))
33745+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33746+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33747+#define PYD_SIZE PGDIR_SIZE
33748+#else
33749+#define pxd_t pmd_t
33750+#define pyd_t pud_t
33751+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33752+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33753+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33754+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33755+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33756+#define pyd_offset(mm, address) pud_offset((mm), (address))
33757+#define PYD_SIZE PUD_SIZE
33758+#endif
33759+
33760+#ifdef CONFIG_PAX_PER_CPU_PGD
33761+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33762+static inline void pgd_dtor(pgd_t *pgd) {}
33763+#else
33764 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33765 {
33766 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33767@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33768 pgd_list_del(pgd);
33769 spin_unlock(&pgd_lock);
33770 }
33771+#endif
33772
33773 /*
33774 * List of all pgd's needed for non-PAE so it can invalidate entries
33775@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33776 * -- nyc
33777 */
33778
33779-#ifdef CONFIG_X86_PAE
33780+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33781 /*
33782 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33783 * updating the top-level pagetable entries to guarantee the
33784@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33785 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33786 * and initialize the kernel pmds here.
33787 */
33788-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33789+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33790
33791 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33792 {
33793@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33794 */
33795 flush_tlb_mm(mm);
33796 }
33797+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33798+#define PREALLOCATED_PXDS USER_PGD_PTRS
33799 #else /* !CONFIG_X86_PAE */
33800
33801 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33802-#define PREALLOCATED_PMDS 0
33803+#define PREALLOCATED_PXDS 0
33804
33805 #endif /* CONFIG_X86_PAE */
33806
33807-static void free_pmds(pmd_t *pmds[])
33808+static void free_pxds(pxd_t *pxds[])
33809 {
33810 int i;
33811
33812- for(i = 0; i < PREALLOCATED_PMDS; i++)
33813- if (pmds[i]) {
33814- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33815- free_page((unsigned long)pmds[i]);
33816+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33817+ if (pxds[i]) {
33818+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33819+ free_page((unsigned long)pxds[i]);
33820 }
33821 }
33822
33823-static int preallocate_pmds(pmd_t *pmds[])
33824+static int preallocate_pxds(pxd_t *pxds[])
33825 {
33826 int i;
33827 bool failed = false;
33828
33829- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33830- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33831- if (!pmd)
33832+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33833+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33834+ if (!pxd)
33835 failed = true;
33836- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33837- free_page((unsigned long)pmd);
33838- pmd = NULL;
33839+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33840+ free_page((unsigned long)pxd);
33841+ pxd = NULL;
33842 failed = true;
33843 }
33844- pmds[i] = pmd;
33845+ pxds[i] = pxd;
33846 }
33847
33848 if (failed) {
33849- free_pmds(pmds);
33850+ free_pxds(pxds);
33851 return -ENOMEM;
33852 }
33853
33854@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33855 * preallocate which never got a corresponding vma will need to be
33856 * freed manually.
33857 */
33858-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33859+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33860 {
33861 int i;
33862
33863- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33864+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33865 pgd_t pgd = pgdp[i];
33866
33867 if (pgd_val(pgd) != 0) {
33868- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33869+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33870
33871- pgdp[i] = native_make_pgd(0);
33872+ set_pgd(pgdp + i, native_make_pgd(0));
33873
33874- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33875- pmd_free(mm, pmd);
33876+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33877+ pxd_free(mm, pxd);
33878 }
33879 }
33880 }
33881
33882-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33883+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33884 {
33885- pud_t *pud;
33886+ pyd_t *pyd;
33887 int i;
33888
33889- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33890+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33891 return;
33892
33893- pud = pud_offset(pgd, 0);
33894-
33895- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33896- pmd_t *pmd = pmds[i];
33897+#ifdef CONFIG_X86_64
33898+ pyd = pyd_offset(mm, 0L);
33899+#else
33900+ pyd = pyd_offset(pgd, 0L);
33901+#endif
33902
33903+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33904+ pxd_t *pxd = pxds[i];
33905 if (i >= KERNEL_PGD_BOUNDARY)
33906- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33907- sizeof(pmd_t) * PTRS_PER_PMD);
33908+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33909+ sizeof(pxd_t) * PTRS_PER_PMD);
33910
33911- pud_populate(mm, pud, pmd);
33912+ pyd_populate(mm, pyd, pxd);
33913 }
33914 }
33915
33916 pgd_t *pgd_alloc(struct mm_struct *mm)
33917 {
33918 pgd_t *pgd;
33919- pmd_t *pmds[PREALLOCATED_PMDS];
33920+ pxd_t *pxds[PREALLOCATED_PXDS];
33921
33922 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33923
33924@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33925
33926 mm->pgd = pgd;
33927
33928- if (preallocate_pmds(pmds) != 0)
33929+ if (preallocate_pxds(pxds) != 0)
33930 goto out_free_pgd;
33931
33932 if (paravirt_pgd_alloc(mm) != 0)
33933- goto out_free_pmds;
33934+ goto out_free_pxds;
33935
33936 /*
33937 * Make sure that pre-populating the pmds is atomic with
33938@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33939 spin_lock(&pgd_lock);
33940
33941 pgd_ctor(mm, pgd);
33942- pgd_prepopulate_pmd(mm, pgd, pmds);
33943+ pgd_prepopulate_pxd(mm, pgd, pxds);
33944
33945 spin_unlock(&pgd_lock);
33946
33947 return pgd;
33948
33949-out_free_pmds:
33950- free_pmds(pmds);
33951+out_free_pxds:
33952+ free_pxds(pxds);
33953 out_free_pgd:
33954 free_page((unsigned long)pgd);
33955 out:
33956@@ -313,7 +380,7 @@ out:
33957
33958 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33959 {
33960- pgd_mop_up_pmds(mm, pgd);
33961+ pgd_mop_up_pxds(mm, pgd);
33962 pgd_dtor(pgd);
33963 paravirt_pgd_free(mm, pgd);
33964 free_page((unsigned long)pgd);
33965diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
33966index 75cc097..79a097f 100644
33967--- a/arch/x86/mm/pgtable_32.c
33968+++ b/arch/x86/mm/pgtable_32.c
33969@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
33970 return;
33971 }
33972 pte = pte_offset_kernel(pmd, vaddr);
33973+
33974+ pax_open_kernel();
33975 if (pte_val(pteval))
33976 set_pte_at(&init_mm, vaddr, pte, pteval);
33977 else
33978 pte_clear(&init_mm, vaddr, pte);
33979+ pax_close_kernel();
33980
33981 /*
33982 * It's enough to flush this one mapping.
33983diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
33984index e666cbb..61788c45 100644
33985--- a/arch/x86/mm/physaddr.c
33986+++ b/arch/x86/mm/physaddr.c
33987@@ -10,7 +10,7 @@
33988 #ifdef CONFIG_X86_64
33989
33990 #ifdef CONFIG_DEBUG_VIRTUAL
33991-unsigned long __phys_addr(unsigned long x)
33992+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
33993 {
33994 unsigned long y = x - __START_KERNEL_map;
33995
33996@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
33997 #else
33998
33999 #ifdef CONFIG_DEBUG_VIRTUAL
34000-unsigned long __phys_addr(unsigned long x)
34001+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34002 {
34003 unsigned long phys_addr = x - PAGE_OFFSET;
34004 /* VMALLOC_* aren't constants */
34005diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34006index 90555bf..f5f1828 100644
34007--- a/arch/x86/mm/setup_nx.c
34008+++ b/arch/x86/mm/setup_nx.c
34009@@ -5,8 +5,10 @@
34010 #include <asm/pgtable.h>
34011 #include <asm/proto.h>
34012
34013+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34014 static int disable_nx;
34015
34016+#ifndef CONFIG_PAX_PAGEEXEC
34017 /*
34018 * noexec = on|off
34019 *
34020@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34021 return 0;
34022 }
34023 early_param("noexec", noexec_setup);
34024+#endif
34025+
34026+#endif
34027
34028 void x86_configure_nx(void)
34029 {
34030+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34031 if (cpu_has_nx && !disable_nx)
34032 __supported_pte_mask |= _PAGE_NX;
34033 else
34034+#endif
34035 __supported_pte_mask &= ~_PAGE_NX;
34036 }
34037
34038diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34039index ee61c36..e6fedeb 100644
34040--- a/arch/x86/mm/tlb.c
34041+++ b/arch/x86/mm/tlb.c
34042@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34043 BUG();
34044 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34045 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34046+
34047+#ifndef CONFIG_PAX_PER_CPU_PGD
34048 load_cr3(swapper_pg_dir);
34049+#endif
34050+
34051 /*
34052 * This gets called in the idle path where RCU
34053 * functions differently. Tracing normally
34054diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34055new file mode 100644
34056index 0000000..dace51c
34057--- /dev/null
34058+++ b/arch/x86/mm/uderef_64.c
34059@@ -0,0 +1,37 @@
34060+#include <linux/mm.h>
34061+#include <asm/pgtable.h>
34062+#include <asm/uaccess.h>
34063+
34064+#ifdef CONFIG_PAX_MEMORY_UDEREF
34065+/* PaX: due to the special call convention these functions must
34066+ * - remain leaf functions under all configurations,
34067+ * - never be called directly, only dereferenced from the wrappers.
34068+ */
34069+void __pax_open_userland(void)
34070+{
34071+ unsigned int cpu;
34072+
34073+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34074+ return;
34075+
34076+ cpu = raw_get_cpu();
34077+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34078+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34079+ raw_put_cpu_no_resched();
34080+}
34081+EXPORT_SYMBOL(__pax_open_userland);
34082+
34083+void __pax_close_userland(void)
34084+{
34085+ unsigned int cpu;
34086+
34087+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34088+ return;
34089+
34090+ cpu = raw_get_cpu();
34091+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34092+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34093+ raw_put_cpu_no_resched();
34094+}
34095+EXPORT_SYMBOL(__pax_close_userland);
34096+#endif
34097diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34098index 6440221..f84b5c7 100644
34099--- a/arch/x86/net/bpf_jit.S
34100+++ b/arch/x86/net/bpf_jit.S
34101@@ -9,6 +9,7 @@
34102 */
34103 #include <linux/linkage.h>
34104 #include <asm/dwarf2.h>
34105+#include <asm/alternative-asm.h>
34106
34107 /*
34108 * Calling convention :
34109@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34110 jle bpf_slow_path_word
34111 mov (SKBDATA,%rsi),%eax
34112 bswap %eax /* ntohl() */
34113+ pax_force_retaddr
34114 ret
34115
34116 sk_load_half:
34117@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34118 jle bpf_slow_path_half
34119 movzwl (SKBDATA,%rsi),%eax
34120 rol $8,%ax # ntohs()
34121+ pax_force_retaddr
34122 ret
34123
34124 sk_load_byte:
34125@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34126 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34127 jle bpf_slow_path_byte
34128 movzbl (SKBDATA,%rsi),%eax
34129+ pax_force_retaddr
34130 ret
34131
34132 /* rsi contains offset and can be scratched */
34133@@ -90,6 +94,7 @@ bpf_slow_path_word:
34134 js bpf_error
34135 mov - MAX_BPF_STACK + 32(%rbp),%eax
34136 bswap %eax
34137+ pax_force_retaddr
34138 ret
34139
34140 bpf_slow_path_half:
34141@@ -98,12 +103,14 @@ bpf_slow_path_half:
34142 mov - MAX_BPF_STACK + 32(%rbp),%ax
34143 rol $8,%ax
34144 movzwl %ax,%eax
34145+ pax_force_retaddr
34146 ret
34147
34148 bpf_slow_path_byte:
34149 bpf_slow_path_common(1)
34150 js bpf_error
34151 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34152+ pax_force_retaddr
34153 ret
34154
34155 #define sk_negative_common(SIZE) \
34156@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34157 sk_negative_common(4)
34158 mov (%rax), %eax
34159 bswap %eax
34160+ pax_force_retaddr
34161 ret
34162
34163 bpf_slow_path_half_neg:
34164@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34165 mov (%rax),%ax
34166 rol $8,%ax
34167 movzwl %ax,%eax
34168+ pax_force_retaddr
34169 ret
34170
34171 bpf_slow_path_byte_neg:
34172@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34173 .globl sk_load_byte_negative_offset
34174 sk_negative_common(1)
34175 movzbl (%rax), %eax
34176+ pax_force_retaddr
34177 ret
34178
34179 bpf_error:
34180@@ -156,4 +166,5 @@ bpf_error:
34181 mov - MAX_BPF_STACK + 16(%rbp),%r14
34182 mov - MAX_BPF_STACK + 24(%rbp),%r15
34183 leaveq
34184+ pax_force_retaddr
34185 ret
34186diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34187index 9875143..00f6656 100644
34188--- a/arch/x86/net/bpf_jit_comp.c
34189+++ b/arch/x86/net/bpf_jit_comp.c
34190@@ -13,7 +13,11 @@
34191 #include <linux/if_vlan.h>
34192 #include <asm/cacheflush.h>
34193
34194+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34195+int bpf_jit_enable __read_only;
34196+#else
34197 int bpf_jit_enable __read_mostly;
34198+#endif
34199
34200 /*
34201 * assembly code in arch/x86/net/bpf_jit.S
34202@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34203 static void jit_fill_hole(void *area, unsigned int size)
34204 {
34205 /* fill whole space with int3 instructions */
34206+ pax_open_kernel();
34207 memset(area, 0xcc, size);
34208+ pax_close_kernel();
34209 }
34210
34211 struct jit_context {
34212@@ -896,7 +902,9 @@ common_load:
34213 pr_err("bpf_jit_compile fatal error\n");
34214 return -EFAULT;
34215 }
34216+ pax_open_kernel();
34217 memcpy(image + proglen, temp, ilen);
34218+ pax_close_kernel();
34219 }
34220 proglen += ilen;
34221 addrs[i] = proglen;
34222@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34223
34224 if (image) {
34225 bpf_flush_icache(header, image + proglen);
34226- set_memory_ro((unsigned long)header, header->pages);
34227 prog->bpf_func = (void *)image;
34228 prog->jited = true;
34229 }
34230@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34231 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34232 struct bpf_binary_header *header = (void *)addr;
34233
34234- if (!fp->jited)
34235- goto free_filter;
34236+ if (fp->jited)
34237+ bpf_jit_binary_free(header);
34238
34239- set_memory_rw(addr, header->pages);
34240- bpf_jit_binary_free(header);
34241-
34242-free_filter:
34243 bpf_prog_unlock_free(fp);
34244 }
34245diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34246index 5d04be5..2beeaa2 100644
34247--- a/arch/x86/oprofile/backtrace.c
34248+++ b/arch/x86/oprofile/backtrace.c
34249@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34250 struct stack_frame_ia32 *fp;
34251 unsigned long bytes;
34252
34253- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34254+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34255 if (bytes != 0)
34256 return NULL;
34257
34258- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34259+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34260
34261 oprofile_add_trace(bufhead[0].return_address);
34262
34263@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34264 struct stack_frame bufhead[2];
34265 unsigned long bytes;
34266
34267- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34268+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34269 if (bytes != 0)
34270 return NULL;
34271
34272@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34273 {
34274 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34275
34276- if (!user_mode_vm(regs)) {
34277+ if (!user_mode(regs)) {
34278 unsigned long stack = kernel_stack_pointer(regs);
34279 if (depth)
34280 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34281diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34282index 1d2e639..f6ef82a 100644
34283--- a/arch/x86/oprofile/nmi_int.c
34284+++ b/arch/x86/oprofile/nmi_int.c
34285@@ -23,6 +23,7 @@
34286 #include <asm/nmi.h>
34287 #include <asm/msr.h>
34288 #include <asm/apic.h>
34289+#include <asm/pgtable.h>
34290
34291 #include "op_counter.h"
34292 #include "op_x86_model.h"
34293@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34294 if (ret)
34295 return ret;
34296
34297- if (!model->num_virt_counters)
34298- model->num_virt_counters = model->num_counters;
34299+ if (!model->num_virt_counters) {
34300+ pax_open_kernel();
34301+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34302+ pax_close_kernel();
34303+ }
34304
34305 mux_init(ops);
34306
34307diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34308index 50d86c0..7985318 100644
34309--- a/arch/x86/oprofile/op_model_amd.c
34310+++ b/arch/x86/oprofile/op_model_amd.c
34311@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34312 num_counters = AMD64_NUM_COUNTERS;
34313 }
34314
34315- op_amd_spec.num_counters = num_counters;
34316- op_amd_spec.num_controls = num_counters;
34317- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34318+ pax_open_kernel();
34319+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34320+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34321+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34322+ pax_close_kernel();
34323
34324 return 0;
34325 }
34326diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34327index d90528e..0127e2b 100644
34328--- a/arch/x86/oprofile/op_model_ppro.c
34329+++ b/arch/x86/oprofile/op_model_ppro.c
34330@@ -19,6 +19,7 @@
34331 #include <asm/msr.h>
34332 #include <asm/apic.h>
34333 #include <asm/nmi.h>
34334+#include <asm/pgtable.h>
34335
34336 #include "op_x86_model.h"
34337 #include "op_counter.h"
34338@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34339
34340 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34341
34342- op_arch_perfmon_spec.num_counters = num_counters;
34343- op_arch_perfmon_spec.num_controls = num_counters;
34344+ pax_open_kernel();
34345+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34346+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34347+ pax_close_kernel();
34348 }
34349
34350 static int arch_perfmon_init(struct oprofile_operations *ignore)
34351diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34352index 71e8a67..6a313bb 100644
34353--- a/arch/x86/oprofile/op_x86_model.h
34354+++ b/arch/x86/oprofile/op_x86_model.h
34355@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34356 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34357 struct op_msrs const * const msrs);
34358 #endif
34359-};
34360+} __do_const;
34361
34362 struct op_counter_config;
34363
34364diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34365index 44b9271..4c5a988 100644
34366--- a/arch/x86/pci/intel_mid_pci.c
34367+++ b/arch/x86/pci/intel_mid_pci.c
34368@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34369 pci_mmcfg_late_init();
34370 pcibios_enable_irq = intel_mid_pci_irq_enable;
34371 pcibios_disable_irq = intel_mid_pci_irq_disable;
34372- pci_root_ops = intel_mid_pci_ops;
34373+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34374 pci_soc_mode = 1;
34375 /* Continue with standard init */
34376 return 1;
34377diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34378index 5dc6ca5..25c03f5 100644
34379--- a/arch/x86/pci/irq.c
34380+++ b/arch/x86/pci/irq.c
34381@@ -51,7 +51,7 @@ struct irq_router {
34382 struct irq_router_handler {
34383 u16 vendor;
34384 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34385-};
34386+} __do_const;
34387
34388 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34389 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34390@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34391 return 0;
34392 }
34393
34394-static __initdata struct irq_router_handler pirq_routers[] = {
34395+static __initconst const struct irq_router_handler pirq_routers[] = {
34396 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34397 { PCI_VENDOR_ID_AL, ali_router_probe },
34398 { PCI_VENDOR_ID_ITE, ite_router_probe },
34399@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34400 static void __init pirq_find_router(struct irq_router *r)
34401 {
34402 struct irq_routing_table *rt = pirq_table;
34403- struct irq_router_handler *h;
34404+ const struct irq_router_handler *h;
34405
34406 #ifdef CONFIG_PCI_BIOS
34407 if (!rt->signature) {
34408@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34409 return 0;
34410 }
34411
34412-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34413+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34414 {
34415 .callback = fix_broken_hp_bios_irq9,
34416 .ident = "HP Pavilion N5400 Series Laptop",
34417diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34418index 9b83b90..4112152 100644
34419--- a/arch/x86/pci/pcbios.c
34420+++ b/arch/x86/pci/pcbios.c
34421@@ -79,7 +79,7 @@ union bios32 {
34422 static struct {
34423 unsigned long address;
34424 unsigned short segment;
34425-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34426+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34427
34428 /*
34429 * Returns the entry point for the given service, NULL on error
34430@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34431 unsigned long length; /* %ecx */
34432 unsigned long entry; /* %edx */
34433 unsigned long flags;
34434+ struct desc_struct d, *gdt;
34435
34436 local_irq_save(flags);
34437- __asm__("lcall *(%%edi); cld"
34438+
34439+ gdt = get_cpu_gdt_table(smp_processor_id());
34440+
34441+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34442+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34443+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34444+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34445+
34446+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34447 : "=a" (return_code),
34448 "=b" (address),
34449 "=c" (length),
34450 "=d" (entry)
34451 : "0" (service),
34452 "1" (0),
34453- "D" (&bios32_indirect));
34454+ "D" (&bios32_indirect),
34455+ "r"(__PCIBIOS_DS)
34456+ : "memory");
34457+
34458+ pax_open_kernel();
34459+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34460+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34461+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34462+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34463+ pax_close_kernel();
34464+
34465 local_irq_restore(flags);
34466
34467 switch (return_code) {
34468- case 0:
34469- return address + entry;
34470- case 0x80: /* Not present */
34471- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34472- return 0;
34473- default: /* Shouldn't happen */
34474- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34475- service, return_code);
34476+ case 0: {
34477+ int cpu;
34478+ unsigned char flags;
34479+
34480+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34481+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34482+ printk(KERN_WARNING "bios32_service: not valid\n");
34483 return 0;
34484+ }
34485+ address = address + PAGE_OFFSET;
34486+ length += 16UL; /* some BIOSs underreport this... */
34487+ flags = 4;
34488+ if (length >= 64*1024*1024) {
34489+ length >>= PAGE_SHIFT;
34490+ flags |= 8;
34491+ }
34492+
34493+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34494+ gdt = get_cpu_gdt_table(cpu);
34495+ pack_descriptor(&d, address, length, 0x9b, flags);
34496+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34497+ pack_descriptor(&d, address, length, 0x93, flags);
34498+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34499+ }
34500+ return entry;
34501+ }
34502+ case 0x80: /* Not present */
34503+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34504+ return 0;
34505+ default: /* Shouldn't happen */
34506+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34507+ service, return_code);
34508+ return 0;
34509 }
34510 }
34511
34512 static struct {
34513 unsigned long address;
34514 unsigned short segment;
34515-} pci_indirect = { 0, __KERNEL_CS };
34516+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34517
34518-static int pci_bios_present;
34519+static int pci_bios_present __read_only;
34520
34521 static int __init check_pcibios(void)
34522 {
34523@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34524 unsigned long flags, pcibios_entry;
34525
34526 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34527- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34528+ pci_indirect.address = pcibios_entry;
34529
34530 local_irq_save(flags);
34531- __asm__(
34532- "lcall *(%%edi); cld\n\t"
34533+ __asm__("movw %w6, %%ds\n\t"
34534+ "lcall *%%ss:(%%edi); cld\n\t"
34535+ "push %%ss\n\t"
34536+ "pop %%ds\n\t"
34537 "jc 1f\n\t"
34538 "xor %%ah, %%ah\n"
34539 "1:"
34540@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34541 "=b" (ebx),
34542 "=c" (ecx)
34543 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34544- "D" (&pci_indirect)
34545+ "D" (&pci_indirect),
34546+ "r" (__PCIBIOS_DS)
34547 : "memory");
34548 local_irq_restore(flags);
34549
34550@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34551
34552 switch (len) {
34553 case 1:
34554- __asm__("lcall *(%%esi); cld\n\t"
34555+ __asm__("movw %w6, %%ds\n\t"
34556+ "lcall *%%ss:(%%esi); cld\n\t"
34557+ "push %%ss\n\t"
34558+ "pop %%ds\n\t"
34559 "jc 1f\n\t"
34560 "xor %%ah, %%ah\n"
34561 "1:"
34562@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34563 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34564 "b" (bx),
34565 "D" ((long)reg),
34566- "S" (&pci_indirect));
34567+ "S" (&pci_indirect),
34568+ "r" (__PCIBIOS_DS));
34569 /*
34570 * Zero-extend the result beyond 8 bits, do not trust the
34571 * BIOS having done it:
34572@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34573 *value &= 0xff;
34574 break;
34575 case 2:
34576- __asm__("lcall *(%%esi); cld\n\t"
34577+ __asm__("movw %w6, %%ds\n\t"
34578+ "lcall *%%ss:(%%esi); cld\n\t"
34579+ "push %%ss\n\t"
34580+ "pop %%ds\n\t"
34581 "jc 1f\n\t"
34582 "xor %%ah, %%ah\n"
34583 "1:"
34584@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34585 : "1" (PCIBIOS_READ_CONFIG_WORD),
34586 "b" (bx),
34587 "D" ((long)reg),
34588- "S" (&pci_indirect));
34589+ "S" (&pci_indirect),
34590+ "r" (__PCIBIOS_DS));
34591 /*
34592 * Zero-extend the result beyond 16 bits, do not trust the
34593 * BIOS having done it:
34594@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34595 *value &= 0xffff;
34596 break;
34597 case 4:
34598- __asm__("lcall *(%%esi); cld\n\t"
34599+ __asm__("movw %w6, %%ds\n\t"
34600+ "lcall *%%ss:(%%esi); cld\n\t"
34601+ "push %%ss\n\t"
34602+ "pop %%ds\n\t"
34603 "jc 1f\n\t"
34604 "xor %%ah, %%ah\n"
34605 "1:"
34606@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34607 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34608 "b" (bx),
34609 "D" ((long)reg),
34610- "S" (&pci_indirect));
34611+ "S" (&pci_indirect),
34612+ "r" (__PCIBIOS_DS));
34613 break;
34614 }
34615
34616@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34617
34618 switch (len) {
34619 case 1:
34620- __asm__("lcall *(%%esi); cld\n\t"
34621+ __asm__("movw %w6, %%ds\n\t"
34622+ "lcall *%%ss:(%%esi); cld\n\t"
34623+ "push %%ss\n\t"
34624+ "pop %%ds\n\t"
34625 "jc 1f\n\t"
34626 "xor %%ah, %%ah\n"
34627 "1:"
34628@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34629 "c" (value),
34630 "b" (bx),
34631 "D" ((long)reg),
34632- "S" (&pci_indirect));
34633+ "S" (&pci_indirect),
34634+ "r" (__PCIBIOS_DS));
34635 break;
34636 case 2:
34637- __asm__("lcall *(%%esi); cld\n\t"
34638+ __asm__("movw %w6, %%ds\n\t"
34639+ "lcall *%%ss:(%%esi); cld\n\t"
34640+ "push %%ss\n\t"
34641+ "pop %%ds\n\t"
34642 "jc 1f\n\t"
34643 "xor %%ah, %%ah\n"
34644 "1:"
34645@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34646 "c" (value),
34647 "b" (bx),
34648 "D" ((long)reg),
34649- "S" (&pci_indirect));
34650+ "S" (&pci_indirect),
34651+ "r" (__PCIBIOS_DS));
34652 break;
34653 case 4:
34654- __asm__("lcall *(%%esi); cld\n\t"
34655+ __asm__("movw %w6, %%ds\n\t"
34656+ "lcall *%%ss:(%%esi); cld\n\t"
34657+ "push %%ss\n\t"
34658+ "pop %%ds\n\t"
34659 "jc 1f\n\t"
34660 "xor %%ah, %%ah\n"
34661 "1:"
34662@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34663 "c" (value),
34664 "b" (bx),
34665 "D" ((long)reg),
34666- "S" (&pci_indirect));
34667+ "S" (&pci_indirect),
34668+ "r" (__PCIBIOS_DS));
34669 break;
34670 }
34671
34672@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34673
34674 DBG("PCI: Fetching IRQ routing table... ");
34675 __asm__("push %%es\n\t"
34676+ "movw %w8, %%ds\n\t"
34677 "push %%ds\n\t"
34678 "pop %%es\n\t"
34679- "lcall *(%%esi); cld\n\t"
34680+ "lcall *%%ss:(%%esi); cld\n\t"
34681 "pop %%es\n\t"
34682+ "push %%ss\n\t"
34683+ "pop %%ds\n"
34684 "jc 1f\n\t"
34685 "xor %%ah, %%ah\n"
34686 "1:"
34687@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34688 "1" (0),
34689 "D" ((long) &opt),
34690 "S" (&pci_indirect),
34691- "m" (opt)
34692+ "m" (opt),
34693+ "r" (__PCIBIOS_DS)
34694 : "memory");
34695 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34696 if (ret & 0xff00)
34697@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34698 {
34699 int ret;
34700
34701- __asm__("lcall *(%%esi); cld\n\t"
34702+ __asm__("movw %w5, %%ds\n\t"
34703+ "lcall *%%ss:(%%esi); cld\n\t"
34704+ "push %%ss\n\t"
34705+ "pop %%ds\n"
34706 "jc 1f\n\t"
34707 "xor %%ah, %%ah\n"
34708 "1:"
34709@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34710 : "0" (PCIBIOS_SET_PCI_HW_INT),
34711 "b" ((dev->bus->number << 8) | dev->devfn),
34712 "c" ((irq << 8) | (pin + 10)),
34713- "S" (&pci_indirect));
34714+ "S" (&pci_indirect),
34715+ "r" (__PCIBIOS_DS));
34716 return !(ret & 0xff00);
34717 }
34718 EXPORT_SYMBOL(pcibios_set_irq_routing);
34719diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34720index 40e7cda..c7e6672 100644
34721--- a/arch/x86/platform/efi/efi_32.c
34722+++ b/arch/x86/platform/efi/efi_32.c
34723@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34724 {
34725 struct desc_ptr gdt_descr;
34726
34727+#ifdef CONFIG_PAX_KERNEXEC
34728+ struct desc_struct d;
34729+#endif
34730+
34731 local_irq_save(efi_rt_eflags);
34732
34733 load_cr3(initial_page_table);
34734 __flush_tlb_all();
34735
34736+#ifdef CONFIG_PAX_KERNEXEC
34737+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34738+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34739+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34740+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34741+#endif
34742+
34743 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34744 gdt_descr.size = GDT_SIZE - 1;
34745 load_gdt(&gdt_descr);
34746@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34747 {
34748 struct desc_ptr gdt_descr;
34749
34750+#ifdef CONFIG_PAX_KERNEXEC
34751+ struct desc_struct d;
34752+
34753+ memset(&d, 0, sizeof d);
34754+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34755+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34756+#endif
34757+
34758 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34759 gdt_descr.size = GDT_SIZE - 1;
34760 load_gdt(&gdt_descr);
34761
34762+#ifdef CONFIG_PAX_PER_CPU_PGD
34763+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34764+#else
34765 load_cr3(swapper_pg_dir);
34766+#endif
34767+
34768 __flush_tlb_all();
34769
34770 local_irq_restore(efi_rt_eflags);
34771diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34772index 17e80d8..9fa6e41 100644
34773--- a/arch/x86/platform/efi/efi_64.c
34774+++ b/arch/x86/platform/efi/efi_64.c
34775@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34776 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34777 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34778 }
34779+
34780+#ifdef CONFIG_PAX_PER_CPU_PGD
34781+ load_cr3(swapper_pg_dir);
34782+#endif
34783+
34784 __flush_tlb_all();
34785 }
34786
34787@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34788 for (pgd = 0; pgd < n_pgds; pgd++)
34789 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34790 kfree(save_pgd);
34791+
34792+#ifdef CONFIG_PAX_PER_CPU_PGD
34793+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34794+#endif
34795+
34796 __flush_tlb_all();
34797 local_irq_restore(efi_flags);
34798 early_code_mapping_set_exec(0);
34799@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34800 unsigned npages;
34801 pgd_t *pgd;
34802
34803- if (efi_enabled(EFI_OLD_MEMMAP))
34804+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34805+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34806+ * able to execute the EFI services.
34807+ */
34808+ if (__supported_pte_mask & _PAGE_NX) {
34809+ unsigned long addr = (unsigned long) __va(0);
34810+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34811+
34812+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34813+#ifdef CONFIG_PAX_PER_CPU_PGD
34814+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34815+#endif
34816+ set_pgd(pgd_offset_k(addr), pe);
34817+ }
34818+
34819 return 0;
34820+ }
34821
34822 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34823 pgd = __va(efi_scratch.efi_pgt);
34824diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34825index 040192b..7d3300f 100644
34826--- a/arch/x86/platform/efi/efi_stub_32.S
34827+++ b/arch/x86/platform/efi/efi_stub_32.S
34828@@ -6,7 +6,9 @@
34829 */
34830
34831 #include <linux/linkage.h>
34832+#include <linux/init.h>
34833 #include <asm/page_types.h>
34834+#include <asm/segment.h>
34835
34836 /*
34837 * efi_call_phys(void *, ...) is a function with variable parameters.
34838@@ -20,7 +22,7 @@
34839 * service functions will comply with gcc calling convention, too.
34840 */
34841
34842-.text
34843+__INIT
34844 ENTRY(efi_call_phys)
34845 /*
34846 * 0. The function can only be called in Linux kernel. So CS has been
34847@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34848 * The mapping of lower virtual memory has been created in prolog and
34849 * epilog.
34850 */
34851- movl $1f, %edx
34852- subl $__PAGE_OFFSET, %edx
34853- jmp *%edx
34854+#ifdef CONFIG_PAX_KERNEXEC
34855+ movl $(__KERNEXEC_EFI_DS), %edx
34856+ mov %edx, %ds
34857+ mov %edx, %es
34858+ mov %edx, %ss
34859+ addl $2f,(1f)
34860+ ljmp *(1f)
34861+
34862+__INITDATA
34863+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34864+.previous
34865+
34866+2:
34867+ subl $2b,(1b)
34868+#else
34869+ jmp 1f-__PAGE_OFFSET
34870 1:
34871+#endif
34872
34873 /*
34874 * 2. Now on the top of stack is the return
34875@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34876 * parameter 2, ..., param n. To make things easy, we save the return
34877 * address of efi_call_phys in a global variable.
34878 */
34879- popl %edx
34880- movl %edx, saved_return_addr
34881- /* get the function pointer into ECX*/
34882- popl %ecx
34883- movl %ecx, efi_rt_function_ptr
34884- movl $2f, %edx
34885- subl $__PAGE_OFFSET, %edx
34886- pushl %edx
34887+ popl (saved_return_addr)
34888+ popl (efi_rt_function_ptr)
34889
34890 /*
34891 * 3. Clear PG bit in %CR0.
34892@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34893 /*
34894 * 5. Call the physical function.
34895 */
34896- jmp *%ecx
34897+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34898
34899-2:
34900 /*
34901 * 6. After EFI runtime service returns, control will return to
34902 * following instruction. We'd better readjust stack pointer first.
34903@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34904 movl %cr0, %edx
34905 orl $0x80000000, %edx
34906 movl %edx, %cr0
34907- jmp 1f
34908-1:
34909+
34910 /*
34911 * 8. Now restore the virtual mode from flat mode by
34912 * adding EIP with PAGE_OFFSET.
34913 */
34914- movl $1f, %edx
34915- jmp *%edx
34916+#ifdef CONFIG_PAX_KERNEXEC
34917+ movl $(__KERNEL_DS), %edx
34918+ mov %edx, %ds
34919+ mov %edx, %es
34920+ mov %edx, %ss
34921+ ljmp $(__KERNEL_CS),$1f
34922+#else
34923+ jmp 1f+__PAGE_OFFSET
34924+#endif
34925 1:
34926
34927 /*
34928 * 9. Balance the stack. And because EAX contain the return value,
34929 * we'd better not clobber it.
34930 */
34931- leal efi_rt_function_ptr, %edx
34932- movl (%edx), %ecx
34933- pushl %ecx
34934+ pushl (efi_rt_function_ptr)
34935
34936 /*
34937- * 10. Push the saved return address onto the stack and return.
34938+ * 10. Return to the saved return address.
34939 */
34940- leal saved_return_addr, %edx
34941- movl (%edx), %ecx
34942- pushl %ecx
34943- ret
34944+ jmpl *(saved_return_addr)
34945 ENDPROC(efi_call_phys)
34946 .previous
34947
34948-.data
34949+__INITDATA
34950 saved_return_addr:
34951 .long 0
34952 efi_rt_function_ptr:
34953diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34954index 86d0f9e..6d499f4 100644
34955--- a/arch/x86/platform/efi/efi_stub_64.S
34956+++ b/arch/x86/platform/efi/efi_stub_64.S
34957@@ -11,6 +11,7 @@
34958 #include <asm/msr.h>
34959 #include <asm/processor-flags.h>
34960 #include <asm/page_types.h>
34961+#include <asm/alternative-asm.h>
34962
34963 #define SAVE_XMM \
34964 mov %rsp, %rax; \
34965@@ -88,6 +89,7 @@ ENTRY(efi_call)
34966 RESTORE_PGT
34967 addq $48, %rsp
34968 RESTORE_XMM
34969+ pax_force_retaddr 0, 1
34970 ret
34971 ENDPROC(efi_call)
34972
34973diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34974index 1bbedc4..eb795b5 100644
34975--- a/arch/x86/platform/intel-mid/intel-mid.c
34976+++ b/arch/x86/platform/intel-mid/intel-mid.c
34977@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
34978 {
34979 };
34980
34981-static void intel_mid_reboot(void)
34982+static void __noreturn intel_mid_reboot(void)
34983 {
34984 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
34985+ BUG();
34986 }
34987
34988 static unsigned long __init intel_mid_calibrate_tsc(void)
34989diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
34990index 3c1c386..59a68ed 100644
34991--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
34992+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
34993@@ -13,6 +13,6 @@
34994 /* For every CPU addition a new get_<cpuname>_ops interface needs
34995 * to be added.
34996 */
34997-extern void *get_penwell_ops(void);
34998-extern void *get_cloverview_ops(void);
34999-extern void *get_tangier_ops(void);
35000+extern const void *get_penwell_ops(void);
35001+extern const void *get_cloverview_ops(void);
35002+extern const void *get_tangier_ops(void);
35003diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35004index 23381d2..8ddc10e 100644
35005--- a/arch/x86/platform/intel-mid/mfld.c
35006+++ b/arch/x86/platform/intel-mid/mfld.c
35007@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35008 pm_power_off = mfld_power_off;
35009 }
35010
35011-void *get_penwell_ops(void)
35012+const void *get_penwell_ops(void)
35013 {
35014 return &penwell_ops;
35015 }
35016
35017-void *get_cloverview_ops(void)
35018+const void *get_cloverview_ops(void)
35019 {
35020 return &penwell_ops;
35021 }
35022diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35023index aaca917..66eadbc 100644
35024--- a/arch/x86/platform/intel-mid/mrfl.c
35025+++ b/arch/x86/platform/intel-mid/mrfl.c
35026@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35027 .arch_setup = tangier_arch_setup,
35028 };
35029
35030-void *get_tangier_ops(void)
35031+const void *get_tangier_ops(void)
35032 {
35033 return &tangier_ops;
35034 }
35035diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35036index d6ee929..3637cb5 100644
35037--- a/arch/x86/platform/olpc/olpc_dt.c
35038+++ b/arch/x86/platform/olpc/olpc_dt.c
35039@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35040 return res;
35041 }
35042
35043-static struct of_pdt_ops prom_olpc_ops __initdata = {
35044+static struct of_pdt_ops prom_olpc_ops __initconst = {
35045 .nextprop = olpc_dt_nextprop,
35046 .getproplen = olpc_dt_getproplen,
35047 .getproperty = olpc_dt_getproperty,
35048diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35049index 6ec7910..ecdbb11 100644
35050--- a/arch/x86/power/cpu.c
35051+++ b/arch/x86/power/cpu.c
35052@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35053 static void fix_processor_context(void)
35054 {
35055 int cpu = smp_processor_id();
35056- struct tss_struct *t = &per_cpu(init_tss, cpu);
35057-#ifdef CONFIG_X86_64
35058- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35059- tss_desc tss;
35060-#endif
35061+ struct tss_struct *t = init_tss + cpu;
35062+
35063 set_tss_desc(cpu, t); /*
35064 * This just modifies memory; should not be
35065 * necessary. But... This is necessary, because
35066@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35067 */
35068
35069 #ifdef CONFIG_X86_64
35070- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35071- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35072- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35073-
35074 syscall_init(); /* This sets MSR_*STAR and related */
35075 #endif
35076 load_TR_desc(); /* This does ltr */
35077diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35078index bad628a..a102610 100644
35079--- a/arch/x86/realmode/init.c
35080+++ b/arch/x86/realmode/init.c
35081@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35082 __va(real_mode_header->trampoline_header);
35083
35084 #ifdef CONFIG_X86_32
35085- trampoline_header->start = __pa_symbol(startup_32_smp);
35086+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35087+
35088+#ifdef CONFIG_PAX_KERNEXEC
35089+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35090+#endif
35091+
35092+ trampoline_header->boot_cs = __BOOT_CS;
35093 trampoline_header->gdt_limit = __BOOT_DS + 7;
35094 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35095 #else
35096@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35097 *trampoline_cr4_features = read_cr4();
35098
35099 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35100- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35101+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35102 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35103 #endif
35104 }
35105diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35106index 7c0d7be..d24dc88 100644
35107--- a/arch/x86/realmode/rm/Makefile
35108+++ b/arch/x86/realmode/rm/Makefile
35109@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35110
35111 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35112 -I$(srctree)/arch/x86/boot
35113+ifdef CONSTIFY_PLUGIN
35114+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35115+endif
35116 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35117 GCOV_PROFILE := n
35118diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35119index a28221d..93c40f1 100644
35120--- a/arch/x86/realmode/rm/header.S
35121+++ b/arch/x86/realmode/rm/header.S
35122@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35123 #endif
35124 /* APM/BIOS reboot */
35125 .long pa_machine_real_restart_asm
35126-#ifdef CONFIG_X86_64
35127+#ifdef CONFIG_X86_32
35128+ .long __KERNEL_CS
35129+#else
35130 .long __KERNEL32_CS
35131 #endif
35132 END(real_mode_header)
35133diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35134index 48ddd76..c26749f 100644
35135--- a/arch/x86/realmode/rm/trampoline_32.S
35136+++ b/arch/x86/realmode/rm/trampoline_32.S
35137@@ -24,6 +24,12 @@
35138 #include <asm/page_types.h>
35139 #include "realmode.h"
35140
35141+#ifdef CONFIG_PAX_KERNEXEC
35142+#define ta(X) (X)
35143+#else
35144+#define ta(X) (pa_ ## X)
35145+#endif
35146+
35147 .text
35148 .code16
35149
35150@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35151
35152 cli # We should be safe anyway
35153
35154- movl tr_start, %eax # where we need to go
35155-
35156 movl $0xA5A5A5A5, trampoline_status
35157 # write marker for master knows we're running
35158
35159@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35160 movw $1, %dx # protected mode (PE) bit
35161 lmsw %dx # into protected mode
35162
35163- ljmpl $__BOOT_CS, $pa_startup_32
35164+ ljmpl *(trampoline_header)
35165
35166 .section ".text32","ax"
35167 .code32
35168@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35169 .balign 8
35170 GLOBAL(trampoline_header)
35171 tr_start: .space 4
35172- tr_gdt_pad: .space 2
35173+ tr_boot_cs: .space 2
35174 tr_gdt: .space 6
35175 END(trampoline_header)
35176
35177diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35178index dac7b20..72dbaca 100644
35179--- a/arch/x86/realmode/rm/trampoline_64.S
35180+++ b/arch/x86/realmode/rm/trampoline_64.S
35181@@ -93,6 +93,7 @@ ENTRY(startup_32)
35182 movl %edx, %gs
35183
35184 movl pa_tr_cr4, %eax
35185+ andl $~X86_CR4_PCIDE, %eax
35186 movl %eax, %cr4 # Enable PAE mode
35187
35188 # Setup trampoline 4 level pagetables
35189@@ -106,7 +107,7 @@ ENTRY(startup_32)
35190 wrmsr
35191
35192 # Enable paging and in turn activate Long Mode
35193- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35194+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35195 movl %eax, %cr0
35196
35197 /*
35198diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35199index 9e7e147..25a4158 100644
35200--- a/arch/x86/realmode/rm/wakeup_asm.S
35201+++ b/arch/x86/realmode/rm/wakeup_asm.S
35202@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35203 lgdtl pmode_gdt
35204
35205 /* This really couldn't... */
35206- movl pmode_entry, %eax
35207 movl pmode_cr0, %ecx
35208 movl %ecx, %cr0
35209- ljmpl $__KERNEL_CS, $pa_startup_32
35210- /* -> jmp *%eax in trampoline_32.S */
35211+
35212+ ljmpl *pmode_entry
35213 #else
35214 jmp trampoline_start
35215 #endif
35216diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35217index 604a37e..e49702a 100644
35218--- a/arch/x86/tools/Makefile
35219+++ b/arch/x86/tools/Makefile
35220@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35221
35222 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35223
35224-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35225+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35226 hostprogs-y += relocs
35227 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35228 PHONY += relocs
35229diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35230index 0c2fae8..88036b7 100644
35231--- a/arch/x86/tools/relocs.c
35232+++ b/arch/x86/tools/relocs.c
35233@@ -1,5 +1,7 @@
35234 /* This is included from relocs_32/64.c */
35235
35236+#include "../../../include/generated/autoconf.h"
35237+
35238 #define ElfW(type) _ElfW(ELF_BITS, type)
35239 #define _ElfW(bits, type) __ElfW(bits, type)
35240 #define __ElfW(bits, type) Elf##bits##_##type
35241@@ -11,6 +13,7 @@
35242 #define Elf_Sym ElfW(Sym)
35243
35244 static Elf_Ehdr ehdr;
35245+static Elf_Phdr *phdr;
35246
35247 struct relocs {
35248 uint32_t *offset;
35249@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35250 }
35251 }
35252
35253+static void read_phdrs(FILE *fp)
35254+{
35255+ unsigned int i;
35256+
35257+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35258+ if (!phdr) {
35259+ die("Unable to allocate %d program headers\n",
35260+ ehdr.e_phnum);
35261+ }
35262+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35263+ die("Seek to %d failed: %s\n",
35264+ ehdr.e_phoff, strerror(errno));
35265+ }
35266+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35267+ die("Cannot read ELF program headers: %s\n",
35268+ strerror(errno));
35269+ }
35270+ for(i = 0; i < ehdr.e_phnum; i++) {
35271+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35272+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35273+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35274+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35275+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35276+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35277+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35278+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35279+ }
35280+
35281+}
35282+
35283 static void read_shdrs(FILE *fp)
35284 {
35285- int i;
35286+ unsigned int i;
35287 Elf_Shdr shdr;
35288
35289 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35290@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35291
35292 static void read_strtabs(FILE *fp)
35293 {
35294- int i;
35295+ unsigned int i;
35296 for (i = 0; i < ehdr.e_shnum; i++) {
35297 struct section *sec = &secs[i];
35298 if (sec->shdr.sh_type != SHT_STRTAB) {
35299@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35300
35301 static void read_symtabs(FILE *fp)
35302 {
35303- int i,j;
35304+ unsigned int i,j;
35305 for (i = 0; i < ehdr.e_shnum; i++) {
35306 struct section *sec = &secs[i];
35307 if (sec->shdr.sh_type != SHT_SYMTAB) {
35308@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35309 }
35310
35311
35312-static void read_relocs(FILE *fp)
35313+static void read_relocs(FILE *fp, int use_real_mode)
35314 {
35315- int i,j;
35316+ unsigned int i,j;
35317+ uint32_t base;
35318+
35319 for (i = 0; i < ehdr.e_shnum; i++) {
35320 struct section *sec = &secs[i];
35321 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35322@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35323 die("Cannot read symbol table: %s\n",
35324 strerror(errno));
35325 }
35326+ base = 0;
35327+
35328+#ifdef CONFIG_X86_32
35329+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35330+ if (phdr[j].p_type != PT_LOAD )
35331+ continue;
35332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35333+ continue;
35334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35335+ break;
35336+ }
35337+#endif
35338+
35339 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35340 Elf_Rel *rel = &sec->reltab[j];
35341- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35342+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35343 rel->r_info = elf_xword_to_cpu(rel->r_info);
35344 #if (SHT_REL_TYPE == SHT_RELA)
35345 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35346@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35347
35348 static void print_absolute_symbols(void)
35349 {
35350- int i;
35351+ unsigned int i;
35352 const char *format;
35353
35354 if (ELF_BITS == 64)
35355@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35356 for (i = 0; i < ehdr.e_shnum; i++) {
35357 struct section *sec = &secs[i];
35358 char *sym_strtab;
35359- int j;
35360+ unsigned int j;
35361
35362 if (sec->shdr.sh_type != SHT_SYMTAB) {
35363 continue;
35364@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35365
35366 static void print_absolute_relocs(void)
35367 {
35368- int i, printed = 0;
35369+ unsigned int i, printed = 0;
35370 const char *format;
35371
35372 if (ELF_BITS == 64)
35373@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35374 struct section *sec_applies, *sec_symtab;
35375 char *sym_strtab;
35376 Elf_Sym *sh_symtab;
35377- int j;
35378+ unsigned int j;
35379 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35380 continue;
35381 }
35382@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35383 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35384 Elf_Sym *sym, const char *symname))
35385 {
35386- int i;
35387+ unsigned int i;
35388 /* Walk through the relocations */
35389 for (i = 0; i < ehdr.e_shnum; i++) {
35390 char *sym_strtab;
35391 Elf_Sym *sh_symtab;
35392 struct section *sec_applies, *sec_symtab;
35393- int j;
35394+ unsigned int j;
35395 struct section *sec = &secs[i];
35396
35397 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35398@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35399 {
35400 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35401 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35402+ char *sym_strtab = sec->link->link->strtab;
35403+
35404+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35405+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35406+ return 0;
35407+
35408+#ifdef CONFIG_PAX_KERNEXEC
35409+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35410+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35411+ return 0;
35412+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35413+ return 0;
35414+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35415+ return 0;
35416+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35417+ return 0;
35418+#endif
35419
35420 switch (r_type) {
35421 case R_386_NONE:
35422@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35423
35424 static void emit_relocs(int as_text, int use_real_mode)
35425 {
35426- int i;
35427+ unsigned int i;
35428 int (*write_reloc)(uint32_t, FILE *) = write32;
35429 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35430 const char *symname);
35431@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35432 {
35433 regex_init(use_real_mode);
35434 read_ehdr(fp);
35435+ read_phdrs(fp);
35436 read_shdrs(fp);
35437 read_strtabs(fp);
35438 read_symtabs(fp);
35439- read_relocs(fp);
35440+ read_relocs(fp, use_real_mode);
35441 if (ELF_BITS == 64)
35442 percpu_init();
35443 if (show_absolute_syms) {
35444diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35445index f40281e..92728c9 100644
35446--- a/arch/x86/um/mem_32.c
35447+++ b/arch/x86/um/mem_32.c
35448@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35449 gate_vma.vm_start = FIXADDR_USER_START;
35450 gate_vma.vm_end = FIXADDR_USER_END;
35451 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35452- gate_vma.vm_page_prot = __P101;
35453+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35454
35455 return 0;
35456 }
35457diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35458index 80ffa5b..a33bd15 100644
35459--- a/arch/x86/um/tls_32.c
35460+++ b/arch/x86/um/tls_32.c
35461@@ -260,7 +260,7 @@ out:
35462 if (unlikely(task == current &&
35463 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35464 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35465- "without flushed TLS.", current->pid);
35466+ "without flushed TLS.", task_pid_nr(current));
35467 }
35468
35469 return 0;
35470diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35471index 5a4affe..9e2d522 100644
35472--- a/arch/x86/vdso/Makefile
35473+++ b/arch/x86/vdso/Makefile
35474@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
35475 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35476 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35477
35478-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35479+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35480 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35481 GCOV_PROFILE := n
35482
35483diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35484index 0224987..c7d65a5 100644
35485--- a/arch/x86/vdso/vdso2c.h
35486+++ b/arch/x86/vdso/vdso2c.h
35487@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35488 unsigned long load_size = -1; /* Work around bogus warning */
35489 unsigned long mapping_size;
35490 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35491- int i;
35492+ unsigned int i;
35493 unsigned long j;
35494 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35495 *alt_sec = NULL;
35496diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35497index e904c27..b9eaa03 100644
35498--- a/arch/x86/vdso/vdso32-setup.c
35499+++ b/arch/x86/vdso/vdso32-setup.c
35500@@ -14,6 +14,7 @@
35501 #include <asm/cpufeature.h>
35502 #include <asm/processor.h>
35503 #include <asm/vdso.h>
35504+#include <asm/mman.h>
35505
35506 #ifdef CONFIG_COMPAT_VDSO
35507 #define VDSO_DEFAULT 0
35508diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35509index 1c9f750..cfddb1a 100644
35510--- a/arch/x86/vdso/vma.c
35511+++ b/arch/x86/vdso/vma.c
35512@@ -19,10 +19,7 @@
35513 #include <asm/page.h>
35514 #include <asm/hpet.h>
35515 #include <asm/desc.h>
35516-
35517-#if defined(CONFIG_X86_64)
35518-unsigned int __read_mostly vdso64_enabled = 1;
35519-#endif
35520+#include <asm/mman.h>
35521
35522 void __init init_vdso_image(const struct vdso_image *image)
35523 {
35524@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35525 .pages = no_pages,
35526 };
35527
35528+#ifdef CONFIG_PAX_RANDMMAP
35529+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35530+ calculate_addr = false;
35531+#endif
35532+
35533 if (calculate_addr) {
35534 addr = vdso_addr(current->mm->start_stack,
35535 image->size - image->sym_vvar_start);
35536@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35537 down_write(&mm->mmap_sem);
35538
35539 addr = get_unmapped_area(NULL, addr,
35540- image->size - image->sym_vvar_start, 0, 0);
35541+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35542 if (IS_ERR_VALUE(addr)) {
35543 ret = addr;
35544 goto up_fail;
35545 }
35546
35547 text_start = addr - image->sym_vvar_start;
35548- current->mm->context.vdso = (void __user *)text_start;
35549+ mm->context.vdso = text_start;
35550
35551 /*
35552 * MAYWRITE to allow gdb to COW and set breakpoints
35553@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35554 hpet_address >> PAGE_SHIFT,
35555 PAGE_SIZE,
35556 pgprot_noncached(PAGE_READONLY));
35557-
35558- if (ret)
35559- goto up_fail;
35560 }
35561 #endif
35562
35563 up_fail:
35564 if (ret)
35565- current->mm->context.vdso = NULL;
35566+ current->mm->context.vdso = 0;
35567
35568 up_write(&mm->mmap_sem);
35569 return ret;
35570@@ -191,8 +190,8 @@ static int load_vdso32(void)
35571
35572 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35573 current_thread_info()->sysenter_return =
35574- current->mm->context.vdso +
35575- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35576+ (void __force_user *)(current->mm->context.vdso +
35577+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35578
35579 return 0;
35580 }
35581@@ -201,9 +200,6 @@ static int load_vdso32(void)
35582 #ifdef CONFIG_X86_64
35583 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35584 {
35585- if (!vdso64_enabled)
35586- return 0;
35587-
35588 return map_vdso(&vdso_image_64, true);
35589 }
35590
35591@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35592 int uses_interp)
35593 {
35594 #ifdef CONFIG_X86_X32_ABI
35595- if (test_thread_flag(TIF_X32)) {
35596- if (!vdso64_enabled)
35597- return 0;
35598-
35599+ if (test_thread_flag(TIF_X32))
35600 return map_vdso(&vdso_image_x32, true);
35601- }
35602 #endif
35603
35604 return load_vdso32();
35605@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35606 #endif
35607
35608 #ifdef CONFIG_X86_64
35609-static __init int vdso_setup(char *s)
35610-{
35611- vdso64_enabled = simple_strtoul(s, NULL, 0);
35612- return 0;
35613-}
35614-__setup("vdso=", vdso_setup);
35615-#endif
35616-
35617-#ifdef CONFIG_X86_64
35618 static void vgetcpu_cpu_init(void *arg)
35619 {
35620 int cpu = smp_processor_id();
35621diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35622index e88fda8..76ce7ce 100644
35623--- a/arch/x86/xen/Kconfig
35624+++ b/arch/x86/xen/Kconfig
35625@@ -9,6 +9,7 @@ config XEN
35626 select XEN_HAVE_PVMMU
35627 depends on X86_64 || (X86_32 && X86_PAE)
35628 depends on X86_TSC
35629+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35630 help
35631 This is the Linux Xen port. Enabling this will allow the
35632 kernel to boot in a paravirtualized environment under the
35633diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35634index 78a881b..9994bbb 100644
35635--- a/arch/x86/xen/enlighten.c
35636+++ b/arch/x86/xen/enlighten.c
35637@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35638
35639 struct shared_info xen_dummy_shared_info;
35640
35641-void *xen_initial_gdt;
35642-
35643 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35644 __read_mostly int xen_have_vector_callback;
35645 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35646@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35647 {
35648 unsigned long va = dtr->address;
35649 unsigned int size = dtr->size + 1;
35650- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35651- unsigned long frames[pages];
35652+ unsigned long frames[65536 / PAGE_SIZE];
35653 int f;
35654
35655 /*
35656@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35657 {
35658 unsigned long va = dtr->address;
35659 unsigned int size = dtr->size + 1;
35660- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35661- unsigned long frames[pages];
35662+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35663 int f;
35664
35665 /*
35666@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35667 * 8-byte entries, or 16 4k pages..
35668 */
35669
35670- BUG_ON(size > 65536);
35671+ BUG_ON(size > GDT_SIZE);
35672 BUG_ON(va & ~PAGE_MASK);
35673
35674 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35675@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35676 return 0;
35677 }
35678
35679-static void set_xen_basic_apic_ops(void)
35680+static void __init set_xen_basic_apic_ops(void)
35681 {
35682 apic->read = xen_apic_read;
35683 apic->write = xen_apic_write;
35684@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35685 #endif
35686 };
35687
35688-static void xen_reboot(int reason)
35689+static __noreturn void xen_reboot(int reason)
35690 {
35691 struct sched_shutdown r = { .reason = reason };
35692
35693- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35694- BUG();
35695+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35696+ BUG();
35697 }
35698
35699-static void xen_restart(char *msg)
35700+static __noreturn void xen_restart(char *msg)
35701 {
35702 xen_reboot(SHUTDOWN_reboot);
35703 }
35704
35705-static void xen_emergency_restart(void)
35706+static __noreturn void xen_emergency_restart(void)
35707 {
35708 xen_reboot(SHUTDOWN_reboot);
35709 }
35710
35711-static void xen_machine_halt(void)
35712+static __noreturn void xen_machine_halt(void)
35713 {
35714 xen_reboot(SHUTDOWN_poweroff);
35715 }
35716
35717-static void xen_machine_power_off(void)
35718+static __noreturn void xen_machine_power_off(void)
35719 {
35720 if (pm_power_off)
35721 pm_power_off();
35722@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
35723 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35724 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35725
35726- setup_stack_canary_segment(0);
35727- switch_to_new_gdt(0);
35728+ setup_stack_canary_segment(cpu);
35729+#ifdef CONFIG_X86_64
35730+ load_percpu_segment(cpu);
35731+#endif
35732+ switch_to_new_gdt(cpu);
35733
35734 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35735 pv_cpu_ops.load_gdt = xen_load_gdt;
35736@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35737 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35738
35739 /* Work out if we support NX */
35740- x86_configure_nx();
35741+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35742+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35743+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35744+ unsigned l, h;
35745+
35746+ __supported_pte_mask |= _PAGE_NX;
35747+ rdmsr(MSR_EFER, l, h);
35748+ l |= EFER_NX;
35749+ wrmsr(MSR_EFER, l, h);
35750+ }
35751+#endif
35752
35753 /* Get mfn list */
35754 xen_build_dynamic_phys_to_machine();
35755@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35756
35757 machine_ops = xen_machine_ops;
35758
35759- /*
35760- * The only reliable way to retain the initial address of the
35761- * percpu gdt_page is to remember it here, so we can go and
35762- * mark it RW later, when the initial percpu area is freed.
35763- */
35764- xen_initial_gdt = &per_cpu(gdt_page, 0);
35765-
35766 xen_smp_init();
35767
35768 #ifdef CONFIG_ACPI_NUMA
35769diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35770index 5c1f9ac..0e15f5c 100644
35771--- a/arch/x86/xen/mmu.c
35772+++ b/arch/x86/xen/mmu.c
35773@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35774 return val;
35775 }
35776
35777-static pteval_t pte_pfn_to_mfn(pteval_t val)
35778+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35779 {
35780 if (val & _PAGE_PRESENT) {
35781 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35782@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35783 * L3_k[511] -> level2_fixmap_pgt */
35784 convert_pfn_mfn(level3_kernel_pgt);
35785
35786+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35787+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35788+ convert_pfn_mfn(level3_vmemmap_pgt);
35789 /* L3_k[511][506] -> level1_fixmap_pgt */
35790+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35791 convert_pfn_mfn(level2_fixmap_pgt);
35792 }
35793 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35794@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35795 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35796 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35797 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35798+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35799+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35800+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35801 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35802 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35803+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35804 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35805 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35806 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35807+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35808
35809 /* Pin down new L4 */
35810 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35811@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
35812 pv_mmu_ops.set_pud = xen_set_pud;
35813 #if PAGETABLE_LEVELS == 4
35814 pv_mmu_ops.set_pgd = xen_set_pgd;
35815+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35816 #endif
35817
35818 /* This will work as long as patching hasn't happened yet
35819@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35820 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35821 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35822 .set_pgd = xen_set_pgd_hyper,
35823+ .set_pgd_batched = xen_set_pgd_hyper,
35824
35825 .alloc_pud = xen_alloc_pmd_init,
35826 .release_pud = xen_release_pmd_init,
35827diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35828index 4c071ae..00e7049 100644
35829--- a/arch/x86/xen/smp.c
35830+++ b/arch/x86/xen/smp.c
35831@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35832
35833 if (xen_pv_domain()) {
35834 if (!xen_feature(XENFEAT_writable_page_tables))
35835- /* We've switched to the "real" per-cpu gdt, so make
35836- * sure the old memory can be recycled. */
35837- make_lowmem_page_readwrite(xen_initial_gdt);
35838-
35839 #ifdef CONFIG_X86_32
35840 /*
35841 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35842 * expects __USER_DS
35843 */
35844- loadsegment(ds, __USER_DS);
35845- loadsegment(es, __USER_DS);
35846+ loadsegment(ds, __KERNEL_DS);
35847+ loadsegment(es, __KERNEL_DS);
35848 #endif
35849
35850 xen_filter_cpu_maps();
35851@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35852 #ifdef CONFIG_X86_32
35853 /* Note: PVH is not yet supported on x86_32. */
35854 ctxt->user_regs.fs = __KERNEL_PERCPU;
35855- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35856+ savesegment(gs, ctxt->user_regs.gs);
35857 #endif
35858 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35859
35860@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35861 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35862 ctxt->flags = VGCF_IN_KERNEL;
35863 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35864- ctxt->user_regs.ds = __USER_DS;
35865- ctxt->user_regs.es = __USER_DS;
35866+ ctxt->user_regs.ds = __KERNEL_DS;
35867+ ctxt->user_regs.es = __KERNEL_DS;
35868 ctxt->user_regs.ss = __KERNEL_DS;
35869
35870 xen_copy_trap_info(ctxt->trap_ctxt);
35871@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35872 int rc;
35873
35874 per_cpu(current_task, cpu) = idle;
35875+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35876 #ifdef CONFIG_X86_32
35877 irq_ctx_init(cpu);
35878 #else
35879 clear_tsk_thread_flag(idle, TIF_FORK);
35880 #endif
35881- per_cpu(kernel_stack, cpu) =
35882- (unsigned long)task_stack_page(idle) -
35883- KERNEL_STACK_OFFSET + THREAD_SIZE;
35884+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35885
35886 xen_setup_runstate_info(cpu);
35887 xen_setup_timer(cpu);
35888@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35889
35890 void __init xen_smp_init(void)
35891 {
35892- smp_ops = xen_smp_ops;
35893+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35894 xen_fill_possible_map();
35895 }
35896
35897diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35898index fd92a64..1f72641 100644
35899--- a/arch/x86/xen/xen-asm_32.S
35900+++ b/arch/x86/xen/xen-asm_32.S
35901@@ -99,7 +99,7 @@ ENTRY(xen_iret)
35902 pushw %fs
35903 movl $(__KERNEL_PERCPU), %eax
35904 movl %eax, %fs
35905- movl %fs:xen_vcpu, %eax
35906+ mov PER_CPU_VAR(xen_vcpu), %eax
35907 POP_FS
35908 #else
35909 movl %ss:xen_vcpu, %eax
35910diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35911index 674b2225..f1f5dc1 100644
35912--- a/arch/x86/xen/xen-head.S
35913+++ b/arch/x86/xen/xen-head.S
35914@@ -39,6 +39,17 @@ ENTRY(startup_xen)
35915 #ifdef CONFIG_X86_32
35916 mov %esi,xen_start_info
35917 mov $init_thread_union+THREAD_SIZE,%esp
35918+#ifdef CONFIG_SMP
35919+ movl $cpu_gdt_table,%edi
35920+ movl $__per_cpu_load,%eax
35921+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35922+ rorl $16,%eax
35923+ movb %al,__KERNEL_PERCPU + 4(%edi)
35924+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35925+ movl $__per_cpu_end - 1,%eax
35926+ subl $__per_cpu_start,%eax
35927+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35928+#endif
35929 #else
35930 mov %rsi,xen_start_info
35931 mov $init_thread_union+THREAD_SIZE,%rsp
35932diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35933index 5686bd9..0c8b6ee 100644
35934--- a/arch/x86/xen/xen-ops.h
35935+++ b/arch/x86/xen/xen-ops.h
35936@@ -10,8 +10,6 @@
35937 extern const char xen_hypervisor_callback[];
35938 extern const char xen_failsafe_callback[];
35939
35940-extern void *xen_initial_gdt;
35941-
35942 struct trap_info;
35943 void xen_copy_trap_info(struct trap_info *traps);
35944
35945diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35946index 525bd3d..ef888b1 100644
35947--- a/arch/xtensa/variants/dc232b/include/variant/core.h
35948+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35949@@ -119,9 +119,9 @@
35950 ----------------------------------------------------------------------*/
35951
35952 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35953-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35954 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35955 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35956+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35957
35958 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35959 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35960diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35961index 2f33760..835e50a 100644
35962--- a/arch/xtensa/variants/fsf/include/variant/core.h
35963+++ b/arch/xtensa/variants/fsf/include/variant/core.h
35964@@ -11,6 +11,7 @@
35965 #ifndef _XTENSA_CORE_H
35966 #define _XTENSA_CORE_H
35967
35968+#include <linux/const.h>
35969
35970 /****************************************************************************
35971 Parameters Useful for Any Code, USER or PRIVILEGED
35972@@ -112,9 +113,9 @@
35973 ----------------------------------------------------------------------*/
35974
35975 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35976-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35977 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35978 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35979+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35980
35981 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
35982 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
35983diff --git a/block/bio.c b/block/bio.c
35984index 471d738..bd3da0d 100644
35985--- a/block/bio.c
35986+++ b/block/bio.c
35987@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
35988 /*
35989 * Overflow, abort
35990 */
35991- if (end < start)
35992+ if (end < start || end - start > INT_MAX - nr_pages)
35993 return ERR_PTR(-EINVAL);
35994
35995 nr_pages += end - start;
35996@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
35997 /*
35998 * Overflow, abort
35999 */
36000- if (end < start)
36001+ if (end < start || end - start > INT_MAX - nr_pages)
36002 return ERR_PTR(-EINVAL);
36003
36004 nr_pages += end - start;
36005@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36006 const int read = bio_data_dir(bio) == READ;
36007 struct bio_map_data *bmd = bio->bi_private;
36008 int i;
36009- char *p = bmd->sgvecs[0].iov_base;
36010+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36011
36012 bio_for_each_segment_all(bvec, bio, i) {
36013 char *addr = page_address(bvec->bv_page);
36014diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36015index 0736729..2ec3b48 100644
36016--- a/block/blk-iopoll.c
36017+++ b/block/blk-iopoll.c
36018@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36019 }
36020 EXPORT_SYMBOL(blk_iopoll_complete);
36021
36022-static void blk_iopoll_softirq(struct softirq_action *h)
36023+static __latent_entropy void blk_iopoll_softirq(void)
36024 {
36025 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36026 int rearm = 0, budget = blk_iopoll_budget;
36027diff --git a/block/blk-map.c b/block/blk-map.c
36028index f890d43..97b0482 100644
36029--- a/block/blk-map.c
36030+++ b/block/blk-map.c
36031@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36032 if (!len || !kbuf)
36033 return -EINVAL;
36034
36035- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36036+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36037 if (do_copy)
36038 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36039 else
36040diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36041index 53b1737..08177d2e 100644
36042--- a/block/blk-softirq.c
36043+++ b/block/blk-softirq.c
36044@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36045 * Softirq action handler - move entries to local list and loop over them
36046 * while passing them to the queue registered handler.
36047 */
36048-static void blk_done_softirq(struct softirq_action *h)
36049+static __latent_entropy void blk_done_softirq(void)
36050 {
36051 struct list_head *cpu_list, local_list;
36052
36053diff --git a/block/bsg.c b/block/bsg.c
36054index 276e869..6fe4c61 100644
36055--- a/block/bsg.c
36056+++ b/block/bsg.c
36057@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36058 struct sg_io_v4 *hdr, struct bsg_device *bd,
36059 fmode_t has_write_perm)
36060 {
36061+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36062+ unsigned char *cmdptr;
36063+
36064 if (hdr->request_len > BLK_MAX_CDB) {
36065 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36066 if (!rq->cmd)
36067 return -ENOMEM;
36068- }
36069+ cmdptr = rq->cmd;
36070+ } else
36071+ cmdptr = tmpcmd;
36072
36073- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36074+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36075 hdr->request_len))
36076 return -EFAULT;
36077
36078+ if (cmdptr != rq->cmd)
36079+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36080+
36081 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36082 if (blk_verify_command(rq->cmd, has_write_perm))
36083 return -EPERM;
36084diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36085index f678c73..f35aa18 100644
36086--- a/block/compat_ioctl.c
36087+++ b/block/compat_ioctl.c
36088@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36089 cgc = compat_alloc_user_space(sizeof(*cgc));
36090 cgc32 = compat_ptr(arg);
36091
36092- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36093+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36094 get_user(data, &cgc32->buffer) ||
36095 put_user(compat_ptr(data), &cgc->buffer) ||
36096 copy_in_user(&cgc->buflen, &cgc32->buflen,
36097@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36098 err |= __get_user(f->spec1, &uf->spec1);
36099 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36100 err |= __get_user(name, &uf->name);
36101- f->name = compat_ptr(name);
36102+ f->name = (void __force_kernel *)compat_ptr(name);
36103 if (err) {
36104 err = -EFAULT;
36105 goto out;
36106diff --git a/block/genhd.c b/block/genhd.c
36107index 0a536dc..b8f7aca 100644
36108--- a/block/genhd.c
36109+++ b/block/genhd.c
36110@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36111
36112 /*
36113 * Register device numbers dev..(dev+range-1)
36114- * range must be nonzero
36115+ * Noop if @range is zero.
36116 * The hash chain is sorted on range, so that subranges can override.
36117 */
36118 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36119 struct kobject *(*probe)(dev_t, int *, void *),
36120 int (*lock)(dev_t, void *), void *data)
36121 {
36122- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36123+ if (range)
36124+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36125 }
36126
36127 EXPORT_SYMBOL(blk_register_region);
36128
36129+/* undo blk_register_region(), noop if @range is zero */
36130 void blk_unregister_region(dev_t devt, unsigned long range)
36131 {
36132- kobj_unmap(bdev_map, devt, range);
36133+ if (range)
36134+ kobj_unmap(bdev_map, devt, range);
36135 }
36136
36137 EXPORT_SYMBOL(blk_unregister_region);
36138diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36139index 56d08fd..2e07090 100644
36140--- a/block/partitions/efi.c
36141+++ b/block/partitions/efi.c
36142@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36143 if (!gpt)
36144 return NULL;
36145
36146+ if (!le32_to_cpu(gpt->num_partition_entries))
36147+ return NULL;
36148+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36149+ if (!pte)
36150+ return NULL;
36151+
36152 count = le32_to_cpu(gpt->num_partition_entries) *
36153 le32_to_cpu(gpt->sizeof_partition_entry);
36154- if (!count)
36155- return NULL;
36156- pte = kmalloc(count, GFP_KERNEL);
36157- if (!pte)
36158- return NULL;
36159-
36160 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36161 (u8 *) pte, count) < count) {
36162 kfree(pte);
36163diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36164index 28163fa..07190a06 100644
36165--- a/block/scsi_ioctl.c
36166+++ b/block/scsi_ioctl.c
36167@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36168 return put_user(0, p);
36169 }
36170
36171-static int sg_get_timeout(struct request_queue *q)
36172+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36173 {
36174 return jiffies_to_clock_t(q->sg_timeout);
36175 }
36176@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36177 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36178 struct sg_io_hdr *hdr, fmode_t mode)
36179 {
36180- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36181+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36182+ unsigned char *cmdptr;
36183+
36184+ if (rq->cmd != rq->__cmd)
36185+ cmdptr = rq->cmd;
36186+ else
36187+ cmdptr = tmpcmd;
36188+
36189+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36190 return -EFAULT;
36191+
36192+ if (cmdptr != rq->cmd)
36193+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36194+
36195 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36196 return -EPERM;
36197
36198@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36199 int err;
36200 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36201 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36202+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36203+ unsigned char *cmdptr;
36204
36205 if (!sic)
36206 return -EINVAL;
36207@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36208 */
36209 err = -EFAULT;
36210 rq->cmd_len = cmdlen;
36211- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36212+
36213+ if (rq->cmd != rq->__cmd)
36214+ cmdptr = rq->cmd;
36215+ else
36216+ cmdptr = tmpcmd;
36217+
36218+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36219 goto error;
36220
36221+ if (rq->cmd != cmdptr)
36222+ memcpy(rq->cmd, cmdptr, cmdlen);
36223+
36224 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36225 goto error;
36226
36227diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36228index 650afac1..f3307de 100644
36229--- a/crypto/cryptd.c
36230+++ b/crypto/cryptd.c
36231@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36232
36233 struct cryptd_blkcipher_request_ctx {
36234 crypto_completion_t complete;
36235-};
36236+} __no_const;
36237
36238 struct cryptd_hash_ctx {
36239 struct crypto_shash *child;
36240@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36241
36242 struct cryptd_aead_request_ctx {
36243 crypto_completion_t complete;
36244-};
36245+} __no_const;
36246
36247 static void cryptd_queue_worker(struct work_struct *work);
36248
36249diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36250index c305d41..a96de79 100644
36251--- a/crypto/pcrypt.c
36252+++ b/crypto/pcrypt.c
36253@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36254 int ret;
36255
36256 pinst->kobj.kset = pcrypt_kset;
36257- ret = kobject_add(&pinst->kobj, NULL, name);
36258+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36259 if (!ret)
36260 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36261
36262diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36263index 6921c7f..78e1af7 100644
36264--- a/drivers/acpi/acpica/hwxfsleep.c
36265+++ b/drivers/acpi/acpica/hwxfsleep.c
36266@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36267 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36268
36269 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36270- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36271- acpi_hw_extended_sleep},
36272- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36273- acpi_hw_extended_wake_prep},
36274- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36275+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36276+ .extended_function = acpi_hw_extended_sleep},
36277+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36278+ .extended_function = acpi_hw_extended_wake_prep},
36279+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36280+ .extended_function = acpi_hw_extended_wake}
36281 };
36282
36283 /*
36284diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36285index 16129c7..8b675cd 100644
36286--- a/drivers/acpi/apei/apei-internal.h
36287+++ b/drivers/acpi/apei/apei-internal.h
36288@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36289 struct apei_exec_ins_type {
36290 u32 flags;
36291 apei_exec_ins_func_t run;
36292-};
36293+} __do_const;
36294
36295 struct apei_exec_context {
36296 u32 ip;
36297diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36298index e82d097..0c855c1 100644
36299--- a/drivers/acpi/apei/ghes.c
36300+++ b/drivers/acpi/apei/ghes.c
36301@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36302 const struct acpi_hest_generic *generic,
36303 const struct acpi_hest_generic_status *estatus)
36304 {
36305- static atomic_t seqno;
36306+ static atomic_unchecked_t seqno;
36307 unsigned int curr_seqno;
36308 char pfx_seq[64];
36309
36310@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36311 else
36312 pfx = KERN_ERR;
36313 }
36314- curr_seqno = atomic_inc_return(&seqno);
36315+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36316 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36317 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36318 pfx_seq, generic->header.source_id);
36319diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36320index a83e3c6..c3d617f 100644
36321--- a/drivers/acpi/bgrt.c
36322+++ b/drivers/acpi/bgrt.c
36323@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36324 if (!bgrt_image)
36325 return -ENODEV;
36326
36327- bin_attr_image.private = bgrt_image;
36328- bin_attr_image.size = bgrt_image_size;
36329+ pax_open_kernel();
36330+ *(void **)&bin_attr_image.private = bgrt_image;
36331+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36332+ pax_close_kernel();
36333
36334 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36335 if (!bgrt_kobj)
36336diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36337index 9b693d5..8953d54 100644
36338--- a/drivers/acpi/blacklist.c
36339+++ b/drivers/acpi/blacklist.c
36340@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36341 u32 is_critical_error;
36342 };
36343
36344-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36345+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36346
36347 /*
36348 * POLICY: If *anything* doesn't work, put it on the blacklist.
36349@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36350 return 0;
36351 }
36352
36353-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36354+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36355 {
36356 .callback = dmi_disable_osi_vista,
36357 .ident = "Fujitsu Siemens",
36358diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36359index c68e724..e863008 100644
36360--- a/drivers/acpi/custom_method.c
36361+++ b/drivers/acpi/custom_method.c
36362@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36363 struct acpi_table_header table;
36364 acpi_status status;
36365
36366+#ifdef CONFIG_GRKERNSEC_KMEM
36367+ return -EPERM;
36368+#endif
36369+
36370 if (!(*ppos)) {
36371 /* parse the table header to get the table length */
36372 if (count <= sizeof(struct acpi_table_header))
36373diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36374index c0d44d3..5ad8f9a 100644
36375--- a/drivers/acpi/device_pm.c
36376+++ b/drivers/acpi/device_pm.c
36377@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36378
36379 #endif /* CONFIG_PM_SLEEP */
36380
36381+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36382+
36383 static struct dev_pm_domain acpi_general_pm_domain = {
36384 .ops = {
36385 #ifdef CONFIG_PM
36386@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36387 #endif
36388 #endif
36389 },
36390+ .detach = acpi_dev_pm_detach
36391 };
36392
36393 /**
36394@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36395 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36396 }
36397
36398- dev->pm_domain->detach = acpi_dev_pm_detach;
36399 return 0;
36400 }
36401 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36402diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36403index 87b704e..2d1d0c1 100644
36404--- a/drivers/acpi/processor_idle.c
36405+++ b/drivers/acpi/processor_idle.c
36406@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36407 {
36408 int i, count = CPUIDLE_DRIVER_STATE_START;
36409 struct acpi_processor_cx *cx;
36410- struct cpuidle_state *state;
36411+ cpuidle_state_no_const *state;
36412 struct cpuidle_driver *drv = &acpi_idle_driver;
36413
36414 if (!pr->flags.power_setup_done)
36415diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36416index 13e577c..cef11ee 100644
36417--- a/drivers/acpi/sysfs.c
36418+++ b/drivers/acpi/sysfs.c
36419@@ -423,11 +423,11 @@ static u32 num_counters;
36420 static struct attribute **all_attrs;
36421 static u32 acpi_gpe_count;
36422
36423-static struct attribute_group interrupt_stats_attr_group = {
36424+static attribute_group_no_const interrupt_stats_attr_group = {
36425 .name = "interrupts",
36426 };
36427
36428-static struct kobj_attribute *counter_attrs;
36429+static kobj_attribute_no_const *counter_attrs;
36430
36431 static void delete_gpe_attr_array(void)
36432 {
36433diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36434index 61a9c07..ea98fa1 100644
36435--- a/drivers/ata/libahci.c
36436+++ b/drivers/ata/libahci.c
36437@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36438 }
36439 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36440
36441-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36442+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36443 struct ata_taskfile *tf, int is_cmd, u16 flags,
36444 unsigned long timeout_msec)
36445 {
36446diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36447index d1a05f9..eb70e10 100644
36448--- a/drivers/ata/libata-core.c
36449+++ b/drivers/ata/libata-core.c
36450@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36451 static void ata_dev_xfermask(struct ata_device *dev);
36452 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36453
36454-atomic_t ata_print_id = ATOMIC_INIT(0);
36455+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36456
36457 struct ata_force_param {
36458 const char *name;
36459@@ -4831,7 +4831,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36460 struct ata_port *ap;
36461 unsigned int tag;
36462
36463- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36464+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36465 ap = qc->ap;
36466
36467 qc->flags = 0;
36468@@ -4847,7 +4847,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36469 struct ata_port *ap;
36470 struct ata_link *link;
36471
36472- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36473+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36474 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36475 ap = qc->ap;
36476 link = qc->dev->link;
36477@@ -5951,6 +5951,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36478 return;
36479
36480 spin_lock(&lock);
36481+ pax_open_kernel();
36482
36483 for (cur = ops->inherits; cur; cur = cur->inherits) {
36484 void **inherit = (void **)cur;
36485@@ -5964,8 +5965,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36486 if (IS_ERR(*pp))
36487 *pp = NULL;
36488
36489- ops->inherits = NULL;
36490+ *(struct ata_port_operations **)&ops->inherits = NULL;
36491
36492+ pax_close_kernel();
36493 spin_unlock(&lock);
36494 }
36495
36496@@ -6161,7 +6163,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36497
36498 /* give ports names and add SCSI hosts */
36499 for (i = 0; i < host->n_ports; i++) {
36500- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36501+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36502 host->ports[i]->local_port_no = i + 1;
36503 }
36504
36505diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36506index 6abd17a..9961bf7 100644
36507--- a/drivers/ata/libata-scsi.c
36508+++ b/drivers/ata/libata-scsi.c
36509@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
36510
36511 if (rc)
36512 return rc;
36513- ap->print_id = atomic_inc_return(&ata_print_id);
36514+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36515 return 0;
36516 }
36517 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36518diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36519index 5f4e0cc..ff2c347 100644
36520--- a/drivers/ata/libata.h
36521+++ b/drivers/ata/libata.h
36522@@ -53,7 +53,7 @@ enum {
36523 ATA_DNXFER_QUIET = (1 << 31),
36524 };
36525
36526-extern atomic_t ata_print_id;
36527+extern atomic_unchecked_t ata_print_id;
36528 extern int atapi_passthru16;
36529 extern int libata_fua;
36530 extern int libata_noacpi;
36531diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36532index a9b0c82..207d97d 100644
36533--- a/drivers/ata/pata_arasan_cf.c
36534+++ b/drivers/ata/pata_arasan_cf.c
36535@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36536 /* Handle platform specific quirks */
36537 if (quirk) {
36538 if (quirk & CF_BROKEN_PIO) {
36539- ap->ops->set_piomode = NULL;
36540+ pax_open_kernel();
36541+ *(void **)&ap->ops->set_piomode = NULL;
36542+ pax_close_kernel();
36543 ap->pio_mask = 0;
36544 }
36545 if (quirk & CF_BROKEN_MWDMA)
36546diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36547index f9b983a..887b9d8 100644
36548--- a/drivers/atm/adummy.c
36549+++ b/drivers/atm/adummy.c
36550@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36551 vcc->pop(vcc, skb);
36552 else
36553 dev_kfree_skb_any(skb);
36554- atomic_inc(&vcc->stats->tx);
36555+ atomic_inc_unchecked(&vcc->stats->tx);
36556
36557 return 0;
36558 }
36559diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36560index f1a9198..f466a4a 100644
36561--- a/drivers/atm/ambassador.c
36562+++ b/drivers/atm/ambassador.c
36563@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36564 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36565
36566 // VC layer stats
36567- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36568+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36569
36570 // free the descriptor
36571 kfree (tx_descr);
36572@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36573 dump_skb ("<<<", vc, skb);
36574
36575 // VC layer stats
36576- atomic_inc(&atm_vcc->stats->rx);
36577+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36578 __net_timestamp(skb);
36579 // end of our responsibility
36580 atm_vcc->push (atm_vcc, skb);
36581@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36582 } else {
36583 PRINTK (KERN_INFO, "dropped over-size frame");
36584 // should we count this?
36585- atomic_inc(&atm_vcc->stats->rx_drop);
36586+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36587 }
36588
36589 } else {
36590@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36591 }
36592
36593 if (check_area (skb->data, skb->len)) {
36594- atomic_inc(&atm_vcc->stats->tx_err);
36595+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36596 return -ENOMEM; // ?
36597 }
36598
36599diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36600index 480fa6f..947067c 100644
36601--- a/drivers/atm/atmtcp.c
36602+++ b/drivers/atm/atmtcp.c
36603@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36604 if (vcc->pop) vcc->pop(vcc,skb);
36605 else dev_kfree_skb(skb);
36606 if (dev_data) return 0;
36607- atomic_inc(&vcc->stats->tx_err);
36608+ atomic_inc_unchecked(&vcc->stats->tx_err);
36609 return -ENOLINK;
36610 }
36611 size = skb->len+sizeof(struct atmtcp_hdr);
36612@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36613 if (!new_skb) {
36614 if (vcc->pop) vcc->pop(vcc,skb);
36615 else dev_kfree_skb(skb);
36616- atomic_inc(&vcc->stats->tx_err);
36617+ atomic_inc_unchecked(&vcc->stats->tx_err);
36618 return -ENOBUFS;
36619 }
36620 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36621@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36622 if (vcc->pop) vcc->pop(vcc,skb);
36623 else dev_kfree_skb(skb);
36624 out_vcc->push(out_vcc,new_skb);
36625- atomic_inc(&vcc->stats->tx);
36626- atomic_inc(&out_vcc->stats->rx);
36627+ atomic_inc_unchecked(&vcc->stats->tx);
36628+ atomic_inc_unchecked(&out_vcc->stats->rx);
36629 return 0;
36630 }
36631
36632@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36633 read_unlock(&vcc_sklist_lock);
36634 if (!out_vcc) {
36635 result = -EUNATCH;
36636- atomic_inc(&vcc->stats->tx_err);
36637+ atomic_inc_unchecked(&vcc->stats->tx_err);
36638 goto done;
36639 }
36640 skb_pull(skb,sizeof(struct atmtcp_hdr));
36641@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36642 __net_timestamp(new_skb);
36643 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36644 out_vcc->push(out_vcc,new_skb);
36645- atomic_inc(&vcc->stats->tx);
36646- atomic_inc(&out_vcc->stats->rx);
36647+ atomic_inc_unchecked(&vcc->stats->tx);
36648+ atomic_inc_unchecked(&out_vcc->stats->rx);
36649 done:
36650 if (vcc->pop) vcc->pop(vcc,skb);
36651 else dev_kfree_skb(skb);
36652diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36653index c7fab3e..68d0965 100644
36654--- a/drivers/atm/eni.c
36655+++ b/drivers/atm/eni.c
36656@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36657 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36658 vcc->dev->number);
36659 length = 0;
36660- atomic_inc(&vcc->stats->rx_err);
36661+ atomic_inc_unchecked(&vcc->stats->rx_err);
36662 }
36663 else {
36664 length = ATM_CELL_SIZE-1; /* no HEC */
36665@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36666 size);
36667 }
36668 eff = length = 0;
36669- atomic_inc(&vcc->stats->rx_err);
36670+ atomic_inc_unchecked(&vcc->stats->rx_err);
36671 }
36672 else {
36673 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36674@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36675 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36676 vcc->dev->number,vcc->vci,length,size << 2,descr);
36677 length = eff = 0;
36678- atomic_inc(&vcc->stats->rx_err);
36679+ atomic_inc_unchecked(&vcc->stats->rx_err);
36680 }
36681 }
36682 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36683@@ -770,7 +770,7 @@ rx_dequeued++;
36684 vcc->push(vcc,skb);
36685 pushed++;
36686 }
36687- atomic_inc(&vcc->stats->rx);
36688+ atomic_inc_unchecked(&vcc->stats->rx);
36689 }
36690 wake_up(&eni_dev->rx_wait);
36691 }
36692@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36693 PCI_DMA_TODEVICE);
36694 if (vcc->pop) vcc->pop(vcc,skb);
36695 else dev_kfree_skb_irq(skb);
36696- atomic_inc(&vcc->stats->tx);
36697+ atomic_inc_unchecked(&vcc->stats->tx);
36698 wake_up(&eni_dev->tx_wait);
36699 dma_complete++;
36700 }
36701diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36702index 82f2ae0..f205c02 100644
36703--- a/drivers/atm/firestream.c
36704+++ b/drivers/atm/firestream.c
36705@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36706 }
36707 }
36708
36709- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36710+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36711
36712 fs_dprintk (FS_DEBUG_TXMEM, "i");
36713 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36714@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36715 #endif
36716 skb_put (skb, qe->p1 & 0xffff);
36717 ATM_SKB(skb)->vcc = atm_vcc;
36718- atomic_inc(&atm_vcc->stats->rx);
36719+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36720 __net_timestamp(skb);
36721 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36722 atm_vcc->push (atm_vcc, skb);
36723@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36724 kfree (pe);
36725 }
36726 if (atm_vcc)
36727- atomic_inc(&atm_vcc->stats->rx_drop);
36728+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36729 break;
36730 case 0x1f: /* Reassembly abort: no buffers. */
36731 /* Silently increment error counter. */
36732 if (atm_vcc)
36733- atomic_inc(&atm_vcc->stats->rx_drop);
36734+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36735 break;
36736 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36737 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36738diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36739index d5d9eaf..65c0d53 100644
36740--- a/drivers/atm/fore200e.c
36741+++ b/drivers/atm/fore200e.c
36742@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36743 #endif
36744 /* check error condition */
36745 if (*entry->status & STATUS_ERROR)
36746- atomic_inc(&vcc->stats->tx_err);
36747+ atomic_inc_unchecked(&vcc->stats->tx_err);
36748 else
36749- atomic_inc(&vcc->stats->tx);
36750+ atomic_inc_unchecked(&vcc->stats->tx);
36751 }
36752 }
36753
36754@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36755 if (skb == NULL) {
36756 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36757
36758- atomic_inc(&vcc->stats->rx_drop);
36759+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36760 return -ENOMEM;
36761 }
36762
36763@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36764
36765 dev_kfree_skb_any(skb);
36766
36767- atomic_inc(&vcc->stats->rx_drop);
36768+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36769 return -ENOMEM;
36770 }
36771
36772 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36773
36774 vcc->push(vcc, skb);
36775- atomic_inc(&vcc->stats->rx);
36776+ atomic_inc_unchecked(&vcc->stats->rx);
36777
36778 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36779
36780@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36781 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36782 fore200e->atm_dev->number,
36783 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36784- atomic_inc(&vcc->stats->rx_err);
36785+ atomic_inc_unchecked(&vcc->stats->rx_err);
36786 }
36787 }
36788
36789@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36790 goto retry_here;
36791 }
36792
36793- atomic_inc(&vcc->stats->tx_err);
36794+ atomic_inc_unchecked(&vcc->stats->tx_err);
36795
36796 fore200e->tx_sat++;
36797 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36798diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36799index c39702b..785b73b 100644
36800--- a/drivers/atm/he.c
36801+++ b/drivers/atm/he.c
36802@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36803
36804 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36805 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36806- atomic_inc(&vcc->stats->rx_drop);
36807+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36808 goto return_host_buffers;
36809 }
36810
36811@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36812 RBRQ_LEN_ERR(he_dev->rbrq_head)
36813 ? "LEN_ERR" : "",
36814 vcc->vpi, vcc->vci);
36815- atomic_inc(&vcc->stats->rx_err);
36816+ atomic_inc_unchecked(&vcc->stats->rx_err);
36817 goto return_host_buffers;
36818 }
36819
36820@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36821 vcc->push(vcc, skb);
36822 spin_lock(&he_dev->global_lock);
36823
36824- atomic_inc(&vcc->stats->rx);
36825+ atomic_inc_unchecked(&vcc->stats->rx);
36826
36827 return_host_buffers:
36828 ++pdus_assembled;
36829@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36830 tpd->vcc->pop(tpd->vcc, tpd->skb);
36831 else
36832 dev_kfree_skb_any(tpd->skb);
36833- atomic_inc(&tpd->vcc->stats->tx_err);
36834+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36835 }
36836 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36837 return;
36838@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36839 vcc->pop(vcc, skb);
36840 else
36841 dev_kfree_skb_any(skb);
36842- atomic_inc(&vcc->stats->tx_err);
36843+ atomic_inc_unchecked(&vcc->stats->tx_err);
36844 return -EINVAL;
36845 }
36846
36847@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36848 vcc->pop(vcc, skb);
36849 else
36850 dev_kfree_skb_any(skb);
36851- atomic_inc(&vcc->stats->tx_err);
36852+ atomic_inc_unchecked(&vcc->stats->tx_err);
36853 return -EINVAL;
36854 }
36855 #endif
36856@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36857 vcc->pop(vcc, skb);
36858 else
36859 dev_kfree_skb_any(skb);
36860- atomic_inc(&vcc->stats->tx_err);
36861+ atomic_inc_unchecked(&vcc->stats->tx_err);
36862 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36863 return -ENOMEM;
36864 }
36865@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36866 vcc->pop(vcc, skb);
36867 else
36868 dev_kfree_skb_any(skb);
36869- atomic_inc(&vcc->stats->tx_err);
36870+ atomic_inc_unchecked(&vcc->stats->tx_err);
36871 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36872 return -ENOMEM;
36873 }
36874@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36875 __enqueue_tpd(he_dev, tpd, cid);
36876 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36877
36878- atomic_inc(&vcc->stats->tx);
36879+ atomic_inc_unchecked(&vcc->stats->tx);
36880
36881 return 0;
36882 }
36883diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36884index 1dc0519..1aadaf7 100644
36885--- a/drivers/atm/horizon.c
36886+++ b/drivers/atm/horizon.c
36887@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36888 {
36889 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36890 // VC layer stats
36891- atomic_inc(&vcc->stats->rx);
36892+ atomic_inc_unchecked(&vcc->stats->rx);
36893 __net_timestamp(skb);
36894 // end of our responsibility
36895 vcc->push (vcc, skb);
36896@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36897 dev->tx_iovec = NULL;
36898
36899 // VC layer stats
36900- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36901+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36902
36903 // free the skb
36904 hrz_kfree_skb (skb);
36905diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36906index 2b24ed0..b3d6acc 100644
36907--- a/drivers/atm/idt77252.c
36908+++ b/drivers/atm/idt77252.c
36909@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36910 else
36911 dev_kfree_skb(skb);
36912
36913- atomic_inc(&vcc->stats->tx);
36914+ atomic_inc_unchecked(&vcc->stats->tx);
36915 }
36916
36917 atomic_dec(&scq->used);
36918@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36919 if ((sb = dev_alloc_skb(64)) == NULL) {
36920 printk("%s: Can't allocate buffers for aal0.\n",
36921 card->name);
36922- atomic_add(i, &vcc->stats->rx_drop);
36923+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36924 break;
36925 }
36926 if (!atm_charge(vcc, sb->truesize)) {
36927 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36928 card->name);
36929- atomic_add(i - 1, &vcc->stats->rx_drop);
36930+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36931 dev_kfree_skb(sb);
36932 break;
36933 }
36934@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36935 ATM_SKB(sb)->vcc = vcc;
36936 __net_timestamp(sb);
36937 vcc->push(vcc, sb);
36938- atomic_inc(&vcc->stats->rx);
36939+ atomic_inc_unchecked(&vcc->stats->rx);
36940
36941 cell += ATM_CELL_PAYLOAD;
36942 }
36943@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36944 "(CDC: %08x)\n",
36945 card->name, len, rpp->len, readl(SAR_REG_CDC));
36946 recycle_rx_pool_skb(card, rpp);
36947- atomic_inc(&vcc->stats->rx_err);
36948+ atomic_inc_unchecked(&vcc->stats->rx_err);
36949 return;
36950 }
36951 if (stat & SAR_RSQE_CRC) {
36952 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36953 recycle_rx_pool_skb(card, rpp);
36954- atomic_inc(&vcc->stats->rx_err);
36955+ atomic_inc_unchecked(&vcc->stats->rx_err);
36956 return;
36957 }
36958 if (skb_queue_len(&rpp->queue) > 1) {
36959@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36960 RXPRINTK("%s: Can't alloc RX skb.\n",
36961 card->name);
36962 recycle_rx_pool_skb(card, rpp);
36963- atomic_inc(&vcc->stats->rx_err);
36964+ atomic_inc_unchecked(&vcc->stats->rx_err);
36965 return;
36966 }
36967 if (!atm_charge(vcc, skb->truesize)) {
36968@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36969 __net_timestamp(skb);
36970
36971 vcc->push(vcc, skb);
36972- atomic_inc(&vcc->stats->rx);
36973+ atomic_inc_unchecked(&vcc->stats->rx);
36974
36975 return;
36976 }
36977@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36978 __net_timestamp(skb);
36979
36980 vcc->push(vcc, skb);
36981- atomic_inc(&vcc->stats->rx);
36982+ atomic_inc_unchecked(&vcc->stats->rx);
36983
36984 if (skb->truesize > SAR_FB_SIZE_3)
36985 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
36986@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
36987 if (vcc->qos.aal != ATM_AAL0) {
36988 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
36989 card->name, vpi, vci);
36990- atomic_inc(&vcc->stats->rx_drop);
36991+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36992 goto drop;
36993 }
36994
36995 if ((sb = dev_alloc_skb(64)) == NULL) {
36996 printk("%s: Can't allocate buffers for AAL0.\n",
36997 card->name);
36998- atomic_inc(&vcc->stats->rx_err);
36999+ atomic_inc_unchecked(&vcc->stats->rx_err);
37000 goto drop;
37001 }
37002
37003@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37004 ATM_SKB(sb)->vcc = vcc;
37005 __net_timestamp(sb);
37006 vcc->push(vcc, sb);
37007- atomic_inc(&vcc->stats->rx);
37008+ atomic_inc_unchecked(&vcc->stats->rx);
37009
37010 drop:
37011 skb_pull(queue, 64);
37012@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37013
37014 if (vc == NULL) {
37015 printk("%s: NULL connection in send().\n", card->name);
37016- atomic_inc(&vcc->stats->tx_err);
37017+ atomic_inc_unchecked(&vcc->stats->tx_err);
37018 dev_kfree_skb(skb);
37019 return -EINVAL;
37020 }
37021 if (!test_bit(VCF_TX, &vc->flags)) {
37022 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37023- atomic_inc(&vcc->stats->tx_err);
37024+ atomic_inc_unchecked(&vcc->stats->tx_err);
37025 dev_kfree_skb(skb);
37026 return -EINVAL;
37027 }
37028@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37029 break;
37030 default:
37031 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37032- atomic_inc(&vcc->stats->tx_err);
37033+ atomic_inc_unchecked(&vcc->stats->tx_err);
37034 dev_kfree_skb(skb);
37035 return -EINVAL;
37036 }
37037
37038 if (skb_shinfo(skb)->nr_frags != 0) {
37039 printk("%s: No scatter-gather yet.\n", card->name);
37040- atomic_inc(&vcc->stats->tx_err);
37041+ atomic_inc_unchecked(&vcc->stats->tx_err);
37042 dev_kfree_skb(skb);
37043 return -EINVAL;
37044 }
37045@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37046
37047 err = queue_skb(card, vc, skb, oam);
37048 if (err) {
37049- atomic_inc(&vcc->stats->tx_err);
37050+ atomic_inc_unchecked(&vcc->stats->tx_err);
37051 dev_kfree_skb(skb);
37052 return err;
37053 }
37054@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37055 skb = dev_alloc_skb(64);
37056 if (!skb) {
37057 printk("%s: Out of memory in send_oam().\n", card->name);
37058- atomic_inc(&vcc->stats->tx_err);
37059+ atomic_inc_unchecked(&vcc->stats->tx_err);
37060 return -ENOMEM;
37061 }
37062 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37063diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37064index 4217f29..88f547a 100644
37065--- a/drivers/atm/iphase.c
37066+++ b/drivers/atm/iphase.c
37067@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37068 status = (u_short) (buf_desc_ptr->desc_mode);
37069 if (status & (RX_CER | RX_PTE | RX_OFL))
37070 {
37071- atomic_inc(&vcc->stats->rx_err);
37072+ atomic_inc_unchecked(&vcc->stats->rx_err);
37073 IF_ERR(printk("IA: bad packet, dropping it");)
37074 if (status & RX_CER) {
37075 IF_ERR(printk(" cause: packet CRC error\n");)
37076@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37077 len = dma_addr - buf_addr;
37078 if (len > iadev->rx_buf_sz) {
37079 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37080- atomic_inc(&vcc->stats->rx_err);
37081+ atomic_inc_unchecked(&vcc->stats->rx_err);
37082 goto out_free_desc;
37083 }
37084
37085@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37086 ia_vcc = INPH_IA_VCC(vcc);
37087 if (ia_vcc == NULL)
37088 {
37089- atomic_inc(&vcc->stats->rx_err);
37090+ atomic_inc_unchecked(&vcc->stats->rx_err);
37091 atm_return(vcc, skb->truesize);
37092 dev_kfree_skb_any(skb);
37093 goto INCR_DLE;
37094@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37095 if ((length > iadev->rx_buf_sz) || (length >
37096 (skb->len - sizeof(struct cpcs_trailer))))
37097 {
37098- atomic_inc(&vcc->stats->rx_err);
37099+ atomic_inc_unchecked(&vcc->stats->rx_err);
37100 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37101 length, skb->len);)
37102 atm_return(vcc, skb->truesize);
37103@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37104
37105 IF_RX(printk("rx_dle_intr: skb push");)
37106 vcc->push(vcc,skb);
37107- atomic_inc(&vcc->stats->rx);
37108+ atomic_inc_unchecked(&vcc->stats->rx);
37109 iadev->rx_pkt_cnt++;
37110 }
37111 INCR_DLE:
37112@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37113 {
37114 struct k_sonet_stats *stats;
37115 stats = &PRIV(_ia_dev[board])->sonet_stats;
37116- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37117- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37118- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37119- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37120- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37121- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37122- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37123- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37124- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37125+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37126+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37127+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37128+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37129+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37130+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37131+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37132+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37133+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37134 }
37135 ia_cmds.status = 0;
37136 break;
37137@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37138 if ((desc == 0) || (desc > iadev->num_tx_desc))
37139 {
37140 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37141- atomic_inc(&vcc->stats->tx);
37142+ atomic_inc_unchecked(&vcc->stats->tx);
37143 if (vcc->pop)
37144 vcc->pop(vcc, skb);
37145 else
37146@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37147 ATM_DESC(skb) = vcc->vci;
37148 skb_queue_tail(&iadev->tx_dma_q, skb);
37149
37150- atomic_inc(&vcc->stats->tx);
37151+ atomic_inc_unchecked(&vcc->stats->tx);
37152 iadev->tx_pkt_cnt++;
37153 /* Increment transaction counter */
37154 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37155
37156 #if 0
37157 /* add flow control logic */
37158- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37159+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37160 if (iavcc->vc_desc_cnt > 10) {
37161 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37162 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37163diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37164index 93eaf8d..b4ca7da 100644
37165--- a/drivers/atm/lanai.c
37166+++ b/drivers/atm/lanai.c
37167@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37168 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37169 lanai_endtx(lanai, lvcc);
37170 lanai_free_skb(lvcc->tx.atmvcc, skb);
37171- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37172+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37173 }
37174
37175 /* Try to fill the buffer - don't call unless there is backlog */
37176@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37177 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37178 __net_timestamp(skb);
37179 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37180- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37181+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37182 out:
37183 lvcc->rx.buf.ptr = end;
37184 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37185@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37186 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37187 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37188 lanai->stats.service_rxnotaal5++;
37189- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37190+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37191 return 0;
37192 }
37193 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37194@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37195 int bytes;
37196 read_unlock(&vcc_sklist_lock);
37197 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37198- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37199+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37200 lvcc->stats.x.aal5.service_trash++;
37201 bytes = (SERVICE_GET_END(s) * 16) -
37202 (((unsigned long) lvcc->rx.buf.ptr) -
37203@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37204 }
37205 if (s & SERVICE_STREAM) {
37206 read_unlock(&vcc_sklist_lock);
37207- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37208+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37209 lvcc->stats.x.aal5.service_stream++;
37210 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37211 "PDU on VCI %d!\n", lanai->number, vci);
37212@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37213 return 0;
37214 }
37215 DPRINTK("got rx crc error on vci %d\n", vci);
37216- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37217+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37218 lvcc->stats.x.aal5.service_rxcrc++;
37219 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37220 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37221diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37222index 9988ac9..7c52585 100644
37223--- a/drivers/atm/nicstar.c
37224+++ b/drivers/atm/nicstar.c
37225@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37226 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37227 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37228 card->index);
37229- atomic_inc(&vcc->stats->tx_err);
37230+ atomic_inc_unchecked(&vcc->stats->tx_err);
37231 dev_kfree_skb_any(skb);
37232 return -EINVAL;
37233 }
37234@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37235 if (!vc->tx) {
37236 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37237 card->index);
37238- atomic_inc(&vcc->stats->tx_err);
37239+ atomic_inc_unchecked(&vcc->stats->tx_err);
37240 dev_kfree_skb_any(skb);
37241 return -EINVAL;
37242 }
37243@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37244 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37245 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37246 card->index);
37247- atomic_inc(&vcc->stats->tx_err);
37248+ atomic_inc_unchecked(&vcc->stats->tx_err);
37249 dev_kfree_skb_any(skb);
37250 return -EINVAL;
37251 }
37252
37253 if (skb_shinfo(skb)->nr_frags != 0) {
37254 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37255- atomic_inc(&vcc->stats->tx_err);
37256+ atomic_inc_unchecked(&vcc->stats->tx_err);
37257 dev_kfree_skb_any(skb);
37258 return -EINVAL;
37259 }
37260@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37261 }
37262
37263 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37264- atomic_inc(&vcc->stats->tx_err);
37265+ atomic_inc_unchecked(&vcc->stats->tx_err);
37266 dev_kfree_skb_any(skb);
37267 return -EIO;
37268 }
37269- atomic_inc(&vcc->stats->tx);
37270+ atomic_inc_unchecked(&vcc->stats->tx);
37271
37272 return 0;
37273 }
37274@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37275 printk
37276 ("nicstar%d: Can't allocate buffers for aal0.\n",
37277 card->index);
37278- atomic_add(i, &vcc->stats->rx_drop);
37279+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37280 break;
37281 }
37282 if (!atm_charge(vcc, sb->truesize)) {
37283 RXPRINTK
37284 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37285 card->index);
37286- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37287+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37288 dev_kfree_skb_any(sb);
37289 break;
37290 }
37291@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37292 ATM_SKB(sb)->vcc = vcc;
37293 __net_timestamp(sb);
37294 vcc->push(vcc, sb);
37295- atomic_inc(&vcc->stats->rx);
37296+ atomic_inc_unchecked(&vcc->stats->rx);
37297 cell += ATM_CELL_PAYLOAD;
37298 }
37299
37300@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37301 if (iovb == NULL) {
37302 printk("nicstar%d: Out of iovec buffers.\n",
37303 card->index);
37304- atomic_inc(&vcc->stats->rx_drop);
37305+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37306 recycle_rx_buf(card, skb);
37307 return;
37308 }
37309@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37310 small or large buffer itself. */
37311 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37312 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37313- atomic_inc(&vcc->stats->rx_err);
37314+ atomic_inc_unchecked(&vcc->stats->rx_err);
37315 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37316 NS_MAX_IOVECS);
37317 NS_PRV_IOVCNT(iovb) = 0;
37318@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37319 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37320 card->index);
37321 which_list(card, skb);
37322- atomic_inc(&vcc->stats->rx_err);
37323+ atomic_inc_unchecked(&vcc->stats->rx_err);
37324 recycle_rx_buf(card, skb);
37325 vc->rx_iov = NULL;
37326 recycle_iov_buf(card, iovb);
37327@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37328 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37329 card->index);
37330 which_list(card, skb);
37331- atomic_inc(&vcc->stats->rx_err);
37332+ atomic_inc_unchecked(&vcc->stats->rx_err);
37333 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37334 NS_PRV_IOVCNT(iovb));
37335 vc->rx_iov = NULL;
37336@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37337 printk(" - PDU size mismatch.\n");
37338 else
37339 printk(".\n");
37340- atomic_inc(&vcc->stats->rx_err);
37341+ atomic_inc_unchecked(&vcc->stats->rx_err);
37342 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37343 NS_PRV_IOVCNT(iovb));
37344 vc->rx_iov = NULL;
37345@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37346 /* skb points to a small buffer */
37347 if (!atm_charge(vcc, skb->truesize)) {
37348 push_rxbufs(card, skb);
37349- atomic_inc(&vcc->stats->rx_drop);
37350+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37351 } else {
37352 skb_put(skb, len);
37353 dequeue_sm_buf(card, skb);
37354@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37355 ATM_SKB(skb)->vcc = vcc;
37356 __net_timestamp(skb);
37357 vcc->push(vcc, skb);
37358- atomic_inc(&vcc->stats->rx);
37359+ atomic_inc_unchecked(&vcc->stats->rx);
37360 }
37361 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37362 struct sk_buff *sb;
37363@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37364 if (len <= NS_SMBUFSIZE) {
37365 if (!atm_charge(vcc, sb->truesize)) {
37366 push_rxbufs(card, sb);
37367- atomic_inc(&vcc->stats->rx_drop);
37368+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37369 } else {
37370 skb_put(sb, len);
37371 dequeue_sm_buf(card, sb);
37372@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37373 ATM_SKB(sb)->vcc = vcc;
37374 __net_timestamp(sb);
37375 vcc->push(vcc, sb);
37376- atomic_inc(&vcc->stats->rx);
37377+ atomic_inc_unchecked(&vcc->stats->rx);
37378 }
37379
37380 push_rxbufs(card, skb);
37381@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37382
37383 if (!atm_charge(vcc, skb->truesize)) {
37384 push_rxbufs(card, skb);
37385- atomic_inc(&vcc->stats->rx_drop);
37386+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37387 } else {
37388 dequeue_lg_buf(card, skb);
37389 #ifdef NS_USE_DESTRUCTORS
37390@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37391 ATM_SKB(skb)->vcc = vcc;
37392 __net_timestamp(skb);
37393 vcc->push(vcc, skb);
37394- atomic_inc(&vcc->stats->rx);
37395+ atomic_inc_unchecked(&vcc->stats->rx);
37396 }
37397
37398 push_rxbufs(card, sb);
37399@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37400 printk
37401 ("nicstar%d: Out of huge buffers.\n",
37402 card->index);
37403- atomic_inc(&vcc->stats->rx_drop);
37404+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37405 recycle_iovec_rx_bufs(card,
37406 (struct iovec *)
37407 iovb->data,
37408@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37409 card->hbpool.count++;
37410 } else
37411 dev_kfree_skb_any(hb);
37412- atomic_inc(&vcc->stats->rx_drop);
37413+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37414 } else {
37415 /* Copy the small buffer to the huge buffer */
37416 sb = (struct sk_buff *)iov->iov_base;
37417@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37418 #endif /* NS_USE_DESTRUCTORS */
37419 __net_timestamp(hb);
37420 vcc->push(vcc, hb);
37421- atomic_inc(&vcc->stats->rx);
37422+ atomic_inc_unchecked(&vcc->stats->rx);
37423 }
37424 }
37425
37426diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37427index 21b0bc6..b5f40ba 100644
37428--- a/drivers/atm/solos-pci.c
37429+++ b/drivers/atm/solos-pci.c
37430@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37431 }
37432 atm_charge(vcc, skb->truesize);
37433 vcc->push(vcc, skb);
37434- atomic_inc(&vcc->stats->rx);
37435+ atomic_inc_unchecked(&vcc->stats->rx);
37436 break;
37437
37438 case PKT_STATUS:
37439@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37440 vcc = SKB_CB(oldskb)->vcc;
37441
37442 if (vcc) {
37443- atomic_inc(&vcc->stats->tx);
37444+ atomic_inc_unchecked(&vcc->stats->tx);
37445 solos_pop(vcc, oldskb);
37446 } else {
37447 dev_kfree_skb_irq(oldskb);
37448diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37449index 0215934..ce9f5b1 100644
37450--- a/drivers/atm/suni.c
37451+++ b/drivers/atm/suni.c
37452@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37453
37454
37455 #define ADD_LIMITED(s,v) \
37456- atomic_add((v),&stats->s); \
37457- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37458+ atomic_add_unchecked((v),&stats->s); \
37459+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37460
37461
37462 static void suni_hz(unsigned long from_timer)
37463diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37464index 5120a96..e2572bd 100644
37465--- a/drivers/atm/uPD98402.c
37466+++ b/drivers/atm/uPD98402.c
37467@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37468 struct sonet_stats tmp;
37469 int error = 0;
37470
37471- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37472+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37473 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37474 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37475 if (zero && !error) {
37476@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37477
37478
37479 #define ADD_LIMITED(s,v) \
37480- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37481- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37482- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37483+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37484+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37485+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37486
37487
37488 static void stat_event(struct atm_dev *dev)
37489@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37490 if (reason & uPD98402_INT_PFM) stat_event(dev);
37491 if (reason & uPD98402_INT_PCO) {
37492 (void) GET(PCOCR); /* clear interrupt cause */
37493- atomic_add(GET(HECCT),
37494+ atomic_add_unchecked(GET(HECCT),
37495 &PRIV(dev)->sonet_stats.uncorr_hcs);
37496 }
37497 if ((reason & uPD98402_INT_RFO) &&
37498@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37499 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37500 uPD98402_INT_LOS),PIMR); /* enable them */
37501 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37502- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37503- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37504- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37505+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37506+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37507+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37508 return 0;
37509 }
37510
37511diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37512index 969c3c2..9b72956 100644
37513--- a/drivers/atm/zatm.c
37514+++ b/drivers/atm/zatm.c
37515@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37516 }
37517 if (!size) {
37518 dev_kfree_skb_irq(skb);
37519- if (vcc) atomic_inc(&vcc->stats->rx_err);
37520+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37521 continue;
37522 }
37523 if (!atm_charge(vcc,skb->truesize)) {
37524@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37525 skb->len = size;
37526 ATM_SKB(skb)->vcc = vcc;
37527 vcc->push(vcc,skb);
37528- atomic_inc(&vcc->stats->rx);
37529+ atomic_inc_unchecked(&vcc->stats->rx);
37530 }
37531 zout(pos & 0xffff,MTA(mbx));
37532 #if 0 /* probably a stupid idea */
37533@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37534 skb_queue_head(&zatm_vcc->backlog,skb);
37535 break;
37536 }
37537- atomic_inc(&vcc->stats->tx);
37538+ atomic_inc_unchecked(&vcc->stats->tx);
37539 wake_up(&zatm_vcc->tx_wait);
37540 }
37541
37542diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37543index 876bae5..8978785 100644
37544--- a/drivers/base/bus.c
37545+++ b/drivers/base/bus.c
37546@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37547 return -EINVAL;
37548
37549 mutex_lock(&subsys->p->mutex);
37550- list_add_tail(&sif->node, &subsys->p->interfaces);
37551+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37552 if (sif->add_dev) {
37553 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37554 while ((dev = subsys_dev_iter_next(&iter)))
37555@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37556 subsys = sif->subsys;
37557
37558 mutex_lock(&subsys->p->mutex);
37559- list_del_init(&sif->node);
37560+ pax_list_del_init((struct list_head *)&sif->node);
37561 if (sif->remove_dev) {
37562 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37563 while ((dev = subsys_dev_iter_next(&iter)))
37564diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37565index 25798db..15f130e 100644
37566--- a/drivers/base/devtmpfs.c
37567+++ b/drivers/base/devtmpfs.c
37568@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37569 if (!thread)
37570 return 0;
37571
37572- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37573+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37574 if (err)
37575 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37576 else
37577@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37578 *err = sys_unshare(CLONE_NEWNS);
37579 if (*err)
37580 goto out;
37581- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37582+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37583 if (*err)
37584 goto out;
37585- sys_chdir("/.."); /* will traverse into overmounted root */
37586- sys_chroot(".");
37587+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37588+ sys_chroot((char __force_user *)".");
37589 complete(&setup_done);
37590 while (1) {
37591 spin_lock(&req_lock);
37592diff --git a/drivers/base/node.c b/drivers/base/node.c
37593index a3b82e9..f90a8ce 100644
37594--- a/drivers/base/node.c
37595+++ b/drivers/base/node.c
37596@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37597 struct node_attr {
37598 struct device_attribute attr;
37599 enum node_states state;
37600-};
37601+} __do_const;
37602
37603 static ssize_t show_node_state(struct device *dev,
37604 struct device_attribute *attr, char *buf)
37605diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37606index 0d8780c..0b5df3f 100644
37607--- a/drivers/base/power/domain.c
37608+++ b/drivers/base/power/domain.c
37609@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37610 {
37611 struct cpuidle_driver *cpuidle_drv;
37612 struct gpd_cpuidle_data *cpuidle_data;
37613- struct cpuidle_state *idle_state;
37614+ cpuidle_state_no_const *idle_state;
37615 int ret = 0;
37616
37617 if (IS_ERR_OR_NULL(genpd) || state < 0)
37618@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37619 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37620 {
37621 struct gpd_cpuidle_data *cpuidle_data;
37622- struct cpuidle_state *idle_state;
37623+ cpuidle_state_no_const *idle_state;
37624 int ret = 0;
37625
37626 if (IS_ERR_OR_NULL(genpd))
37627@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
37628 return ret;
37629 }
37630
37631- dev->pm_domain->detach = genpd_dev_pm_detach;
37632+ pax_open_kernel();
37633+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37634+ pax_close_kernel();
37635+
37636 pm_genpd_poweron(pd);
37637
37638 return 0;
37639diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37640index d2be3f9..0a3167a 100644
37641--- a/drivers/base/power/sysfs.c
37642+++ b/drivers/base/power/sysfs.c
37643@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37644 return -EIO;
37645 }
37646 }
37647- return sprintf(buf, p);
37648+ return sprintf(buf, "%s", p);
37649 }
37650
37651 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
37652diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37653index c2744b3..08fac19 100644
37654--- a/drivers/base/power/wakeup.c
37655+++ b/drivers/base/power/wakeup.c
37656@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37657 * They need to be modified together atomically, so it's better to use one
37658 * atomic variable to hold them both.
37659 */
37660-static atomic_t combined_event_count = ATOMIC_INIT(0);
37661+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37662
37663 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37664 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37665
37666 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37667 {
37668- unsigned int comb = atomic_read(&combined_event_count);
37669+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37670
37671 *cnt = (comb >> IN_PROGRESS_BITS);
37672 *inpr = comb & MAX_IN_PROGRESS;
37673@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37674 ws->start_prevent_time = ws->last_time;
37675
37676 /* Increment the counter of events in progress. */
37677- cec = atomic_inc_return(&combined_event_count);
37678+ cec = atomic_inc_return_unchecked(&combined_event_count);
37679
37680 trace_wakeup_source_activate(ws->name, cec);
37681 }
37682@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37683 * Increment the counter of registered wakeup events and decrement the
37684 * couter of wakeup events in progress simultaneously.
37685 */
37686- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37687+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37688 trace_wakeup_source_deactivate(ws->name, cec);
37689
37690 split_counters(&cnt, &inpr);
37691diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37692index 8d98a32..61d3165 100644
37693--- a/drivers/base/syscore.c
37694+++ b/drivers/base/syscore.c
37695@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37696 void register_syscore_ops(struct syscore_ops *ops)
37697 {
37698 mutex_lock(&syscore_ops_lock);
37699- list_add_tail(&ops->node, &syscore_ops_list);
37700+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37701 mutex_unlock(&syscore_ops_lock);
37702 }
37703 EXPORT_SYMBOL_GPL(register_syscore_ops);
37704@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37705 void unregister_syscore_ops(struct syscore_ops *ops)
37706 {
37707 mutex_lock(&syscore_ops_lock);
37708- list_del(&ops->node);
37709+ pax_list_del((struct list_head *)&ops->node);
37710 mutex_unlock(&syscore_ops_lock);
37711 }
37712 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37713diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37714index ff20f19..018f1da 100644
37715--- a/drivers/block/cciss.c
37716+++ b/drivers/block/cciss.c
37717@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37718 while (!list_empty(&h->reqQ)) {
37719 c = list_entry(h->reqQ.next, CommandList_struct, list);
37720 /* can't do anything if fifo is full */
37721- if ((h->access.fifo_full(h))) {
37722+ if ((h->access->fifo_full(h))) {
37723 dev_warn(&h->pdev->dev, "fifo full\n");
37724 break;
37725 }
37726@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37727 h->Qdepth--;
37728
37729 /* Tell the controller execute command */
37730- h->access.submit_command(h, c);
37731+ h->access->submit_command(h, c);
37732
37733 /* Put job onto the completed Q */
37734 addQ(&h->cmpQ, c);
37735@@ -3444,17 +3444,17 @@ startio:
37736
37737 static inline unsigned long get_next_completion(ctlr_info_t *h)
37738 {
37739- return h->access.command_completed(h);
37740+ return h->access->command_completed(h);
37741 }
37742
37743 static inline int interrupt_pending(ctlr_info_t *h)
37744 {
37745- return h->access.intr_pending(h);
37746+ return h->access->intr_pending(h);
37747 }
37748
37749 static inline long interrupt_not_for_us(ctlr_info_t *h)
37750 {
37751- return ((h->access.intr_pending(h) == 0) ||
37752+ return ((h->access->intr_pending(h) == 0) ||
37753 (h->interrupts_enabled == 0));
37754 }
37755
37756@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37757 u32 a;
37758
37759 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37760- return h->access.command_completed(h);
37761+ return h->access->command_completed(h);
37762
37763 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37764 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37765@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37766 trans_support & CFGTBL_Trans_use_short_tags);
37767
37768 /* Change the access methods to the performant access methods */
37769- h->access = SA5_performant_access;
37770+ h->access = &SA5_performant_access;
37771 h->transMethod = CFGTBL_Trans_Performant;
37772
37773 return;
37774@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37775 if (prod_index < 0)
37776 return -ENODEV;
37777 h->product_name = products[prod_index].product_name;
37778- h->access = *(products[prod_index].access);
37779+ h->access = products[prod_index].access;
37780
37781 if (cciss_board_disabled(h)) {
37782 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37783@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37784 }
37785
37786 /* make sure the board interrupts are off */
37787- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37788+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37789 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37790 if (rc)
37791 goto clean2;
37792@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37793 * fake ones to scoop up any residual completions.
37794 */
37795 spin_lock_irqsave(&h->lock, flags);
37796- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37797+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37798 spin_unlock_irqrestore(&h->lock, flags);
37799 free_irq(h->intr[h->intr_mode], h);
37800 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37801@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37802 dev_info(&h->pdev->dev, "Board READY.\n");
37803 dev_info(&h->pdev->dev,
37804 "Waiting for stale completions to drain.\n");
37805- h->access.set_intr_mask(h, CCISS_INTR_ON);
37806+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37807 msleep(10000);
37808- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37809+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37810
37811 rc = controller_reset_failed(h->cfgtable);
37812 if (rc)
37813@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37814 cciss_scsi_setup(h);
37815
37816 /* Turn the interrupts on so we can service requests */
37817- h->access.set_intr_mask(h, CCISS_INTR_ON);
37818+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37819
37820 /* Get the firmware version */
37821 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37822@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37823 kfree(flush_buf);
37824 if (return_code != IO_OK)
37825 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37826- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37827+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37828 free_irq(h->intr[h->intr_mode], h);
37829 }
37830
37831diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37832index 7fda30e..2f27946 100644
37833--- a/drivers/block/cciss.h
37834+++ b/drivers/block/cciss.h
37835@@ -101,7 +101,7 @@ struct ctlr_info
37836 /* information about each logical volume */
37837 drive_info_struct *drv[CISS_MAX_LUN];
37838
37839- struct access_method access;
37840+ struct access_method *access;
37841
37842 /* queue and queue Info */
37843 struct list_head reqQ;
37844@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
37845 }
37846
37847 static struct access_method SA5_access = {
37848- SA5_submit_command,
37849- SA5_intr_mask,
37850- SA5_fifo_full,
37851- SA5_intr_pending,
37852- SA5_completed,
37853+ .submit_command = SA5_submit_command,
37854+ .set_intr_mask = SA5_intr_mask,
37855+ .fifo_full = SA5_fifo_full,
37856+ .intr_pending = SA5_intr_pending,
37857+ .command_completed = SA5_completed,
37858 };
37859
37860 static struct access_method SA5B_access = {
37861- SA5_submit_command,
37862- SA5B_intr_mask,
37863- SA5_fifo_full,
37864- SA5B_intr_pending,
37865- SA5_completed,
37866+ .submit_command = SA5_submit_command,
37867+ .set_intr_mask = SA5B_intr_mask,
37868+ .fifo_full = SA5_fifo_full,
37869+ .intr_pending = SA5B_intr_pending,
37870+ .command_completed = SA5_completed,
37871 };
37872
37873 static struct access_method SA5_performant_access = {
37874- SA5_submit_command,
37875- SA5_performant_intr_mask,
37876- SA5_fifo_full,
37877- SA5_performant_intr_pending,
37878- SA5_performant_completed,
37879+ .submit_command = SA5_submit_command,
37880+ .set_intr_mask = SA5_performant_intr_mask,
37881+ .fifo_full = SA5_fifo_full,
37882+ .intr_pending = SA5_performant_intr_pending,
37883+ .command_completed = SA5_performant_completed,
37884 };
37885
37886 struct board_type {
37887diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37888index 2b94403..fd6ad1f 100644
37889--- a/drivers/block/cpqarray.c
37890+++ b/drivers/block/cpqarray.c
37891@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37892 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37893 goto Enomem4;
37894 }
37895- hba[i]->access.set_intr_mask(hba[i], 0);
37896+ hba[i]->access->set_intr_mask(hba[i], 0);
37897 if (request_irq(hba[i]->intr, do_ida_intr,
37898 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37899 {
37900@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37901 add_timer(&hba[i]->timer);
37902
37903 /* Enable IRQ now that spinlock and rate limit timer are set up */
37904- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37905+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37906
37907 for(j=0; j<NWD; j++) {
37908 struct gendisk *disk = ida_gendisk[i][j];
37909@@ -694,7 +694,7 @@ DBGINFO(
37910 for(i=0; i<NR_PRODUCTS; i++) {
37911 if (board_id == products[i].board_id) {
37912 c->product_name = products[i].product_name;
37913- c->access = *(products[i].access);
37914+ c->access = products[i].access;
37915 break;
37916 }
37917 }
37918@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37919 hba[ctlr]->intr = intr;
37920 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37921 hba[ctlr]->product_name = products[j].product_name;
37922- hba[ctlr]->access = *(products[j].access);
37923+ hba[ctlr]->access = products[j].access;
37924 hba[ctlr]->ctlr = ctlr;
37925 hba[ctlr]->board_id = board_id;
37926 hba[ctlr]->pci_dev = NULL; /* not PCI */
37927@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37928
37929 while((c = h->reqQ) != NULL) {
37930 /* Can't do anything if we're busy */
37931- if (h->access.fifo_full(h) == 0)
37932+ if (h->access->fifo_full(h) == 0)
37933 return;
37934
37935 /* Get the first entry from the request Q */
37936@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37937 h->Qdepth--;
37938
37939 /* Tell the controller to do our bidding */
37940- h->access.submit_command(h, c);
37941+ h->access->submit_command(h, c);
37942
37943 /* Get onto the completion Q */
37944 addQ(&h->cmpQ, c);
37945@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37946 unsigned long flags;
37947 __u32 a,a1;
37948
37949- istat = h->access.intr_pending(h);
37950+ istat = h->access->intr_pending(h);
37951 /* Is this interrupt for us? */
37952 if (istat == 0)
37953 return IRQ_NONE;
37954@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37955 */
37956 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37957 if (istat & FIFO_NOT_EMPTY) {
37958- while((a = h->access.command_completed(h))) {
37959+ while((a = h->access->command_completed(h))) {
37960 a1 = a; a &= ~3;
37961 if ((c = h->cmpQ) == NULL)
37962 {
37963@@ -1448,11 +1448,11 @@ static int sendcmd(
37964 /*
37965 * Disable interrupt
37966 */
37967- info_p->access.set_intr_mask(info_p, 0);
37968+ info_p->access->set_intr_mask(info_p, 0);
37969 /* Make sure there is room in the command FIFO */
37970 /* Actually it should be completely empty at this time. */
37971 for (i = 200000; i > 0; i--) {
37972- temp = info_p->access.fifo_full(info_p);
37973+ temp = info_p->access->fifo_full(info_p);
37974 if (temp != 0) {
37975 break;
37976 }
37977@@ -1465,7 +1465,7 @@ DBG(
37978 /*
37979 * Send the cmd
37980 */
37981- info_p->access.submit_command(info_p, c);
37982+ info_p->access->submit_command(info_p, c);
37983 complete = pollcomplete(ctlr);
37984
37985 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
37986@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
37987 * we check the new geometry. Then turn interrupts back on when
37988 * we're done.
37989 */
37990- host->access.set_intr_mask(host, 0);
37991+ host->access->set_intr_mask(host, 0);
37992 getgeometry(ctlr);
37993- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
37994+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
37995
37996 for(i=0; i<NWD; i++) {
37997 struct gendisk *disk = ida_gendisk[ctlr][i];
37998@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
37999 /* Wait (up to 2 seconds) for a command to complete */
38000
38001 for (i = 200000; i > 0; i--) {
38002- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38003+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38004 if (done == 0) {
38005 udelay(10); /* a short fixed delay */
38006 } else
38007diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38008index be73e9d..7fbf140 100644
38009--- a/drivers/block/cpqarray.h
38010+++ b/drivers/block/cpqarray.h
38011@@ -99,7 +99,7 @@ struct ctlr_info {
38012 drv_info_t drv[NWD];
38013 struct proc_dir_entry *proc;
38014
38015- struct access_method access;
38016+ struct access_method *access;
38017
38018 cmdlist_t *reqQ;
38019 cmdlist_t *cmpQ;
38020diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38021index 434c77d..6d3219a 100644
38022--- a/drivers/block/drbd/drbd_bitmap.c
38023+++ b/drivers/block/drbd/drbd_bitmap.c
38024@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38025 submit_bio(rw, bio);
38026 /* this should not count as user activity and cause the
38027 * resync to throttle -- see drbd_rs_should_slow_down(). */
38028- atomic_add(len >> 9, &device->rs_sect_ev);
38029+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38030 }
38031 }
38032
38033diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38034index b905e98..0812ed8 100644
38035--- a/drivers/block/drbd/drbd_int.h
38036+++ b/drivers/block/drbd/drbd_int.h
38037@@ -385,7 +385,7 @@ struct drbd_epoch {
38038 struct drbd_connection *connection;
38039 struct list_head list;
38040 unsigned int barrier_nr;
38041- atomic_t epoch_size; /* increased on every request added. */
38042+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38043 atomic_t active; /* increased on every req. added, and dec on every finished. */
38044 unsigned long flags;
38045 };
38046@@ -946,7 +946,7 @@ struct drbd_device {
38047 unsigned int al_tr_number;
38048 int al_tr_cycle;
38049 wait_queue_head_t seq_wait;
38050- atomic_t packet_seq;
38051+ atomic_unchecked_t packet_seq;
38052 unsigned int peer_seq;
38053 spinlock_t peer_seq_lock;
38054 unsigned long comm_bm_set; /* communicated number of set bits. */
38055@@ -955,8 +955,8 @@ struct drbd_device {
38056 struct mutex own_state_mutex;
38057 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38058 char congestion_reason; /* Why we where congested... */
38059- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38060- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38061+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38062+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38063 int rs_last_sect_ev; /* counter to compare with */
38064 int rs_last_events; /* counter of read or write "events" (unit sectors)
38065 * on the lower level device when we last looked. */
38066diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38067index 1fc8342..7e7742b 100644
38068--- a/drivers/block/drbd/drbd_main.c
38069+++ b/drivers/block/drbd/drbd_main.c
38070@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38071 p->sector = sector;
38072 p->block_id = block_id;
38073 p->blksize = blksize;
38074- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38075+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38076 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38077 }
38078
38079@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38080 return -EIO;
38081 p->sector = cpu_to_be64(req->i.sector);
38082 p->block_id = (unsigned long)req;
38083- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38084+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38085 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38086 if (device->state.conn >= C_SYNC_SOURCE &&
38087 device->state.conn <= C_PAUSED_SYNC_T)
38088@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38089 atomic_set(&device->unacked_cnt, 0);
38090 atomic_set(&device->local_cnt, 0);
38091 atomic_set(&device->pp_in_use_by_net, 0);
38092- atomic_set(&device->rs_sect_in, 0);
38093- atomic_set(&device->rs_sect_ev, 0);
38094+ atomic_set_unchecked(&device->rs_sect_in, 0);
38095+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38096 atomic_set(&device->ap_in_flight, 0);
38097 atomic_set(&device->md_io.in_use, 0);
38098
38099@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38100 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38101 struct drbd_resource *resource = connection->resource;
38102
38103- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38104- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38105+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38106+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38107 kfree(connection->current_epoch);
38108
38109 idr_destroy(&connection->peer_devices);
38110diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38111index 74df8cf..e41fc24 100644
38112--- a/drivers/block/drbd/drbd_nl.c
38113+++ b/drivers/block/drbd/drbd_nl.c
38114@@ -3637,13 +3637,13 @@ finish:
38115
38116 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38117 {
38118- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38119+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38120 struct sk_buff *msg;
38121 struct drbd_genlmsghdr *d_out;
38122 unsigned seq;
38123 int err = -ENOMEM;
38124
38125- seq = atomic_inc_return(&drbd_genl_seq);
38126+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38127 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38128 if (!msg)
38129 goto failed;
38130diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38131index d169b4a..481463f 100644
38132--- a/drivers/block/drbd/drbd_receiver.c
38133+++ b/drivers/block/drbd/drbd_receiver.c
38134@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38135 struct drbd_device *device = peer_device->device;
38136 int err;
38137
38138- atomic_set(&device->packet_seq, 0);
38139+ atomic_set_unchecked(&device->packet_seq, 0);
38140 device->peer_seq = 0;
38141
38142 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38143@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38144 do {
38145 next_epoch = NULL;
38146
38147- epoch_size = atomic_read(&epoch->epoch_size);
38148+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38149
38150 switch (ev & ~EV_CLEANUP) {
38151 case EV_PUT:
38152@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38153 rv = FE_DESTROYED;
38154 } else {
38155 epoch->flags = 0;
38156- atomic_set(&epoch->epoch_size, 0);
38157+ atomic_set_unchecked(&epoch->epoch_size, 0);
38158 /* atomic_set(&epoch->active, 0); is already zero */
38159 if (rv == FE_STILL_LIVE)
38160 rv = FE_RECYCLED;
38161@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38162 conn_wait_active_ee_empty(connection);
38163 drbd_flush(connection);
38164
38165- if (atomic_read(&connection->current_epoch->epoch_size)) {
38166+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38167 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38168 if (epoch)
38169 break;
38170@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38171 }
38172
38173 epoch->flags = 0;
38174- atomic_set(&epoch->epoch_size, 0);
38175+ atomic_set_unchecked(&epoch->epoch_size, 0);
38176 atomic_set(&epoch->active, 0);
38177
38178 spin_lock(&connection->epoch_lock);
38179- if (atomic_read(&connection->current_epoch->epoch_size)) {
38180+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38181 list_add(&epoch->list, &connection->current_epoch->list);
38182 connection->current_epoch = epoch;
38183 connection->epochs++;
38184@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38185 list_add_tail(&peer_req->w.list, &device->sync_ee);
38186 spin_unlock_irq(&device->resource->req_lock);
38187
38188- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38189+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38190 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38191 return 0;
38192
38193@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38194 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38195 }
38196
38197- atomic_add(pi->size >> 9, &device->rs_sect_in);
38198+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38199
38200 return err;
38201 }
38202@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38203
38204 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38205 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38206- atomic_inc(&connection->current_epoch->epoch_size);
38207+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38208 err2 = drbd_drain_block(peer_device, pi->size);
38209 if (!err)
38210 err = err2;
38211@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38212
38213 spin_lock(&connection->epoch_lock);
38214 peer_req->epoch = connection->current_epoch;
38215- atomic_inc(&peer_req->epoch->epoch_size);
38216+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38217 atomic_inc(&peer_req->epoch->active);
38218 spin_unlock(&connection->epoch_lock);
38219
38220@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38221
38222 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38223 (int)part_stat_read(&disk->part0, sectors[1]) -
38224- atomic_read(&device->rs_sect_ev);
38225+ atomic_read_unchecked(&device->rs_sect_ev);
38226
38227 if (atomic_read(&device->ap_actlog_cnt)
38228 || curr_events - device->rs_last_events > 64) {
38229@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38230 device->use_csums = true;
38231 } else if (pi->cmd == P_OV_REPLY) {
38232 /* track progress, we may need to throttle */
38233- atomic_add(size >> 9, &device->rs_sect_in);
38234+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38235 peer_req->w.cb = w_e_end_ov_reply;
38236 dec_rs_pending(device);
38237 /* drbd_rs_begin_io done when we sent this request,
38238@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38239 goto out_free_e;
38240
38241 submit_for_resync:
38242- atomic_add(size >> 9, &device->rs_sect_ev);
38243+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38244
38245 submit:
38246 update_receiver_timing_details(connection, drbd_submit_peer_request);
38247@@ -4564,7 +4564,7 @@ struct data_cmd {
38248 int expect_payload;
38249 size_t pkt_size;
38250 int (*fn)(struct drbd_connection *, struct packet_info *);
38251-};
38252+} __do_const;
38253
38254 static struct data_cmd drbd_cmd_handler[] = {
38255 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38256@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38257 if (!list_empty(&connection->current_epoch->list))
38258 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38259 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38260- atomic_set(&connection->current_epoch->epoch_size, 0);
38261+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38262 connection->send.seen_any_write_yet = false;
38263
38264 drbd_info(connection, "Connection closed\n");
38265@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38266 put_ldev(device);
38267 }
38268 dec_rs_pending(device);
38269- atomic_add(blksize >> 9, &device->rs_sect_in);
38270+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38271
38272 return 0;
38273 }
38274@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38275 struct asender_cmd {
38276 size_t pkt_size;
38277 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38278-};
38279+} __do_const;
38280
38281 static struct asender_cmd asender_tbl[] = {
38282 [P_PING] = { 0, got_Ping },
38283diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38284index d0fae55..4469096 100644
38285--- a/drivers/block/drbd/drbd_worker.c
38286+++ b/drivers/block/drbd/drbd_worker.c
38287@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38288 list_add_tail(&peer_req->w.list, &device->read_ee);
38289 spin_unlock_irq(&device->resource->req_lock);
38290
38291- atomic_add(size >> 9, &device->rs_sect_ev);
38292+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38293 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38294 return 0;
38295
38296@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38297 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38298 int number, mxb;
38299
38300- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38301+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38302 device->rs_in_flight -= sect_in;
38303
38304 rcu_read_lock();
38305@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38306 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38307 struct fifo_buffer *plan;
38308
38309- atomic_set(&device->rs_sect_in, 0);
38310- atomic_set(&device->rs_sect_ev, 0);
38311+ atomic_set_unchecked(&device->rs_sect_in, 0);
38312+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38313 device->rs_in_flight = 0;
38314 device->rs_last_events =
38315 (int)part_stat_read(&disk->part0, sectors[0]) +
38316diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38317index 6cb1beb..bf490f7 100644
38318--- a/drivers/block/loop.c
38319+++ b/drivers/block/loop.c
38320@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38321
38322 file_start_write(file);
38323 set_fs(get_ds());
38324- bw = file->f_op->write(file, buf, len, &pos);
38325+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38326 set_fs(old_fs);
38327 file_end_write(file);
38328 if (likely(bw == len))
38329diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
38330index d826bf3..8eb406c 100644
38331--- a/drivers/block/nvme-core.c
38332+++ b/drivers/block/nvme-core.c
38333@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
38334 static struct task_struct *nvme_thread;
38335 static struct workqueue_struct *nvme_workq;
38336 static wait_queue_head_t nvme_kthread_wait;
38337-static struct notifier_block nvme_nb;
38338
38339 static void nvme_reset_failed_dev(struct work_struct *ws);
38340 static int nvme_process_cq(struct nvme_queue *nvmeq);
38341@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
38342 static void __exit nvme_exit(void)
38343 {
38344 pci_unregister_driver(&nvme_driver);
38345- unregister_hotcpu_notifier(&nvme_nb);
38346 unregister_blkdev(nvme_major, "nvme");
38347 destroy_workqueue(nvme_workq);
38348 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
38349diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38350index 09e628da..7607aaa 100644
38351--- a/drivers/block/pktcdvd.c
38352+++ b/drivers/block/pktcdvd.c
38353@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38354
38355 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38356 {
38357- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38358+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38359 }
38360
38361 /*
38362@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38363 return -EROFS;
38364 }
38365 pd->settings.fp = ti.fp;
38366- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38367+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38368
38369 if (ti.nwa_v) {
38370 pd->nwa = be32_to_cpu(ti.next_writable);
38371diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38372index 8a86b62..f54c87e 100644
38373--- a/drivers/block/rbd.c
38374+++ b/drivers/block/rbd.c
38375@@ -63,7 +63,7 @@
38376 * If the counter is already at its maximum value returns
38377 * -EINVAL without updating it.
38378 */
38379-static int atomic_inc_return_safe(atomic_t *v)
38380+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38381 {
38382 unsigned int counter;
38383
38384diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38385index e5565fb..71be10b4 100644
38386--- a/drivers/block/smart1,2.h
38387+++ b/drivers/block/smart1,2.h
38388@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38389 }
38390
38391 static struct access_method smart4_access = {
38392- smart4_submit_command,
38393- smart4_intr_mask,
38394- smart4_fifo_full,
38395- smart4_intr_pending,
38396- smart4_completed,
38397+ .submit_command = smart4_submit_command,
38398+ .set_intr_mask = smart4_intr_mask,
38399+ .fifo_full = smart4_fifo_full,
38400+ .intr_pending = smart4_intr_pending,
38401+ .command_completed = smart4_completed,
38402 };
38403
38404 /*
38405@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38406 }
38407
38408 static struct access_method smart2_access = {
38409- smart2_submit_command,
38410- smart2_intr_mask,
38411- smart2_fifo_full,
38412- smart2_intr_pending,
38413- smart2_completed,
38414+ .submit_command = smart2_submit_command,
38415+ .set_intr_mask = smart2_intr_mask,
38416+ .fifo_full = smart2_fifo_full,
38417+ .intr_pending = smart2_intr_pending,
38418+ .command_completed = smart2_completed,
38419 };
38420
38421 /*
38422@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38423 }
38424
38425 static struct access_method smart2e_access = {
38426- smart2e_submit_command,
38427- smart2e_intr_mask,
38428- smart2e_fifo_full,
38429- smart2e_intr_pending,
38430- smart2e_completed,
38431+ .submit_command = smart2e_submit_command,
38432+ .set_intr_mask = smart2e_intr_mask,
38433+ .fifo_full = smart2e_fifo_full,
38434+ .intr_pending = smart2e_intr_pending,
38435+ .command_completed = smart2e_completed,
38436 };
38437
38438 /*
38439@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38440 }
38441
38442 static struct access_method smart1_access = {
38443- smart1_submit_command,
38444- smart1_intr_mask,
38445- smart1_fifo_full,
38446- smart1_intr_pending,
38447- smart1_completed,
38448+ .submit_command = smart1_submit_command,
38449+ .set_intr_mask = smart1_intr_mask,
38450+ .fifo_full = smart1_fifo_full,
38451+ .intr_pending = smart1_intr_pending,
38452+ .command_completed = smart1_completed,
38453 };
38454diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38455index 55c135b..9f8d60c 100644
38456--- a/drivers/bluetooth/btwilink.c
38457+++ b/drivers/bluetooth/btwilink.c
38458@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38459
38460 static int bt_ti_probe(struct platform_device *pdev)
38461 {
38462- static struct ti_st *hst;
38463+ struct ti_st *hst;
38464 struct hci_dev *hdev;
38465 int err;
38466
38467diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38468index 5d28a45..a538f90 100644
38469--- a/drivers/cdrom/cdrom.c
38470+++ b/drivers/cdrom/cdrom.c
38471@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38472 ENSURE(reset, CDC_RESET);
38473 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38474 cdi->mc_flags = 0;
38475- cdo->n_minors = 0;
38476 cdi->options = CDO_USE_FFLAGS;
38477
38478 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38479@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38480 else
38481 cdi->cdda_method = CDDA_OLD;
38482
38483- if (!cdo->generic_packet)
38484- cdo->generic_packet = cdrom_dummy_generic_packet;
38485+ if (!cdo->generic_packet) {
38486+ pax_open_kernel();
38487+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38488+ pax_close_kernel();
38489+ }
38490
38491 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38492 mutex_lock(&cdrom_mutex);
38493@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38494 if (cdi->exit)
38495 cdi->exit(cdi);
38496
38497- cdi->ops->n_minors--;
38498 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38499 }
38500
38501@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38502 */
38503 nr = nframes;
38504 do {
38505- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38506+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38507 if (cgc.buffer)
38508 break;
38509
38510@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38511 struct cdrom_device_info *cdi;
38512 int ret;
38513
38514- ret = scnprintf(info + *pos, max_size - *pos, header);
38515+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38516 if (!ret)
38517 return 1;
38518
38519diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38520index 584bc31..e64a12c 100644
38521--- a/drivers/cdrom/gdrom.c
38522+++ b/drivers/cdrom/gdrom.c
38523@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38524 .audio_ioctl = gdrom_audio_ioctl,
38525 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38526 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38527- .n_minors = 1,
38528 };
38529
38530 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38531diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38532index efefd12..4f1d494 100644
38533--- a/drivers/char/Kconfig
38534+++ b/drivers/char/Kconfig
38535@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38536
38537 config DEVKMEM
38538 bool "/dev/kmem virtual device support"
38539- default y
38540+ default n
38541+ depends on !GRKERNSEC_KMEM
38542 help
38543 Say Y here if you want to support the /dev/kmem device. The
38544 /dev/kmem device is rarely used, but can be used for certain
38545@@ -577,6 +578,7 @@ config DEVPORT
38546 bool
38547 depends on !M68K
38548 depends on ISA || PCI
38549+ depends on !GRKERNSEC_KMEM
38550 default y
38551
38552 source "drivers/s390/char/Kconfig"
38553diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38554index a48e05b..6bac831 100644
38555--- a/drivers/char/agp/compat_ioctl.c
38556+++ b/drivers/char/agp/compat_ioctl.c
38557@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38558 return -ENOMEM;
38559 }
38560
38561- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38562+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38563 sizeof(*usegment) * ureserve.seg_count)) {
38564 kfree(usegment);
38565 kfree(ksegment);
38566diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38567index 09f17eb..8531d2f 100644
38568--- a/drivers/char/agp/frontend.c
38569+++ b/drivers/char/agp/frontend.c
38570@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38571 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38572 return -EFAULT;
38573
38574- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38575+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38576 return -EFAULT;
38577
38578 client = agp_find_client_by_pid(reserve.pid);
38579@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38580 if (segment == NULL)
38581 return -ENOMEM;
38582
38583- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38584+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38585 sizeof(struct agp_segment) * reserve.seg_count)) {
38586 kfree(segment);
38587 return -EFAULT;
38588diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38589index 4f94375..413694e 100644
38590--- a/drivers/char/genrtc.c
38591+++ b/drivers/char/genrtc.c
38592@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38593 switch (cmd) {
38594
38595 case RTC_PLL_GET:
38596+ memset(&pll, 0, sizeof(pll));
38597 if (get_rtc_pll(&pll))
38598 return -EINVAL;
38599 else
38600diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38601index d5d4cd8..22d561d 100644
38602--- a/drivers/char/hpet.c
38603+++ b/drivers/char/hpet.c
38604@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38605 }
38606
38607 static int
38608-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38609+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38610 struct hpet_info *info)
38611 {
38612 struct hpet_timer __iomem *timer;
38613diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38614index 6b65fa4..8ebbc99 100644
38615--- a/drivers/char/ipmi/ipmi_msghandler.c
38616+++ b/drivers/char/ipmi/ipmi_msghandler.c
38617@@ -436,7 +436,7 @@ struct ipmi_smi {
38618 struct proc_dir_entry *proc_dir;
38619 char proc_dir_name[10];
38620
38621- atomic_t stats[IPMI_NUM_STATS];
38622+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38623
38624 /*
38625 * run_to_completion duplicate of smb_info, smi_info
38626@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38627 static DEFINE_MUTEX(smi_watchers_mutex);
38628
38629 #define ipmi_inc_stat(intf, stat) \
38630- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38631+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38632 #define ipmi_get_stat(intf, stat) \
38633- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38634+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38635
38636 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38637 "ACPI", "SMBIOS", "PCI",
38638@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38639 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38640 init_waitqueue_head(&intf->waitq);
38641 for (i = 0; i < IPMI_NUM_STATS; i++)
38642- atomic_set(&intf->stats[i], 0);
38643+ atomic_set_unchecked(&intf->stats[i], 0);
38644
38645 intf->proc_dir = NULL;
38646
38647diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38648index 967b73a..946e94c 100644
38649--- a/drivers/char/ipmi/ipmi_si_intf.c
38650+++ b/drivers/char/ipmi/ipmi_si_intf.c
38651@@ -284,7 +284,7 @@ struct smi_info {
38652 unsigned char slave_addr;
38653
38654 /* Counters and things for the proc filesystem. */
38655- atomic_t stats[SI_NUM_STATS];
38656+ atomic_unchecked_t stats[SI_NUM_STATS];
38657
38658 struct task_struct *thread;
38659
38660@@ -293,9 +293,9 @@ struct smi_info {
38661 };
38662
38663 #define smi_inc_stat(smi, stat) \
38664- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38665+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38666 #define smi_get_stat(smi, stat) \
38667- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38668+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38669
38670 #define SI_MAX_PARMS 4
38671
38672@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
38673 atomic_set(&new_smi->req_events, 0);
38674 new_smi->run_to_completion = false;
38675 for (i = 0; i < SI_NUM_STATS; i++)
38676- atomic_set(&new_smi->stats[i], 0);
38677+ atomic_set_unchecked(&new_smi->stats[i], 0);
38678
38679 new_smi->interrupt_disabled = true;
38680 atomic_set(&new_smi->need_watch, 0);
38681diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38682index 4c58333..d5cca27 100644
38683--- a/drivers/char/mem.c
38684+++ b/drivers/char/mem.c
38685@@ -18,6 +18,7 @@
38686 #include <linux/raw.h>
38687 #include <linux/tty.h>
38688 #include <linux/capability.h>
38689+#include <linux/security.h>
38690 #include <linux/ptrace.h>
38691 #include <linux/device.h>
38692 #include <linux/highmem.h>
38693@@ -36,6 +37,10 @@
38694
38695 #define DEVPORT_MINOR 4
38696
38697+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38698+extern const struct file_operations grsec_fops;
38699+#endif
38700+
38701 static inline unsigned long size_inside_page(unsigned long start,
38702 unsigned long size)
38703 {
38704@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38705
38706 while (cursor < to) {
38707 if (!devmem_is_allowed(pfn)) {
38708+#ifdef CONFIG_GRKERNSEC_KMEM
38709+ gr_handle_mem_readwrite(from, to);
38710+#else
38711 printk(KERN_INFO
38712 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38713 current->comm, from, to);
38714+#endif
38715 return 0;
38716 }
38717 cursor += PAGE_SIZE;
38718@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38719 }
38720 return 1;
38721 }
38722+#elif defined(CONFIG_GRKERNSEC_KMEM)
38723+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38724+{
38725+ return 0;
38726+}
38727 #else
38728 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38729 {
38730@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38731 #endif
38732
38733 while (count > 0) {
38734- unsigned long remaining;
38735+ unsigned long remaining = 0;
38736+ char *temp;
38737
38738 sz = size_inside_page(p, count);
38739
38740@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38741 if (!ptr)
38742 return -EFAULT;
38743
38744- remaining = copy_to_user(buf, ptr, sz);
38745+#ifdef CONFIG_PAX_USERCOPY
38746+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38747+ if (!temp) {
38748+ unxlate_dev_mem_ptr(p, ptr);
38749+ return -ENOMEM;
38750+ }
38751+ remaining = probe_kernel_read(temp, ptr, sz);
38752+#else
38753+ temp = ptr;
38754+#endif
38755+
38756+ if (!remaining)
38757+ remaining = copy_to_user(buf, temp, sz);
38758+
38759+#ifdef CONFIG_PAX_USERCOPY
38760+ kfree(temp);
38761+#endif
38762+
38763 unxlate_dev_mem_ptr(p, ptr);
38764 if (remaining)
38765 return -EFAULT;
38766@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38767 size_t count, loff_t *ppos)
38768 {
38769 unsigned long p = *ppos;
38770- ssize_t low_count, read, sz;
38771+ ssize_t low_count, read, sz, err = 0;
38772 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38773- int err = 0;
38774
38775 read = 0;
38776 if (p < (unsigned long) high_memory) {
38777@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38778 }
38779 #endif
38780 while (low_count > 0) {
38781+ char *temp;
38782+
38783 sz = size_inside_page(p, low_count);
38784
38785 /*
38786@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38787 */
38788 kbuf = xlate_dev_kmem_ptr((void *)p);
38789
38790- if (copy_to_user(buf, kbuf, sz))
38791+#ifdef CONFIG_PAX_USERCOPY
38792+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38793+ if (!temp)
38794+ return -ENOMEM;
38795+ err = probe_kernel_read(temp, kbuf, sz);
38796+#else
38797+ temp = kbuf;
38798+#endif
38799+
38800+ if (!err)
38801+ err = copy_to_user(buf, temp, sz);
38802+
38803+#ifdef CONFIG_PAX_USERCOPY
38804+ kfree(temp);
38805+#endif
38806+
38807+ if (err)
38808 return -EFAULT;
38809 buf += sz;
38810 p += sz;
38811@@ -800,6 +849,9 @@ static const struct memdev {
38812 #ifdef CONFIG_PRINTK
38813 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38814 #endif
38815+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38816+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38817+#endif
38818 };
38819
38820 static int memory_open(struct inode *inode, struct file *filp)
38821@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
38822 continue;
38823
38824 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38825- NULL, devlist[minor].name);
38826+ NULL, "%s", devlist[minor].name);
38827 }
38828
38829 return tty_init();
38830diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38831index 9df78e2..01ba9ae 100644
38832--- a/drivers/char/nvram.c
38833+++ b/drivers/char/nvram.c
38834@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38835
38836 spin_unlock_irq(&rtc_lock);
38837
38838- if (copy_to_user(buf, contents, tmp - contents))
38839+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38840 return -EFAULT;
38841
38842 *ppos = i;
38843diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
38844index 0ea9986..e7b07e4 100644
38845--- a/drivers/char/pcmcia/synclink_cs.c
38846+++ b/drivers/char/pcmcia/synclink_cs.c
38847@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38848
38849 if (debug_level >= DEBUG_LEVEL_INFO)
38850 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
38851- __FILE__, __LINE__, info->device_name, port->count);
38852+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
38853
38854 if (tty_port_close_start(port, tty, filp) == 0)
38855 goto cleanup;
38856@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38857 cleanup:
38858 if (debug_level >= DEBUG_LEVEL_INFO)
38859 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
38860- tty->driver->name, port->count);
38861+ tty->driver->name, atomic_read(&port->count));
38862 }
38863
38864 /* Wait until the transmitter is empty.
38865@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38866
38867 if (debug_level >= DEBUG_LEVEL_INFO)
38868 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
38869- __FILE__, __LINE__, tty->driver->name, port->count);
38870+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
38871
38872 /* If port is closing, signal caller to try again */
38873 if (port->flags & ASYNC_CLOSING){
38874@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38875 goto cleanup;
38876 }
38877 spin_lock(&port->lock);
38878- port->count++;
38879+ atomic_inc(&port->count);
38880 spin_unlock(&port->lock);
38881 spin_unlock_irqrestore(&info->netlock, flags);
38882
38883- if (port->count == 1) {
38884+ if (atomic_read(&port->count) == 1) {
38885 /* 1st open on this device, init hardware */
38886 retval = startup(info, tty);
38887 if (retval < 0)
38888@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38889 unsigned short new_crctype;
38890
38891 /* return error if TTY interface open */
38892- if (info->port.count)
38893+ if (atomic_read(&info->port.count))
38894 return -EBUSY;
38895
38896 switch (encoding)
38897@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
38898
38899 /* arbitrate between network and tty opens */
38900 spin_lock_irqsave(&info->netlock, flags);
38901- if (info->port.count != 0 || info->netcount != 0) {
38902+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38903 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38904 spin_unlock_irqrestore(&info->netlock, flags);
38905 return -EBUSY;
38906@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38907 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
38908
38909 /* return error if TTY interface open */
38910- if (info->port.count)
38911+ if (atomic_read(&info->port.count))
38912 return -EBUSY;
38913
38914 if (cmd != SIOCWANDEV)
38915diff --git a/drivers/char/random.c b/drivers/char/random.c
38916index 9cd6968..6416f00 100644
38917--- a/drivers/char/random.c
38918+++ b/drivers/char/random.c
38919@@ -289,9 +289,6 @@
38920 /*
38921 * To allow fractional bits to be tracked, the entropy_count field is
38922 * denominated in units of 1/8th bits.
38923- *
38924- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
38925- * credit_entropy_bits() needs to be 64 bits wide.
38926 */
38927 #define ENTROPY_SHIFT 3
38928 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
38929@@ -439,9 +436,9 @@ struct entropy_store {
38930 };
38931
38932 static void push_to_pool(struct work_struct *work);
38933-static __u32 input_pool_data[INPUT_POOL_WORDS];
38934-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
38935-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
38936+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
38937+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38938+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38939
38940 static struct entropy_store input_pool = {
38941 .poolinfo = &poolinfo_table[0],
38942@@ -635,7 +632,7 @@ retry:
38943 /* The +2 corresponds to the /4 in the denominator */
38944
38945 do {
38946- unsigned int anfrac = min(pnfrac, pool_size/2);
38947+ u64 anfrac = min(pnfrac, pool_size/2);
38948 unsigned int add =
38949 ((pool_size - entropy_count)*anfrac*3) >> s;
38950
38951@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38952
38953 extract_buf(r, tmp);
38954 i = min_t(int, nbytes, EXTRACT_SIZE);
38955- if (copy_to_user(buf, tmp, i)) {
38956+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38957 ret = -EFAULT;
38958 break;
38959 }
38960@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
38961 static int proc_do_uuid(struct ctl_table *table, int write,
38962 void __user *buffer, size_t *lenp, loff_t *ppos)
38963 {
38964- struct ctl_table fake_table;
38965+ ctl_table_no_const fake_table;
38966 unsigned char buf[64], tmp_uuid[16], *uuid;
38967
38968 uuid = table->data;
38969@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38970 static int proc_do_entropy(struct ctl_table *table, int write,
38971 void __user *buffer, size_t *lenp, loff_t *ppos)
38972 {
38973- struct ctl_table fake_table;
38974+ ctl_table_no_const fake_table;
38975 int entropy_count;
38976
38977 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
38978diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
38979index e496dae..b793e7d 100644
38980--- a/drivers/char/sonypi.c
38981+++ b/drivers/char/sonypi.c
38982@@ -54,6 +54,7 @@
38983
38984 #include <asm/uaccess.h>
38985 #include <asm/io.h>
38986+#include <asm/local.h>
38987
38988 #include <linux/sonypi.h>
38989
38990@@ -490,7 +491,7 @@ static struct sonypi_device {
38991 spinlock_t fifo_lock;
38992 wait_queue_head_t fifo_proc_list;
38993 struct fasync_struct *fifo_async;
38994- int open_count;
38995+ local_t open_count;
38996 int model;
38997 struct input_dev *input_jog_dev;
38998 struct input_dev *input_key_dev;
38999@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39000 static int sonypi_misc_release(struct inode *inode, struct file *file)
39001 {
39002 mutex_lock(&sonypi_device.lock);
39003- sonypi_device.open_count--;
39004+ local_dec(&sonypi_device.open_count);
39005 mutex_unlock(&sonypi_device.lock);
39006 return 0;
39007 }
39008@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39009 {
39010 mutex_lock(&sonypi_device.lock);
39011 /* Flush input queue on first open */
39012- if (!sonypi_device.open_count)
39013+ if (!local_read(&sonypi_device.open_count))
39014 kfifo_reset(&sonypi_device.fifo);
39015- sonypi_device.open_count++;
39016+ local_inc(&sonypi_device.open_count);
39017 mutex_unlock(&sonypi_device.lock);
39018
39019 return 0;
39020diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39021index 565a947..dcdc06e 100644
39022--- a/drivers/char/tpm/tpm_acpi.c
39023+++ b/drivers/char/tpm/tpm_acpi.c
39024@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39025 virt = acpi_os_map_iomem(start, len);
39026 if (!virt) {
39027 kfree(log->bios_event_log);
39028+ log->bios_event_log = NULL;
39029 printk("%s: ERROR - Unable to map memory\n", __func__);
39030 return -EIO;
39031 }
39032
39033- memcpy_fromio(log->bios_event_log, virt, len);
39034+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39035
39036 acpi_os_unmap_iomem(virt, len);
39037 return 0;
39038diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39039index 3a56a13..f8cbd25 100644
39040--- a/drivers/char/tpm/tpm_eventlog.c
39041+++ b/drivers/char/tpm/tpm_eventlog.c
39042@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39043 event = addr;
39044
39045 if ((event->event_type == 0 && event->event_size == 0) ||
39046- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39047+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39048 return NULL;
39049
39050 return addr;
39051@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39052 return NULL;
39053
39054 if ((event->event_type == 0 && event->event_size == 0) ||
39055- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39056+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39057 return NULL;
39058
39059 (*pos)++;
39060@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39061 int i;
39062
39063 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39064- seq_putc(m, data[i]);
39065+ if (!seq_putc(m, data[i]))
39066+ return -EFAULT;
39067
39068 return 0;
39069 }
39070diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39071index de03df9..0a309a9 100644
39072--- a/drivers/char/virtio_console.c
39073+++ b/drivers/char/virtio_console.c
39074@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39075 if (to_user) {
39076 ssize_t ret;
39077
39078- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39079+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39080 if (ret)
39081 return -EFAULT;
39082 } else {
39083@@ -788,7 +788,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39084 if (!port_has_data(port) && !port->host_connected)
39085 return 0;
39086
39087- return fill_readbuf(port, ubuf, count, true);
39088+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39089 }
39090
39091 static int wait_port_writable(struct port *port, bool nonblock)
39092diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39093index 4386697..754ceca 100644
39094--- a/drivers/clk/clk-composite.c
39095+++ b/drivers/clk/clk-composite.c
39096@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39097 struct clk *clk;
39098 struct clk_init_data init;
39099 struct clk_composite *composite;
39100- struct clk_ops *clk_composite_ops;
39101+ clk_ops_no_const *clk_composite_ops;
39102
39103 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39104 if (!composite) {
39105diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39106index dd3a78c..386d49c 100644
39107--- a/drivers/clk/socfpga/clk-gate.c
39108+++ b/drivers/clk/socfpga/clk-gate.c
39109@@ -22,6 +22,7 @@
39110 #include <linux/mfd/syscon.h>
39111 #include <linux/of.h>
39112 #include <linux/regmap.h>
39113+#include <asm/pgtable.h>
39114
39115 #include "clk.h"
39116
39117@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39118 return 0;
39119 }
39120
39121-static struct clk_ops gateclk_ops = {
39122+static clk_ops_no_const gateclk_ops __read_only = {
39123 .prepare = socfpga_clk_prepare,
39124 .recalc_rate = socfpga_clk_recalc_rate,
39125 .get_parent = socfpga_clk_get_parent,
39126@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39127 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39128 socfpga_clk->hw.bit_idx = clk_gate[1];
39129
39130- gateclk_ops.enable = clk_gate_ops.enable;
39131- gateclk_ops.disable = clk_gate_ops.disable;
39132+ pax_open_kernel();
39133+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39134+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39135+ pax_close_kernel();
39136 }
39137
39138 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39139diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39140index de6da95..c98278b 100644
39141--- a/drivers/clk/socfpga/clk-pll.c
39142+++ b/drivers/clk/socfpga/clk-pll.c
39143@@ -21,6 +21,7 @@
39144 #include <linux/io.h>
39145 #include <linux/of.h>
39146 #include <linux/of_address.h>
39147+#include <asm/pgtable.h>
39148
39149 #include "clk.h"
39150
39151@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39152 CLK_MGR_PLL_CLK_SRC_MASK;
39153 }
39154
39155-static struct clk_ops clk_pll_ops = {
39156+static clk_ops_no_const clk_pll_ops __read_only = {
39157 .recalc_rate = clk_pll_recalc_rate,
39158 .get_parent = clk_pll_get_parent,
39159 };
39160@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39161 pll_clk->hw.hw.init = &init;
39162
39163 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39164- clk_pll_ops.enable = clk_gate_ops.enable;
39165- clk_pll_ops.disable = clk_gate_ops.disable;
39166+ pax_open_kernel();
39167+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39168+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39169+ pax_close_kernel();
39170
39171 clk = clk_register(NULL, &pll_clk->hw.hw);
39172 if (WARN_ON(IS_ERR(clk))) {
39173diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39174index b0c18ed..1713a80 100644
39175--- a/drivers/cpufreq/acpi-cpufreq.c
39176+++ b/drivers/cpufreq/acpi-cpufreq.c
39177@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39178 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39179 per_cpu(acfreq_data, cpu) = data;
39180
39181- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39182- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39183+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39184+ pax_open_kernel();
39185+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39186+ pax_close_kernel();
39187+ }
39188
39189 result = acpi_processor_register_performance(data->acpi_data, cpu);
39190 if (result)
39191@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39192 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39193 break;
39194 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39195- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39196+ pax_open_kernel();
39197+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39198+ pax_close_kernel();
39199 break;
39200 default:
39201 break;
39202@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39203 if (!msrs)
39204 return;
39205
39206- acpi_cpufreq_driver.boost_supported = true;
39207- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39208+ pax_open_kernel();
39209+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39210+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39211+ pax_close_kernel();
39212
39213 cpu_notifier_register_begin();
39214
39215diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39216index fde97d6..3631eca 100644
39217--- a/drivers/cpufreq/cpufreq-dt.c
39218+++ b/drivers/cpufreq/cpufreq-dt.c
39219@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39220 if (!IS_ERR(cpu_reg))
39221 regulator_put(cpu_reg);
39222
39223- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39224+ pax_open_kernel();
39225+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39226+ pax_close_kernel();
39227
39228 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39229 if (ret)
39230diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39231index 7030c40..3a97de6 100644
39232--- a/drivers/cpufreq/cpufreq.c
39233+++ b/drivers/cpufreq/cpufreq.c
39234@@ -2135,7 +2135,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39235 }
39236
39237 mutex_lock(&cpufreq_governor_mutex);
39238- list_del(&governor->governor_list);
39239+ pax_list_del(&governor->governor_list);
39240 mutex_unlock(&cpufreq_governor_mutex);
39241 return;
39242 }
39243@@ -2351,7 +2351,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39244 return NOTIFY_OK;
39245 }
39246
39247-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39248+static struct notifier_block cpufreq_cpu_notifier = {
39249 .notifier_call = cpufreq_cpu_callback,
39250 };
39251
39252@@ -2391,13 +2391,17 @@ int cpufreq_boost_trigger_state(int state)
39253 return 0;
39254
39255 write_lock_irqsave(&cpufreq_driver_lock, flags);
39256- cpufreq_driver->boost_enabled = state;
39257+ pax_open_kernel();
39258+ *(bool *)&cpufreq_driver->boost_enabled = state;
39259+ pax_close_kernel();
39260 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39261
39262 ret = cpufreq_driver->set_boost(state);
39263 if (ret) {
39264 write_lock_irqsave(&cpufreq_driver_lock, flags);
39265- cpufreq_driver->boost_enabled = !state;
39266+ pax_open_kernel();
39267+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39268+ pax_close_kernel();
39269 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39270
39271 pr_err("%s: Cannot %s BOOST\n",
39272@@ -2454,8 +2458,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39273
39274 pr_debug("trying to register driver %s\n", driver_data->name);
39275
39276- if (driver_data->setpolicy)
39277- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39278+ if (driver_data->setpolicy) {
39279+ pax_open_kernel();
39280+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39281+ pax_close_kernel();
39282+ }
39283
39284 write_lock_irqsave(&cpufreq_driver_lock, flags);
39285 if (cpufreq_driver) {
39286@@ -2470,8 +2477,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39287 * Check if driver provides function to enable boost -
39288 * if not, use cpufreq_boost_set_sw as default
39289 */
39290- if (!cpufreq_driver->set_boost)
39291- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39292+ if (!cpufreq_driver->set_boost) {
39293+ pax_open_kernel();
39294+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39295+ pax_close_kernel();
39296+ }
39297
39298 ret = cpufreq_sysfs_create_file(&boost.attr);
39299 if (ret) {
39300diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39301index 1b44496..b80ff5e 100644
39302--- a/drivers/cpufreq/cpufreq_governor.c
39303+++ b/drivers/cpufreq/cpufreq_governor.c
39304@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39305 struct dbs_data *dbs_data;
39306 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39307 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39308- struct od_ops *od_ops = NULL;
39309+ const struct od_ops *od_ops = NULL;
39310 struct od_dbs_tuners *od_tuners = NULL;
39311 struct cs_dbs_tuners *cs_tuners = NULL;
39312 struct cpu_dbs_common_info *cpu_cdbs;
39313@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39314
39315 if ((cdata->governor == GOV_CONSERVATIVE) &&
39316 (!policy->governor->initialized)) {
39317- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39318+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39319
39320 cpufreq_register_notifier(cs_ops->notifier_block,
39321 CPUFREQ_TRANSITION_NOTIFIER);
39322@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39323
39324 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39325 (policy->governor->initialized == 1)) {
39326- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39327+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39328
39329 cpufreq_unregister_notifier(cs_ops->notifier_block,
39330 CPUFREQ_TRANSITION_NOTIFIER);
39331diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39332index cc401d1..8197340 100644
39333--- a/drivers/cpufreq/cpufreq_governor.h
39334+++ b/drivers/cpufreq/cpufreq_governor.h
39335@@ -212,7 +212,7 @@ struct common_dbs_data {
39336 void (*exit)(struct dbs_data *dbs_data);
39337
39338 /* Governor specific ops, see below */
39339- void *gov_ops;
39340+ const void *gov_ops;
39341 };
39342
39343 /* Governor Per policy data */
39344@@ -232,7 +232,7 @@ struct od_ops {
39345 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39346 unsigned int freq_next, unsigned int relation);
39347 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39348-};
39349+} __no_const;
39350
39351 struct cs_ops {
39352 struct notifier_block *notifier_block;
39353diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39354index ad3f38f..8f086cd 100644
39355--- a/drivers/cpufreq/cpufreq_ondemand.c
39356+++ b/drivers/cpufreq/cpufreq_ondemand.c
39357@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39358
39359 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39360
39361-static struct od_ops od_ops = {
39362+static struct od_ops od_ops __read_only = {
39363 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39364 .powersave_bias_target = generic_powersave_bias_target,
39365 .freq_increase = dbs_freq_increase,
39366@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39367 (struct cpufreq_policy *, unsigned int, unsigned int),
39368 unsigned int powersave_bias)
39369 {
39370- od_ops.powersave_bias_target = f;
39371+ pax_open_kernel();
39372+ *(void **)&od_ops.powersave_bias_target = f;
39373+ pax_close_kernel();
39374 od_set_powersave_bias(powersave_bias);
39375 }
39376 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39377
39378 void od_unregister_powersave_bias_handler(void)
39379 {
39380- od_ops.powersave_bias_target = generic_powersave_bias_target;
39381+ pax_open_kernel();
39382+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39383+ pax_close_kernel();
39384 od_set_powersave_bias(0);
39385 }
39386 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39387diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39388index 742eefb..e2fcfc8 100644
39389--- a/drivers/cpufreq/intel_pstate.c
39390+++ b/drivers/cpufreq/intel_pstate.c
39391@@ -133,10 +133,10 @@ struct pstate_funcs {
39392 struct cpu_defaults {
39393 struct pstate_adjust_policy pid_policy;
39394 struct pstate_funcs funcs;
39395-};
39396+} __do_const;
39397
39398 static struct pstate_adjust_policy pid_params;
39399-static struct pstate_funcs pstate_funcs;
39400+static struct pstate_funcs *pstate_funcs;
39401 static int hwp_active;
39402
39403 struct perf_limits {
39404@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39405
39406 cpu->pstate.current_pstate = pstate;
39407
39408- pstate_funcs.set(cpu, pstate);
39409+ pstate_funcs->set(cpu, pstate);
39410 }
39411
39412 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39413 {
39414- cpu->pstate.min_pstate = pstate_funcs.get_min();
39415- cpu->pstate.max_pstate = pstate_funcs.get_max();
39416- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39417- cpu->pstate.scaling = pstate_funcs.get_scaling();
39418+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39419+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39420+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39421+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39422
39423- if (pstate_funcs.get_vid)
39424- pstate_funcs.get_vid(cpu);
39425+ if (pstate_funcs->get_vid)
39426+ pstate_funcs->get_vid(cpu);
39427 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39428 }
39429
39430@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
39431 rdmsrl(MSR_IA32_APERF, aperf);
39432 rdmsrl(MSR_IA32_MPERF, mperf);
39433
39434- if (!pstate_funcs.get_max() ||
39435- !pstate_funcs.get_min() ||
39436- !pstate_funcs.get_turbo())
39437+ if (!pstate_funcs->get_max() ||
39438+ !pstate_funcs->get_min() ||
39439+ !pstate_funcs->get_turbo())
39440 return -ENODEV;
39441
39442 rdmsrl(MSR_IA32_APERF, tmp);
39443@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
39444 return 0;
39445 }
39446
39447-static void copy_pid_params(struct pstate_adjust_policy *policy)
39448+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39449 {
39450 pid_params.sample_rate_ms = policy->sample_rate_ms;
39451 pid_params.p_gain_pct = policy->p_gain_pct;
39452@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39453
39454 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39455 {
39456- pstate_funcs.get_max = funcs->get_max;
39457- pstate_funcs.get_min = funcs->get_min;
39458- pstate_funcs.get_turbo = funcs->get_turbo;
39459- pstate_funcs.get_scaling = funcs->get_scaling;
39460- pstate_funcs.set = funcs->set;
39461- pstate_funcs.get_vid = funcs->get_vid;
39462+ pstate_funcs = funcs;
39463 }
39464
39465 #if IS_ENABLED(CONFIG_ACPI)
39466diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39467index 529cfd9..0e28fff 100644
39468--- a/drivers/cpufreq/p4-clockmod.c
39469+++ b/drivers/cpufreq/p4-clockmod.c
39470@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39471 case 0x0F: /* Core Duo */
39472 case 0x16: /* Celeron Core */
39473 case 0x1C: /* Atom */
39474- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39475+ pax_open_kernel();
39476+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39477+ pax_close_kernel();
39478 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39479 case 0x0D: /* Pentium M (Dothan) */
39480- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39481+ pax_open_kernel();
39482+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39483+ pax_close_kernel();
39484 /* fall through */
39485 case 0x09: /* Pentium M (Banias) */
39486 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39487@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39488
39489 /* on P-4s, the TSC runs with constant frequency independent whether
39490 * throttling is active or not. */
39491- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39492+ pax_open_kernel();
39493+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39494+ pax_close_kernel();
39495
39496 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39497 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39498diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39499index 9bb42ba..b01b4a2 100644
39500--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39501+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39502@@ -18,14 +18,12 @@
39503 #include <asm/head.h>
39504 #include <asm/timer.h>
39505
39506-static struct cpufreq_driver *cpufreq_us3_driver;
39507-
39508 struct us3_freq_percpu_info {
39509 struct cpufreq_frequency_table table[4];
39510 };
39511
39512 /* Indexed by cpu number. */
39513-static struct us3_freq_percpu_info *us3_freq_table;
39514+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39515
39516 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39517 * in the Safari config register.
39518@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39519
39520 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39521 {
39522- if (cpufreq_us3_driver)
39523- us3_freq_target(policy, 0);
39524+ us3_freq_target(policy, 0);
39525
39526 return 0;
39527 }
39528
39529+static int __init us3_freq_init(void);
39530+static void __exit us3_freq_exit(void);
39531+
39532+static struct cpufreq_driver cpufreq_us3_driver = {
39533+ .init = us3_freq_cpu_init,
39534+ .verify = cpufreq_generic_frequency_table_verify,
39535+ .target_index = us3_freq_target,
39536+ .get = us3_freq_get,
39537+ .exit = us3_freq_cpu_exit,
39538+ .name = "UltraSPARC-III",
39539+
39540+};
39541+
39542 static int __init us3_freq_init(void)
39543 {
39544 unsigned long manuf, impl, ver;
39545- int ret;
39546
39547 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39548 return -ENODEV;
39549@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39550 (impl == CHEETAH_IMPL ||
39551 impl == CHEETAH_PLUS_IMPL ||
39552 impl == JAGUAR_IMPL ||
39553- impl == PANTHER_IMPL)) {
39554- struct cpufreq_driver *driver;
39555-
39556- ret = -ENOMEM;
39557- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39558- if (!driver)
39559- goto err_out;
39560-
39561- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39562- GFP_KERNEL);
39563- if (!us3_freq_table)
39564- goto err_out;
39565-
39566- driver->init = us3_freq_cpu_init;
39567- driver->verify = cpufreq_generic_frequency_table_verify;
39568- driver->target_index = us3_freq_target;
39569- driver->get = us3_freq_get;
39570- driver->exit = us3_freq_cpu_exit;
39571- strcpy(driver->name, "UltraSPARC-III");
39572-
39573- cpufreq_us3_driver = driver;
39574- ret = cpufreq_register_driver(driver);
39575- if (ret)
39576- goto err_out;
39577-
39578- return 0;
39579-
39580-err_out:
39581- if (driver) {
39582- kfree(driver);
39583- cpufreq_us3_driver = NULL;
39584- }
39585- kfree(us3_freq_table);
39586- us3_freq_table = NULL;
39587- return ret;
39588- }
39589+ impl == PANTHER_IMPL))
39590+ return cpufreq_register_driver(&cpufreq_us3_driver);
39591
39592 return -ENODEV;
39593 }
39594
39595 static void __exit us3_freq_exit(void)
39596 {
39597- if (cpufreq_us3_driver) {
39598- cpufreq_unregister_driver(cpufreq_us3_driver);
39599- kfree(cpufreq_us3_driver);
39600- cpufreq_us3_driver = NULL;
39601- kfree(us3_freq_table);
39602- us3_freq_table = NULL;
39603- }
39604+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39605 }
39606
39607 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39608diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39609index 7d4a315..21bb886 100644
39610--- a/drivers/cpufreq/speedstep-centrino.c
39611+++ b/drivers/cpufreq/speedstep-centrino.c
39612@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39613 !cpu_has(cpu, X86_FEATURE_EST))
39614 return -ENODEV;
39615
39616- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39617- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39618+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39619+ pax_open_kernel();
39620+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39621+ pax_close_kernel();
39622+ }
39623
39624 if (policy->cpu != 0)
39625 return -ENODEV;
39626diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39627index 2697e87..c32476c 100644
39628--- a/drivers/cpuidle/driver.c
39629+++ b/drivers/cpuidle/driver.c
39630@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39631
39632 static void poll_idle_init(struct cpuidle_driver *drv)
39633 {
39634- struct cpuidle_state *state = &drv->states[0];
39635+ cpuidle_state_no_const *state = &drv->states[0];
39636
39637 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39638 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39639diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39640index fb9f511..213e6cc 100644
39641--- a/drivers/cpuidle/governor.c
39642+++ b/drivers/cpuidle/governor.c
39643@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39644 mutex_lock(&cpuidle_lock);
39645 if (__cpuidle_find_governor(gov->name) == NULL) {
39646 ret = 0;
39647- list_add_tail(&gov->governor_list, &cpuidle_governors);
39648+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39649 if (!cpuidle_curr_governor ||
39650 cpuidle_curr_governor->rating < gov->rating)
39651 cpuidle_switch_governor(gov);
39652diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39653index 97c5903..023ad23 100644
39654--- a/drivers/cpuidle/sysfs.c
39655+++ b/drivers/cpuidle/sysfs.c
39656@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39657 NULL
39658 };
39659
39660-static struct attribute_group cpuidle_attr_group = {
39661+static attribute_group_no_const cpuidle_attr_group = {
39662 .attrs = cpuidle_default_attrs,
39663 .name = "cpuidle",
39664 };
39665diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39666index 8d2a772..33826c9 100644
39667--- a/drivers/crypto/hifn_795x.c
39668+++ b/drivers/crypto/hifn_795x.c
39669@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39670 MODULE_PARM_DESC(hifn_pll_ref,
39671 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39672
39673-static atomic_t hifn_dev_number;
39674+static atomic_unchecked_t hifn_dev_number;
39675
39676 #define ACRYPTO_OP_DECRYPT 0
39677 #define ACRYPTO_OP_ENCRYPT 1
39678@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39679 goto err_out_disable_pci_device;
39680
39681 snprintf(name, sizeof(name), "hifn%d",
39682- atomic_inc_return(&hifn_dev_number)-1);
39683+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39684
39685 err = pci_request_regions(pdev, name);
39686 if (err)
39687diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39688index 30b538d8..1610d75 100644
39689--- a/drivers/devfreq/devfreq.c
39690+++ b/drivers/devfreq/devfreq.c
39691@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39692 goto err_out;
39693 }
39694
39695- list_add(&governor->node, &devfreq_governor_list);
39696+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39697
39698 list_for_each_entry(devfreq, &devfreq_list, node) {
39699 int ret = 0;
39700@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39701 }
39702 }
39703
39704- list_del(&governor->node);
39705+ pax_list_del((struct list_head *)&governor->node);
39706 err_out:
39707 mutex_unlock(&devfreq_list_lock);
39708
39709diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39710index 3a2adb1..b3be9a3 100644
39711--- a/drivers/dma/sh/shdma-base.c
39712+++ b/drivers/dma/sh/shdma-base.c
39713@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39714 schan->slave_id = -EINVAL;
39715 }
39716
39717- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39718- sdev->desc_size, GFP_KERNEL);
39719+ schan->desc = kcalloc(sdev->desc_size,
39720+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39721 if (!schan->desc) {
39722 ret = -ENOMEM;
39723 goto edescalloc;
39724diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39725index aec8a84..7b45a1f 100644
39726--- a/drivers/dma/sh/shdmac.c
39727+++ b/drivers/dma/sh/shdmac.c
39728@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39729 return ret;
39730 }
39731
39732-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39733+static struct notifier_block sh_dmae_nmi_notifier = {
39734 .notifier_call = sh_dmae_nmi_handler,
39735
39736 /* Run before NMI debug handler and KGDB */
39737diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39738index 592af5f..bb1d583 100644
39739--- a/drivers/edac/edac_device.c
39740+++ b/drivers/edac/edac_device.c
39741@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39742 */
39743 int edac_device_alloc_index(void)
39744 {
39745- static atomic_t device_indexes = ATOMIC_INIT(0);
39746+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39747
39748- return atomic_inc_return(&device_indexes) - 1;
39749+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39750 }
39751 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39752
39753diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39754index 670d282..6675f4d 100644
39755--- a/drivers/edac/edac_mc_sysfs.c
39756+++ b/drivers/edac/edac_mc_sysfs.c
39757@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39758 struct dev_ch_attribute {
39759 struct device_attribute attr;
39760 int channel;
39761-};
39762+} __do_const;
39763
39764 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39765 struct dev_ch_attribute dev_attr_legacy_##_name = \
39766@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39767 }
39768
39769 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39770+ pax_open_kernel();
39771 if (mci->get_sdram_scrub_rate) {
39772- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39773- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39774+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39775+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39776 }
39777 if (mci->set_sdram_scrub_rate) {
39778- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39779- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39780+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39781+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39782 }
39783+ pax_close_kernel();
39784 err = device_create_file(&mci->dev,
39785 &dev_attr_sdram_scrub_rate);
39786 if (err) {
39787diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39788index 2cf44b4d..6dd2dc7 100644
39789--- a/drivers/edac/edac_pci.c
39790+++ b/drivers/edac/edac_pci.c
39791@@ -29,7 +29,7 @@
39792
39793 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39794 static LIST_HEAD(edac_pci_list);
39795-static atomic_t pci_indexes = ATOMIC_INIT(0);
39796+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39797
39798 /*
39799 * edac_pci_alloc_ctl_info
39800@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39801 */
39802 int edac_pci_alloc_index(void)
39803 {
39804- return atomic_inc_return(&pci_indexes) - 1;
39805+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39806 }
39807 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39808
39809diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39810index 24d877f..4e30133 100644
39811--- a/drivers/edac/edac_pci_sysfs.c
39812+++ b/drivers/edac/edac_pci_sysfs.c
39813@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39814 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39815 static int edac_pci_poll_msec = 1000; /* one second workq period */
39816
39817-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39818-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39819+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39820+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39821
39822 static struct kobject *edac_pci_top_main_kobj;
39823 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39824@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
39825 void *value;
39826 ssize_t(*show) (void *, char *);
39827 ssize_t(*store) (void *, const char *, size_t);
39828-};
39829+} __do_const;
39830
39831 /* Set of show/store abstract level functions for PCI Parity object */
39832 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39833@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39834 edac_printk(KERN_CRIT, EDAC_PCI,
39835 "Signaled System Error on %s\n",
39836 pci_name(dev));
39837- atomic_inc(&pci_nonparity_count);
39838+ atomic_inc_unchecked(&pci_nonparity_count);
39839 }
39840
39841 if (status & (PCI_STATUS_PARITY)) {
39842@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39843 "Master Data Parity Error on %s\n",
39844 pci_name(dev));
39845
39846- atomic_inc(&pci_parity_count);
39847+ atomic_inc_unchecked(&pci_parity_count);
39848 }
39849
39850 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39851@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39852 "Detected Parity Error on %s\n",
39853 pci_name(dev));
39854
39855- atomic_inc(&pci_parity_count);
39856+ atomic_inc_unchecked(&pci_parity_count);
39857 }
39858 }
39859
39860@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39861 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
39862 "Signaled System Error on %s\n",
39863 pci_name(dev));
39864- atomic_inc(&pci_nonparity_count);
39865+ atomic_inc_unchecked(&pci_nonparity_count);
39866 }
39867
39868 if (status & (PCI_STATUS_PARITY)) {
39869@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39870 "Master Data Parity Error on "
39871 "%s\n", pci_name(dev));
39872
39873- atomic_inc(&pci_parity_count);
39874+ atomic_inc_unchecked(&pci_parity_count);
39875 }
39876
39877 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39878@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39879 "Detected Parity Error on %s\n",
39880 pci_name(dev));
39881
39882- atomic_inc(&pci_parity_count);
39883+ atomic_inc_unchecked(&pci_parity_count);
39884 }
39885 }
39886 }
39887@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
39888 if (!check_pci_errors)
39889 return;
39890
39891- before_count = atomic_read(&pci_parity_count);
39892+ before_count = atomic_read_unchecked(&pci_parity_count);
39893
39894 /* scan all PCI devices looking for a Parity Error on devices and
39895 * bridges.
39896@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
39897 /* Only if operator has selected panic on PCI Error */
39898 if (edac_pci_get_panic_on_pe()) {
39899 /* If the count is different 'after' from 'before' */
39900- if (before_count != atomic_read(&pci_parity_count))
39901+ if (before_count != atomic_read_unchecked(&pci_parity_count))
39902 panic("EDAC: PCI Parity Error");
39903 }
39904 }
39905diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
39906index c2359a1..8bd119d 100644
39907--- a/drivers/edac/mce_amd.h
39908+++ b/drivers/edac/mce_amd.h
39909@@ -74,7 +74,7 @@ struct amd_decoder_ops {
39910 bool (*mc0_mce)(u16, u8);
39911 bool (*mc1_mce)(u16, u8);
39912 bool (*mc2_mce)(u16, u8);
39913-};
39914+} __no_const;
39915
39916 void amd_report_gart_errors(bool);
39917 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
39918diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
39919index 57ea7f4..af06b76 100644
39920--- a/drivers/firewire/core-card.c
39921+++ b/drivers/firewire/core-card.c
39922@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
39923 const struct fw_card_driver *driver,
39924 struct device *device)
39925 {
39926- static atomic_t index = ATOMIC_INIT(-1);
39927+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
39928
39929- card->index = atomic_inc_return(&index);
39930+ card->index = atomic_inc_return_unchecked(&index);
39931 card->driver = driver;
39932 card->device = device;
39933 card->current_tlabel = 0;
39934@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
39935
39936 void fw_core_remove_card(struct fw_card *card)
39937 {
39938- struct fw_card_driver dummy_driver = dummy_driver_template;
39939+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
39940
39941 card->driver->update_phy_reg(card, 4,
39942 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
39943diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
39944index f9e3aee..269dbdb 100644
39945--- a/drivers/firewire/core-device.c
39946+++ b/drivers/firewire/core-device.c
39947@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
39948 struct config_rom_attribute {
39949 struct device_attribute attr;
39950 u32 key;
39951-};
39952+} __do_const;
39953
39954 static ssize_t show_immediate(struct device *dev,
39955 struct device_attribute *dattr, char *buf)
39956diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
39957index eb6935c..3cc2bfa 100644
39958--- a/drivers/firewire/core-transaction.c
39959+++ b/drivers/firewire/core-transaction.c
39960@@ -38,6 +38,7 @@
39961 #include <linux/timer.h>
39962 #include <linux/types.h>
39963 #include <linux/workqueue.h>
39964+#include <linux/sched.h>
39965
39966 #include <asm/byteorder.h>
39967
39968diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
39969index e1480ff6..1a429bd 100644
39970--- a/drivers/firewire/core.h
39971+++ b/drivers/firewire/core.h
39972@@ -111,6 +111,7 @@ struct fw_card_driver {
39973
39974 int (*stop_iso)(struct fw_iso_context *ctx);
39975 };
39976+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
39977
39978 void fw_card_initialize(struct fw_card *card,
39979 const struct fw_card_driver *driver, struct device *device);
39980diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
39981index aff9018..fc87ded 100644
39982--- a/drivers/firewire/ohci.c
39983+++ b/drivers/firewire/ohci.c
39984@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
39985 be32_to_cpu(ohci->next_header));
39986 }
39987
39988+#ifndef CONFIG_GRKERNSEC
39989 if (param_remote_dma) {
39990 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
39991 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
39992 }
39993+#endif
39994
39995 spin_unlock_irq(&ohci->lock);
39996
39997@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
39998 unsigned long flags;
39999 int n, ret = 0;
40000
40001+#ifndef CONFIG_GRKERNSEC
40002 if (param_remote_dma)
40003 return 0;
40004+#endif
40005
40006 /*
40007 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40008diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40009index 94a58a0..f5eba42 100644
40010--- a/drivers/firmware/dmi-id.c
40011+++ b/drivers/firmware/dmi-id.c
40012@@ -16,7 +16,7 @@
40013 struct dmi_device_attribute{
40014 struct device_attribute dev_attr;
40015 int field;
40016-};
40017+} __do_const;
40018 #define to_dmi_dev_attr(_dev_attr) \
40019 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40020
40021diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40022index 69fac06..820f0c9a 100644
40023--- a/drivers/firmware/dmi_scan.c
40024+++ b/drivers/firmware/dmi_scan.c
40025@@ -901,7 +901,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40026 if (buf == NULL)
40027 return -1;
40028
40029- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40030+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40031
40032 dmi_unmap(buf);
40033 return 0;
40034diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40035index 4fd9961..52d60ce 100644
40036--- a/drivers/firmware/efi/cper.c
40037+++ b/drivers/firmware/efi/cper.c
40038@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40039 */
40040 u64 cper_next_record_id(void)
40041 {
40042- static atomic64_t seq;
40043+ static atomic64_unchecked_t seq;
40044
40045- if (!atomic64_read(&seq))
40046- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40047+ if (!atomic64_read_unchecked(&seq))
40048+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40049
40050- return atomic64_inc_return(&seq);
40051+ return atomic64_inc_return_unchecked(&seq);
40052 }
40053 EXPORT_SYMBOL_GPL(cper_next_record_id);
40054
40055diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40056index 9035c1b..aff45f8 100644
40057--- a/drivers/firmware/efi/efi.c
40058+++ b/drivers/firmware/efi/efi.c
40059@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
40060 };
40061
40062 static struct efivars generic_efivars;
40063-static struct efivar_operations generic_ops;
40064+static efivar_operations_no_const generic_ops __read_only;
40065
40066 static int generic_ops_register(void)
40067 {
40068- generic_ops.get_variable = efi.get_variable;
40069- generic_ops.set_variable = efi.set_variable;
40070- generic_ops.get_next_variable = efi.get_next_variable;
40071- generic_ops.query_variable_store = efi_query_variable_store;
40072+ pax_open_kernel();
40073+ *(void **)&generic_ops.get_variable = efi.get_variable;
40074+ *(void **)&generic_ops.set_variable = efi.set_variable;
40075+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40076+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40077+ pax_close_kernel();
40078
40079 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40080 }
40081diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40082index f256ecd..387dcb1 100644
40083--- a/drivers/firmware/efi/efivars.c
40084+++ b/drivers/firmware/efi/efivars.c
40085@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40086 static int
40087 create_efivars_bin_attributes(void)
40088 {
40089- struct bin_attribute *attr;
40090+ bin_attribute_no_const *attr;
40091 int error;
40092
40093 /* new_var */
40094diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40095index 2f569aa..c95f4fb 100644
40096--- a/drivers/firmware/google/memconsole.c
40097+++ b/drivers/firmware/google/memconsole.c
40098@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40099 if (!found_memconsole())
40100 return -ENODEV;
40101
40102- memconsole_bin_attr.size = memconsole_length;
40103+ pax_open_kernel();
40104+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40105+ pax_close_kernel();
40106+
40107 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40108 }
40109
40110diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40111index 3cfcfc6..09d6f117 100644
40112--- a/drivers/gpio/gpio-em.c
40113+++ b/drivers/gpio/gpio-em.c
40114@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40115 struct em_gio_priv *p;
40116 struct resource *io[2], *irq[2];
40117 struct gpio_chip *gpio_chip;
40118- struct irq_chip *irq_chip;
40119+ irq_chip_no_const *irq_chip;
40120 const char *name = dev_name(&pdev->dev);
40121 int ret;
40122
40123diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40124index 7818cd1..1be40e5 100644
40125--- a/drivers/gpio/gpio-ich.c
40126+++ b/drivers/gpio/gpio-ich.c
40127@@ -94,7 +94,7 @@ struct ichx_desc {
40128 * this option allows driver caching written output values
40129 */
40130 bool use_outlvl_cache;
40131-};
40132+} __do_const;
40133
40134 static struct {
40135 spinlock_t lock;
40136diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40137index f476ae2..05e1bdd 100644
40138--- a/drivers/gpio/gpio-omap.c
40139+++ b/drivers/gpio/gpio-omap.c
40140@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40141 const struct omap_gpio_platform_data *pdata;
40142 struct resource *res;
40143 struct gpio_bank *bank;
40144- struct irq_chip *irqc;
40145+ irq_chip_no_const *irqc;
40146 int ret;
40147
40148 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40149diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40150index 584484e..e26ebd6 100644
40151--- a/drivers/gpio/gpio-rcar.c
40152+++ b/drivers/gpio/gpio-rcar.c
40153@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40154 struct gpio_rcar_priv *p;
40155 struct resource *io, *irq;
40156 struct gpio_chip *gpio_chip;
40157- struct irq_chip *irq_chip;
40158+ irq_chip_no_const *irq_chip;
40159 struct device *dev = &pdev->dev;
40160 const char *name = dev_name(dev);
40161 int ret;
40162diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40163index c1caa45..f0f97d2 100644
40164--- a/drivers/gpio/gpio-vr41xx.c
40165+++ b/drivers/gpio/gpio-vr41xx.c
40166@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40167 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40168 maskl, pendl, maskh, pendh);
40169
40170- atomic_inc(&irq_err_count);
40171+ atomic_inc_unchecked(&irq_err_count);
40172
40173 return -EINVAL;
40174 }
40175diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40176index 568aa2b..d1204d8 100644
40177--- a/drivers/gpio/gpiolib.c
40178+++ b/drivers/gpio/gpiolib.c
40179@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40180 }
40181
40182 if (gpiochip->irqchip) {
40183- gpiochip->irqchip->irq_request_resources = NULL;
40184- gpiochip->irqchip->irq_release_resources = NULL;
40185+ pax_open_kernel();
40186+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40187+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40188+ pax_close_kernel();
40189 gpiochip->irqchip = NULL;
40190 }
40191 }
40192@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40193 gpiochip->irqchip = NULL;
40194 return -EINVAL;
40195 }
40196- irqchip->irq_request_resources = gpiochip_irq_reqres;
40197- irqchip->irq_release_resources = gpiochip_irq_relres;
40198+
40199+ pax_open_kernel();
40200+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40201+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40202+ pax_close_kernel();
40203
40204 /*
40205 * Prepare the mapping since the irqchip shall be orthogonal to
40206diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40207index 5213da4..7ef736e 100644
40208--- a/drivers/gpu/drm/drm_crtc.c
40209+++ b/drivers/gpu/drm/drm_crtc.c
40210@@ -3961,7 +3961,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40211 goto done;
40212 }
40213
40214- if (copy_to_user(&enum_ptr[copied].name,
40215+ if (copy_to_user(enum_ptr[copied].name,
40216 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40217 ret = -EFAULT;
40218 goto done;
40219diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40220index 4f41377..ee33f40 100644
40221--- a/drivers/gpu/drm/drm_drv.c
40222+++ b/drivers/gpu/drm/drm_drv.c
40223@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
40224
40225 drm_device_set_unplugged(dev);
40226
40227- if (dev->open_count == 0) {
40228+ if (local_read(&dev->open_count) == 0) {
40229 drm_put_dev(dev);
40230 }
40231 mutex_unlock(&drm_global_mutex);
40232diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40233index 0b9514b..6acd174 100644
40234--- a/drivers/gpu/drm/drm_fops.c
40235+++ b/drivers/gpu/drm/drm_fops.c
40236@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40237 return PTR_ERR(minor);
40238
40239 dev = minor->dev;
40240- if (!dev->open_count++)
40241+ if (local_inc_return(&dev->open_count) == 1)
40242 need_setup = 1;
40243
40244 /* share address_space across all char-devs of a single device */
40245@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40246 return 0;
40247
40248 err_undo:
40249- dev->open_count--;
40250+ local_dec(&dev->open_count);
40251 drm_minor_release(minor);
40252 return retcode;
40253 }
40254@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40255
40256 mutex_lock(&drm_global_mutex);
40257
40258- DRM_DEBUG("open_count = %d\n", dev->open_count);
40259+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40260
40261 mutex_lock(&dev->struct_mutex);
40262 list_del(&file_priv->lhead);
40263@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40264 * Begin inline drm_release
40265 */
40266
40267- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40268+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40269 task_pid_nr(current),
40270 (long)old_encode_dev(file_priv->minor->kdev->devt),
40271- dev->open_count);
40272+ local_read(&dev->open_count));
40273
40274 /* Release any auth tokens that might point to this file_priv,
40275 (do that under the drm_global_mutex) */
40276@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40277 * End inline drm_release
40278 */
40279
40280- if (!--dev->open_count) {
40281+ if (local_dec_and_test(&dev->open_count)) {
40282 retcode = drm_lastclose(dev);
40283 if (drm_device_is_unplugged(dev))
40284 drm_put_dev(dev);
40285diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40286index 3d2e91c..d31c4c9 100644
40287--- a/drivers/gpu/drm/drm_global.c
40288+++ b/drivers/gpu/drm/drm_global.c
40289@@ -36,7 +36,7 @@
40290 struct drm_global_item {
40291 struct mutex mutex;
40292 void *object;
40293- int refcount;
40294+ atomic_t refcount;
40295 };
40296
40297 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40298@@ -49,7 +49,7 @@ void drm_global_init(void)
40299 struct drm_global_item *item = &glob[i];
40300 mutex_init(&item->mutex);
40301 item->object = NULL;
40302- item->refcount = 0;
40303+ atomic_set(&item->refcount, 0);
40304 }
40305 }
40306
40307@@ -59,7 +59,7 @@ void drm_global_release(void)
40308 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40309 struct drm_global_item *item = &glob[i];
40310 BUG_ON(item->object != NULL);
40311- BUG_ON(item->refcount != 0);
40312+ BUG_ON(atomic_read(&item->refcount) != 0);
40313 }
40314 }
40315
40316@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40317 struct drm_global_item *item = &glob[ref->global_type];
40318
40319 mutex_lock(&item->mutex);
40320- if (item->refcount == 0) {
40321+ if (atomic_read(&item->refcount) == 0) {
40322 item->object = kzalloc(ref->size, GFP_KERNEL);
40323 if (unlikely(item->object == NULL)) {
40324 ret = -ENOMEM;
40325@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40326 goto out_err;
40327
40328 }
40329- ++item->refcount;
40330+ atomic_inc(&item->refcount);
40331 ref->object = item->object;
40332 mutex_unlock(&item->mutex);
40333 return 0;
40334@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40335 struct drm_global_item *item = &glob[ref->global_type];
40336
40337 mutex_lock(&item->mutex);
40338- BUG_ON(item->refcount == 0);
40339+ BUG_ON(atomic_read(&item->refcount) == 0);
40340 BUG_ON(ref->object != item->object);
40341- if (--item->refcount == 0) {
40342+ if (atomic_dec_and_test(&item->refcount)) {
40343 ref->release(ref);
40344 item->object = NULL;
40345 }
40346diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40347index 51efebd..2b70935 100644
40348--- a/drivers/gpu/drm/drm_info.c
40349+++ b/drivers/gpu/drm/drm_info.c
40350@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40351 struct drm_local_map *map;
40352 struct drm_map_list *r_list;
40353
40354- /* Hardcoded from _DRM_FRAME_BUFFER,
40355- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40356- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40357- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40358+ static const char * const types[] = {
40359+ [_DRM_FRAME_BUFFER] = "FB",
40360+ [_DRM_REGISTERS] = "REG",
40361+ [_DRM_SHM] = "SHM",
40362+ [_DRM_AGP] = "AGP",
40363+ [_DRM_SCATTER_GATHER] = "SG",
40364+ [_DRM_CONSISTENT] = "PCI"};
40365 const char *type;
40366 int i;
40367
40368@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40369 map = r_list->map;
40370 if (!map)
40371 continue;
40372- if (map->type < 0 || map->type > 5)
40373+ if (map->type >= ARRAY_SIZE(types))
40374 type = "??";
40375 else
40376 type = types[map->type];
40377diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40378index 2f4c4343..dd12cd2 100644
40379--- a/drivers/gpu/drm/drm_ioc32.c
40380+++ b/drivers/gpu/drm/drm_ioc32.c
40381@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40382 request = compat_alloc_user_space(nbytes);
40383 if (!access_ok(VERIFY_WRITE, request, nbytes))
40384 return -EFAULT;
40385- list = (struct drm_buf_desc *) (request + 1);
40386+ list = (struct drm_buf_desc __user *) (request + 1);
40387
40388 if (__put_user(count, &request->count)
40389 || __put_user(list, &request->list))
40390@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40391 request = compat_alloc_user_space(nbytes);
40392 if (!access_ok(VERIFY_WRITE, request, nbytes))
40393 return -EFAULT;
40394- list = (struct drm_buf_pub *) (request + 1);
40395+ list = (struct drm_buf_pub __user *) (request + 1);
40396
40397 if (__put_user(count, &request->count)
40398 || __put_user(list, &request->list))
40399@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40400 return 0;
40401 }
40402
40403-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40404+drm_ioctl_compat_t drm_compat_ioctls[] = {
40405 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40406 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40407 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40408@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40409 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40410 {
40411 unsigned int nr = DRM_IOCTL_NR(cmd);
40412- drm_ioctl_compat_t *fn;
40413 int ret;
40414
40415 /* Assume that ioctls without an explicit compat routine will just
40416@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40417 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40418 return drm_ioctl(filp, cmd, arg);
40419
40420- fn = drm_compat_ioctls[nr];
40421-
40422- if (fn != NULL)
40423- ret = (*fn) (filp, cmd, arg);
40424+ if (drm_compat_ioctls[nr] != NULL)
40425+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40426 else
40427 ret = drm_ioctl(filp, cmd, arg);
40428
40429diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40430index 00587a1..57a65ca 100644
40431--- a/drivers/gpu/drm/drm_ioctl.c
40432+++ b/drivers/gpu/drm/drm_ioctl.c
40433@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
40434 struct drm_file *file_priv = filp->private_data;
40435 struct drm_device *dev;
40436 const struct drm_ioctl_desc *ioctl = NULL;
40437- drm_ioctl_t *func;
40438+ drm_ioctl_no_const_t func;
40439 unsigned int nr = DRM_IOCTL_NR(cmd);
40440 int retcode = -EINVAL;
40441 char stack_kdata[128];
40442diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40443index 93ec5dc..82acbaf 100644
40444--- a/drivers/gpu/drm/i810/i810_drv.h
40445+++ b/drivers/gpu/drm/i810/i810_drv.h
40446@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40447 int page_flipping;
40448
40449 wait_queue_head_t irq_queue;
40450- atomic_t irq_received;
40451- atomic_t irq_emitted;
40452+ atomic_unchecked_t irq_received;
40453+ atomic_unchecked_t irq_emitted;
40454
40455 int front_offset;
40456 } drm_i810_private_t;
40457diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40458index ecee3bc..ad5ae67 100644
40459--- a/drivers/gpu/drm/i915/i915_dma.c
40460+++ b/drivers/gpu/drm/i915/i915_dma.c
40461@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40462 * locking inversion with the driver load path. And the access here is
40463 * completely racy anyway. So don't bother with locking for now.
40464 */
40465- return dev->open_count == 0;
40466+ return local_read(&dev->open_count) == 0;
40467 }
40468
40469 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40470diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40471index 1173831..7dfb389 100644
40472--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40473+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40474@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40475 static int
40476 validate_exec_list(struct drm_device *dev,
40477 struct drm_i915_gem_exec_object2 *exec,
40478- int count)
40479+ unsigned int count)
40480 {
40481 unsigned relocs_total = 0;
40482 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40483 unsigned invalid_flags;
40484- int i;
40485+ unsigned int i;
40486
40487 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40488 if (USES_FULL_PPGTT(dev))
40489diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40490index 176de63..1ef9ac7 100644
40491--- a/drivers/gpu/drm/i915/i915_ioc32.c
40492+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40493@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40494 (unsigned long)request);
40495 }
40496
40497-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40498+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40499 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40500 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40501 [DRM_I915_GETPARAM] = compat_i915_getparam,
40502@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40503 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40504 {
40505 unsigned int nr = DRM_IOCTL_NR(cmd);
40506- drm_ioctl_compat_t *fn = NULL;
40507 int ret;
40508
40509 if (nr < DRM_COMMAND_BASE)
40510 return drm_compat_ioctl(filp, cmd, arg);
40511
40512- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40513- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40514-
40515- if (fn != NULL)
40516+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40517+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40518 ret = (*fn) (filp, cmd, arg);
40519- else
40520+ } else
40521 ret = drm_ioctl(filp, cmd, arg);
40522
40523 return ret;
40524diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40525index 30d4eb3..92f2dc8 100644
40526--- a/drivers/gpu/drm/i915/intel_display.c
40527+++ b/drivers/gpu/drm/i915/intel_display.c
40528@@ -12935,13 +12935,13 @@ struct intel_quirk {
40529 int subsystem_vendor;
40530 int subsystem_device;
40531 void (*hook)(struct drm_device *dev);
40532-};
40533+} __do_const;
40534
40535 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40536 struct intel_dmi_quirk {
40537 void (*hook)(struct drm_device *dev);
40538 const struct dmi_system_id (*dmi_id_list)[];
40539-};
40540+} __do_const;
40541
40542 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40543 {
40544@@ -12949,18 +12949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40545 return 1;
40546 }
40547
40548-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40549+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40550 {
40551- .dmi_id_list = &(const struct dmi_system_id[]) {
40552- {
40553- .callback = intel_dmi_reverse_brightness,
40554- .ident = "NCR Corporation",
40555- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40556- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40557- },
40558- },
40559- { } /* terminating entry */
40560+ .callback = intel_dmi_reverse_brightness,
40561+ .ident = "NCR Corporation",
40562+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40563+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40564 },
40565+ },
40566+ { } /* terminating entry */
40567+};
40568+
40569+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40570+ {
40571+ .dmi_id_list = &intel_dmi_quirks_table,
40572 .hook = quirk_invert_brightness,
40573 },
40574 };
40575diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40576index b250130..98df2a4 100644
40577--- a/drivers/gpu/drm/imx/imx-drm-core.c
40578+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40579@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40580 if (imxdrm->pipes >= MAX_CRTC)
40581 return -EINVAL;
40582
40583- if (imxdrm->drm->open_count)
40584+ if (local_read(&imxdrm->drm->open_count))
40585 return -EBUSY;
40586
40587 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40588diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40589index b4a2014..219ab78 100644
40590--- a/drivers/gpu/drm/mga/mga_drv.h
40591+++ b/drivers/gpu/drm/mga/mga_drv.h
40592@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40593 u32 clear_cmd;
40594 u32 maccess;
40595
40596- atomic_t vbl_received; /**< Number of vblanks received. */
40597+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40598 wait_queue_head_t fence_queue;
40599- atomic_t last_fence_retired;
40600+ atomic_unchecked_t last_fence_retired;
40601 u32 next_fence_to_post;
40602
40603 unsigned int fb_cpp;
40604diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40605index 729bfd5..ead8823 100644
40606--- a/drivers/gpu/drm/mga/mga_ioc32.c
40607+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40608@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40609 return 0;
40610 }
40611
40612-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40613+drm_ioctl_compat_t mga_compat_ioctls[] = {
40614 [DRM_MGA_INIT] = compat_mga_init,
40615 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40616 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40617@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40618 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40619 {
40620 unsigned int nr = DRM_IOCTL_NR(cmd);
40621- drm_ioctl_compat_t *fn = NULL;
40622 int ret;
40623
40624 if (nr < DRM_COMMAND_BASE)
40625 return drm_compat_ioctl(filp, cmd, arg);
40626
40627- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40628- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40629-
40630- if (fn != NULL)
40631+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40632+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40633 ret = (*fn) (filp, cmd, arg);
40634- else
40635+ } else
40636 ret = drm_ioctl(filp, cmd, arg);
40637
40638 return ret;
40639diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40640index 1b071b8..de8601a 100644
40641--- a/drivers/gpu/drm/mga/mga_irq.c
40642+++ b/drivers/gpu/drm/mga/mga_irq.c
40643@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40644 if (crtc != 0)
40645 return 0;
40646
40647- return atomic_read(&dev_priv->vbl_received);
40648+ return atomic_read_unchecked(&dev_priv->vbl_received);
40649 }
40650
40651
40652@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40653 /* VBLANK interrupt */
40654 if (status & MGA_VLINEPEN) {
40655 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40656- atomic_inc(&dev_priv->vbl_received);
40657+ atomic_inc_unchecked(&dev_priv->vbl_received);
40658 drm_handle_vblank(dev, 0);
40659 handled = 1;
40660 }
40661@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40662 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40663 MGA_WRITE(MGA_PRIMEND, prim_end);
40664
40665- atomic_inc(&dev_priv->last_fence_retired);
40666+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40667 wake_up(&dev_priv->fence_queue);
40668 handled = 1;
40669 }
40670@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40671 * using fences.
40672 */
40673 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40674- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40675+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40676 - *sequence) <= (1 << 23)));
40677
40678 *sequence = cur_fence;
40679diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40680index 7df6acc..84bbe52 100644
40681--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40682+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40683@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40684 struct bit_table {
40685 const char id;
40686 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40687-};
40688+} __no_const;
40689
40690 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40691
40692diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40693index 8ae36f2..1147a30 100644
40694--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40695+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40696@@ -121,7 +121,6 @@ struct nouveau_drm {
40697 struct drm_global_reference mem_global_ref;
40698 struct ttm_bo_global_ref bo_global_ref;
40699 struct ttm_bo_device bdev;
40700- atomic_t validate_sequence;
40701 int (*move)(struct nouveau_channel *,
40702 struct ttm_buffer_object *,
40703 struct ttm_mem_reg *, struct ttm_mem_reg *);
40704diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40705index 462679a..88e32a7 100644
40706--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40707+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40708@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40709 unsigned long arg)
40710 {
40711 unsigned int nr = DRM_IOCTL_NR(cmd);
40712- drm_ioctl_compat_t *fn = NULL;
40713+ drm_ioctl_compat_t fn = NULL;
40714 int ret;
40715
40716 if (nr < DRM_COMMAND_BASE)
40717diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40718index 3d1cfcb..0542700 100644
40719--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40720+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40721@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40722 }
40723
40724 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40725- nouveau_vram_manager_init,
40726- nouveau_vram_manager_fini,
40727- nouveau_vram_manager_new,
40728- nouveau_vram_manager_del,
40729- nouveau_vram_manager_debug
40730+ .init = nouveau_vram_manager_init,
40731+ .takedown = nouveau_vram_manager_fini,
40732+ .get_node = nouveau_vram_manager_new,
40733+ .put_node = nouveau_vram_manager_del,
40734+ .debug = nouveau_vram_manager_debug
40735 };
40736
40737 static int
40738@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40739 }
40740
40741 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40742- nouveau_gart_manager_init,
40743- nouveau_gart_manager_fini,
40744- nouveau_gart_manager_new,
40745- nouveau_gart_manager_del,
40746- nouveau_gart_manager_debug
40747+ .init = nouveau_gart_manager_init,
40748+ .takedown = nouveau_gart_manager_fini,
40749+ .get_node = nouveau_gart_manager_new,
40750+ .put_node = nouveau_gart_manager_del,
40751+ .debug = nouveau_gart_manager_debug
40752 };
40753
40754 /*XXX*/
40755@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40756 }
40757
40758 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40759- nv04_gart_manager_init,
40760- nv04_gart_manager_fini,
40761- nv04_gart_manager_new,
40762- nv04_gart_manager_del,
40763- nv04_gart_manager_debug
40764+ .init = nv04_gart_manager_init,
40765+ .takedown = nv04_gart_manager_fini,
40766+ .get_node = nv04_gart_manager_new,
40767+ .put_node = nv04_gart_manager_del,
40768+ .debug = nv04_gart_manager_debug
40769 };
40770
40771 int
40772diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40773index c7592ec..dd45ebc 100644
40774--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40775+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40776@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40777 * locking inversion with the driver load path. And the access here is
40778 * completely racy anyway. So don't bother with locking for now.
40779 */
40780- return dev->open_count == 0;
40781+ return local_read(&dev->open_count) == 0;
40782 }
40783
40784 static const struct vga_switcheroo_client_ops
40785diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40786index 9782364..89bd954 100644
40787--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40788+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40789@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40790 int ret;
40791
40792 mutex_lock(&qdev->async_io_mutex);
40793- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40794+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40795 if (qdev->last_sent_io_cmd > irq_num) {
40796 if (intr)
40797 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40798- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40799+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40800 else
40801 ret = wait_event_timeout(qdev->io_cmd_event,
40802- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40803+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40804 /* 0 is timeout, just bail the "hw" has gone away */
40805 if (ret <= 0)
40806 goto out;
40807- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40808+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40809 }
40810 outb(val, addr);
40811 qdev->last_sent_io_cmd = irq_num + 1;
40812 if (intr)
40813 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40814- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40815+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40816 else
40817 ret = wait_event_timeout(qdev->io_cmd_event,
40818- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40819+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40820 out:
40821 if (ret > 0)
40822 ret = 0;
40823diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40824index 6911b8c..89d6867 100644
40825--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40826+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40827@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40828 struct drm_info_node *node = (struct drm_info_node *) m->private;
40829 struct qxl_device *qdev = node->minor->dev->dev_private;
40830
40831- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40832- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40833- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40834- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40835+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40836+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40837+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40838+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40839 seq_printf(m, "%d\n", qdev->irq_received_error);
40840 return 0;
40841 }
40842diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40843index 7c6cafe..460f542 100644
40844--- a/drivers/gpu/drm/qxl/qxl_drv.h
40845+++ b/drivers/gpu/drm/qxl/qxl_drv.h
40846@@ -290,10 +290,10 @@ struct qxl_device {
40847 unsigned int last_sent_io_cmd;
40848
40849 /* interrupt handling */
40850- atomic_t irq_received;
40851- atomic_t irq_received_display;
40852- atomic_t irq_received_cursor;
40853- atomic_t irq_received_io_cmd;
40854+ atomic_unchecked_t irq_received;
40855+ atomic_unchecked_t irq_received_display;
40856+ atomic_unchecked_t irq_received_cursor;
40857+ atomic_unchecked_t irq_received_io_cmd;
40858 unsigned irq_received_error;
40859 wait_queue_head_t display_event;
40860 wait_queue_head_t cursor_event;
40861diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
40862index b110883..dd06418 100644
40863--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
40864+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
40865@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40866
40867 /* TODO copy slow path code from i915 */
40868 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
40869- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
40870+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
40871
40872 {
40873 struct qxl_drawable *draw = fb_cmd;
40874@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40875 struct drm_qxl_reloc reloc;
40876
40877 if (copy_from_user(&reloc,
40878- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
40879+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
40880 sizeof(reloc))) {
40881 ret = -EFAULT;
40882 goto out_free_bos;
40883@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40884
40885 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
40886
40887- struct drm_qxl_command *commands =
40888- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40889+ struct drm_qxl_command __user *commands =
40890+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
40891
40892- if (copy_from_user(&user_cmd, &commands[cmd_num],
40893+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40894 sizeof(user_cmd)))
40895 return -EFAULT;
40896
40897diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40898index 0bf1e20..42a7310 100644
40899--- a/drivers/gpu/drm/qxl/qxl_irq.c
40900+++ b/drivers/gpu/drm/qxl/qxl_irq.c
40901@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
40902 if (!pending)
40903 return IRQ_NONE;
40904
40905- atomic_inc(&qdev->irq_received);
40906+ atomic_inc_unchecked(&qdev->irq_received);
40907
40908 if (pending & QXL_INTERRUPT_DISPLAY) {
40909- atomic_inc(&qdev->irq_received_display);
40910+ atomic_inc_unchecked(&qdev->irq_received_display);
40911 wake_up_all(&qdev->display_event);
40912 qxl_queue_garbage_collect(qdev, false);
40913 }
40914 if (pending & QXL_INTERRUPT_CURSOR) {
40915- atomic_inc(&qdev->irq_received_cursor);
40916+ atomic_inc_unchecked(&qdev->irq_received_cursor);
40917 wake_up_all(&qdev->cursor_event);
40918 }
40919 if (pending & QXL_INTERRUPT_IO_CMD) {
40920- atomic_inc(&qdev->irq_received_io_cmd);
40921+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
40922 wake_up_all(&qdev->io_cmd_event);
40923 }
40924 if (pending & QXL_INTERRUPT_ERROR) {
40925@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
40926 init_waitqueue_head(&qdev->io_cmd_event);
40927 INIT_WORK(&qdev->client_monitors_config_work,
40928 qxl_client_monitors_config_work_func);
40929- atomic_set(&qdev->irq_received, 0);
40930- atomic_set(&qdev->irq_received_display, 0);
40931- atomic_set(&qdev->irq_received_cursor, 0);
40932- atomic_set(&qdev->irq_received_io_cmd, 0);
40933+ atomic_set_unchecked(&qdev->irq_received, 0);
40934+ atomic_set_unchecked(&qdev->irq_received_display, 0);
40935+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40936+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40937 qdev->irq_received_error = 0;
40938 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
40939 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40940diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40941index 0cbc4c9..0e46686 100644
40942--- a/drivers/gpu/drm/qxl/qxl_ttm.c
40943+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40944@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40945 }
40946 }
40947
40948-static struct vm_operations_struct qxl_ttm_vm_ops;
40949+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40950 static const struct vm_operations_struct *ttm_vm_ops;
40951
40952 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40953@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40954 return r;
40955 if (unlikely(ttm_vm_ops == NULL)) {
40956 ttm_vm_ops = vma->vm_ops;
40957+ pax_open_kernel();
40958 qxl_ttm_vm_ops = *ttm_vm_ops;
40959 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40960+ pax_close_kernel();
40961 }
40962 vma->vm_ops = &qxl_ttm_vm_ops;
40963 return 0;
40964@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40965 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40966 {
40967 #if defined(CONFIG_DEBUG_FS)
40968- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40969- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40970- unsigned i;
40971+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40972+ {
40973+ .name = "qxl_mem_mm",
40974+ .show = &qxl_mm_dump_table,
40975+ },
40976+ {
40977+ .name = "qxl_surf_mm",
40978+ .show = &qxl_mm_dump_table,
40979+ }
40980+ };
40981
40982- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
40983- if (i == 0)
40984- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
40985- else
40986- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
40987- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
40988- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
40989- qxl_mem_types_list[i].driver_features = 0;
40990- if (i == 0)
40991- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40992- else
40993- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40994+ pax_open_kernel();
40995+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40996+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40997+ pax_close_kernel();
40998
40999- }
41000- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41001+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41002 #else
41003 return 0;
41004 #endif
41005diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41006index 2c45ac9..5d740f8 100644
41007--- a/drivers/gpu/drm/r128/r128_cce.c
41008+++ b/drivers/gpu/drm/r128/r128_cce.c
41009@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41010
41011 /* GH: Simple idle check.
41012 */
41013- atomic_set(&dev_priv->idle_count, 0);
41014+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41015
41016 /* We don't support anything other than bus-mastering ring mode,
41017 * but the ring can be in either AGP or PCI space for the ring
41018diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41019index 723e5d6..102dbaf 100644
41020--- a/drivers/gpu/drm/r128/r128_drv.h
41021+++ b/drivers/gpu/drm/r128/r128_drv.h
41022@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41023 int is_pci;
41024 unsigned long cce_buffers_offset;
41025
41026- atomic_t idle_count;
41027+ atomic_unchecked_t idle_count;
41028
41029 int page_flipping;
41030 int current_page;
41031 u32 crtc_offset;
41032 u32 crtc_offset_cntl;
41033
41034- atomic_t vbl_received;
41035+ atomic_unchecked_t vbl_received;
41036
41037 u32 color_fmt;
41038 unsigned int front_offset;
41039diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41040index 663f38c..c689495 100644
41041--- a/drivers/gpu/drm/r128/r128_ioc32.c
41042+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41043@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41044 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41045 }
41046
41047-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41048+drm_ioctl_compat_t r128_compat_ioctls[] = {
41049 [DRM_R128_INIT] = compat_r128_init,
41050 [DRM_R128_DEPTH] = compat_r128_depth,
41051 [DRM_R128_STIPPLE] = compat_r128_stipple,
41052@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41053 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41054 {
41055 unsigned int nr = DRM_IOCTL_NR(cmd);
41056- drm_ioctl_compat_t *fn = NULL;
41057 int ret;
41058
41059 if (nr < DRM_COMMAND_BASE)
41060 return drm_compat_ioctl(filp, cmd, arg);
41061
41062- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41063- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41064-
41065- if (fn != NULL)
41066+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41067+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41068 ret = (*fn) (filp, cmd, arg);
41069- else
41070+ } else
41071 ret = drm_ioctl(filp, cmd, arg);
41072
41073 return ret;
41074diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41075index c2ae496..30b5993 100644
41076--- a/drivers/gpu/drm/r128/r128_irq.c
41077+++ b/drivers/gpu/drm/r128/r128_irq.c
41078@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41079 if (crtc != 0)
41080 return 0;
41081
41082- return atomic_read(&dev_priv->vbl_received);
41083+ return atomic_read_unchecked(&dev_priv->vbl_received);
41084 }
41085
41086 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41087@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41088 /* VBLANK interrupt */
41089 if (status & R128_CRTC_VBLANK_INT) {
41090 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41091- atomic_inc(&dev_priv->vbl_received);
41092+ atomic_inc_unchecked(&dev_priv->vbl_received);
41093 drm_handle_vblank(dev, 0);
41094 return IRQ_HANDLED;
41095 }
41096diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41097index 8fd2d9f..18c9660 100644
41098--- a/drivers/gpu/drm/r128/r128_state.c
41099+++ b/drivers/gpu/drm/r128/r128_state.c
41100@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41101
41102 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41103 {
41104- if (atomic_read(&dev_priv->idle_count) == 0)
41105+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41106 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41107 else
41108- atomic_set(&dev_priv->idle_count, 0);
41109+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41110 }
41111
41112 #endif
41113diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41114index b928c17..e5d9400 100644
41115--- a/drivers/gpu/drm/radeon/mkregtable.c
41116+++ b/drivers/gpu/drm/radeon/mkregtable.c
41117@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41118 regex_t mask_rex;
41119 regmatch_t match[4];
41120 char buf[1024];
41121- size_t end;
41122+ long end;
41123 int len;
41124 int done = 0;
41125 int r;
41126 unsigned o;
41127 struct offset *offset;
41128 char last_reg_s[10];
41129- int last_reg;
41130+ unsigned long last_reg;
41131
41132 if (regcomp
41133 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41134diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41135index bd7519f..e1c2cd95 100644
41136--- a/drivers/gpu/drm/radeon/radeon_device.c
41137+++ b/drivers/gpu/drm/radeon/radeon_device.c
41138@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41139 * locking inversion with the driver load path. And the access here is
41140 * completely racy anyway. So don't bother with locking for now.
41141 */
41142- return dev->open_count == 0;
41143+ return local_read(&dev->open_count) == 0;
41144 }
41145
41146 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41147diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41148index 46bd393..6ae4719 100644
41149--- a/drivers/gpu/drm/radeon/radeon_drv.h
41150+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41151@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41152
41153 /* SW interrupt */
41154 wait_queue_head_t swi_queue;
41155- atomic_t swi_emitted;
41156+ atomic_unchecked_t swi_emitted;
41157 int vblank_crtc;
41158 uint32_t irq_enable_reg;
41159 uint32_t r500_disp_irq_reg;
41160diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41161index 0b98ea1..0881827 100644
41162--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41163+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41164@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41165 request = compat_alloc_user_space(sizeof(*request));
41166 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41167 || __put_user(req32.param, &request->param)
41168- || __put_user((void __user *)(unsigned long)req32.value,
41169+ || __put_user((unsigned long)req32.value,
41170 &request->value))
41171 return -EFAULT;
41172
41173@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41174 #define compat_radeon_cp_setparam NULL
41175 #endif /* X86_64 || IA64 */
41176
41177-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41178+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41179 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41180 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41181 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41182@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41183 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41184 {
41185 unsigned int nr = DRM_IOCTL_NR(cmd);
41186- drm_ioctl_compat_t *fn = NULL;
41187 int ret;
41188
41189 if (nr < DRM_COMMAND_BASE)
41190 return drm_compat_ioctl(filp, cmd, arg);
41191
41192- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41193- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41194-
41195- if (fn != NULL)
41196+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41197+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41198 ret = (*fn) (filp, cmd, arg);
41199- else
41200+ } else
41201 ret = drm_ioctl(filp, cmd, arg);
41202
41203 return ret;
41204diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41205index 244b19b..c19226d 100644
41206--- a/drivers/gpu/drm/radeon/radeon_irq.c
41207+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41208@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41209 unsigned int ret;
41210 RING_LOCALS;
41211
41212- atomic_inc(&dev_priv->swi_emitted);
41213- ret = atomic_read(&dev_priv->swi_emitted);
41214+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41215+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41216
41217 BEGIN_RING(4);
41218 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41219@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41220 drm_radeon_private_t *dev_priv =
41221 (drm_radeon_private_t *) dev->dev_private;
41222
41223- atomic_set(&dev_priv->swi_emitted, 0);
41224+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41225 init_waitqueue_head(&dev_priv->swi_queue);
41226
41227 dev->max_vblank_count = 0x001fffff;
41228diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41229index 15aee72..cda326e 100644
41230--- a/drivers/gpu/drm/radeon/radeon_state.c
41231+++ b/drivers/gpu/drm/radeon/radeon_state.c
41232@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41233 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41234 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41235
41236- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41237+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41238 sarea_priv->nbox * sizeof(depth_boxes[0])))
41239 return -EFAULT;
41240
41241@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41242 {
41243 drm_radeon_private_t *dev_priv = dev->dev_private;
41244 drm_radeon_getparam_t *param = data;
41245- int value;
41246+ int value = 0;
41247
41248 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41249
41250diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41251index d02aa1d..ca19e2c 100644
41252--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41253+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41254@@ -959,7 +959,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41255 man->size = size >> PAGE_SHIFT;
41256 }
41257
41258-static struct vm_operations_struct radeon_ttm_vm_ops;
41259+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41260 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41261
41262 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41263@@ -1000,8 +1000,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41264 }
41265 if (unlikely(ttm_vm_ops == NULL)) {
41266 ttm_vm_ops = vma->vm_ops;
41267+ pax_open_kernel();
41268 radeon_ttm_vm_ops = *ttm_vm_ops;
41269 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41270+ pax_close_kernel();
41271 }
41272 vma->vm_ops = &radeon_ttm_vm_ops;
41273 return 0;
41274diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41275index 978993f..e36e50e 100644
41276--- a/drivers/gpu/drm/tegra/dc.c
41277+++ b/drivers/gpu/drm/tegra/dc.c
41278@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41279 }
41280
41281 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41282- dc->debugfs_files[i].data = dc;
41283+ *(void **)&dc->debugfs_files[i].data = dc;
41284
41285 err = drm_debugfs_create_files(dc->debugfs_files,
41286 ARRAY_SIZE(debugfs_files),
41287diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41288index 33f67fd..55ee9761 100644
41289--- a/drivers/gpu/drm/tegra/dsi.c
41290+++ b/drivers/gpu/drm/tegra/dsi.c
41291@@ -39,7 +39,7 @@ struct tegra_dsi {
41292 struct clk *clk_lp;
41293 struct clk *clk;
41294
41295- struct drm_info_list *debugfs_files;
41296+ drm_info_list_no_const *debugfs_files;
41297 struct drm_minor *minor;
41298 struct dentry *debugfs;
41299
41300diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41301index ffe2654..03c7b1c 100644
41302--- a/drivers/gpu/drm/tegra/hdmi.c
41303+++ b/drivers/gpu/drm/tegra/hdmi.c
41304@@ -60,7 +60,7 @@ struct tegra_hdmi {
41305 bool stereo;
41306 bool dvi;
41307
41308- struct drm_info_list *debugfs_files;
41309+ drm_info_list_no_const *debugfs_files;
41310 struct drm_minor *minor;
41311 struct dentry *debugfs;
41312 };
41313diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41314index aa0bd054..aea6a01 100644
41315--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41316+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41317@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41318 }
41319
41320 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41321- ttm_bo_man_init,
41322- ttm_bo_man_takedown,
41323- ttm_bo_man_get_node,
41324- ttm_bo_man_put_node,
41325- ttm_bo_man_debug
41326+ .init = ttm_bo_man_init,
41327+ .takedown = ttm_bo_man_takedown,
41328+ .get_node = ttm_bo_man_get_node,
41329+ .put_node = ttm_bo_man_put_node,
41330+ .debug = ttm_bo_man_debug
41331 };
41332 EXPORT_SYMBOL(ttm_bo_manager_func);
41333diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41334index a1803fb..c53f6b0 100644
41335--- a/drivers/gpu/drm/ttm/ttm_memory.c
41336+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41337@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41338 zone->glob = glob;
41339 glob->zone_kernel = zone;
41340 ret = kobject_init_and_add(
41341- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41342+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41343 if (unlikely(ret != 0)) {
41344 kobject_put(&zone->kobj);
41345 return ret;
41346@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41347 zone->glob = glob;
41348 glob->zone_dma32 = zone;
41349 ret = kobject_init_and_add(
41350- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41351+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41352 if (unlikely(ret != 0)) {
41353 kobject_put(&zone->kobj);
41354 return ret;
41355diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41356index 025c429..314062f 100644
41357--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41358+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41359@@ -54,7 +54,7 @@
41360
41361 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41362 #define SMALL_ALLOCATION 16
41363-#define FREE_ALL_PAGES (~0U)
41364+#define FREE_ALL_PAGES (~0UL)
41365 /* times are in msecs */
41366 #define PAGE_FREE_INTERVAL 1000
41367
41368@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41369 * @free_all: If set to true will free all pages in pool
41370 * @use_static: Safe to use static buffer
41371 **/
41372-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41373+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41374 bool use_static)
41375 {
41376 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41377 unsigned long irq_flags;
41378 struct page *p;
41379 struct page **pages_to_free;
41380- unsigned freed_pages = 0,
41381- npages_to_free = nr_free;
41382+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41383
41384 if (NUM_PAGES_TO_ALLOC < nr_free)
41385 npages_to_free = NUM_PAGES_TO_ALLOC;
41386@@ -371,7 +370,8 @@ restart:
41387 __list_del(&p->lru, &pool->list);
41388
41389 ttm_pool_update_free_locked(pool, freed_pages);
41390- nr_free -= freed_pages;
41391+ if (likely(nr_free != FREE_ALL_PAGES))
41392+ nr_free -= freed_pages;
41393 }
41394
41395 spin_unlock_irqrestore(&pool->lock, irq_flags);
41396@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41397 unsigned i;
41398 unsigned pool_offset;
41399 struct ttm_page_pool *pool;
41400- int shrink_pages = sc->nr_to_scan;
41401+ unsigned long shrink_pages = sc->nr_to_scan;
41402 unsigned long freed = 0;
41403
41404 if (!mutex_trylock(&lock))
41405@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41406 pool_offset = ++start_pool % NUM_POOLS;
41407 /* select start pool in round robin fashion */
41408 for (i = 0; i < NUM_POOLS; ++i) {
41409- unsigned nr_free = shrink_pages;
41410+ unsigned long nr_free = shrink_pages;
41411 if (shrink_pages == 0)
41412 break;
41413 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41414@@ -673,7 +673,7 @@ out:
41415 }
41416
41417 /* Put all pages in pages list to correct pool to wait for reuse */
41418-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41419+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41420 enum ttm_caching_state cstate)
41421 {
41422 unsigned long irq_flags;
41423@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41424 struct list_head plist;
41425 struct page *p = NULL;
41426 gfp_t gfp_flags = GFP_USER;
41427- unsigned count;
41428+ unsigned long count;
41429 int r;
41430
41431 /* set zero flag for page allocation if required */
41432diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41433index 01e1d27..aaa018a 100644
41434--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41435+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41436@@ -56,7 +56,7 @@
41437
41438 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41439 #define SMALL_ALLOCATION 4
41440-#define FREE_ALL_PAGES (~0U)
41441+#define FREE_ALL_PAGES (~0UL)
41442 /* times are in msecs */
41443 #define IS_UNDEFINED (0)
41444 #define IS_WC (1<<1)
41445@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41446 * @nr_free: If set to true will free all pages in pool
41447 * @use_static: Safe to use static buffer
41448 **/
41449-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41450+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41451 bool use_static)
41452 {
41453 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41454@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41455 struct dma_page *dma_p, *tmp;
41456 struct page **pages_to_free;
41457 struct list_head d_pages;
41458- unsigned freed_pages = 0,
41459- npages_to_free = nr_free;
41460+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41461
41462 if (NUM_PAGES_TO_ALLOC < nr_free)
41463 npages_to_free = NUM_PAGES_TO_ALLOC;
41464@@ -499,7 +498,8 @@ restart:
41465 /* remove range of pages from the pool */
41466 if (freed_pages) {
41467 ttm_pool_update_free_locked(pool, freed_pages);
41468- nr_free -= freed_pages;
41469+ if (likely(nr_free != FREE_ALL_PAGES))
41470+ nr_free -= freed_pages;
41471 }
41472
41473 spin_unlock_irqrestore(&pool->lock, irq_flags);
41474@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41475 struct dma_page *d_page, *next;
41476 enum pool_type type;
41477 bool is_cached = false;
41478- unsigned count = 0, i, npages = 0;
41479+ unsigned long count = 0, i, npages = 0;
41480 unsigned long irq_flags;
41481
41482 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41483@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41484 static unsigned start_pool;
41485 unsigned idx = 0;
41486 unsigned pool_offset;
41487- unsigned shrink_pages = sc->nr_to_scan;
41488+ unsigned long shrink_pages = sc->nr_to_scan;
41489 struct device_pools *p;
41490 unsigned long freed = 0;
41491
41492@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41493 goto out;
41494 pool_offset = ++start_pool % _manager->npools;
41495 list_for_each_entry(p, &_manager->pools, pools) {
41496- unsigned nr_free;
41497+ unsigned long nr_free;
41498
41499 if (!p->dev)
41500 continue;
41501@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41502 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41503 freed += nr_free - shrink_pages;
41504
41505- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41506+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41507 p->pool->dev_name, p->pool->name, current->pid,
41508 nr_free, shrink_pages);
41509 }
41510diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41511index 8cbcb45..a4d9cf7 100644
41512--- a/drivers/gpu/drm/udl/udl_fb.c
41513+++ b/drivers/gpu/drm/udl/udl_fb.c
41514@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41515 fb_deferred_io_cleanup(info);
41516 kfree(info->fbdefio);
41517 info->fbdefio = NULL;
41518- info->fbops->fb_mmap = udl_fb_mmap;
41519 }
41520
41521 pr_warn("released /dev/fb%d user=%d count=%d\n",
41522diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41523index ef8c500..01030c8 100644
41524--- a/drivers/gpu/drm/via/via_drv.h
41525+++ b/drivers/gpu/drm/via/via_drv.h
41526@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41527 typedef uint32_t maskarray_t[5];
41528
41529 typedef struct drm_via_irq {
41530- atomic_t irq_received;
41531+ atomic_unchecked_t irq_received;
41532 uint32_t pending_mask;
41533 uint32_t enable_mask;
41534 wait_queue_head_t irq_queue;
41535@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41536 struct timeval last_vblank;
41537 int last_vblank_valid;
41538 unsigned usec_per_vblank;
41539- atomic_t vbl_received;
41540+ atomic_unchecked_t vbl_received;
41541 drm_via_state_t hc_state;
41542 char pci_buf[VIA_PCI_BUF_SIZE];
41543 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41544diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41545index 1319433..a993b0c 100644
41546--- a/drivers/gpu/drm/via/via_irq.c
41547+++ b/drivers/gpu/drm/via/via_irq.c
41548@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41549 if (crtc != 0)
41550 return 0;
41551
41552- return atomic_read(&dev_priv->vbl_received);
41553+ return atomic_read_unchecked(&dev_priv->vbl_received);
41554 }
41555
41556 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41557@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41558
41559 status = VIA_READ(VIA_REG_INTERRUPT);
41560 if (status & VIA_IRQ_VBLANK_PENDING) {
41561- atomic_inc(&dev_priv->vbl_received);
41562- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41563+ atomic_inc_unchecked(&dev_priv->vbl_received);
41564+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41565 do_gettimeofday(&cur_vblank);
41566 if (dev_priv->last_vblank_valid) {
41567 dev_priv->usec_per_vblank =
41568@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41569 dev_priv->last_vblank = cur_vblank;
41570 dev_priv->last_vblank_valid = 1;
41571 }
41572- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41573+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41574 DRM_DEBUG("US per vblank is: %u\n",
41575 dev_priv->usec_per_vblank);
41576 }
41577@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41578
41579 for (i = 0; i < dev_priv->num_irqs; ++i) {
41580 if (status & cur_irq->pending_mask) {
41581- atomic_inc(&cur_irq->irq_received);
41582+ atomic_inc_unchecked(&cur_irq->irq_received);
41583 wake_up(&cur_irq->irq_queue);
41584 handled = 1;
41585 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41586@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41587 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41588 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41589 masks[irq][4]));
41590- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41591+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41592 } else {
41593 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41594 (((cur_irq_sequence =
41595- atomic_read(&cur_irq->irq_received)) -
41596+ atomic_read_unchecked(&cur_irq->irq_received)) -
41597 *sequence) <= (1 << 23)));
41598 }
41599 *sequence = cur_irq_sequence;
41600@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41601 }
41602
41603 for (i = 0; i < dev_priv->num_irqs; ++i) {
41604- atomic_set(&cur_irq->irq_received, 0);
41605+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41606 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41607 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41608 init_waitqueue_head(&cur_irq->irq_queue);
41609@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41610 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41611 case VIA_IRQ_RELATIVE:
41612 irqwait->request.sequence +=
41613- atomic_read(&cur_irq->irq_received);
41614+ atomic_read_unchecked(&cur_irq->irq_received);
41615 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41616 case VIA_IRQ_ABSOLUTE:
41617 break;
41618diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41619index d26a6da..5fa41ed 100644
41620--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41621+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41622@@ -447,7 +447,7 @@ struct vmw_private {
41623 * Fencing and IRQs.
41624 */
41625
41626- atomic_t marker_seq;
41627+ atomic_unchecked_t marker_seq;
41628 wait_queue_head_t fence_queue;
41629 wait_queue_head_t fifo_queue;
41630 spinlock_t waiter_lock;
41631diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41632index 39f2b03..d1b0a64 100644
41633--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41634+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41635@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41636 (unsigned int) min,
41637 (unsigned int) fifo->capabilities);
41638
41639- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41640+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41641 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41642 vmw_marker_queue_init(&fifo->marker_queue);
41643 return vmw_fifo_send_fence(dev_priv, &dummy);
41644@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41645 if (reserveable)
41646 iowrite32(bytes, fifo_mem +
41647 SVGA_FIFO_RESERVED);
41648- return fifo_mem + (next_cmd >> 2);
41649+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41650 } else {
41651 need_bounce = true;
41652 }
41653@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41654
41655 fm = vmw_fifo_reserve(dev_priv, bytes);
41656 if (unlikely(fm == NULL)) {
41657- *seqno = atomic_read(&dev_priv->marker_seq);
41658+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41659 ret = -ENOMEM;
41660 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41661 false, 3*HZ);
41662@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41663 }
41664
41665 do {
41666- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41667+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41668 } while (*seqno == 0);
41669
41670 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41671diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41672index 170b61b..fec7348 100644
41673--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41674+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41675@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41676 }
41677
41678 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41679- vmw_gmrid_man_init,
41680- vmw_gmrid_man_takedown,
41681- vmw_gmrid_man_get_node,
41682- vmw_gmrid_man_put_node,
41683- vmw_gmrid_man_debug
41684+ .init = vmw_gmrid_man_init,
41685+ .takedown = vmw_gmrid_man_takedown,
41686+ .get_node = vmw_gmrid_man_get_node,
41687+ .put_node = vmw_gmrid_man_put_node,
41688+ .debug = vmw_gmrid_man_debug
41689 };
41690diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41691index 69c8ce2..cacb0ab 100644
41692--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41693+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41694@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41695 int ret;
41696
41697 num_clips = arg->num_clips;
41698- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41699+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41700
41701 if (unlikely(num_clips == 0))
41702 return 0;
41703@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41704 int ret;
41705
41706 num_clips = arg->num_clips;
41707- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41708+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41709
41710 if (unlikely(num_clips == 0))
41711 return 0;
41712diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41713index 9fe9827..0aa2fc0 100644
41714--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41715+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41716@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41717 * emitted. Then the fence is stale and signaled.
41718 */
41719
41720- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41721+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41722 > VMW_FENCE_WRAP);
41723
41724 return ret;
41725@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41726
41727 if (fifo_idle)
41728 down_read(&fifo_state->rwsem);
41729- signal_seq = atomic_read(&dev_priv->marker_seq);
41730+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41731 ret = 0;
41732
41733 for (;;) {
41734diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41735index efd1ffd..0ae13ca 100644
41736--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41737+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41738@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41739 while (!vmw_lag_lt(queue, us)) {
41740 spin_lock(&queue->lock);
41741 if (list_empty(&queue->head))
41742- seqno = atomic_read(&dev_priv->marker_seq);
41743+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41744 else {
41745 marker = list_first_entry(&queue->head,
41746 struct vmw_marker, head);
41747diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41748index 37ac7b5..d52a5c9 100644
41749--- a/drivers/gpu/vga/vga_switcheroo.c
41750+++ b/drivers/gpu/vga/vga_switcheroo.c
41751@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41752
41753 /* this version is for the case where the power switch is separate
41754 to the device being powered down. */
41755-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41756+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41757 {
41758 /* copy over all the bus versions */
41759 if (dev->bus && dev->bus->pm) {
41760@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41761 return ret;
41762 }
41763
41764-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41765+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41766 {
41767 /* copy over all the bus versions */
41768 if (dev->bus && dev->bus->pm) {
41769diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41770index 8b63879..a5a5e72 100644
41771--- a/drivers/hid/hid-core.c
41772+++ b/drivers/hid/hid-core.c
41773@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41774
41775 int hid_add_device(struct hid_device *hdev)
41776 {
41777- static atomic_t id = ATOMIC_INIT(0);
41778+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41779 int ret;
41780
41781 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41782@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
41783 /* XXX hack, any other cleaner solution after the driver core
41784 * is converted to allow more than 20 bytes as the device name? */
41785 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41786- hdev->vendor, hdev->product, atomic_inc_return(&id));
41787+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41788
41789 hid_debug_register(hdev, dev_name(&hdev->dev));
41790 ret = device_add(&hdev->dev);
41791diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
41792index 5bc6d80..e47b55a 100644
41793--- a/drivers/hid/hid-logitech-dj.c
41794+++ b/drivers/hid/hid-logitech-dj.c
41795@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
41796 * case we forward it to the correct hid device (via hid_input_report()
41797 * ) and return 1 so hid-core does not anything else with it.
41798 */
41799+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41800+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41801+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
41802+ __func__, dj_report->device_index);
41803+ return false;
41804+ }
41805
41806 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41807 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41808diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41809index c13fb5b..55a3802 100644
41810--- a/drivers/hid/hid-wiimote-debug.c
41811+++ b/drivers/hid/hid-wiimote-debug.c
41812@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41813 else if (size == 0)
41814 return -EIO;
41815
41816- if (copy_to_user(u, buf, size))
41817+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41818 return -EFAULT;
41819
41820 *off += size;
41821diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41822index 433f72a..2926005 100644
41823--- a/drivers/hv/channel.c
41824+++ b/drivers/hv/channel.c
41825@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41826 unsigned long flags;
41827 int ret = 0;
41828
41829- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41830- atomic_inc(&vmbus_connection.next_gpadl_handle);
41831+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41832+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41833
41834 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41835 if (ret)
41836diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41837index 3e4235c..877d0e5 100644
41838--- a/drivers/hv/hv.c
41839+++ b/drivers/hv/hv.c
41840@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41841 u64 output_address = (output) ? virt_to_phys(output) : 0;
41842 u32 output_address_hi = output_address >> 32;
41843 u32 output_address_lo = output_address & 0xFFFFFFFF;
41844- void *hypercall_page = hv_context.hypercall_page;
41845+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
41846
41847 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
41848 "=a"(hv_status_lo) : "d" (control_hi),
41849@@ -156,7 +156,7 @@ int hv_init(void)
41850 /* See if the hypercall page is already set */
41851 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
41852
41853- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
41854+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
41855
41856 if (!virtaddr)
41857 goto cleanup;
41858diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
41859index b958ded..b2452bb 100644
41860--- a/drivers/hv/hv_balloon.c
41861+++ b/drivers/hv/hv_balloon.c
41862@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
41863
41864 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
41865 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
41866-static atomic_t trans_id = ATOMIC_INIT(0);
41867+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
41868
41869 static int dm_ring_size = (5 * PAGE_SIZE);
41870
41871@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
41872 pr_info("Memory hot add failed\n");
41873
41874 dm->state = DM_INITIALIZED;
41875- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41876+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41877 vmbus_sendpacket(dm->dev->channel, &resp,
41878 sizeof(struct dm_hot_add_response),
41879 (unsigned long)NULL,
41880@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
41881 memset(&status, 0, sizeof(struct dm_status));
41882 status.hdr.type = DM_STATUS_REPORT;
41883 status.hdr.size = sizeof(struct dm_status);
41884- status.hdr.trans_id = atomic_inc_return(&trans_id);
41885+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41886
41887 /*
41888 * The host expects the guest to report free memory.
41889@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
41890 * send the status. This can happen if we were interrupted
41891 * after we picked our transaction ID.
41892 */
41893- if (status.hdr.trans_id != atomic_read(&trans_id))
41894+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
41895 return;
41896
41897 /*
41898@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
41899 */
41900
41901 do {
41902- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
41903+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41904 ret = vmbus_sendpacket(dm_device.dev->channel,
41905 bl_resp,
41906 bl_resp->hdr.size,
41907@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
41908
41909 memset(&resp, 0, sizeof(struct dm_unballoon_response));
41910 resp.hdr.type = DM_UNBALLOON_RESPONSE;
41911- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41912+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41913 resp.hdr.size = sizeof(struct dm_unballoon_response);
41914
41915 vmbus_sendpacket(dm_device.dev->channel, &resp,
41916@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
41917 memset(&version_req, 0, sizeof(struct dm_version_request));
41918 version_req.hdr.type = DM_VERSION_REQUEST;
41919 version_req.hdr.size = sizeof(struct dm_version_request);
41920- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41921+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41922 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
41923 version_req.is_last_attempt = 1;
41924
41925@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
41926 memset(&version_req, 0, sizeof(struct dm_version_request));
41927 version_req.hdr.type = DM_VERSION_REQUEST;
41928 version_req.hdr.size = sizeof(struct dm_version_request);
41929- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41930+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41931 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
41932 version_req.is_last_attempt = 0;
41933
41934@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
41935 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
41936 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
41937 cap_msg.hdr.size = sizeof(struct dm_capabilities);
41938- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
41939+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41940
41941 cap_msg.caps.cap_bits.balloon = 1;
41942 cap_msg.caps.cap_bits.hot_add = 1;
41943diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
41944index c386d8d..d6004c4 100644
41945--- a/drivers/hv/hyperv_vmbus.h
41946+++ b/drivers/hv/hyperv_vmbus.h
41947@@ -611,7 +611,7 @@ enum vmbus_connect_state {
41948 struct vmbus_connection {
41949 enum vmbus_connect_state conn_state;
41950
41951- atomic_t next_gpadl_handle;
41952+ atomic_unchecked_t next_gpadl_handle;
41953
41954 /*
41955 * Represents channel interrupts. Each bit position represents a
41956diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
41957index 4d6b269..2e23b86 100644
41958--- a/drivers/hv/vmbus_drv.c
41959+++ b/drivers/hv/vmbus_drv.c
41960@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
41961 {
41962 int ret = 0;
41963
41964- static atomic_t device_num = ATOMIC_INIT(0);
41965+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41966
41967 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
41968- atomic_inc_return(&device_num));
41969+ atomic_inc_return_unchecked(&device_num));
41970
41971 child_device_obj->device.bus = &hv_bus;
41972 child_device_obj->device.parent = &hv_acpi_dev->dev;
41973diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
41974index 579bdf9..75118b5 100644
41975--- a/drivers/hwmon/acpi_power_meter.c
41976+++ b/drivers/hwmon/acpi_power_meter.c
41977@@ -116,7 +116,7 @@ struct sensor_template {
41978 struct device_attribute *devattr,
41979 const char *buf, size_t count);
41980 int index;
41981-};
41982+} __do_const;
41983
41984 /* Averaging interval */
41985 static int update_avg_interval(struct acpi_power_meter_resource *resource)
41986@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
41987 struct sensor_template *attrs)
41988 {
41989 struct device *dev = &resource->acpi_dev->dev;
41990- struct sensor_device_attribute *sensors =
41991+ sensor_device_attribute_no_const *sensors =
41992 &resource->sensors[resource->num_sensors];
41993 int res = 0;
41994
41995diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
41996index 0af63da..05a183a 100644
41997--- a/drivers/hwmon/applesmc.c
41998+++ b/drivers/hwmon/applesmc.c
41999@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42000 {
42001 struct applesmc_node_group *grp;
42002 struct applesmc_dev_attr *node;
42003- struct attribute *attr;
42004+ attribute_no_const *attr;
42005 int ret, i;
42006
42007 for (grp = groups; grp->format; grp++) {
42008diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42009index cccef87..06ce8ec 100644
42010--- a/drivers/hwmon/asus_atk0110.c
42011+++ b/drivers/hwmon/asus_atk0110.c
42012@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42013 struct atk_sensor_data {
42014 struct list_head list;
42015 struct atk_data *data;
42016- struct device_attribute label_attr;
42017- struct device_attribute input_attr;
42018- struct device_attribute limit1_attr;
42019- struct device_attribute limit2_attr;
42020+ device_attribute_no_const label_attr;
42021+ device_attribute_no_const input_attr;
42022+ device_attribute_no_const limit1_attr;
42023+ device_attribute_no_const limit2_attr;
42024 char label_attr_name[ATTR_NAME_SIZE];
42025 char input_attr_name[ATTR_NAME_SIZE];
42026 char limit1_attr_name[ATTR_NAME_SIZE];
42027@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42028 static struct device_attribute atk_name_attr =
42029 __ATTR(name, 0444, atk_name_show, NULL);
42030
42031-static void atk_init_attribute(struct device_attribute *attr, char *name,
42032+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42033 sysfs_show_func show)
42034 {
42035 sysfs_attr_init(&attr->attr);
42036diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42037index 5b7fec8..05c957a 100644
42038--- a/drivers/hwmon/coretemp.c
42039+++ b/drivers/hwmon/coretemp.c
42040@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42041 return NOTIFY_OK;
42042 }
42043
42044-static struct notifier_block coretemp_cpu_notifier __refdata = {
42045+static struct notifier_block coretemp_cpu_notifier = {
42046 .notifier_call = coretemp_cpu_callback,
42047 };
42048
42049diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42050index 7a8a6fb..015c1fd 100644
42051--- a/drivers/hwmon/ibmaem.c
42052+++ b/drivers/hwmon/ibmaem.c
42053@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42054 struct aem_rw_sensor_template *rw)
42055 {
42056 struct device *dev = &data->pdev->dev;
42057- struct sensor_device_attribute *sensors = data->sensors;
42058+ sensor_device_attribute_no_const *sensors = data->sensors;
42059 int err;
42060
42061 /* Set up read-only sensors */
42062diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42063index 17ae2eb..21b71dd 100644
42064--- a/drivers/hwmon/iio_hwmon.c
42065+++ b/drivers/hwmon/iio_hwmon.c
42066@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42067 {
42068 struct device *dev = &pdev->dev;
42069 struct iio_hwmon_state *st;
42070- struct sensor_device_attribute *a;
42071+ sensor_device_attribute_no_const *a;
42072 int ret, i;
42073 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42074 enum iio_chan_type type;
42075diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42076index f3830db..9f4d6d5 100644
42077--- a/drivers/hwmon/nct6683.c
42078+++ b/drivers/hwmon/nct6683.c
42079@@ -397,11 +397,11 @@ static struct attribute_group *
42080 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42081 int repeat)
42082 {
42083- struct sensor_device_attribute_2 *a2;
42084- struct sensor_device_attribute *a;
42085+ sensor_device_attribute_2_no_const *a2;
42086+ sensor_device_attribute_no_const *a;
42087 struct sensor_device_template **t;
42088 struct sensor_device_attr_u *su;
42089- struct attribute_group *group;
42090+ attribute_group_no_const *group;
42091 struct attribute **attrs;
42092 int i, j, count;
42093
42094diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42095index 1be4117..88ae1e1 100644
42096--- a/drivers/hwmon/nct6775.c
42097+++ b/drivers/hwmon/nct6775.c
42098@@ -952,10 +952,10 @@ static struct attribute_group *
42099 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42100 int repeat)
42101 {
42102- struct attribute_group *group;
42103+ attribute_group_no_const *group;
42104 struct sensor_device_attr_u *su;
42105- struct sensor_device_attribute *a;
42106- struct sensor_device_attribute_2 *a2;
42107+ sensor_device_attribute_no_const *a;
42108+ sensor_device_attribute_2_no_const *a2;
42109 struct attribute **attrs;
42110 struct sensor_device_template **t;
42111 int i, count;
42112diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42113index f2e47c7..45d7941 100644
42114--- a/drivers/hwmon/pmbus/pmbus_core.c
42115+++ b/drivers/hwmon/pmbus/pmbus_core.c
42116@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42117 return 0;
42118 }
42119
42120-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42121+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42122 const char *name,
42123 umode_t mode,
42124 ssize_t (*show)(struct device *dev,
42125@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42126 dev_attr->store = store;
42127 }
42128
42129-static void pmbus_attr_init(struct sensor_device_attribute *a,
42130+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42131 const char *name,
42132 umode_t mode,
42133 ssize_t (*show)(struct device *dev,
42134@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42135 u16 reg, u8 mask)
42136 {
42137 struct pmbus_boolean *boolean;
42138- struct sensor_device_attribute *a;
42139+ sensor_device_attribute_no_const *a;
42140
42141 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42142 if (!boolean)
42143@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42144 bool update, bool readonly)
42145 {
42146 struct pmbus_sensor *sensor;
42147- struct device_attribute *a;
42148+ device_attribute_no_const *a;
42149
42150 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42151 if (!sensor)
42152@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42153 const char *lstring, int index)
42154 {
42155 struct pmbus_label *label;
42156- struct device_attribute *a;
42157+ device_attribute_no_const *a;
42158
42159 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42160 if (!label)
42161diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42162index d4f0935..7420593 100644
42163--- a/drivers/hwmon/sht15.c
42164+++ b/drivers/hwmon/sht15.c
42165@@ -169,7 +169,7 @@ struct sht15_data {
42166 int supply_uv;
42167 bool supply_uv_valid;
42168 struct work_struct update_supply_work;
42169- atomic_t interrupt_handled;
42170+ atomic_unchecked_t interrupt_handled;
42171 };
42172
42173 /**
42174@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42175 ret = gpio_direction_input(data->pdata->gpio_data);
42176 if (ret)
42177 return ret;
42178- atomic_set(&data->interrupt_handled, 0);
42179+ atomic_set_unchecked(&data->interrupt_handled, 0);
42180
42181 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42182 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42183 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42184 /* Only relevant if the interrupt hasn't occurred. */
42185- if (!atomic_read(&data->interrupt_handled))
42186+ if (!atomic_read_unchecked(&data->interrupt_handled))
42187 schedule_work(&data->read_work);
42188 }
42189 ret = wait_event_timeout(data->wait_queue,
42190@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42191
42192 /* First disable the interrupt */
42193 disable_irq_nosync(irq);
42194- atomic_inc(&data->interrupt_handled);
42195+ atomic_inc_unchecked(&data->interrupt_handled);
42196 /* Then schedule a reading work struct */
42197 if (data->state != SHT15_READING_NOTHING)
42198 schedule_work(&data->read_work);
42199@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42200 * If not, then start the interrupt again - care here as could
42201 * have gone low in meantime so verify it hasn't!
42202 */
42203- atomic_set(&data->interrupt_handled, 0);
42204+ atomic_set_unchecked(&data->interrupt_handled, 0);
42205 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42206 /* If still not occurred or another handler was scheduled */
42207 if (gpio_get_value(data->pdata->gpio_data)
42208- || atomic_read(&data->interrupt_handled))
42209+ || atomic_read_unchecked(&data->interrupt_handled))
42210 return;
42211 }
42212
42213diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42214index ac91c07..8e69663 100644
42215--- a/drivers/hwmon/via-cputemp.c
42216+++ b/drivers/hwmon/via-cputemp.c
42217@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42218 return NOTIFY_OK;
42219 }
42220
42221-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42222+static struct notifier_block via_cputemp_cpu_notifier = {
42223 .notifier_call = via_cputemp_cpu_callback,
42224 };
42225
42226diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42227index 65e3240..e6c511d 100644
42228--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42229+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42230@@ -39,7 +39,7 @@
42231 extern struct i2c_adapter amd756_smbus;
42232
42233 static struct i2c_adapter *s4882_adapter;
42234-static struct i2c_algorithm *s4882_algo;
42235+static i2c_algorithm_no_const *s4882_algo;
42236
42237 /* Wrapper access functions for multiplexed SMBus */
42238 static DEFINE_MUTEX(amd756_lock);
42239diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42240index b19a310..d6eece0 100644
42241--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42242+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42243@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42244 /* usb layer */
42245
42246 /* Send command to device, and get response. */
42247-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42248+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42249 {
42250 int ret = 0;
42251 int actual;
42252diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42253index 88eda09..cf40434 100644
42254--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42255+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42256@@ -37,7 +37,7 @@
42257 extern struct i2c_adapter *nforce2_smbus;
42258
42259 static struct i2c_adapter *s4985_adapter;
42260-static struct i2c_algorithm *s4985_algo;
42261+static i2c_algorithm_no_const *s4985_algo;
42262
42263 /* Wrapper access functions for multiplexed SMBus */
42264 static DEFINE_MUTEX(nforce2_lock);
42265diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42266index 71c7a39..71dd3e0 100644
42267--- a/drivers/i2c/i2c-dev.c
42268+++ b/drivers/i2c/i2c-dev.c
42269@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42270 break;
42271 }
42272
42273- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42274+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42275 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42276 if (IS_ERR(rdwr_pa[i].buf)) {
42277 res = PTR_ERR(rdwr_pa[i].buf);
42278diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42279index 0b510ba..4fbb5085 100644
42280--- a/drivers/ide/ide-cd.c
42281+++ b/drivers/ide/ide-cd.c
42282@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42283 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42284 if ((unsigned long)buf & alignment
42285 || blk_rq_bytes(rq) & q->dma_pad_mask
42286- || object_is_on_stack(buf))
42287+ || object_starts_on_stack(buf))
42288 drive->dma = 0;
42289 }
42290 }
42291diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42292index af3e76d..96dfe5e 100644
42293--- a/drivers/iio/industrialio-core.c
42294+++ b/drivers/iio/industrialio-core.c
42295@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42296 }
42297
42298 static
42299-int __iio_device_attr_init(struct device_attribute *dev_attr,
42300+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42301 const char *postfix,
42302 struct iio_chan_spec const *chan,
42303 ssize_t (*readfunc)(struct device *dev,
42304diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42305index e28a494..f7c2671 100644
42306--- a/drivers/infiniband/core/cm.c
42307+++ b/drivers/infiniband/core/cm.c
42308@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42309
42310 struct cm_counter_group {
42311 struct kobject obj;
42312- atomic_long_t counter[CM_ATTR_COUNT];
42313+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42314 };
42315
42316 struct cm_counter_attribute {
42317@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42318 struct ib_mad_send_buf *msg = NULL;
42319 int ret;
42320
42321- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42322+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42323 counter[CM_REQ_COUNTER]);
42324
42325 /* Quick state check to discard duplicate REQs. */
42326@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42327 if (!cm_id_priv)
42328 return;
42329
42330- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42331+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42332 counter[CM_REP_COUNTER]);
42333 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42334 if (ret)
42335@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42336 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42337 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42338 spin_unlock_irq(&cm_id_priv->lock);
42339- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42340+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42341 counter[CM_RTU_COUNTER]);
42342 goto out;
42343 }
42344@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42345 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42346 dreq_msg->local_comm_id);
42347 if (!cm_id_priv) {
42348- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42349+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42350 counter[CM_DREQ_COUNTER]);
42351 cm_issue_drep(work->port, work->mad_recv_wc);
42352 return -EINVAL;
42353@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42354 case IB_CM_MRA_REP_RCVD:
42355 break;
42356 case IB_CM_TIMEWAIT:
42357- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42358+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42359 counter[CM_DREQ_COUNTER]);
42360 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42361 goto unlock;
42362@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42363 cm_free_msg(msg);
42364 goto deref;
42365 case IB_CM_DREQ_RCVD:
42366- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42367+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42368 counter[CM_DREQ_COUNTER]);
42369 goto unlock;
42370 default:
42371@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42372 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42373 cm_id_priv->msg, timeout)) {
42374 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42375- atomic_long_inc(&work->port->
42376+ atomic_long_inc_unchecked(&work->port->
42377 counter_group[CM_RECV_DUPLICATES].
42378 counter[CM_MRA_COUNTER]);
42379 goto out;
42380@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42381 break;
42382 case IB_CM_MRA_REQ_RCVD:
42383 case IB_CM_MRA_REP_RCVD:
42384- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42385+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42386 counter[CM_MRA_COUNTER]);
42387 /* fall through */
42388 default:
42389@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42390 case IB_CM_LAP_IDLE:
42391 break;
42392 case IB_CM_MRA_LAP_SENT:
42393- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42394+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42395 counter[CM_LAP_COUNTER]);
42396 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42397 goto unlock;
42398@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42399 cm_free_msg(msg);
42400 goto deref;
42401 case IB_CM_LAP_RCVD:
42402- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42403+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42404 counter[CM_LAP_COUNTER]);
42405 goto unlock;
42406 default:
42407@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42408 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42409 if (cur_cm_id_priv) {
42410 spin_unlock_irq(&cm.lock);
42411- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42412+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42413 counter[CM_SIDR_REQ_COUNTER]);
42414 goto out; /* Duplicate message. */
42415 }
42416@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42417 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42418 msg->retries = 1;
42419
42420- atomic_long_add(1 + msg->retries,
42421+ atomic_long_add_unchecked(1 + msg->retries,
42422 &port->counter_group[CM_XMIT].counter[attr_index]);
42423 if (msg->retries)
42424- atomic_long_add(msg->retries,
42425+ atomic_long_add_unchecked(msg->retries,
42426 &port->counter_group[CM_XMIT_RETRIES].
42427 counter[attr_index]);
42428
42429@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42430 }
42431
42432 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42433- atomic_long_inc(&port->counter_group[CM_RECV].
42434+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42435 counter[attr_id - CM_ATTR_ID_OFFSET]);
42436
42437 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42438@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42439 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42440
42441 return sprintf(buf, "%ld\n",
42442- atomic_long_read(&group->counter[cm_attr->index]));
42443+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42444 }
42445
42446 static const struct sysfs_ops cm_counter_ops = {
42447diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42448index 9f5ad7c..588cd84 100644
42449--- a/drivers/infiniband/core/fmr_pool.c
42450+++ b/drivers/infiniband/core/fmr_pool.c
42451@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42452
42453 struct task_struct *thread;
42454
42455- atomic_t req_ser;
42456- atomic_t flush_ser;
42457+ atomic_unchecked_t req_ser;
42458+ atomic_unchecked_t flush_ser;
42459
42460 wait_queue_head_t force_wait;
42461 };
42462@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42463 struct ib_fmr_pool *pool = pool_ptr;
42464
42465 do {
42466- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42467+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42468 ib_fmr_batch_release(pool);
42469
42470- atomic_inc(&pool->flush_ser);
42471+ atomic_inc_unchecked(&pool->flush_ser);
42472 wake_up_interruptible(&pool->force_wait);
42473
42474 if (pool->flush_function)
42475@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42476 }
42477
42478 set_current_state(TASK_INTERRUPTIBLE);
42479- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42480+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42481 !kthread_should_stop())
42482 schedule();
42483 __set_current_state(TASK_RUNNING);
42484@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42485 pool->dirty_watermark = params->dirty_watermark;
42486 pool->dirty_len = 0;
42487 spin_lock_init(&pool->pool_lock);
42488- atomic_set(&pool->req_ser, 0);
42489- atomic_set(&pool->flush_ser, 0);
42490+ atomic_set_unchecked(&pool->req_ser, 0);
42491+ atomic_set_unchecked(&pool->flush_ser, 0);
42492 init_waitqueue_head(&pool->force_wait);
42493
42494 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42495@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42496 }
42497 spin_unlock_irq(&pool->pool_lock);
42498
42499- serial = atomic_inc_return(&pool->req_ser);
42500+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42501 wake_up_process(pool->thread);
42502
42503 if (wait_event_interruptible(pool->force_wait,
42504- atomic_read(&pool->flush_ser) - serial >= 0))
42505+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42506 return -EINTR;
42507
42508 return 0;
42509@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42510 } else {
42511 list_add_tail(&fmr->list, &pool->dirty_list);
42512 if (++pool->dirty_len >= pool->dirty_watermark) {
42513- atomic_inc(&pool->req_ser);
42514+ atomic_inc_unchecked(&pool->req_ser);
42515 wake_up_process(pool->thread);
42516 }
42517 }
42518diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
42519index aec7a6a..8c014b5 100644
42520--- a/drivers/infiniband/core/umem.c
42521+++ b/drivers/infiniband/core/umem.c
42522@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
42523 if (dmasync)
42524 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
42525
42526+ /*
42527+ * If the combination of the addr and size requested for this memory
42528+ * region causes an integer overflow, return error.
42529+ */
42530+ if ((PAGE_ALIGN(addr + size) <= size) ||
42531+ (PAGE_ALIGN(addr + size) <= addr))
42532+ return ERR_PTR(-EINVAL);
42533+
42534 if (!can_do_mlock())
42535 return ERR_PTR(-EPERM);
42536
42537diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42538index cb43c22..2e12dd7 100644
42539--- a/drivers/infiniband/hw/cxgb4/mem.c
42540+++ b/drivers/infiniband/hw/cxgb4/mem.c
42541@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42542 int err;
42543 struct fw_ri_tpte tpt;
42544 u32 stag_idx;
42545- static atomic_t key;
42546+ static atomic_unchecked_t key;
42547
42548 if (c4iw_fatal_error(rdev))
42549 return -EIO;
42550@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42551 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42552 rdev->stats.stag.max = rdev->stats.stag.cur;
42553 mutex_unlock(&rdev->stats.lock);
42554- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42555+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42556 }
42557 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42558 __func__, stag_state, type, pdid, stag_idx);
42559diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42560index 79b3dbc..96e5fcc 100644
42561--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42562+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42563@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42564 struct ib_atomic_eth *ateth;
42565 struct ipath_ack_entry *e;
42566 u64 vaddr;
42567- atomic64_t *maddr;
42568+ atomic64_unchecked_t *maddr;
42569 u64 sdata;
42570 u32 rkey;
42571 u8 next;
42572@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42573 IB_ACCESS_REMOTE_ATOMIC)))
42574 goto nack_acc_unlck;
42575 /* Perform atomic OP and save result. */
42576- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42577+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42578 sdata = be64_to_cpu(ateth->swap_data);
42579 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42580 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42581- (u64) atomic64_add_return(sdata, maddr) - sdata :
42582+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42583 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42584 be64_to_cpu(ateth->compare_data),
42585 sdata);
42586diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42587index 1f95bba..9530f87 100644
42588--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42589+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42590@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42591 unsigned long flags;
42592 struct ib_wc wc;
42593 u64 sdata;
42594- atomic64_t *maddr;
42595+ atomic64_unchecked_t *maddr;
42596 enum ib_wc_status send_status;
42597
42598 /*
42599@@ -382,11 +382,11 @@ again:
42600 IB_ACCESS_REMOTE_ATOMIC)))
42601 goto acc_err;
42602 /* Perform atomic OP and save result. */
42603- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42604+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42605 sdata = wqe->wr.wr.atomic.compare_add;
42606 *(u64 *) sqp->s_sge.sge.vaddr =
42607 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42608- (u64) atomic64_add_return(sdata, maddr) - sdata :
42609+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42610 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42611 sdata, wqe->wr.wr.atomic.swap);
42612 goto send_comp;
42613diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42614index 82a7dd8..8fb6ba6 100644
42615--- a/drivers/infiniband/hw/mlx4/mad.c
42616+++ b/drivers/infiniband/hw/mlx4/mad.c
42617@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42618
42619 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42620 {
42621- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42622+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42623 cpu_to_be64(0xff00000000000000LL);
42624 }
42625
42626diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42627index ed327e6..ca1739e0 100644
42628--- a/drivers/infiniband/hw/mlx4/mcg.c
42629+++ b/drivers/infiniband/hw/mlx4/mcg.c
42630@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42631 {
42632 char name[20];
42633
42634- atomic_set(&ctx->tid, 0);
42635+ atomic_set_unchecked(&ctx->tid, 0);
42636 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42637 ctx->mcg_wq = create_singlethread_workqueue(name);
42638 if (!ctx->mcg_wq)
42639diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42640index 6eb743f..a7b0f6d 100644
42641--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42642+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42643@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
42644 struct list_head mcg_mgid0_list;
42645 struct workqueue_struct *mcg_wq;
42646 struct mlx4_ib_demux_pv_ctx **tun;
42647- atomic_t tid;
42648+ atomic_unchecked_t tid;
42649 int flushing; /* flushing the work queue */
42650 };
42651
42652diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42653index 9d3e5c1..6f166df 100644
42654--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42655+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42656@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42657 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42658 }
42659
42660-int mthca_QUERY_FW(struct mthca_dev *dev)
42661+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42662 {
42663 struct mthca_mailbox *mailbox;
42664 u32 *outbox;
42665@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42666 CMD_TIME_CLASS_B);
42667 }
42668
42669-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42670+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42671 int num_mtt)
42672 {
42673 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42674@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42675 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42676 }
42677
42678-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42679+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42680 int eq_num)
42681 {
42682 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42683@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42684 CMD_TIME_CLASS_B);
42685 }
42686
42687-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42688+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42689 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42690 void *in_mad, void *response_mad)
42691 {
42692diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42693index ded76c1..0cf0a08 100644
42694--- a/drivers/infiniband/hw/mthca/mthca_main.c
42695+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42696@@ -692,7 +692,7 @@ err_close:
42697 return err;
42698 }
42699
42700-static int mthca_setup_hca(struct mthca_dev *dev)
42701+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42702 {
42703 int err;
42704
42705diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42706index ed9a989..6aa5dc2 100644
42707--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42708+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42709@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42710 * through the bitmaps)
42711 */
42712
42713-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42714+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42715 {
42716 int o;
42717 int m;
42718@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42719 return key;
42720 }
42721
42722-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42723+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42724 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42725 {
42726 struct mthca_mailbox *mailbox;
42727@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42728 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42729 }
42730
42731-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42732+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42733 u64 *buffer_list, int buffer_size_shift,
42734 int list_len, u64 iova, u64 total_size,
42735 u32 access, struct mthca_mr *mr)
42736diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42737index 415f8e1..e34214e 100644
42738--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42739+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42740@@ -764,7 +764,7 @@ unlock:
42741 return 0;
42742 }
42743
42744-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42745+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42746 {
42747 struct mthca_dev *dev = to_mdev(ibcq->device);
42748 struct mthca_cq *cq = to_mcq(ibcq);
42749diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42750index 3b2a6dc..bce26ff 100644
42751--- a/drivers/infiniband/hw/nes/nes.c
42752+++ b/drivers/infiniband/hw/nes/nes.c
42753@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42754 LIST_HEAD(nes_adapter_list);
42755 static LIST_HEAD(nes_dev_list);
42756
42757-atomic_t qps_destroyed;
42758+atomic_unchecked_t qps_destroyed;
42759
42760 static unsigned int ee_flsh_adapter;
42761 static unsigned int sysfs_nonidx_addr;
42762@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42763 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42764 struct nes_adapter *nesadapter = nesdev->nesadapter;
42765
42766- atomic_inc(&qps_destroyed);
42767+ atomic_inc_unchecked(&qps_destroyed);
42768
42769 /* Free the control structures */
42770
42771diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42772index bd9d132..70d84f4 100644
42773--- a/drivers/infiniband/hw/nes/nes.h
42774+++ b/drivers/infiniband/hw/nes/nes.h
42775@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
42776 extern unsigned int wqm_quanta;
42777 extern struct list_head nes_adapter_list;
42778
42779-extern atomic_t cm_connects;
42780-extern atomic_t cm_accepts;
42781-extern atomic_t cm_disconnects;
42782-extern atomic_t cm_closes;
42783-extern atomic_t cm_connecteds;
42784-extern atomic_t cm_connect_reqs;
42785-extern atomic_t cm_rejects;
42786-extern atomic_t mod_qp_timouts;
42787-extern atomic_t qps_created;
42788-extern atomic_t qps_destroyed;
42789-extern atomic_t sw_qps_destroyed;
42790+extern atomic_unchecked_t cm_connects;
42791+extern atomic_unchecked_t cm_accepts;
42792+extern atomic_unchecked_t cm_disconnects;
42793+extern atomic_unchecked_t cm_closes;
42794+extern atomic_unchecked_t cm_connecteds;
42795+extern atomic_unchecked_t cm_connect_reqs;
42796+extern atomic_unchecked_t cm_rejects;
42797+extern atomic_unchecked_t mod_qp_timouts;
42798+extern atomic_unchecked_t qps_created;
42799+extern atomic_unchecked_t qps_destroyed;
42800+extern atomic_unchecked_t sw_qps_destroyed;
42801 extern u32 mh_detected;
42802 extern u32 mh_pauses_sent;
42803 extern u32 cm_packets_sent;
42804@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
42805 extern u32 cm_packets_received;
42806 extern u32 cm_packets_dropped;
42807 extern u32 cm_packets_retrans;
42808-extern atomic_t cm_listens_created;
42809-extern atomic_t cm_listens_destroyed;
42810+extern atomic_unchecked_t cm_listens_created;
42811+extern atomic_unchecked_t cm_listens_destroyed;
42812 extern u32 cm_backlog_drops;
42813-extern atomic_t cm_loopbacks;
42814-extern atomic_t cm_nodes_created;
42815-extern atomic_t cm_nodes_destroyed;
42816-extern atomic_t cm_accel_dropped_pkts;
42817-extern atomic_t cm_resets_recvd;
42818-extern atomic_t pau_qps_created;
42819-extern atomic_t pau_qps_destroyed;
42820+extern atomic_unchecked_t cm_loopbacks;
42821+extern atomic_unchecked_t cm_nodes_created;
42822+extern atomic_unchecked_t cm_nodes_destroyed;
42823+extern atomic_unchecked_t cm_accel_dropped_pkts;
42824+extern atomic_unchecked_t cm_resets_recvd;
42825+extern atomic_unchecked_t pau_qps_created;
42826+extern atomic_unchecked_t pau_qps_destroyed;
42827
42828 extern u32 int_mod_timer_init;
42829 extern u32 int_mod_cq_depth_256;
42830diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42831index 6f09a72..cf4399d 100644
42832--- a/drivers/infiniband/hw/nes/nes_cm.c
42833+++ b/drivers/infiniband/hw/nes/nes_cm.c
42834@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
42835 u32 cm_packets_retrans;
42836 u32 cm_packets_created;
42837 u32 cm_packets_received;
42838-atomic_t cm_listens_created;
42839-atomic_t cm_listens_destroyed;
42840+atomic_unchecked_t cm_listens_created;
42841+atomic_unchecked_t cm_listens_destroyed;
42842 u32 cm_backlog_drops;
42843-atomic_t cm_loopbacks;
42844-atomic_t cm_nodes_created;
42845-atomic_t cm_nodes_destroyed;
42846-atomic_t cm_accel_dropped_pkts;
42847-atomic_t cm_resets_recvd;
42848+atomic_unchecked_t cm_loopbacks;
42849+atomic_unchecked_t cm_nodes_created;
42850+atomic_unchecked_t cm_nodes_destroyed;
42851+atomic_unchecked_t cm_accel_dropped_pkts;
42852+atomic_unchecked_t cm_resets_recvd;
42853
42854 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
42855 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
42856@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
42857 /* instance of function pointers for client API */
42858 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
42859 static struct nes_cm_ops nes_cm_api = {
42860- mini_cm_accelerated,
42861- mini_cm_listen,
42862- mini_cm_del_listen,
42863- mini_cm_connect,
42864- mini_cm_close,
42865- mini_cm_accept,
42866- mini_cm_reject,
42867- mini_cm_recv_pkt,
42868- mini_cm_dealloc_core,
42869- mini_cm_get,
42870- mini_cm_set
42871+ .accelerated = mini_cm_accelerated,
42872+ .listen = mini_cm_listen,
42873+ .stop_listener = mini_cm_del_listen,
42874+ .connect = mini_cm_connect,
42875+ .close = mini_cm_close,
42876+ .accept = mini_cm_accept,
42877+ .reject = mini_cm_reject,
42878+ .recv_pkt = mini_cm_recv_pkt,
42879+ .destroy_cm_core = mini_cm_dealloc_core,
42880+ .get = mini_cm_get,
42881+ .set = mini_cm_set
42882 };
42883
42884 static struct nes_cm_core *g_cm_core;
42885
42886-atomic_t cm_connects;
42887-atomic_t cm_accepts;
42888-atomic_t cm_disconnects;
42889-atomic_t cm_closes;
42890-atomic_t cm_connecteds;
42891-atomic_t cm_connect_reqs;
42892-atomic_t cm_rejects;
42893+atomic_unchecked_t cm_connects;
42894+atomic_unchecked_t cm_accepts;
42895+atomic_unchecked_t cm_disconnects;
42896+atomic_unchecked_t cm_closes;
42897+atomic_unchecked_t cm_connecteds;
42898+atomic_unchecked_t cm_connect_reqs;
42899+atomic_unchecked_t cm_rejects;
42900
42901 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
42902 {
42903@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
42904 kfree(listener);
42905 listener = NULL;
42906 ret = 0;
42907- atomic_inc(&cm_listens_destroyed);
42908+ atomic_inc_unchecked(&cm_listens_destroyed);
42909 } else {
42910 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
42911 }
42912@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
42913 cm_node->rem_mac);
42914
42915 add_hte_node(cm_core, cm_node);
42916- atomic_inc(&cm_nodes_created);
42917+ atomic_inc_unchecked(&cm_nodes_created);
42918
42919 return cm_node;
42920 }
42921@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
42922 }
42923
42924 atomic_dec(&cm_core->node_cnt);
42925- atomic_inc(&cm_nodes_destroyed);
42926+ atomic_inc_unchecked(&cm_nodes_destroyed);
42927 nesqp = cm_node->nesqp;
42928 if (nesqp) {
42929 nesqp->cm_node = NULL;
42930@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
42931
42932 static void drop_packet(struct sk_buff *skb)
42933 {
42934- atomic_inc(&cm_accel_dropped_pkts);
42935+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42936 dev_kfree_skb_any(skb);
42937 }
42938
42939@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
42940 {
42941
42942 int reset = 0; /* whether to send reset in case of err.. */
42943- atomic_inc(&cm_resets_recvd);
42944+ atomic_inc_unchecked(&cm_resets_recvd);
42945 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
42946 " refcnt=%d\n", cm_node, cm_node->state,
42947 atomic_read(&cm_node->ref_count));
42948@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
42949 rem_ref_cm_node(cm_node->cm_core, cm_node);
42950 return NULL;
42951 }
42952- atomic_inc(&cm_loopbacks);
42953+ atomic_inc_unchecked(&cm_loopbacks);
42954 loopbackremotenode->loopbackpartner = cm_node;
42955 loopbackremotenode->tcp_cntxt.rcv_wscale =
42956 NES_CM_DEFAULT_RCV_WND_SCALE;
42957@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
42958 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
42959 else {
42960 rem_ref_cm_node(cm_core, cm_node);
42961- atomic_inc(&cm_accel_dropped_pkts);
42962+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42963 dev_kfree_skb_any(skb);
42964 }
42965 break;
42966@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42967
42968 if ((cm_id) && (cm_id->event_handler)) {
42969 if (issue_disconn) {
42970- atomic_inc(&cm_disconnects);
42971+ atomic_inc_unchecked(&cm_disconnects);
42972 cm_event.event = IW_CM_EVENT_DISCONNECT;
42973 cm_event.status = disconn_status;
42974 cm_event.local_addr = cm_id->local_addr;
42975@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42976 }
42977
42978 if (issue_close) {
42979- atomic_inc(&cm_closes);
42980+ atomic_inc_unchecked(&cm_closes);
42981 nes_disconnect(nesqp, 1);
42982
42983 cm_id->provider_data = nesqp;
42984@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42985
42986 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
42987 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
42988- atomic_inc(&cm_accepts);
42989+ atomic_inc_unchecked(&cm_accepts);
42990
42991 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
42992 netdev_refcnt_read(nesvnic->netdev));
42993@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
42994 struct nes_cm_core *cm_core;
42995 u8 *start_buff;
42996
42997- atomic_inc(&cm_rejects);
42998+ atomic_inc_unchecked(&cm_rejects);
42999 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43000 loopback = cm_node->loopbackpartner;
43001 cm_core = cm_node->cm_core;
43002@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43003 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43004 ntohs(laddr->sin_port));
43005
43006- atomic_inc(&cm_connects);
43007+ atomic_inc_unchecked(&cm_connects);
43008 nesqp->active_conn = 1;
43009
43010 /* cache the cm_id in the qp */
43011@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43012 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43013 return err;
43014 }
43015- atomic_inc(&cm_listens_created);
43016+ atomic_inc_unchecked(&cm_listens_created);
43017 }
43018
43019 cm_id->add_ref(cm_id);
43020@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43021
43022 if (nesqp->destroyed)
43023 return;
43024- atomic_inc(&cm_connecteds);
43025+ atomic_inc_unchecked(&cm_connecteds);
43026 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43027 " local port 0x%04X. jiffies = %lu.\n",
43028 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43029@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43030
43031 cm_id->add_ref(cm_id);
43032 ret = cm_id->event_handler(cm_id, &cm_event);
43033- atomic_inc(&cm_closes);
43034+ atomic_inc_unchecked(&cm_closes);
43035 cm_event.event = IW_CM_EVENT_CLOSE;
43036 cm_event.status = 0;
43037 cm_event.provider_data = cm_id->provider_data;
43038@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43039 return;
43040 cm_id = cm_node->cm_id;
43041
43042- atomic_inc(&cm_connect_reqs);
43043+ atomic_inc_unchecked(&cm_connect_reqs);
43044 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43045 cm_node, cm_id, jiffies);
43046
43047@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43048 return;
43049 cm_id = cm_node->cm_id;
43050
43051- atomic_inc(&cm_connect_reqs);
43052+ atomic_inc_unchecked(&cm_connect_reqs);
43053 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43054 cm_node, cm_id, jiffies);
43055
43056diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43057index 4166452..fc952c3 100644
43058--- a/drivers/infiniband/hw/nes/nes_mgt.c
43059+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43060@@ -40,8 +40,8 @@
43061 #include "nes.h"
43062 #include "nes_mgt.h"
43063
43064-atomic_t pau_qps_created;
43065-atomic_t pau_qps_destroyed;
43066+atomic_unchecked_t pau_qps_created;
43067+atomic_unchecked_t pau_qps_destroyed;
43068
43069 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43070 {
43071@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43072 {
43073 struct sk_buff *skb;
43074 unsigned long flags;
43075- atomic_inc(&pau_qps_destroyed);
43076+ atomic_inc_unchecked(&pau_qps_destroyed);
43077
43078 /* Free packets that have not yet been forwarded */
43079 /* Lock is acquired by skb_dequeue when removing the skb */
43080@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43081 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43082 skb_queue_head_init(&nesqp->pau_list);
43083 spin_lock_init(&nesqp->pau_lock);
43084- atomic_inc(&pau_qps_created);
43085+ atomic_inc_unchecked(&pau_qps_created);
43086 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43087 }
43088
43089diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43090index 49eb511..a774366 100644
43091--- a/drivers/infiniband/hw/nes/nes_nic.c
43092+++ b/drivers/infiniband/hw/nes/nes_nic.c
43093@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43094 target_stat_values[++index] = mh_detected;
43095 target_stat_values[++index] = mh_pauses_sent;
43096 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43097- target_stat_values[++index] = atomic_read(&cm_connects);
43098- target_stat_values[++index] = atomic_read(&cm_accepts);
43099- target_stat_values[++index] = atomic_read(&cm_disconnects);
43100- target_stat_values[++index] = atomic_read(&cm_connecteds);
43101- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43102- target_stat_values[++index] = atomic_read(&cm_rejects);
43103- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43104- target_stat_values[++index] = atomic_read(&qps_created);
43105- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43106- target_stat_values[++index] = atomic_read(&qps_destroyed);
43107- target_stat_values[++index] = atomic_read(&cm_closes);
43108+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43109+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43110+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43111+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43112+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43113+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43114+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43115+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43116+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43117+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43118+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43119 target_stat_values[++index] = cm_packets_sent;
43120 target_stat_values[++index] = cm_packets_bounced;
43121 target_stat_values[++index] = cm_packets_created;
43122 target_stat_values[++index] = cm_packets_received;
43123 target_stat_values[++index] = cm_packets_dropped;
43124 target_stat_values[++index] = cm_packets_retrans;
43125- target_stat_values[++index] = atomic_read(&cm_listens_created);
43126- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43127+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43128+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43129 target_stat_values[++index] = cm_backlog_drops;
43130- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43131- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43132- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43133- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43134- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43135+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43136+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43137+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43138+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43139+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43140 target_stat_values[++index] = nesadapter->free_4kpbl;
43141 target_stat_values[++index] = nesadapter->free_256pbl;
43142 target_stat_values[++index] = int_mod_timer_init;
43143 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43144 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43145 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43146- target_stat_values[++index] = atomic_read(&pau_qps_created);
43147- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43148+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43149+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43150 }
43151
43152 /**
43153diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43154index c0d0296..3185f57 100644
43155--- a/drivers/infiniband/hw/nes/nes_verbs.c
43156+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43157@@ -46,9 +46,9 @@
43158
43159 #include <rdma/ib_umem.h>
43160
43161-atomic_t mod_qp_timouts;
43162-atomic_t qps_created;
43163-atomic_t sw_qps_destroyed;
43164+atomic_unchecked_t mod_qp_timouts;
43165+atomic_unchecked_t qps_created;
43166+atomic_unchecked_t sw_qps_destroyed;
43167
43168 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43169
43170@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43171 if (init_attr->create_flags)
43172 return ERR_PTR(-EINVAL);
43173
43174- atomic_inc(&qps_created);
43175+ atomic_inc_unchecked(&qps_created);
43176 switch (init_attr->qp_type) {
43177 case IB_QPT_RC:
43178 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43179@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43180 struct iw_cm_event cm_event;
43181 int ret = 0;
43182
43183- atomic_inc(&sw_qps_destroyed);
43184+ atomic_inc_unchecked(&sw_qps_destroyed);
43185 nesqp->destroyed = 1;
43186
43187 /* Blow away the connection if it exists. */
43188diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43189index b218254..1d1aa3c 100644
43190--- a/drivers/infiniband/hw/qib/qib.h
43191+++ b/drivers/infiniband/hw/qib/qib.h
43192@@ -52,6 +52,7 @@
43193 #include <linux/kref.h>
43194 #include <linux/sched.h>
43195 #include <linux/kthread.h>
43196+#include <linux/slab.h>
43197
43198 #include "qib_common.h"
43199 #include "qib_verbs.h"
43200diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43201index cdc7df4..a2fdfdb 100644
43202--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43203+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43204@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43205 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43206 }
43207
43208-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43209+static struct rtnl_link_ops ipoib_link_ops = {
43210 .kind = "ipoib",
43211 .maxtype = IFLA_IPOIB_MAX,
43212 .policy = ipoib_policy,
43213diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43214index e853a21..56fc5a8 100644
43215--- a/drivers/input/gameport/gameport.c
43216+++ b/drivers/input/gameport/gameport.c
43217@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43218 */
43219 static void gameport_init_port(struct gameport *gameport)
43220 {
43221- static atomic_t gameport_no = ATOMIC_INIT(-1);
43222+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43223
43224 __module_get(THIS_MODULE);
43225
43226 mutex_init(&gameport->drv_mutex);
43227 device_initialize(&gameport->dev);
43228 dev_set_name(&gameport->dev, "gameport%lu",
43229- (unsigned long)atomic_inc_return(&gameport_no));
43230+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43231 gameport->dev.bus = &gameport_bus;
43232 gameport->dev.release = gameport_release_port;
43233 if (gameport->parent)
43234diff --git a/drivers/input/input.c b/drivers/input/input.c
43235index 213e3a1..4fea837 100644
43236--- a/drivers/input/input.c
43237+++ b/drivers/input/input.c
43238@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
43239 */
43240 struct input_dev *input_allocate_device(void)
43241 {
43242- static atomic_t input_no = ATOMIC_INIT(-1);
43243+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43244 struct input_dev *dev;
43245
43246 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43247@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
43248 INIT_LIST_HEAD(&dev->node);
43249
43250 dev_set_name(&dev->dev, "input%lu",
43251- (unsigned long)atomic_inc_return(&input_no));
43252+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43253
43254 __module_get(THIS_MODULE);
43255 }
43256diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43257index 4a95b22..874c182 100644
43258--- a/drivers/input/joystick/sidewinder.c
43259+++ b/drivers/input/joystick/sidewinder.c
43260@@ -30,6 +30,7 @@
43261 #include <linux/kernel.h>
43262 #include <linux/module.h>
43263 #include <linux/slab.h>
43264+#include <linux/sched.h>
43265 #include <linux/input.h>
43266 #include <linux/gameport.h>
43267 #include <linux/jiffies.h>
43268diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43269index 3aa2f3f..53c00ea 100644
43270--- a/drivers/input/joystick/xpad.c
43271+++ b/drivers/input/joystick/xpad.c
43272@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43273
43274 static int xpad_led_probe(struct usb_xpad *xpad)
43275 {
43276- static atomic_t led_seq = ATOMIC_INIT(-1);
43277+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43278 unsigned long led_no;
43279 struct xpad_led *led;
43280 struct led_classdev *led_cdev;
43281@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43282 if (!led)
43283 return -ENOMEM;
43284
43285- led_no = atomic_inc_return(&led_seq);
43286+ led_no = atomic_inc_return_unchecked(&led_seq);
43287
43288 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43289 led->xpad = xpad;
43290diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43291index ac1fa5f..5f7502c 100644
43292--- a/drivers/input/misc/ims-pcu.c
43293+++ b/drivers/input/misc/ims-pcu.c
43294@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43295
43296 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43297 {
43298- static atomic_t device_no = ATOMIC_INIT(-1);
43299+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43300
43301 const struct ims_pcu_device_info *info;
43302 int error;
43303@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43304 }
43305
43306 /* Device appears to be operable, complete initialization */
43307- pcu->device_no = atomic_inc_return(&device_no);
43308+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43309
43310 /*
43311 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43312diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43313index f4cf664..3204fda 100644
43314--- a/drivers/input/mouse/psmouse.h
43315+++ b/drivers/input/mouse/psmouse.h
43316@@ -117,7 +117,7 @@ struct psmouse_attribute {
43317 ssize_t (*set)(struct psmouse *psmouse, void *data,
43318 const char *buf, size_t count);
43319 bool protect;
43320-};
43321+} __do_const;
43322 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43323
43324 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43325diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43326index b604564..3f14ae4 100644
43327--- a/drivers/input/mousedev.c
43328+++ b/drivers/input/mousedev.c
43329@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43330
43331 spin_unlock_irq(&client->packet_lock);
43332
43333- if (copy_to_user(buffer, data, count))
43334+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43335 return -EFAULT;
43336
43337 return count;
43338diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43339index a05a517..323a2fd 100644
43340--- a/drivers/input/serio/serio.c
43341+++ b/drivers/input/serio/serio.c
43342@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43343 */
43344 static void serio_init_port(struct serio *serio)
43345 {
43346- static atomic_t serio_no = ATOMIC_INIT(-1);
43347+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43348
43349 __module_get(THIS_MODULE);
43350
43351@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43352 mutex_init(&serio->drv_mutex);
43353 device_initialize(&serio->dev);
43354 dev_set_name(&serio->dev, "serio%lu",
43355- (unsigned long)atomic_inc_return(&serio_no));
43356+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43357 serio->dev.bus = &serio_bus;
43358 serio->dev.release = serio_release_port;
43359 serio->dev.groups = serio_device_attr_groups;
43360diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43361index 71ef5d6..93380a9 100644
43362--- a/drivers/input/serio/serio_raw.c
43363+++ b/drivers/input/serio/serio_raw.c
43364@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43365
43366 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43367 {
43368- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43369+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43370 struct serio_raw *serio_raw;
43371 int err;
43372
43373@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43374 }
43375
43376 snprintf(serio_raw->name, sizeof(serio_raw->name),
43377- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43378+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43379 kref_init(&serio_raw->kref);
43380 INIT_LIST_HEAD(&serio_raw->client_list);
43381 init_waitqueue_head(&serio_raw->wait);
43382diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43383index 9802485..2e9941d 100644
43384--- a/drivers/iommu/amd_iommu.c
43385+++ b/drivers/iommu/amd_iommu.c
43386@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43387
43388 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43389 {
43390+ phys_addr_t physaddr;
43391 WARN_ON(address & 0x7ULL);
43392
43393 memset(cmd, 0, sizeof(*cmd));
43394- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43395- cmd->data[1] = upper_32_bits(__pa(address));
43396+
43397+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43398+ if (object_starts_on_stack((void *)address)) {
43399+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43400+ physaddr = __pa((u64)adjbuf);
43401+ } else
43402+#endif
43403+ physaddr = __pa(address);
43404+
43405+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43406+ cmd->data[1] = upper_32_bits(physaddr);
43407 cmd->data[2] = 1;
43408 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43409 }
43410diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
43411index 6cd47b7..264d14a 100644
43412--- a/drivers/iommu/arm-smmu.c
43413+++ b/drivers/iommu/arm-smmu.c
43414@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
43415 cfg->irptndx = cfg->cbndx;
43416 }
43417
43418- ACCESS_ONCE(smmu_domain->smmu) = smmu;
43419+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
43420 arm_smmu_init_context_bank(smmu_domain);
43421 spin_unlock_irqrestore(&smmu_domain->lock, flags);
43422
43423diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43424index f7718d7..3ef740b 100644
43425--- a/drivers/iommu/iommu.c
43426+++ b/drivers/iommu/iommu.c
43427@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43428 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43429 {
43430 int err;
43431- struct notifier_block *nb;
43432+ notifier_block_no_const *nb;
43433 struct iommu_callback_data cb = {
43434 .ops = ops,
43435 };
43436diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43437index 89c4846..1de796f 100644
43438--- a/drivers/iommu/irq_remapping.c
43439+++ b/drivers/iommu/irq_remapping.c
43440@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43441 void panic_if_irq_remap(const char *msg)
43442 {
43443 if (irq_remapping_enabled)
43444- panic(msg);
43445+ panic("%s", msg);
43446 }
43447
43448 static void ir_ack_apic_edge(struct irq_data *data)
43449@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43450
43451 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43452 {
43453- chip->irq_print_chip = ir_print_prefix;
43454- chip->irq_ack = ir_ack_apic_edge;
43455- chip->irq_eoi = ir_ack_apic_level;
43456- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43457+ pax_open_kernel();
43458+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43459+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43460+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43461+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43462+ pax_close_kernel();
43463 }
43464
43465 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
43466diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43467index d617ee5..df8be8b 100644
43468--- a/drivers/irqchip/irq-gic.c
43469+++ b/drivers/irqchip/irq-gic.c
43470@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43471 * Supported arch specific GIC irq extension.
43472 * Default make them NULL.
43473 */
43474-struct irq_chip gic_arch_extn = {
43475+irq_chip_no_const gic_arch_extn = {
43476 .irq_eoi = NULL,
43477 .irq_mask = NULL,
43478 .irq_unmask = NULL,
43479@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43480 chained_irq_exit(chip, desc);
43481 }
43482
43483-static struct irq_chip gic_chip = {
43484+static irq_chip_no_const gic_chip __read_only = {
43485 .name = "GIC",
43486 .irq_mask = gic_mask_irq,
43487 .irq_unmask = gic_unmask_irq,
43488diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43489index 078cac5..fb0f846 100644
43490--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43491+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43492@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43493 struct intc_irqpin_iomem *i;
43494 struct resource *io[INTC_IRQPIN_REG_NR];
43495 struct resource *irq;
43496- struct irq_chip *irq_chip;
43497+ irq_chip_no_const *irq_chip;
43498 void (*enable_fn)(struct irq_data *d);
43499 void (*disable_fn)(struct irq_data *d);
43500 const char *name = dev_name(dev);
43501diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43502index 384e6ed..7a771b2 100644
43503--- a/drivers/irqchip/irq-renesas-irqc.c
43504+++ b/drivers/irqchip/irq-renesas-irqc.c
43505@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43506 struct irqc_priv *p;
43507 struct resource *io;
43508 struct resource *irq;
43509- struct irq_chip *irq_chip;
43510+ irq_chip_no_const *irq_chip;
43511 const char *name = dev_name(&pdev->dev);
43512 int ret;
43513 int k;
43514diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43515index 6a2df32..dc962f1 100644
43516--- a/drivers/isdn/capi/capi.c
43517+++ b/drivers/isdn/capi/capi.c
43518@@ -81,8 +81,8 @@ struct capiminor {
43519
43520 struct capi20_appl *ap;
43521 u32 ncci;
43522- atomic_t datahandle;
43523- atomic_t msgid;
43524+ atomic_unchecked_t datahandle;
43525+ atomic_unchecked_t msgid;
43526
43527 struct tty_port port;
43528 int ttyinstop;
43529@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43530 capimsg_setu16(s, 2, mp->ap->applid);
43531 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43532 capimsg_setu8 (s, 5, CAPI_RESP);
43533- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43534+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43535 capimsg_setu32(s, 8, mp->ncci);
43536 capimsg_setu16(s, 12, datahandle);
43537 }
43538@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43539 mp->outbytes -= len;
43540 spin_unlock_bh(&mp->outlock);
43541
43542- datahandle = atomic_inc_return(&mp->datahandle);
43543+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43544 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43545 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43546 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43547 capimsg_setu16(skb->data, 2, mp->ap->applid);
43548 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43549 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43550- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43551+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43552 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43553 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43554 capimsg_setu16(skb->data, 16, len); /* Data length */
43555diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43556index aecec6d..11e13c5 100644
43557--- a/drivers/isdn/gigaset/bas-gigaset.c
43558+++ b/drivers/isdn/gigaset/bas-gigaset.c
43559@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43560
43561
43562 static const struct gigaset_ops gigops = {
43563- gigaset_write_cmd,
43564- gigaset_write_room,
43565- gigaset_chars_in_buffer,
43566- gigaset_brkchars,
43567- gigaset_init_bchannel,
43568- gigaset_close_bchannel,
43569- gigaset_initbcshw,
43570- gigaset_freebcshw,
43571- gigaset_reinitbcshw,
43572- gigaset_initcshw,
43573- gigaset_freecshw,
43574- gigaset_set_modem_ctrl,
43575- gigaset_baud_rate,
43576- gigaset_set_line_ctrl,
43577- gigaset_isoc_send_skb,
43578- gigaset_isoc_input,
43579+ .write_cmd = gigaset_write_cmd,
43580+ .write_room = gigaset_write_room,
43581+ .chars_in_buffer = gigaset_chars_in_buffer,
43582+ .brkchars = gigaset_brkchars,
43583+ .init_bchannel = gigaset_init_bchannel,
43584+ .close_bchannel = gigaset_close_bchannel,
43585+ .initbcshw = gigaset_initbcshw,
43586+ .freebcshw = gigaset_freebcshw,
43587+ .reinitbcshw = gigaset_reinitbcshw,
43588+ .initcshw = gigaset_initcshw,
43589+ .freecshw = gigaset_freecshw,
43590+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43591+ .baud_rate = gigaset_baud_rate,
43592+ .set_line_ctrl = gigaset_set_line_ctrl,
43593+ .send_skb = gigaset_isoc_send_skb,
43594+ .handle_input = gigaset_isoc_input,
43595 };
43596
43597 /* bas_gigaset_init
43598diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43599index 600c79b..3752bab 100644
43600--- a/drivers/isdn/gigaset/interface.c
43601+++ b/drivers/isdn/gigaset/interface.c
43602@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43603 }
43604 tty->driver_data = cs;
43605
43606- ++cs->port.count;
43607+ atomic_inc(&cs->port.count);
43608
43609- if (cs->port.count == 1) {
43610+ if (atomic_read(&cs->port.count) == 1) {
43611 tty_port_tty_set(&cs->port, tty);
43612 cs->port.low_latency = 1;
43613 }
43614@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43615
43616 if (!cs->connected)
43617 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43618- else if (!cs->port.count)
43619+ else if (!atomic_read(&cs->port.count))
43620 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43621- else if (!--cs->port.count)
43622+ else if (!atomic_dec_return(&cs->port.count))
43623 tty_port_tty_set(&cs->port, NULL);
43624
43625 mutex_unlock(&cs->mutex);
43626diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43627index 8c91fd5..14f13ce 100644
43628--- a/drivers/isdn/gigaset/ser-gigaset.c
43629+++ b/drivers/isdn/gigaset/ser-gigaset.c
43630@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43631 }
43632
43633 static const struct gigaset_ops ops = {
43634- gigaset_write_cmd,
43635- gigaset_write_room,
43636- gigaset_chars_in_buffer,
43637- gigaset_brkchars,
43638- gigaset_init_bchannel,
43639- gigaset_close_bchannel,
43640- gigaset_initbcshw,
43641- gigaset_freebcshw,
43642- gigaset_reinitbcshw,
43643- gigaset_initcshw,
43644- gigaset_freecshw,
43645- gigaset_set_modem_ctrl,
43646- gigaset_baud_rate,
43647- gigaset_set_line_ctrl,
43648- gigaset_m10x_send_skb, /* asyncdata.c */
43649- gigaset_m10x_input, /* asyncdata.c */
43650+ .write_cmd = gigaset_write_cmd,
43651+ .write_room = gigaset_write_room,
43652+ .chars_in_buffer = gigaset_chars_in_buffer,
43653+ .brkchars = gigaset_brkchars,
43654+ .init_bchannel = gigaset_init_bchannel,
43655+ .close_bchannel = gigaset_close_bchannel,
43656+ .initbcshw = gigaset_initbcshw,
43657+ .freebcshw = gigaset_freebcshw,
43658+ .reinitbcshw = gigaset_reinitbcshw,
43659+ .initcshw = gigaset_initcshw,
43660+ .freecshw = gigaset_freecshw,
43661+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43662+ .baud_rate = gigaset_baud_rate,
43663+ .set_line_ctrl = gigaset_set_line_ctrl,
43664+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43665+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43666 };
43667
43668
43669diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43670index 5f306e2..5342f88 100644
43671--- a/drivers/isdn/gigaset/usb-gigaset.c
43672+++ b/drivers/isdn/gigaset/usb-gigaset.c
43673@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43674 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43675 memcpy(cs->hw.usb->bchars, buf, 6);
43676 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43677- 0, 0, &buf, 6, 2000);
43678+ 0, 0, buf, 6, 2000);
43679 }
43680
43681 static void gigaset_freebcshw(struct bc_state *bcs)
43682@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43683 }
43684
43685 static const struct gigaset_ops ops = {
43686- gigaset_write_cmd,
43687- gigaset_write_room,
43688- gigaset_chars_in_buffer,
43689- gigaset_brkchars,
43690- gigaset_init_bchannel,
43691- gigaset_close_bchannel,
43692- gigaset_initbcshw,
43693- gigaset_freebcshw,
43694- gigaset_reinitbcshw,
43695- gigaset_initcshw,
43696- gigaset_freecshw,
43697- gigaset_set_modem_ctrl,
43698- gigaset_baud_rate,
43699- gigaset_set_line_ctrl,
43700- gigaset_m10x_send_skb,
43701- gigaset_m10x_input,
43702+ .write_cmd = gigaset_write_cmd,
43703+ .write_room = gigaset_write_room,
43704+ .chars_in_buffer = gigaset_chars_in_buffer,
43705+ .brkchars = gigaset_brkchars,
43706+ .init_bchannel = gigaset_init_bchannel,
43707+ .close_bchannel = gigaset_close_bchannel,
43708+ .initbcshw = gigaset_initbcshw,
43709+ .freebcshw = gigaset_freebcshw,
43710+ .reinitbcshw = gigaset_reinitbcshw,
43711+ .initcshw = gigaset_initcshw,
43712+ .freecshw = gigaset_freecshw,
43713+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43714+ .baud_rate = gigaset_baud_rate,
43715+ .set_line_ctrl = gigaset_set_line_ctrl,
43716+ .send_skb = gigaset_m10x_send_skb,
43717+ .handle_input = gigaset_m10x_input,
43718 };
43719
43720 /*
43721diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43722index 4d9b195..455075c 100644
43723--- a/drivers/isdn/hardware/avm/b1.c
43724+++ b/drivers/isdn/hardware/avm/b1.c
43725@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43726 }
43727 if (left) {
43728 if (t4file->user) {
43729- if (copy_from_user(buf, dp, left))
43730+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43731 return -EFAULT;
43732 } else {
43733 memcpy(buf, dp, left);
43734@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43735 }
43736 if (left) {
43737 if (config->user) {
43738- if (copy_from_user(buf, dp, left))
43739+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43740 return -EFAULT;
43741 } else {
43742 memcpy(buf, dp, left);
43743diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43744index 9b856e1..fa03c92 100644
43745--- a/drivers/isdn/i4l/isdn_common.c
43746+++ b/drivers/isdn/i4l/isdn_common.c
43747@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43748 } else
43749 return -EINVAL;
43750 case IIOCDBGVAR:
43751+ if (!capable(CAP_SYS_RAWIO))
43752+ return -EPERM;
43753 if (arg) {
43754 if (copy_to_user(argp, &dev, sizeof(ulong)))
43755 return -EFAULT;
43756diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43757index 91d5730..336523e 100644
43758--- a/drivers/isdn/i4l/isdn_concap.c
43759+++ b/drivers/isdn/i4l/isdn_concap.c
43760@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43761 }
43762
43763 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43764- &isdn_concap_dl_data_req,
43765- &isdn_concap_dl_connect_req,
43766- &isdn_concap_dl_disconn_req
43767+ .data_req = &isdn_concap_dl_data_req,
43768+ .connect_req = &isdn_concap_dl_connect_req,
43769+ .disconn_req = &isdn_concap_dl_disconn_req
43770 };
43771
43772 /* The following should better go into a dedicated source file such that
43773diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43774index bc91261..2ef7e36 100644
43775--- a/drivers/isdn/i4l/isdn_tty.c
43776+++ b/drivers/isdn/i4l/isdn_tty.c
43777@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43778
43779 #ifdef ISDN_DEBUG_MODEM_OPEN
43780 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43781- port->count);
43782+ atomic_read(&port->count));
43783 #endif
43784- port->count++;
43785+ atomic_inc(&port->count);
43786 port->tty = tty;
43787 /*
43788 * Start up serial port
43789@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43790 #endif
43791 return;
43792 }
43793- if ((tty->count == 1) && (port->count != 1)) {
43794+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43795 /*
43796 * Uh, oh. tty->count is 1, which means that the tty
43797 * structure will be freed. Info->count should always
43798@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43799 * serial port won't be shutdown.
43800 */
43801 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43802- "info->count is %d\n", port->count);
43803- port->count = 1;
43804+ "info->count is %d\n", atomic_read(&port->count));
43805+ atomic_set(&port->count, 1);
43806 }
43807- if (--port->count < 0) {
43808+ if (atomic_dec_return(&port->count) < 0) {
43809 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43810- info->line, port->count);
43811- port->count = 0;
43812+ info->line, atomic_read(&port->count));
43813+ atomic_set(&port->count, 0);
43814 }
43815- if (port->count) {
43816+ if (atomic_read(&port->count)) {
43817 #ifdef ISDN_DEBUG_MODEM_OPEN
43818 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43819 #endif
43820@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43821 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43822 return;
43823 isdn_tty_shutdown(info);
43824- port->count = 0;
43825+ atomic_set(&port->count, 0);
43826 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43827 port->tty = NULL;
43828 wake_up_interruptible(&port->open_wait);
43829@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43830 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43831 modem_info *info = &dev->mdm.info[i];
43832
43833- if (info->port.count == 0)
43834+ if (atomic_read(&info->port.count) == 0)
43835 continue;
43836 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43837 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
43838diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43839index e2d4e58..40cd045 100644
43840--- a/drivers/isdn/i4l/isdn_x25iface.c
43841+++ b/drivers/isdn/i4l/isdn_x25iface.c
43842@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43843
43844
43845 static struct concap_proto_ops ix25_pops = {
43846- &isdn_x25iface_proto_new,
43847- &isdn_x25iface_proto_del,
43848- &isdn_x25iface_proto_restart,
43849- &isdn_x25iface_proto_close,
43850- &isdn_x25iface_xmit,
43851- &isdn_x25iface_receive,
43852- &isdn_x25iface_connect_ind,
43853- &isdn_x25iface_disconn_ind
43854+ .proto_new = &isdn_x25iface_proto_new,
43855+ .proto_del = &isdn_x25iface_proto_del,
43856+ .restart = &isdn_x25iface_proto_restart,
43857+ .close = &isdn_x25iface_proto_close,
43858+ .encap_and_xmit = &isdn_x25iface_xmit,
43859+ .data_ind = &isdn_x25iface_receive,
43860+ .connect_ind = &isdn_x25iface_connect_ind,
43861+ .disconn_ind = &isdn_x25iface_disconn_ind
43862 };
43863
43864 /* error message helper function */
43865diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
43866index 6a7447c..b4987ea 100644
43867--- a/drivers/isdn/icn/icn.c
43868+++ b/drivers/isdn/icn/icn.c
43869@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
43870 if (count > len)
43871 count = len;
43872 if (user) {
43873- if (copy_from_user(msg, buf, count))
43874+ if (count > sizeof msg || copy_from_user(msg, buf, count))
43875 return -EFAULT;
43876 } else
43877 memcpy(msg, buf, count);
43878@@ -1609,7 +1609,7 @@ icn_setup(char *line)
43879 if (ints[0] > 1)
43880 membase = (unsigned long)ints[2];
43881 if (str && *str) {
43882- strcpy(sid, str);
43883+ strlcpy(sid, str, sizeof(sid));
43884 icn_id = sid;
43885 if ((p = strchr(sid, ','))) {
43886 *p++ = 0;
43887diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
43888index 87f7dff..7300125 100644
43889--- a/drivers/isdn/mISDN/dsp_cmx.c
43890+++ b/drivers/isdn/mISDN/dsp_cmx.c
43891@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
43892 static u16 dsp_count; /* last sample count */
43893 static int dsp_count_valid; /* if we have last sample count */
43894
43895-void
43896+void __intentional_overflow(-1)
43897 dsp_cmx_send(void *arg)
43898 {
43899 struct dsp_conf *conf;
43900diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
43901index 0f9ed1e..2715d6f 100644
43902--- a/drivers/leds/leds-clevo-mail.c
43903+++ b/drivers/leds/leds-clevo-mail.c
43904@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
43905 * detected as working, but in reality it is not) as low as
43906 * possible.
43907 */
43908-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
43909+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
43910 {
43911 .callback = clevo_mail_led_dmi_callback,
43912 .ident = "Clevo D410J",
43913diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
43914index 046cb70..6b20d39 100644
43915--- a/drivers/leds/leds-ss4200.c
43916+++ b/drivers/leds/leds-ss4200.c
43917@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
43918 * detected as working, but in reality it is not) as low as
43919 * possible.
43920 */
43921-static struct dmi_system_id nas_led_whitelist[] __initdata = {
43922+static struct dmi_system_id nas_led_whitelist[] __initconst = {
43923 {
43924 .callback = ss4200_led_dmi_callback,
43925 .ident = "Intel SS4200-E",
43926diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
43927index 6590558..a74c5dd 100644
43928--- a/drivers/lguest/core.c
43929+++ b/drivers/lguest/core.c
43930@@ -96,9 +96,17 @@ static __init int map_switcher(void)
43931 * The end address needs +1 because __get_vm_area allocates an
43932 * extra guard page, so we need space for that.
43933 */
43934+
43935+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
43936+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43937+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
43938+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43939+#else
43940 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43941 VM_ALLOC, switcher_addr, switcher_addr
43942 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43943+#endif
43944+
43945 if (!switcher_vma) {
43946 err = -ENOMEM;
43947 printk("lguest: could not map switcher pages high\n");
43948@@ -121,7 +129,7 @@ static __init int map_switcher(void)
43949 * Now the Switcher is mapped at the right address, we can't fail!
43950 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
43951 */
43952- memcpy(switcher_vma->addr, start_switcher_text,
43953+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
43954 end_switcher_text - start_switcher_text);
43955
43956 printk(KERN_INFO "lguest: mapped switcher at %p\n",
43957diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
43958index e8b55c3..3514c37 100644
43959--- a/drivers/lguest/page_tables.c
43960+++ b/drivers/lguest/page_tables.c
43961@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
43962 /*:*/
43963
43964 #ifdef CONFIG_X86_PAE
43965-static void release_pmd(pmd_t *spmd)
43966+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
43967 {
43968 /* If the entry's not present, there's nothing to release. */
43969 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
43970diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
43971index 922a1ac..9dd0c2a 100644
43972--- a/drivers/lguest/x86/core.c
43973+++ b/drivers/lguest/x86/core.c
43974@@ -59,7 +59,7 @@ static struct {
43975 /* Offset from where switcher.S was compiled to where we've copied it */
43976 static unsigned long switcher_offset(void)
43977 {
43978- return switcher_addr - (unsigned long)start_switcher_text;
43979+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
43980 }
43981
43982 /* This cpu's struct lguest_pages (after the Switcher text page) */
43983@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
43984 * These copies are pretty cheap, so we do them unconditionally: */
43985 /* Save the current Host top-level page directory.
43986 */
43987+
43988+#ifdef CONFIG_PAX_PER_CPU_PGD
43989+ pages->state.host_cr3 = read_cr3();
43990+#else
43991 pages->state.host_cr3 = __pa(current->mm->pgd);
43992+#endif
43993+
43994 /*
43995 * Set up the Guest's page tables to see this CPU's pages (and no
43996 * other CPU's pages).
43997@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
43998 * compiled-in switcher code and the high-mapped copy we just made.
43999 */
44000 for (i = 0; i < IDT_ENTRIES; i++)
44001- default_idt_entries[i] += switcher_offset();
44002+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44003
44004 /*
44005 * Set up the Switcher's per-cpu areas.
44006@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44007 * it will be undisturbed when we switch. To change %cs and jump we
44008 * need this structure to feed to Intel's "lcall" instruction.
44009 */
44010- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44011+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44012 lguest_entry.segment = LGUEST_CS;
44013
44014 /*
44015diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44016index 40634b0..4f5855e 100644
44017--- a/drivers/lguest/x86/switcher_32.S
44018+++ b/drivers/lguest/x86/switcher_32.S
44019@@ -87,6 +87,7 @@
44020 #include <asm/page.h>
44021 #include <asm/segment.h>
44022 #include <asm/lguest.h>
44023+#include <asm/processor-flags.h>
44024
44025 // We mark the start of the code to copy
44026 // It's placed in .text tho it's never run here
44027@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44028 // Changes type when we load it: damn Intel!
44029 // For after we switch over our page tables
44030 // That entry will be read-only: we'd crash.
44031+
44032+#ifdef CONFIG_PAX_KERNEXEC
44033+ mov %cr0, %edx
44034+ xor $X86_CR0_WP, %edx
44035+ mov %edx, %cr0
44036+#endif
44037+
44038 movl $(GDT_ENTRY_TSS*8), %edx
44039 ltr %dx
44040
44041@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44042 // Let's clear it again for our return.
44043 // The GDT descriptor of the Host
44044 // Points to the table after two "size" bytes
44045- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44046+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44047 // Clear "used" from type field (byte 5, bit 2)
44048- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44049+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44050+
44051+#ifdef CONFIG_PAX_KERNEXEC
44052+ mov %cr0, %eax
44053+ xor $X86_CR0_WP, %eax
44054+ mov %eax, %cr0
44055+#endif
44056
44057 // Once our page table's switched, the Guest is live!
44058 // The Host fades as we run this final step.
44059@@ -295,13 +309,12 @@ deliver_to_host:
44060 // I consulted gcc, and it gave
44061 // These instructions, which I gladly credit:
44062 leal (%edx,%ebx,8), %eax
44063- movzwl (%eax),%edx
44064- movl 4(%eax), %eax
44065- xorw %ax, %ax
44066- orl %eax, %edx
44067+ movl 4(%eax), %edx
44068+ movw (%eax), %dx
44069 // Now the address of the handler's in %edx
44070 // We call it now: its "iret" drops us home.
44071- jmp *%edx
44072+ ljmp $__KERNEL_CS, $1f
44073+1: jmp *%edx
44074
44075 // Every interrupt can come to us here
44076 // But we must truly tell each apart.
44077diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44078index a08e3ee..df8ade2 100644
44079--- a/drivers/md/bcache/closure.h
44080+++ b/drivers/md/bcache/closure.h
44081@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44082 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44083 struct workqueue_struct *wq)
44084 {
44085- BUG_ON(object_is_on_stack(cl));
44086+ BUG_ON(object_starts_on_stack(cl));
44087 closure_set_ip(cl);
44088 cl->fn = fn;
44089 cl->wq = wq;
44090diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44091index 1695ee5..89f18ab 100644
44092--- a/drivers/md/bitmap.c
44093+++ b/drivers/md/bitmap.c
44094@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44095 chunk_kb ? "KB" : "B");
44096 if (bitmap->storage.file) {
44097 seq_printf(seq, ", file: ");
44098- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44099+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44100 }
44101
44102 seq_printf(seq, "\n");
44103diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44104index 73f791b..8c5d3ac 100644
44105--- a/drivers/md/dm-ioctl.c
44106+++ b/drivers/md/dm-ioctl.c
44107@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44108 cmd == DM_LIST_VERSIONS_CMD)
44109 return 0;
44110
44111- if ((cmd == DM_DEV_CREATE_CMD)) {
44112+ if (cmd == DM_DEV_CREATE_CMD) {
44113 if (!*param->name) {
44114 DMWARN("name not supplied when creating device");
44115 return -EINVAL;
44116diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44117index 089d627..ef7352e 100644
44118--- a/drivers/md/dm-raid1.c
44119+++ b/drivers/md/dm-raid1.c
44120@@ -40,7 +40,7 @@ enum dm_raid1_error {
44121
44122 struct mirror {
44123 struct mirror_set *ms;
44124- atomic_t error_count;
44125+ atomic_unchecked_t error_count;
44126 unsigned long error_type;
44127 struct dm_dev *dev;
44128 sector_t offset;
44129@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44130 struct mirror *m;
44131
44132 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44133- if (!atomic_read(&m->error_count))
44134+ if (!atomic_read_unchecked(&m->error_count))
44135 return m;
44136
44137 return NULL;
44138@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44139 * simple way to tell if a device has encountered
44140 * errors.
44141 */
44142- atomic_inc(&m->error_count);
44143+ atomic_inc_unchecked(&m->error_count);
44144
44145 if (test_and_set_bit(error_type, &m->error_type))
44146 return;
44147@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44148 struct mirror *m = get_default_mirror(ms);
44149
44150 do {
44151- if (likely(!atomic_read(&m->error_count)))
44152+ if (likely(!atomic_read_unchecked(&m->error_count)))
44153 return m;
44154
44155 if (m-- == ms->mirror)
44156@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44157 {
44158 struct mirror *default_mirror = get_default_mirror(m->ms);
44159
44160- return !atomic_read(&default_mirror->error_count);
44161+ return !atomic_read_unchecked(&default_mirror->error_count);
44162 }
44163
44164 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44165@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44166 */
44167 if (likely(region_in_sync(ms, region, 1)))
44168 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44169- else if (m && atomic_read(&m->error_count))
44170+ else if (m && atomic_read_unchecked(&m->error_count))
44171 m = NULL;
44172
44173 if (likely(m))
44174@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44175 }
44176
44177 ms->mirror[mirror].ms = ms;
44178- atomic_set(&(ms->mirror[mirror].error_count), 0);
44179+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44180 ms->mirror[mirror].error_type = 0;
44181 ms->mirror[mirror].offset = offset;
44182
44183@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
44184 */
44185 static char device_status_char(struct mirror *m)
44186 {
44187- if (!atomic_read(&(m->error_count)))
44188+ if (!atomic_read_unchecked(&(m->error_count)))
44189 return 'A';
44190
44191 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
44192diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44193index f478a4c..4b8e5ef 100644
44194--- a/drivers/md/dm-stats.c
44195+++ b/drivers/md/dm-stats.c
44196@@ -382,7 +382,7 @@ do_sync_free:
44197 synchronize_rcu_expedited();
44198 dm_stat_free(&s->rcu_head);
44199 } else {
44200- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44201+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44202 call_rcu(&s->rcu_head, dm_stat_free);
44203 }
44204 return 0;
44205@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44206 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44207 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44208 ));
44209- ACCESS_ONCE(last->last_sector) = end_sector;
44210- ACCESS_ONCE(last->last_rw) = bi_rw;
44211+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44212+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44213 }
44214
44215 rcu_read_lock();
44216diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44217index f8b37d4..5c5cafd 100644
44218--- a/drivers/md/dm-stripe.c
44219+++ b/drivers/md/dm-stripe.c
44220@@ -21,7 +21,7 @@ struct stripe {
44221 struct dm_dev *dev;
44222 sector_t physical_start;
44223
44224- atomic_t error_count;
44225+ atomic_unchecked_t error_count;
44226 };
44227
44228 struct stripe_c {
44229@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44230 kfree(sc);
44231 return r;
44232 }
44233- atomic_set(&(sc->stripe[i].error_count), 0);
44234+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44235 }
44236
44237 ti->private = sc;
44238@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44239 DMEMIT("%d ", sc->stripes);
44240 for (i = 0; i < sc->stripes; i++) {
44241 DMEMIT("%s ", sc->stripe[i].dev->name);
44242- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44243+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44244 'D' : 'A';
44245 }
44246 buffer[i] = '\0';
44247@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44248 */
44249 for (i = 0; i < sc->stripes; i++)
44250 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44251- atomic_inc(&(sc->stripe[i].error_count));
44252- if (atomic_read(&(sc->stripe[i].error_count)) <
44253+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44254+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44255 DM_IO_ERROR_THRESHOLD)
44256 schedule_work(&sc->trigger_event);
44257 }
44258diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44259index 3afae9e..4e1c954 100644
44260--- a/drivers/md/dm-table.c
44261+++ b/drivers/md/dm-table.c
44262@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44263 if (!dev_size)
44264 return 0;
44265
44266- if ((start >= dev_size) || (start + len > dev_size)) {
44267+ if ((start >= dev_size) || (len > dev_size - start)) {
44268 DMWARN("%s: %s too small for target: "
44269 "start=%llu, len=%llu, dev_size=%llu",
44270 dm_device_name(ti->table->md), bdevname(bdev, b),
44271diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44272index 43adbb8..7b34305 100644
44273--- a/drivers/md/dm-thin-metadata.c
44274+++ b/drivers/md/dm-thin-metadata.c
44275@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44276 {
44277 pmd->info.tm = pmd->tm;
44278 pmd->info.levels = 2;
44279- pmd->info.value_type.context = pmd->data_sm;
44280+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44281 pmd->info.value_type.size = sizeof(__le64);
44282 pmd->info.value_type.inc = data_block_inc;
44283 pmd->info.value_type.dec = data_block_dec;
44284@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44285
44286 pmd->bl_info.tm = pmd->tm;
44287 pmd->bl_info.levels = 1;
44288- pmd->bl_info.value_type.context = pmd->data_sm;
44289+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44290 pmd->bl_info.value_type.size = sizeof(__le64);
44291 pmd->bl_info.value_type.inc = data_block_inc;
44292 pmd->bl_info.value_type.dec = data_block_dec;
44293diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44294index 64b10e0..07db8f4 100644
44295--- a/drivers/md/dm.c
44296+++ b/drivers/md/dm.c
44297@@ -185,9 +185,9 @@ struct mapped_device {
44298 /*
44299 * Event handling.
44300 */
44301- atomic_t event_nr;
44302+ atomic_unchecked_t event_nr;
44303 wait_queue_head_t eventq;
44304- atomic_t uevent_seq;
44305+ atomic_unchecked_t uevent_seq;
44306 struct list_head uevent_list;
44307 spinlock_t uevent_lock; /* Protect access to uevent_list */
44308
44309@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
44310 spin_lock_init(&md->deferred_lock);
44311 atomic_set(&md->holders, 1);
44312 atomic_set(&md->open_count, 0);
44313- atomic_set(&md->event_nr, 0);
44314- atomic_set(&md->uevent_seq, 0);
44315+ atomic_set_unchecked(&md->event_nr, 0);
44316+ atomic_set_unchecked(&md->uevent_seq, 0);
44317 INIT_LIST_HEAD(&md->uevent_list);
44318 INIT_LIST_HEAD(&md->table_devices);
44319 spin_lock_init(&md->uevent_lock);
44320@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
44321
44322 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44323
44324- atomic_inc(&md->event_nr);
44325+ atomic_inc_unchecked(&md->event_nr);
44326 wake_up(&md->eventq);
44327 }
44328
44329@@ -3034,18 +3034,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44330
44331 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44332 {
44333- return atomic_add_return(1, &md->uevent_seq);
44334+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44335 }
44336
44337 uint32_t dm_get_event_nr(struct mapped_device *md)
44338 {
44339- return atomic_read(&md->event_nr);
44340+ return atomic_read_unchecked(&md->event_nr);
44341 }
44342
44343 int dm_wait_event(struct mapped_device *md, int event_nr)
44344 {
44345 return wait_event_interruptible(md->eventq,
44346- (event_nr != atomic_read(&md->event_nr)));
44347+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44348 }
44349
44350 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44351diff --git a/drivers/md/md.c b/drivers/md/md.c
44352index 709755f..5bc3fa4 100644
44353--- a/drivers/md/md.c
44354+++ b/drivers/md/md.c
44355@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44356 * start build, activate spare
44357 */
44358 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44359-static atomic_t md_event_count;
44360+static atomic_unchecked_t md_event_count;
44361 void md_new_event(struct mddev *mddev)
44362 {
44363- atomic_inc(&md_event_count);
44364+ atomic_inc_unchecked(&md_event_count);
44365 wake_up(&md_event_waiters);
44366 }
44367 EXPORT_SYMBOL_GPL(md_new_event);
44368@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44369 */
44370 static void md_new_event_inintr(struct mddev *mddev)
44371 {
44372- atomic_inc(&md_event_count);
44373+ atomic_inc_unchecked(&md_event_count);
44374 wake_up(&md_event_waiters);
44375 }
44376
44377@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44378 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44379 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44380 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44381- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44382+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44383
44384 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44385 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44386@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44387 else
44388 sb->resync_offset = cpu_to_le64(0);
44389
44390- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44391+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44392
44393 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44394 sb->size = cpu_to_le64(mddev->dev_sectors);
44395@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44396 static ssize_t
44397 errors_show(struct md_rdev *rdev, char *page)
44398 {
44399- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44400+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44401 }
44402
44403 static ssize_t
44404@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44405 char *e;
44406 unsigned long n = simple_strtoul(buf, &e, 10);
44407 if (*buf && (*e == 0 || *e == '\n')) {
44408- atomic_set(&rdev->corrected_errors, n);
44409+ atomic_set_unchecked(&rdev->corrected_errors, n);
44410 return len;
44411 }
44412 return -EINVAL;
44413@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
44414 rdev->sb_loaded = 0;
44415 rdev->bb_page = NULL;
44416 atomic_set(&rdev->nr_pending, 0);
44417- atomic_set(&rdev->read_errors, 0);
44418- atomic_set(&rdev->corrected_errors, 0);
44419+ atomic_set_unchecked(&rdev->read_errors, 0);
44420+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44421
44422 INIT_LIST_HEAD(&rdev->same_set);
44423 init_waitqueue_head(&rdev->blocked_wait);
44424@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44425
44426 spin_unlock(&pers_lock);
44427 seq_printf(seq, "\n");
44428- seq->poll_event = atomic_read(&md_event_count);
44429+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44430 return 0;
44431 }
44432 if (v == (void*)2) {
44433@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44434 return error;
44435
44436 seq = file->private_data;
44437- seq->poll_event = atomic_read(&md_event_count);
44438+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44439 return error;
44440 }
44441
44442@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44443 /* always allow read */
44444 mask = POLLIN | POLLRDNORM;
44445
44446- if (seq->poll_event != atomic_read(&md_event_count))
44447+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44448 mask |= POLLERR | POLLPRI;
44449 return mask;
44450 }
44451@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44452 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44453 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44454 (int)part_stat_read(&disk->part0, sectors[1]) -
44455- atomic_read(&disk->sync_io);
44456+ atomic_read_unchecked(&disk->sync_io);
44457 /* sync IO will cause sync_io to increase before the disk_stats
44458 * as sync_io is counted when a request starts, and
44459 * disk_stats is counted when it completes.
44460diff --git a/drivers/md/md.h b/drivers/md/md.h
44461index 03cec5b..0a658c1 100644
44462--- a/drivers/md/md.h
44463+++ b/drivers/md/md.h
44464@@ -94,13 +94,13 @@ struct md_rdev {
44465 * only maintained for arrays that
44466 * support hot removal
44467 */
44468- atomic_t read_errors; /* number of consecutive read errors that
44469+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44470 * we have tried to ignore.
44471 */
44472 struct timespec last_read_error; /* monotonic time since our
44473 * last read error
44474 */
44475- atomic_t corrected_errors; /* number of corrected read errors,
44476+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44477 * for reporting to userspace and storing
44478 * in superblock.
44479 */
44480@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44481
44482 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44483 {
44484- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44485+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44486 }
44487
44488 struct md_personality
44489diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44490index e8a9042..35bd145 100644
44491--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44492+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44493@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44494 * Flick into a mode where all blocks get allocated in the new area.
44495 */
44496 smm->begin = old_len;
44497- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44498+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44499
44500 /*
44501 * Extend.
44502@@ -714,7 +714,7 @@ out:
44503 /*
44504 * Switch back to normal behaviour.
44505 */
44506- memcpy(sm, &ops, sizeof(*sm));
44507+ memcpy((void *)sm, &ops, sizeof(*sm));
44508 return r;
44509 }
44510
44511diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44512index 3e6d115..ffecdeb 100644
44513--- a/drivers/md/persistent-data/dm-space-map.h
44514+++ b/drivers/md/persistent-data/dm-space-map.h
44515@@ -71,6 +71,7 @@ struct dm_space_map {
44516 dm_sm_threshold_fn fn,
44517 void *context);
44518 };
44519+typedef struct dm_space_map __no_const dm_space_map_no_const;
44520
44521 /*----------------------------------------------------------------*/
44522
44523diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44524index 2f2f38f..f6a8ebe 100644
44525--- a/drivers/md/raid1.c
44526+++ b/drivers/md/raid1.c
44527@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44528 if (r1_sync_page_io(rdev, sect, s,
44529 bio->bi_io_vec[idx].bv_page,
44530 READ) != 0)
44531- atomic_add(s, &rdev->corrected_errors);
44532+ atomic_add_unchecked(s, &rdev->corrected_errors);
44533 }
44534 sectors -= s;
44535 sect += s;
44536@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44537 !test_bit(Faulty, &rdev->flags)) {
44538 if (r1_sync_page_io(rdev, sect, s,
44539 conf->tmppage, READ)) {
44540- atomic_add(s, &rdev->corrected_errors);
44541+ atomic_add_unchecked(s, &rdev->corrected_errors);
44542 printk(KERN_INFO
44543 "md/raid1:%s: read error corrected "
44544 "(%d sectors at %llu on %s)\n",
44545diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44546index 32e282f..5cec803 100644
44547--- a/drivers/md/raid10.c
44548+++ b/drivers/md/raid10.c
44549@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
44550 /* The write handler will notice the lack of
44551 * R10BIO_Uptodate and record any errors etc
44552 */
44553- atomic_add(r10_bio->sectors,
44554+ atomic_add_unchecked(r10_bio->sectors,
44555 &conf->mirrors[d].rdev->corrected_errors);
44556
44557 /* for reconstruct, we always reschedule after a read.
44558@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44559 {
44560 struct timespec cur_time_mon;
44561 unsigned long hours_since_last;
44562- unsigned int read_errors = atomic_read(&rdev->read_errors);
44563+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44564
44565 ktime_get_ts(&cur_time_mon);
44566
44567@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44568 * overflowing the shift of read_errors by hours_since_last.
44569 */
44570 if (hours_since_last >= 8 * sizeof(read_errors))
44571- atomic_set(&rdev->read_errors, 0);
44572+ atomic_set_unchecked(&rdev->read_errors, 0);
44573 else
44574- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44575+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44576 }
44577
44578 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44579@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44580 return;
44581
44582 check_decay_read_errors(mddev, rdev);
44583- atomic_inc(&rdev->read_errors);
44584- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44585+ atomic_inc_unchecked(&rdev->read_errors);
44586+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44587 char b[BDEVNAME_SIZE];
44588 bdevname(rdev->bdev, b);
44589
44590@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44591 "md/raid10:%s: %s: Raid device exceeded "
44592 "read_error threshold [cur %d:max %d]\n",
44593 mdname(mddev), b,
44594- atomic_read(&rdev->read_errors), max_read_errors);
44595+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44596 printk(KERN_NOTICE
44597 "md/raid10:%s: %s: Failing raid device\n",
44598 mdname(mddev), b);
44599@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44600 sect +
44601 choose_data_offset(r10_bio, rdev)),
44602 bdevname(rdev->bdev, b));
44603- atomic_add(s, &rdev->corrected_errors);
44604+ atomic_add_unchecked(s, &rdev->corrected_errors);
44605 }
44606
44607 rdev_dec_pending(rdev, mddev);
44608diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44609index 8577cc7..e80e05d 100644
44610--- a/drivers/md/raid5.c
44611+++ b/drivers/md/raid5.c
44612@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44613 return 1;
44614 }
44615
44616+#ifdef CONFIG_GRKERNSEC_HIDESYM
44617+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44618+#endif
44619+
44620 static int grow_stripes(struct r5conf *conf, int num)
44621 {
44622 struct kmem_cache *sc;
44623@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44624 "raid%d-%s", conf->level, mdname(conf->mddev));
44625 else
44626 sprintf(conf->cache_name[0],
44627+#ifdef CONFIG_GRKERNSEC_HIDESYM
44628+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44629+#else
44630 "raid%d-%p", conf->level, conf->mddev);
44631+#endif
44632 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44633
44634 conf->active_name = 0;
44635@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44636 mdname(conf->mddev), STRIPE_SECTORS,
44637 (unsigned long long)s,
44638 bdevname(rdev->bdev, b));
44639- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44640+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44641 clear_bit(R5_ReadError, &sh->dev[i].flags);
44642 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44643 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44644 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44645
44646- if (atomic_read(&rdev->read_errors))
44647- atomic_set(&rdev->read_errors, 0);
44648+ if (atomic_read_unchecked(&rdev->read_errors))
44649+ atomic_set_unchecked(&rdev->read_errors, 0);
44650 } else {
44651 const char *bdn = bdevname(rdev->bdev, b);
44652 int retry = 0;
44653 int set_bad = 0;
44654
44655 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44656- atomic_inc(&rdev->read_errors);
44657+ atomic_inc_unchecked(&rdev->read_errors);
44658 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44659 printk_ratelimited(
44660 KERN_WARNING
44661@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44662 mdname(conf->mddev),
44663 (unsigned long long)s,
44664 bdn);
44665- } else if (atomic_read(&rdev->read_errors)
44666+ } else if (atomic_read_unchecked(&rdev->read_errors)
44667 > conf->max_nr_stripes)
44668 printk(KERN_WARNING
44669 "md/raid:%s: Too many read errors, failing device %s.\n",
44670diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44671index 983db75..ef9248c 100644
44672--- a/drivers/media/dvb-core/dvbdev.c
44673+++ b/drivers/media/dvb-core/dvbdev.c
44674@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44675 const struct dvb_device *template, void *priv, int type)
44676 {
44677 struct dvb_device *dvbdev;
44678- struct file_operations *dvbdevfops;
44679+ file_operations_no_const *dvbdevfops;
44680 struct device *clsdev;
44681 int minor;
44682 int id;
44683diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44684index 6ad22b6..6e90e2a 100644
44685--- a/drivers/media/dvb-frontends/af9033.h
44686+++ b/drivers/media/dvb-frontends/af9033.h
44687@@ -96,6 +96,6 @@ struct af9033_ops {
44688 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44689 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44690 int onoff);
44691-};
44692+} __no_const;
44693
44694 #endif /* AF9033_H */
44695diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44696index 9b6c3bb..baeb5c7 100644
44697--- a/drivers/media/dvb-frontends/dib3000.h
44698+++ b/drivers/media/dvb-frontends/dib3000.h
44699@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44700 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44701 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44702 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44703-};
44704+} __no_const;
44705
44706 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44707 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44708diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44709index 1fea0e9..321ce8f 100644
44710--- a/drivers/media/dvb-frontends/dib7000p.h
44711+++ b/drivers/media/dvb-frontends/dib7000p.h
44712@@ -64,7 +64,7 @@ struct dib7000p_ops {
44713 int (*get_adc_power)(struct dvb_frontend *fe);
44714 int (*slave_reset)(struct dvb_frontend *fe);
44715 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44716-};
44717+} __no_const;
44718
44719 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
44720 void *dib7000p_attach(struct dib7000p_ops *ops);
44721diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
44722index 84cc103..5780c54 100644
44723--- a/drivers/media/dvb-frontends/dib8000.h
44724+++ b/drivers/media/dvb-frontends/dib8000.h
44725@@ -61,7 +61,7 @@ struct dib8000_ops {
44726 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
44727 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
44728 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
44729-};
44730+} __no_const;
44731
44732 #if IS_ENABLED(CONFIG_DVB_DIB8000)
44733 void *dib8000_attach(struct dib8000_ops *ops);
44734diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44735index 860c98fc..497fa25 100644
44736--- a/drivers/media/pci/cx88/cx88-video.c
44737+++ b/drivers/media/pci/cx88/cx88-video.c
44738@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44739
44740 /* ------------------------------------------------------------------ */
44741
44742-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44743-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44744-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44745+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44746+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44747+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44748
44749 module_param_array(video_nr, int, NULL, 0444);
44750 module_param_array(vbi_nr, int, NULL, 0444);
44751diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44752index 802642d..5534900 100644
44753--- a/drivers/media/pci/ivtv/ivtv-driver.c
44754+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44755@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44756 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44757
44758 /* ivtv instance counter */
44759-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44760+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44761
44762 /* Parameter declarations */
44763 static int cardtype[IVTV_MAX_CARDS];
44764diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
44765index 8cbe6b4..ea3601c 100644
44766--- a/drivers/media/pci/solo6x10/solo6x10-core.c
44767+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
44768@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
44769
44770 static int solo_sysfs_init(struct solo_dev *solo_dev)
44771 {
44772- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
44773+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
44774 struct device *dev = &solo_dev->dev;
44775 const char *driver;
44776 int i;
44777diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
44778index c7141f2..5301fec 100644
44779--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
44780+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
44781@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
44782
44783 int solo_g723_init(struct solo_dev *solo_dev)
44784 {
44785- static struct snd_device_ops ops = { NULL };
44786+ static struct snd_device_ops ops = { };
44787 struct snd_card *card;
44788 struct snd_kcontrol_new kctl;
44789 char name[32];
44790diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44791index 8c84846..27b4f83 100644
44792--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
44793+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44794@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
44795
44796 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
44797 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
44798- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
44799+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
44800 if (p2m_id < 0)
44801 p2m_id = -p2m_id;
44802 }
44803diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
44804index bd8edfa..e82ed85 100644
44805--- a/drivers/media/pci/solo6x10/solo6x10.h
44806+++ b/drivers/media/pci/solo6x10/solo6x10.h
44807@@ -220,7 +220,7 @@ struct solo_dev {
44808
44809 /* P2M DMA Engine */
44810 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
44811- atomic_t p2m_count;
44812+ atomic_unchecked_t p2m_count;
44813 int p2m_jiffies;
44814 unsigned int p2m_timeouts;
44815
44816diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
44817index c135165..dc69499 100644
44818--- a/drivers/media/pci/tw68/tw68-core.c
44819+++ b/drivers/media/pci/tw68/tw68-core.c
44820@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
44821 module_param_array(card, int, NULL, 0444);
44822 MODULE_PARM_DESC(card, "card type");
44823
44824-static atomic_t tw68_instance = ATOMIC_INIT(0);
44825+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
44826
44827 /* ------------------------------------------------------------------ */
44828
44829diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44830index ba2d8f9..1566684 100644
44831--- a/drivers/media/platform/omap/omap_vout.c
44832+++ b/drivers/media/platform/omap/omap_vout.c
44833@@ -63,7 +63,6 @@ enum omap_vout_channels {
44834 OMAP_VIDEO2,
44835 };
44836
44837-static struct videobuf_queue_ops video_vbq_ops;
44838 /* Variables configurable through module params*/
44839 static u32 video1_numbuffers = 3;
44840 static u32 video2_numbuffers = 3;
44841@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
44842 {
44843 struct videobuf_queue *q;
44844 struct omap_vout_device *vout = NULL;
44845+ static struct videobuf_queue_ops video_vbq_ops = {
44846+ .buf_setup = omap_vout_buffer_setup,
44847+ .buf_prepare = omap_vout_buffer_prepare,
44848+ .buf_release = omap_vout_buffer_release,
44849+ .buf_queue = omap_vout_buffer_queue,
44850+ };
44851
44852 vout = video_drvdata(file);
44853 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44854@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
44855 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44856
44857 q = &vout->vbq;
44858- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44859- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44860- video_vbq_ops.buf_release = omap_vout_buffer_release;
44861- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44862 spin_lock_init(&vout->vbq_lock);
44863
44864 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
44865diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44866index fb2acc5..a2fcbdc4 100644
44867--- a/drivers/media/platform/s5p-tv/mixer.h
44868+++ b/drivers/media/platform/s5p-tv/mixer.h
44869@@ -156,7 +156,7 @@ struct mxr_layer {
44870 /** layer index (unique identifier) */
44871 int idx;
44872 /** callbacks for layer methods */
44873- struct mxr_layer_ops ops;
44874+ struct mxr_layer_ops *ops;
44875 /** format array */
44876 const struct mxr_format **fmt_array;
44877 /** size of format array */
44878diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44879index 74344c7..a39e70e 100644
44880--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44881+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44882@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44883 {
44884 struct mxr_layer *layer;
44885 int ret;
44886- struct mxr_layer_ops ops = {
44887+ static struct mxr_layer_ops ops = {
44888 .release = mxr_graph_layer_release,
44889 .buffer_set = mxr_graph_buffer_set,
44890 .stream_set = mxr_graph_stream_set,
44891diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44892index b713403..53cb5ad 100644
44893--- a/drivers/media/platform/s5p-tv/mixer_reg.c
44894+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44895@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44896 layer->update_buf = next;
44897 }
44898
44899- layer->ops.buffer_set(layer, layer->update_buf);
44900+ layer->ops->buffer_set(layer, layer->update_buf);
44901
44902 if (done && done != layer->shadow_buf)
44903 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
44904diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
44905index b4d2696..91df48e 100644
44906--- a/drivers/media/platform/s5p-tv/mixer_video.c
44907+++ b/drivers/media/platform/s5p-tv/mixer_video.c
44908@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
44909 layer->geo.src.height = layer->geo.src.full_height;
44910
44911 mxr_geometry_dump(mdev, &layer->geo);
44912- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44913+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44914 mxr_geometry_dump(mdev, &layer->geo);
44915 }
44916
44917@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
44918 layer->geo.dst.full_width = mbus_fmt.width;
44919 layer->geo.dst.full_height = mbus_fmt.height;
44920 layer->geo.dst.field = mbus_fmt.field;
44921- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44922+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44923
44924 mxr_geometry_dump(mdev, &layer->geo);
44925 }
44926@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
44927 /* set source size to highest accepted value */
44928 geo->src.full_width = max(geo->dst.full_width, pix->width);
44929 geo->src.full_height = max(geo->dst.full_height, pix->height);
44930- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44931+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44932 mxr_geometry_dump(mdev, &layer->geo);
44933 /* set cropping to total visible screen */
44934 geo->src.width = pix->width;
44935@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
44936 geo->src.x_offset = 0;
44937 geo->src.y_offset = 0;
44938 /* assure consistency of geometry */
44939- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44940+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44941 mxr_geometry_dump(mdev, &layer->geo);
44942 /* set full size to lowest possible value */
44943 geo->src.full_width = 0;
44944 geo->src.full_height = 0;
44945- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44946+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44947 mxr_geometry_dump(mdev, &layer->geo);
44948
44949 /* returning results */
44950@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
44951 target->width = s->r.width;
44952 target->height = s->r.height;
44953
44954- layer->ops.fix_geometry(layer, stage, s->flags);
44955+ layer->ops->fix_geometry(layer, stage, s->flags);
44956
44957 /* retrieve update selection rectangle */
44958 res.left = target->x_offset;
44959@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
44960 mxr_output_get(mdev);
44961
44962 mxr_layer_update_output(layer);
44963- layer->ops.format_set(layer);
44964+ layer->ops->format_set(layer);
44965 /* enabling layer in hardware */
44966 spin_lock_irqsave(&layer->enq_slock, flags);
44967 layer->state = MXR_LAYER_STREAMING;
44968 spin_unlock_irqrestore(&layer->enq_slock, flags);
44969
44970- layer->ops.stream_set(layer, MXR_ENABLE);
44971+ layer->ops->stream_set(layer, MXR_ENABLE);
44972 mxr_streamer_get(mdev);
44973
44974 return 0;
44975@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
44976 spin_unlock_irqrestore(&layer->enq_slock, flags);
44977
44978 /* disabling layer in hardware */
44979- layer->ops.stream_set(layer, MXR_DISABLE);
44980+ layer->ops->stream_set(layer, MXR_DISABLE);
44981 /* remove one streamer */
44982 mxr_streamer_put(mdev);
44983 /* allow changes in output configuration */
44984@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
44985
44986 void mxr_layer_release(struct mxr_layer *layer)
44987 {
44988- if (layer->ops.release)
44989- layer->ops.release(layer);
44990+ if (layer->ops->release)
44991+ layer->ops->release(layer);
44992 }
44993
44994 void mxr_base_layer_release(struct mxr_layer *layer)
44995@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
44996
44997 layer->mdev = mdev;
44998 layer->idx = idx;
44999- layer->ops = *ops;
45000+ layer->ops = ops;
45001
45002 spin_lock_init(&layer->enq_slock);
45003 INIT_LIST_HEAD(&layer->enq_list);
45004diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45005index c9388c4..ce71ece 100644
45006--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45007+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45008@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45009 {
45010 struct mxr_layer *layer;
45011 int ret;
45012- struct mxr_layer_ops ops = {
45013+ static struct mxr_layer_ops ops = {
45014 .release = mxr_vp_layer_release,
45015 .buffer_set = mxr_vp_buffer_set,
45016 .stream_set = mxr_vp_stream_set,
45017diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45018index 82affae..42833ec 100644
45019--- a/drivers/media/radio/radio-cadet.c
45020+++ b/drivers/media/radio/radio-cadet.c
45021@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45022 unsigned char readbuf[RDS_BUFFER];
45023 int i = 0;
45024
45025+ if (count > RDS_BUFFER)
45026+ return -EFAULT;
45027 mutex_lock(&dev->lock);
45028 if (dev->rdsstat == 0)
45029 cadet_start_rds(dev);
45030@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45031 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45032 mutex_unlock(&dev->lock);
45033
45034- if (i && copy_to_user(data, readbuf, i))
45035- return -EFAULT;
45036+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45037+ i = -EFAULT;
45038+
45039 return i;
45040 }
45041
45042diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45043index 5236035..c622c74 100644
45044--- a/drivers/media/radio/radio-maxiradio.c
45045+++ b/drivers/media/radio/radio-maxiradio.c
45046@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45047 /* TEA5757 pin mappings */
45048 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45049
45050-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45051+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45052
45053 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45054 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45055diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45056index 050b3bb..79f62b9 100644
45057--- a/drivers/media/radio/radio-shark.c
45058+++ b/drivers/media/radio/radio-shark.c
45059@@ -79,7 +79,7 @@ struct shark_device {
45060 u32 last_val;
45061 };
45062
45063-static atomic_t shark_instance = ATOMIC_INIT(0);
45064+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45065
45066 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45067 {
45068diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45069index 8654e0d..0608a64 100644
45070--- a/drivers/media/radio/radio-shark2.c
45071+++ b/drivers/media/radio/radio-shark2.c
45072@@ -74,7 +74,7 @@ struct shark_device {
45073 u8 *transfer_buffer;
45074 };
45075
45076-static atomic_t shark_instance = ATOMIC_INIT(0);
45077+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45078
45079 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45080 {
45081diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45082index dccf586..d5db411 100644
45083--- a/drivers/media/radio/radio-si476x.c
45084+++ b/drivers/media/radio/radio-si476x.c
45085@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45086 struct si476x_radio *radio;
45087 struct v4l2_ctrl *ctrl;
45088
45089- static atomic_t instance = ATOMIC_INIT(0);
45090+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45091
45092 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45093 if (!radio)
45094diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45095index 704397f..4d05977 100644
45096--- a/drivers/media/radio/wl128x/fmdrv_common.c
45097+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45098@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45099 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45100
45101 /* Radio Nr */
45102-static u32 radio_nr = -1;
45103+static int radio_nr = -1;
45104 module_param(radio_nr, int, 0444);
45105 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45106
45107diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45108index 9fd1527..8927230 100644
45109--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45110+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45111@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45112
45113 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45114 {
45115- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45116- char result[64];
45117- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45118- sizeof(result), 0);
45119+ char *buf;
45120+ char *result;
45121+ int retval;
45122+
45123+ buf = kmalloc(2, GFP_KERNEL);
45124+ if (buf == NULL)
45125+ return -ENOMEM;
45126+ result = kmalloc(64, GFP_KERNEL);
45127+ if (result == NULL) {
45128+ kfree(buf);
45129+ return -ENOMEM;
45130+ }
45131+
45132+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45133+ buf[1] = enable ? 1 : 0;
45134+
45135+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45136+
45137+ kfree(buf);
45138+ kfree(result);
45139+ return retval;
45140 }
45141
45142 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45143 {
45144- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45145- char state[3];
45146- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45147+ char *buf;
45148+ char *state;
45149+ int retval;
45150+
45151+ buf = kmalloc(2, GFP_KERNEL);
45152+ if (buf == NULL)
45153+ return -ENOMEM;
45154+ state = kmalloc(3, GFP_KERNEL);
45155+ if (state == NULL) {
45156+ kfree(buf);
45157+ return -ENOMEM;
45158+ }
45159+
45160+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45161+ buf[1] = enable ? 1 : 0;
45162+
45163+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45164+
45165+ kfree(buf);
45166+ kfree(state);
45167+ return retval;
45168 }
45169
45170 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45171 {
45172- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45173- char state[3];
45174+ char *query;
45175+ char *state;
45176 int ret;
45177+ query = kmalloc(1, GFP_KERNEL);
45178+ if (query == NULL)
45179+ return -ENOMEM;
45180+ state = kmalloc(3, GFP_KERNEL);
45181+ if (state == NULL) {
45182+ kfree(query);
45183+ return -ENOMEM;
45184+ }
45185+
45186+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45187
45188 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45189
45190- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45191- sizeof(state), 0);
45192+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45193 if (ret < 0) {
45194 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45195 "state info\n");
45196@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45197
45198 /* Copy this pointer as we are gonna need it in the release phase */
45199 cinergyt2_usb_device = adap->dev;
45200-
45201+ kfree(query);
45202+ kfree(state);
45203 return 0;
45204 }
45205
45206@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45207 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45208 {
45209 struct cinergyt2_state *st = d->priv;
45210- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45211+ u8 *key, *cmd;
45212 int i;
45213
45214+ cmd = kmalloc(1, GFP_KERNEL);
45215+ if (cmd == NULL)
45216+ return -EINVAL;
45217+ key = kzalloc(5, GFP_KERNEL);
45218+ if (key == NULL) {
45219+ kfree(cmd);
45220+ return -EINVAL;
45221+ }
45222+
45223+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45224+
45225 *state = REMOTE_NO_KEY_PRESSED;
45226
45227- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45228+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45229 if (key[4] == 0xff) {
45230 /* key repeat */
45231 st->rc_counter++;
45232@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45233 *event = d->last_event;
45234 deb_rc("repeat key, event %x\n",
45235 *event);
45236- return 0;
45237+ goto out;
45238 }
45239 }
45240 deb_rc("repeated key (non repeatable)\n");
45241 }
45242- return 0;
45243+ goto out;
45244 }
45245
45246 /* hack to pass checksum on the custom field */
45247@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45248
45249 deb_rc("key: %*ph\n", 5, key);
45250 }
45251+out:
45252+ kfree(cmd);
45253+ kfree(key);
45254 return 0;
45255 }
45256
45257diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45258index c890fe4..f9b2ae6 100644
45259--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45260+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45261@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45262 fe_status_t *status)
45263 {
45264 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45265- struct dvbt_get_status_msg result;
45266- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45267+ struct dvbt_get_status_msg *result;
45268+ u8 *cmd;
45269 int ret;
45270
45271- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45272- sizeof(result), 0);
45273+ cmd = kmalloc(1, GFP_KERNEL);
45274+ if (cmd == NULL)
45275+ return -ENOMEM;
45276+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45277+ if (result == NULL) {
45278+ kfree(cmd);
45279+ return -ENOMEM;
45280+ }
45281+
45282+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45283+
45284+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45285+ sizeof(*result), 0);
45286 if (ret < 0)
45287- return ret;
45288+ goto out;
45289
45290 *status = 0;
45291
45292- if (0xffff - le16_to_cpu(result.gain) > 30)
45293+ if (0xffff - le16_to_cpu(result->gain) > 30)
45294 *status |= FE_HAS_SIGNAL;
45295- if (result.lock_bits & (1 << 6))
45296+ if (result->lock_bits & (1 << 6))
45297 *status |= FE_HAS_LOCK;
45298- if (result.lock_bits & (1 << 5))
45299+ if (result->lock_bits & (1 << 5))
45300 *status |= FE_HAS_SYNC;
45301- if (result.lock_bits & (1 << 4))
45302+ if (result->lock_bits & (1 << 4))
45303 *status |= FE_HAS_CARRIER;
45304- if (result.lock_bits & (1 << 1))
45305+ if (result->lock_bits & (1 << 1))
45306 *status |= FE_HAS_VITERBI;
45307
45308 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45309 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45310 *status &= ~FE_HAS_LOCK;
45311
45312- return 0;
45313+out:
45314+ kfree(cmd);
45315+ kfree(result);
45316+ return ret;
45317 }
45318
45319 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45320 {
45321 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45322- struct dvbt_get_status_msg status;
45323- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45324+ struct dvbt_get_status_msg *status;
45325+ char *cmd;
45326 int ret;
45327
45328- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45329- sizeof(status), 0);
45330+ cmd = kmalloc(1, GFP_KERNEL);
45331+ if (cmd == NULL)
45332+ return -ENOMEM;
45333+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45334+ if (status == NULL) {
45335+ kfree(cmd);
45336+ return -ENOMEM;
45337+ }
45338+
45339+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45340+
45341+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45342+ sizeof(*status), 0);
45343 if (ret < 0)
45344- return ret;
45345+ goto out;
45346
45347- *ber = le32_to_cpu(status.viterbi_error_rate);
45348+ *ber = le32_to_cpu(status->viterbi_error_rate);
45349+out:
45350+ kfree(cmd);
45351+ kfree(status);
45352 return 0;
45353 }
45354
45355 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45356 {
45357 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45358- struct dvbt_get_status_msg status;
45359- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45360+ struct dvbt_get_status_msg *status;
45361+ u8 *cmd;
45362 int ret;
45363
45364- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45365- sizeof(status), 0);
45366+ cmd = kmalloc(1, GFP_KERNEL);
45367+ if (cmd == NULL)
45368+ return -ENOMEM;
45369+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45370+ if (status == NULL) {
45371+ kfree(cmd);
45372+ return -ENOMEM;
45373+ }
45374+
45375+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45376+
45377+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45378+ sizeof(*status), 0);
45379 if (ret < 0) {
45380 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45381 ret);
45382- return ret;
45383+ goto out;
45384 }
45385- *unc = le32_to_cpu(status.uncorrected_block_count);
45386- return 0;
45387+ *unc = le32_to_cpu(status->uncorrected_block_count);
45388+
45389+out:
45390+ kfree(cmd);
45391+ kfree(status);
45392+ return ret;
45393 }
45394
45395 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45396 u16 *strength)
45397 {
45398 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45399- struct dvbt_get_status_msg status;
45400- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45401+ struct dvbt_get_status_msg *status;
45402+ char *cmd;
45403 int ret;
45404
45405- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45406- sizeof(status), 0);
45407+ cmd = kmalloc(1, GFP_KERNEL);
45408+ if (cmd == NULL)
45409+ return -ENOMEM;
45410+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45411+ if (status == NULL) {
45412+ kfree(cmd);
45413+ return -ENOMEM;
45414+ }
45415+
45416+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45417+
45418+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45419+ sizeof(*status), 0);
45420 if (ret < 0) {
45421 err("cinergyt2_fe_read_signal_strength() Failed!"
45422 " (Error=%d)\n", ret);
45423- return ret;
45424+ goto out;
45425 }
45426- *strength = (0xffff - le16_to_cpu(status.gain));
45427+ *strength = (0xffff - le16_to_cpu(status->gain));
45428+
45429+out:
45430+ kfree(cmd);
45431+ kfree(status);
45432 return 0;
45433 }
45434
45435 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45436 {
45437 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45438- struct dvbt_get_status_msg status;
45439- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45440+ struct dvbt_get_status_msg *status;
45441+ char *cmd;
45442 int ret;
45443
45444- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45445- sizeof(status), 0);
45446+ cmd = kmalloc(1, GFP_KERNEL);
45447+ if (cmd == NULL)
45448+ return -ENOMEM;
45449+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45450+ if (status == NULL) {
45451+ kfree(cmd);
45452+ return -ENOMEM;
45453+ }
45454+
45455+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45456+
45457+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45458+ sizeof(*status), 0);
45459 if (ret < 0) {
45460 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45461- return ret;
45462+ goto out;
45463 }
45464- *snr = (status.snr << 8) | status.snr;
45465- return 0;
45466+ *snr = (status->snr << 8) | status->snr;
45467+
45468+out:
45469+ kfree(cmd);
45470+ kfree(status);
45471+ return ret;
45472 }
45473
45474 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45475@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45476 {
45477 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45478 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45479- struct dvbt_set_parameters_msg param;
45480- char result[2];
45481+ struct dvbt_set_parameters_msg *param;
45482+ char *result;
45483 int err;
45484
45485- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45486- param.tps = cpu_to_le16(compute_tps(fep));
45487- param.freq = cpu_to_le32(fep->frequency / 1000);
45488- param.flags = 0;
45489+ result = kmalloc(2, GFP_KERNEL);
45490+ if (result == NULL)
45491+ return -ENOMEM;
45492+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45493+ if (param == NULL) {
45494+ kfree(result);
45495+ return -ENOMEM;
45496+ }
45497+
45498+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45499+ param->tps = cpu_to_le16(compute_tps(fep));
45500+ param->freq = cpu_to_le32(fep->frequency / 1000);
45501+ param->flags = 0;
45502
45503 switch (fep->bandwidth_hz) {
45504 default:
45505 case 8000000:
45506- param.bandwidth = 8;
45507+ param->bandwidth = 8;
45508 break;
45509 case 7000000:
45510- param.bandwidth = 7;
45511+ param->bandwidth = 7;
45512 break;
45513 case 6000000:
45514- param.bandwidth = 6;
45515+ param->bandwidth = 6;
45516 break;
45517 }
45518
45519 err = dvb_usb_generic_rw(state->d,
45520- (char *)&param, sizeof(param),
45521- result, sizeof(result), 0);
45522+ (char *)param, sizeof(*param),
45523+ result, 2, 0);
45524 if (err < 0)
45525 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45526
45527- return (err < 0) ? err : 0;
45528+ kfree(result);
45529+ kfree(param);
45530+ return err;
45531 }
45532
45533 static void cinergyt2_fe_release(struct dvb_frontend *fe)
45534diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45535index 733a7ff..f8b52e3 100644
45536--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45537+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45538@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45539
45540 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45541 {
45542- struct hexline hx;
45543- u8 reset;
45544+ struct hexline *hx;
45545+ u8 *reset;
45546 int ret,pos=0;
45547
45548+ reset = kmalloc(1, GFP_KERNEL);
45549+ if (reset == NULL)
45550+ return -ENOMEM;
45551+
45552+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45553+ if (hx == NULL) {
45554+ kfree(reset);
45555+ return -ENOMEM;
45556+ }
45557+
45558 /* stop the CPU */
45559- reset = 1;
45560- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45561+ reset[0] = 1;
45562+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45563 err("could not stop the USB controller CPU.");
45564
45565- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45566- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45567- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45568+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45569+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45570+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45571
45572- if (ret != hx.len) {
45573+ if (ret != hx->len) {
45574 err("error while transferring firmware "
45575 "(transferred size: %d, block size: %d)",
45576- ret,hx.len);
45577+ ret,hx->len);
45578 ret = -EINVAL;
45579 break;
45580 }
45581 }
45582 if (ret < 0) {
45583 err("firmware download failed at %d with %d",pos,ret);
45584+ kfree(reset);
45585+ kfree(hx);
45586 return ret;
45587 }
45588
45589 if (ret == 0) {
45590 /* restart the CPU */
45591- reset = 0;
45592- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45593+ reset[0] = 0;
45594+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45595 err("could not restart the USB controller CPU.");
45596 ret = -EINVAL;
45597 }
45598 } else
45599 ret = -EIO;
45600
45601+ kfree(reset);
45602+ kfree(hx);
45603+
45604 return ret;
45605 }
45606 EXPORT_SYMBOL(usb_cypress_load_firmware);
45607diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45608index 1a3df10..57997a5 100644
45609--- a/drivers/media/usb/dvb-usb/dw2102.c
45610+++ b/drivers/media/usb/dvb-usb/dw2102.c
45611@@ -118,7 +118,7 @@ struct su3000_state {
45612
45613 struct s6x0_state {
45614 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45615-};
45616+} __no_const;
45617
45618 /* debug */
45619 static int dvb_usb_dw2102_debug;
45620diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45621index 5801ae7..83f71fa 100644
45622--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45623+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45624@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45625 static int technisat_usb2_i2c_access(struct usb_device *udev,
45626 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45627 {
45628- u8 b[64];
45629- int ret, actual_length;
45630+ u8 *b = kmalloc(64, GFP_KERNEL);
45631+ int ret, actual_length, error = 0;
45632+
45633+ if (b == NULL)
45634+ return -ENOMEM;
45635
45636 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45637 debug_dump(tx, txlen, deb_i2c);
45638@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45639
45640 if (ret < 0) {
45641 err("i2c-error: out failed %02x = %d", device_addr, ret);
45642- return -ENODEV;
45643+ error = -ENODEV;
45644+ goto out;
45645 }
45646
45647 ret = usb_bulk_msg(udev,
45648@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45649 b, 64, &actual_length, 1000);
45650 if (ret < 0) {
45651 err("i2c-error: in failed %02x = %d", device_addr, ret);
45652- return -ENODEV;
45653+ error = -ENODEV;
45654+ goto out;
45655 }
45656
45657 if (b[0] != I2C_STATUS_OK) {
45658@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45659 /* handle tuner-i2c-nak */
45660 if (!(b[0] == I2C_STATUS_NAK &&
45661 device_addr == 0x60
45662- /* && device_is_technisat_usb2 */))
45663- return -ENODEV;
45664+ /* && device_is_technisat_usb2 */)) {
45665+ error = -ENODEV;
45666+ goto out;
45667+ }
45668 }
45669
45670 deb_i2c("status: %d, ", b[0]);
45671@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45672
45673 deb_i2c("\n");
45674
45675- return 0;
45676+out:
45677+ kfree(b);
45678+ return error;
45679 }
45680
45681 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45682@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45683 {
45684 int ret;
45685
45686- u8 led[8] = {
45687- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45688- 0
45689- };
45690+ u8 *led = kzalloc(8, GFP_KERNEL);
45691+
45692+ if (led == NULL)
45693+ return -ENOMEM;
45694
45695 if (disable_led_control && state != TECH_LED_OFF)
45696 return 0;
45697
45698+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45699+
45700 switch (state) {
45701 case TECH_LED_ON:
45702 led[1] = 0x82;
45703@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45704 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45705 USB_TYPE_VENDOR | USB_DIR_OUT,
45706 0, 0,
45707- led, sizeof(led), 500);
45708+ led, 8, 500);
45709
45710 mutex_unlock(&d->i2c_mutex);
45711+
45712+ kfree(led);
45713+
45714 return ret;
45715 }
45716
45717 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
45718 {
45719 int ret;
45720- u8 b = 0;
45721+ u8 *b = kzalloc(1, GFP_KERNEL);
45722+
45723+ if (b == NULL)
45724+ return -ENOMEM;
45725
45726 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
45727 return -EAGAIN;
45728@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
45729 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
45730 USB_TYPE_VENDOR | USB_DIR_OUT,
45731 (red << 8) | green, 0,
45732- &b, 1, 500);
45733+ b, 1, 500);
45734
45735 mutex_unlock(&d->i2c_mutex);
45736
45737+ kfree(b);
45738+
45739 return ret;
45740 }
45741
45742@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45743 struct dvb_usb_device_description **desc, int *cold)
45744 {
45745 int ret;
45746- u8 version[3];
45747+ u8 *version = kmalloc(3, GFP_KERNEL);
45748
45749 /* first select the interface */
45750 if (usb_set_interface(udev, 0, 1) != 0)
45751@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45752
45753 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
45754
45755+ if (version == NULL)
45756+ return 0;
45757+
45758 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
45759 GET_VERSION_INFO_VENDOR_REQUEST,
45760 USB_TYPE_VENDOR | USB_DIR_IN,
45761 0, 0,
45762- version, sizeof(version), 500);
45763+ version, 3, 500);
45764
45765 if (ret < 0)
45766 *cold = 1;
45767@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45768 *cold = 0;
45769 }
45770
45771+ kfree(version);
45772+
45773 return 0;
45774 }
45775
45776@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
45777
45778 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45779 {
45780- u8 buf[62], *b;
45781+ u8 *buf, *b;
45782 int ret;
45783 struct ir_raw_event ev;
45784
45785+ buf = kmalloc(62, GFP_KERNEL);
45786+
45787+ if (buf == NULL)
45788+ return -ENOMEM;
45789+
45790 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
45791 buf[1] = 0x08;
45792 buf[2] = 0x8f;
45793@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45794 GET_IR_DATA_VENDOR_REQUEST,
45795 USB_TYPE_VENDOR | USB_DIR_IN,
45796 0x8080, 0,
45797- buf, sizeof(buf), 500);
45798+ buf, 62, 500);
45799
45800 unlock:
45801 mutex_unlock(&d->i2c_mutex);
45802
45803- if (ret < 0)
45804+ if (ret < 0) {
45805+ kfree(buf);
45806 return ret;
45807+ }
45808
45809- if (ret == 1)
45810+ if (ret == 1) {
45811+ kfree(buf);
45812 return 0; /* no key pressed */
45813+ }
45814
45815 /* decoding */
45816 b = buf+1;
45817@@ -656,6 +689,8 @@ unlock:
45818
45819 ir_raw_event_handle(d->rc_dev);
45820
45821+ kfree(buf);
45822+
45823 return 1;
45824 }
45825
45826diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45827index af63543..0436f20 100644
45828--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45829+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45830@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45831 * by passing a very big num_planes value */
45832 uplane = compat_alloc_user_space(num_planes *
45833 sizeof(struct v4l2_plane));
45834- kp->m.planes = (__force struct v4l2_plane *)uplane;
45835+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
45836
45837 while (--num_planes >= 0) {
45838 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
45839@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45840 if (num_planes == 0)
45841 return 0;
45842
45843- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
45844+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
45845 if (get_user(p, &up->m.planes))
45846 return -EFAULT;
45847 uplane32 = compat_ptr(p);
45848@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
45849 get_user(kp->flags, &up->flags) ||
45850 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
45851 return -EFAULT;
45852- kp->base = (__force void *)compat_ptr(tmp);
45853+ kp->base = (__force_kernel void *)compat_ptr(tmp);
45854 return 0;
45855 }
45856
45857@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45858 n * sizeof(struct v4l2_ext_control32)))
45859 return -EFAULT;
45860 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
45861- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
45862+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
45863 while (--n >= 0) {
45864 u32 id;
45865
45866@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45867 {
45868 struct v4l2_ext_control32 __user *ucontrols;
45869 struct v4l2_ext_control __user *kcontrols =
45870- (__force struct v4l2_ext_control __user *)kp->controls;
45871+ (struct v4l2_ext_control __force_user *)kp->controls;
45872 int n = kp->count;
45873 compat_caddr_t p;
45874
45875@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
45876 get_user(tmp, &up->edid) ||
45877 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
45878 return -EFAULT;
45879- kp->edid = (__force u8 *)compat_ptr(tmp);
45880+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
45881 return 0;
45882 }
45883
45884diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
45885index 015f92a..59e311e 100644
45886--- a/drivers/media/v4l2-core/v4l2-device.c
45887+++ b/drivers/media/v4l2-core/v4l2-device.c
45888@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
45889 EXPORT_SYMBOL_GPL(v4l2_device_put);
45890
45891 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
45892- atomic_t *instance)
45893+ atomic_unchecked_t *instance)
45894 {
45895- int num = atomic_inc_return(instance) - 1;
45896+ int num = atomic_inc_return_unchecked(instance) - 1;
45897 int len = strlen(basename);
45898
45899 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
45900diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
45901index faac2f4..e39dcd9 100644
45902--- a/drivers/media/v4l2-core/v4l2-ioctl.c
45903+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
45904@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
45905 struct file *file, void *fh, void *p);
45906 } u;
45907 void (*debug)(const void *arg, bool write_only);
45908-};
45909+} __do_const;
45910+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
45911
45912 /* This control needs a priority check */
45913 #define INFO_FL_PRIO (1 << 0)
45914@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
45915 struct video_device *vfd = video_devdata(file);
45916 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
45917 bool write_only = false;
45918- struct v4l2_ioctl_info default_info;
45919+ v4l2_ioctl_info_no_const default_info;
45920 const struct v4l2_ioctl_info *info;
45921 void *fh = file->private_data;
45922 struct v4l2_fh *vfh = NULL;
45923@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45924 ret = -EINVAL;
45925 break;
45926 }
45927- *user_ptr = (void __user *)buf->m.planes;
45928+ *user_ptr = (void __force_user *)buf->m.planes;
45929 *kernel_ptr = (void **)&buf->m.planes;
45930 *array_size = sizeof(struct v4l2_plane) * buf->length;
45931 ret = 1;
45932@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45933 ret = -EINVAL;
45934 break;
45935 }
45936- *user_ptr = (void __user *)edid->edid;
45937+ *user_ptr = (void __force_user *)edid->edid;
45938 *kernel_ptr = (void **)&edid->edid;
45939 *array_size = edid->blocks * 128;
45940 ret = 1;
45941@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45942 ret = -EINVAL;
45943 break;
45944 }
45945- *user_ptr = (void __user *)ctrls->controls;
45946+ *user_ptr = (void __force_user *)ctrls->controls;
45947 *kernel_ptr = (void **)&ctrls->controls;
45948 *array_size = sizeof(struct v4l2_ext_control)
45949 * ctrls->count;
45950@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
45951 }
45952
45953 if (has_array_args) {
45954- *kernel_ptr = (void __force *)user_ptr;
45955+ *kernel_ptr = (void __force_kernel *)user_ptr;
45956 if (copy_to_user(user_ptr, mbuf, array_size))
45957 err = -EFAULT;
45958 goto out_array_args;
45959diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
45960index 24696f5..3637780 100644
45961--- a/drivers/memory/omap-gpmc.c
45962+++ b/drivers/memory/omap-gpmc.c
45963@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
45964 };
45965
45966 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
45967-static struct irq_chip gpmc_irq_chip;
45968 static int gpmc_irq_start;
45969
45970 static struct resource gpmc_mem_root;
45971@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
45972
45973 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
45974
45975+static struct irq_chip gpmc_irq_chip = {
45976+ .name = "gpmc",
45977+ .irq_startup = gpmc_irq_noop_ret,
45978+ .irq_enable = gpmc_irq_enable,
45979+ .irq_disable = gpmc_irq_disable,
45980+ .irq_shutdown = gpmc_irq_noop,
45981+ .irq_ack = gpmc_irq_noop,
45982+ .irq_mask = gpmc_irq_noop,
45983+ .irq_unmask = gpmc_irq_noop,
45984+};
45985+
45986 static int gpmc_setup_irq(void)
45987 {
45988 int i;
45989@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
45990 return gpmc_irq_start;
45991 }
45992
45993- gpmc_irq_chip.name = "gpmc";
45994- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
45995- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
45996- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
45997- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
45998- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
45999- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
46000- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
46001-
46002 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
46003 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
46004
46005diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46006index 187f836..679544b 100644
46007--- a/drivers/message/fusion/mptbase.c
46008+++ b/drivers/message/fusion/mptbase.c
46009@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46010 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46011 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46012
46013+#ifdef CONFIG_GRKERNSEC_HIDESYM
46014+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46015+#else
46016 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46017 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46018+#endif
46019+
46020 /*
46021 * Rounding UP to nearest 4-kB boundary here...
46022 */
46023@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46024 ioc->facts.GlobalCredits);
46025
46026 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46027+#ifdef CONFIG_GRKERNSEC_HIDESYM
46028+ NULL, NULL);
46029+#else
46030 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46031+#endif
46032 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46033 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46034 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
46035diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46036index 5bdaae1..eced16f 100644
46037--- a/drivers/message/fusion/mptsas.c
46038+++ b/drivers/message/fusion/mptsas.c
46039@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46040 return 0;
46041 }
46042
46043+static inline void
46044+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46045+{
46046+ if (phy_info->port_details) {
46047+ phy_info->port_details->rphy = rphy;
46048+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46049+ ioc->name, rphy));
46050+ }
46051+
46052+ if (rphy) {
46053+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46054+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46055+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46056+ ioc->name, rphy, rphy->dev.release));
46057+ }
46058+}
46059+
46060 /* no mutex */
46061 static void
46062 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46063@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46064 return NULL;
46065 }
46066
46067-static inline void
46068-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46069-{
46070- if (phy_info->port_details) {
46071- phy_info->port_details->rphy = rphy;
46072- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46073- ioc->name, rphy));
46074- }
46075-
46076- if (rphy) {
46077- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46078- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46079- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46080- ioc->name, rphy, rphy->dev.release));
46081- }
46082-}
46083-
46084 static inline struct sas_port *
46085 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46086 {
46087diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46088index b7d87cd..3fb36da 100644
46089--- a/drivers/message/i2o/i2o_proc.c
46090+++ b/drivers/message/i2o/i2o_proc.c
46091@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46092 "Array Controller Device"
46093 };
46094
46095-static char *chtostr(char *tmp, u8 *chars, int n)
46096-{
46097- tmp[0] = 0;
46098- return strncat(tmp, (char *)chars, n);
46099-}
46100-
46101 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46102 char *group)
46103 {
46104@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46105 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46106 {
46107 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46108- static u32 work32[5];
46109- static u8 *work8 = (u8 *) work32;
46110- static u16 *work16 = (u16 *) work32;
46111+ u32 work32[5];
46112+ u8 *work8 = (u8 *) work32;
46113+ u16 *work16 = (u16 *) work32;
46114 int token;
46115 u32 hwcap;
46116
46117@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46118 } *result;
46119
46120 i2o_exec_execute_ddm_table ddm_table;
46121- char tmp[28 + 1];
46122
46123 result = kmalloc(sizeof(*result), GFP_KERNEL);
46124 if (!result)
46125@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46126
46127 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46128 seq_printf(seq, "%-#8x", ddm_table.module_id);
46129- seq_printf(seq, "%-29s",
46130- chtostr(tmp, ddm_table.module_name_version, 28));
46131+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46132 seq_printf(seq, "%9d ", ddm_table.data_size);
46133 seq_printf(seq, "%8d", ddm_table.code_size);
46134
46135@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46136
46137 i2o_driver_result_table *result;
46138 i2o_driver_store_table *dst;
46139- char tmp[28 + 1];
46140
46141 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46142 if (result == NULL)
46143@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46144
46145 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46146 seq_printf(seq, "%-#8x", dst->module_id);
46147- seq_printf(seq, "%-29s",
46148- chtostr(tmp, dst->module_name_version, 28));
46149- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46150+ seq_printf(seq, "%-.28s", dst->module_name_version);
46151+ seq_printf(seq, "%-.8s", dst->date);
46152 seq_printf(seq, "%8d ", dst->module_size);
46153 seq_printf(seq, "%8d ", dst->mpb_size);
46154 seq_printf(seq, "0x%04x", dst->module_flags);
46155@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46156 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46157 {
46158 struct i2o_device *d = (struct i2o_device *)seq->private;
46159- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46160+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46161 // == (allow) 512d bytes (max)
46162- static u16 *work16 = (u16 *) work32;
46163+ u16 *work16 = (u16 *) work32;
46164 int token;
46165- char tmp[16 + 1];
46166
46167 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46168
46169@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46170 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46171 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46172 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46173- seq_printf(seq, "Vendor info : %s\n",
46174- chtostr(tmp, (u8 *) (work32 + 2), 16));
46175- seq_printf(seq, "Product info : %s\n",
46176- chtostr(tmp, (u8 *) (work32 + 6), 16));
46177- seq_printf(seq, "Description : %s\n",
46178- chtostr(tmp, (u8 *) (work32 + 10), 16));
46179- seq_printf(seq, "Product rev. : %s\n",
46180- chtostr(tmp, (u8 *) (work32 + 14), 8));
46181+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46182+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46183+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46184+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46185
46186 seq_printf(seq, "Serial number : ");
46187 print_serial_number(seq, (u8 *) (work32 + 16),
46188@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46189 u8 pad[256]; // allow up to 256 byte (max) serial number
46190 } result;
46191
46192- char tmp[24 + 1];
46193-
46194 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46195
46196 if (token < 0) {
46197@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46198 }
46199
46200 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46201- seq_printf(seq, "Module name : %s\n",
46202- chtostr(tmp, result.module_name, 24));
46203- seq_printf(seq, "Module revision : %s\n",
46204- chtostr(tmp, result.module_rev, 8));
46205+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46206+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46207
46208 seq_printf(seq, "Serial number : ");
46209 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46210@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46211 u8 instance_number[4];
46212 } result;
46213
46214- char tmp[64 + 1];
46215-
46216 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46217
46218 if (token < 0) {
46219@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46220 return 0;
46221 }
46222
46223- seq_printf(seq, "Device name : %s\n",
46224- chtostr(tmp, result.device_name, 64));
46225- seq_printf(seq, "Service name : %s\n",
46226- chtostr(tmp, result.service_name, 64));
46227- seq_printf(seq, "Physical name : %s\n",
46228- chtostr(tmp, result.physical_location, 64));
46229- seq_printf(seq, "Instance number : %s\n",
46230- chtostr(tmp, result.instance_number, 4));
46231+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46232+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46233+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46234+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46235
46236 return 0;
46237 }
46238@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46239 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46240 {
46241 struct i2o_device *d = (struct i2o_device *)seq->private;
46242- static u32 work32[12];
46243- static u16 *work16 = (u16 *) work32;
46244- static u8 *work8 = (u8 *) work32;
46245+ u32 work32[12];
46246+ u16 *work16 = (u16 *) work32;
46247+ u8 *work8 = (u8 *) work32;
46248 int token;
46249
46250 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
46251diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46252index 92752fb..a7494f6 100644
46253--- a/drivers/message/i2o/iop.c
46254+++ b/drivers/message/i2o/iop.c
46255@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46256
46257 spin_lock_irqsave(&c->context_list_lock, flags);
46258
46259- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46260- atomic_inc(&c->context_list_counter);
46261+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46262+ atomic_inc_unchecked(&c->context_list_counter);
46263
46264- entry->context = atomic_read(&c->context_list_counter);
46265+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46266
46267 list_add(&entry->list, &c->context_list);
46268
46269@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46270
46271 #if BITS_PER_LONG == 64
46272 spin_lock_init(&c->context_list_lock);
46273- atomic_set(&c->context_list_counter, 0);
46274+ atomic_set_unchecked(&c->context_list_counter, 0);
46275 INIT_LIST_HEAD(&c->context_list);
46276 #endif
46277
46278diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46279index 9a8e185..27ff17d 100644
46280--- a/drivers/mfd/ab8500-debugfs.c
46281+++ b/drivers/mfd/ab8500-debugfs.c
46282@@ -100,7 +100,7 @@ static int irq_last;
46283 static u32 *irq_count;
46284 static int num_irqs;
46285
46286-static struct device_attribute **dev_attr;
46287+static device_attribute_no_const **dev_attr;
46288 static char **event_name;
46289
46290 static u8 avg_sample = SAMPLE_16;
46291diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46292index c880c89..45a7c68 100644
46293--- a/drivers/mfd/max8925-i2c.c
46294+++ b/drivers/mfd/max8925-i2c.c
46295@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46296 const struct i2c_device_id *id)
46297 {
46298 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46299- static struct max8925_chip *chip;
46300+ struct max8925_chip *chip;
46301 struct device_node *node = client->dev.of_node;
46302
46303 if (node && !pdata) {
46304diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46305index 7612d89..70549c2 100644
46306--- a/drivers/mfd/tps65910.c
46307+++ b/drivers/mfd/tps65910.c
46308@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46309 struct tps65910_platform_data *pdata)
46310 {
46311 int ret = 0;
46312- static struct regmap_irq_chip *tps6591x_irqs_chip;
46313+ struct regmap_irq_chip *tps6591x_irqs_chip;
46314
46315 if (!irq) {
46316 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46317diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46318index 1b772ef..01e77d33 100644
46319--- a/drivers/mfd/twl4030-irq.c
46320+++ b/drivers/mfd/twl4030-irq.c
46321@@ -34,6 +34,7 @@
46322 #include <linux/of.h>
46323 #include <linux/irqdomain.h>
46324 #include <linux/i2c/twl.h>
46325+#include <asm/pgtable.h>
46326
46327 #include "twl-core.h"
46328
46329@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46330 * Install an irq handler for each of the SIH modules;
46331 * clone dummy irq_chip since PIH can't *do* anything
46332 */
46333- twl4030_irq_chip = dummy_irq_chip;
46334- twl4030_irq_chip.name = "twl4030";
46335+ pax_open_kernel();
46336+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46337+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46338
46339- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46340+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46341+ pax_close_kernel();
46342
46343 for (i = irq_base; i < irq_end; i++) {
46344 irq_set_chip_and_handler(i, &twl4030_irq_chip,
46345diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46346index 464419b..64bae8d 100644
46347--- a/drivers/misc/c2port/core.c
46348+++ b/drivers/misc/c2port/core.c
46349@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46350 goto error_idr_alloc;
46351 c2dev->id = ret;
46352
46353- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46354+ pax_open_kernel();
46355+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46356+ pax_close_kernel();
46357
46358 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46359 "c2port%d", c2dev->id);
46360diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46361index 8385177..2f54635 100644
46362--- a/drivers/misc/eeprom/sunxi_sid.c
46363+++ b/drivers/misc/eeprom/sunxi_sid.c
46364@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46365
46366 platform_set_drvdata(pdev, sid_data);
46367
46368- sid_bin_attr.size = sid_data->keysize;
46369+ pax_open_kernel();
46370+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46371+ pax_close_kernel();
46372 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46373 return -ENODEV;
46374
46375diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46376index 36f5d52..32311c3 100644
46377--- a/drivers/misc/kgdbts.c
46378+++ b/drivers/misc/kgdbts.c
46379@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46380 char before[BREAK_INSTR_SIZE];
46381 char after[BREAK_INSTR_SIZE];
46382
46383- probe_kernel_read(before, (char *)kgdbts_break_test,
46384+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46385 BREAK_INSTR_SIZE);
46386 init_simple_test();
46387 ts.tst = plant_and_detach_test;
46388@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46389 /* Activate test with initial breakpoint */
46390 if (!is_early)
46391 kgdb_breakpoint();
46392- probe_kernel_read(after, (char *)kgdbts_break_test,
46393+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46394 BREAK_INSTR_SIZE);
46395 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46396 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
46397diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46398index 3ef4627..8d00486 100644
46399--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46400+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46401@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46402 * the lid is closed. This leads to interrupts as soon as a little move
46403 * is done.
46404 */
46405- atomic_inc(&lis3->count);
46406+ atomic_inc_unchecked(&lis3->count);
46407
46408 wake_up_interruptible(&lis3->misc_wait);
46409 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46410@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46411 if (lis3->pm_dev)
46412 pm_runtime_get_sync(lis3->pm_dev);
46413
46414- atomic_set(&lis3->count, 0);
46415+ atomic_set_unchecked(&lis3->count, 0);
46416 return 0;
46417 }
46418
46419@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46420 add_wait_queue(&lis3->misc_wait, &wait);
46421 while (true) {
46422 set_current_state(TASK_INTERRUPTIBLE);
46423- data = atomic_xchg(&lis3->count, 0);
46424+ data = atomic_xchg_unchecked(&lis3->count, 0);
46425 if (data)
46426 break;
46427
46428@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46429 struct lis3lv02d, miscdev);
46430
46431 poll_wait(file, &lis3->misc_wait, wait);
46432- if (atomic_read(&lis3->count))
46433+ if (atomic_read_unchecked(&lis3->count))
46434 return POLLIN | POLLRDNORM;
46435 return 0;
46436 }
46437diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46438index c439c82..1f20f57 100644
46439--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46440+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46441@@ -297,7 +297,7 @@ struct lis3lv02d {
46442 struct input_polled_dev *idev; /* input device */
46443 struct platform_device *pdev; /* platform device */
46444 struct regulator_bulk_data regulators[2];
46445- atomic_t count; /* interrupt count after last read */
46446+ atomic_unchecked_t count; /* interrupt count after last read */
46447 union axis_conversion ac; /* hw -> logical axis */
46448 int mapped_btns[3];
46449
46450diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46451index 2f30bad..c4c13d0 100644
46452--- a/drivers/misc/sgi-gru/gruhandles.c
46453+++ b/drivers/misc/sgi-gru/gruhandles.c
46454@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46455 unsigned long nsec;
46456
46457 nsec = CLKS2NSEC(clks);
46458- atomic_long_inc(&mcs_op_statistics[op].count);
46459- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46460+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46461+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46462 if (mcs_op_statistics[op].max < nsec)
46463 mcs_op_statistics[op].max = nsec;
46464 }
46465diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46466index 4f76359..cdfcb2e 100644
46467--- a/drivers/misc/sgi-gru/gruprocfs.c
46468+++ b/drivers/misc/sgi-gru/gruprocfs.c
46469@@ -32,9 +32,9 @@
46470
46471 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46472
46473-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46474+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46475 {
46476- unsigned long val = atomic_long_read(v);
46477+ unsigned long val = atomic_long_read_unchecked(v);
46478
46479 seq_printf(s, "%16lu %s\n", val, id);
46480 }
46481@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46482
46483 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46484 for (op = 0; op < mcsop_last; op++) {
46485- count = atomic_long_read(&mcs_op_statistics[op].count);
46486- total = atomic_long_read(&mcs_op_statistics[op].total);
46487+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46488+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46489 max = mcs_op_statistics[op].max;
46490 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46491 count ? total / count : 0, max);
46492diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46493index 5c3ce24..4915ccb 100644
46494--- a/drivers/misc/sgi-gru/grutables.h
46495+++ b/drivers/misc/sgi-gru/grutables.h
46496@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46497 * GRU statistics.
46498 */
46499 struct gru_stats_s {
46500- atomic_long_t vdata_alloc;
46501- atomic_long_t vdata_free;
46502- atomic_long_t gts_alloc;
46503- atomic_long_t gts_free;
46504- atomic_long_t gms_alloc;
46505- atomic_long_t gms_free;
46506- atomic_long_t gts_double_allocate;
46507- atomic_long_t assign_context;
46508- atomic_long_t assign_context_failed;
46509- atomic_long_t free_context;
46510- atomic_long_t load_user_context;
46511- atomic_long_t load_kernel_context;
46512- atomic_long_t lock_kernel_context;
46513- atomic_long_t unlock_kernel_context;
46514- atomic_long_t steal_user_context;
46515- atomic_long_t steal_kernel_context;
46516- atomic_long_t steal_context_failed;
46517- atomic_long_t nopfn;
46518- atomic_long_t asid_new;
46519- atomic_long_t asid_next;
46520- atomic_long_t asid_wrap;
46521- atomic_long_t asid_reuse;
46522- atomic_long_t intr;
46523- atomic_long_t intr_cbr;
46524- atomic_long_t intr_tfh;
46525- atomic_long_t intr_spurious;
46526- atomic_long_t intr_mm_lock_failed;
46527- atomic_long_t call_os;
46528- atomic_long_t call_os_wait_queue;
46529- atomic_long_t user_flush_tlb;
46530- atomic_long_t user_unload_context;
46531- atomic_long_t user_exception;
46532- atomic_long_t set_context_option;
46533- atomic_long_t check_context_retarget_intr;
46534- atomic_long_t check_context_unload;
46535- atomic_long_t tlb_dropin;
46536- atomic_long_t tlb_preload_page;
46537- atomic_long_t tlb_dropin_fail_no_asid;
46538- atomic_long_t tlb_dropin_fail_upm;
46539- atomic_long_t tlb_dropin_fail_invalid;
46540- atomic_long_t tlb_dropin_fail_range_active;
46541- atomic_long_t tlb_dropin_fail_idle;
46542- atomic_long_t tlb_dropin_fail_fmm;
46543- atomic_long_t tlb_dropin_fail_no_exception;
46544- atomic_long_t tfh_stale_on_fault;
46545- atomic_long_t mmu_invalidate_range;
46546- atomic_long_t mmu_invalidate_page;
46547- atomic_long_t flush_tlb;
46548- atomic_long_t flush_tlb_gru;
46549- atomic_long_t flush_tlb_gru_tgh;
46550- atomic_long_t flush_tlb_gru_zero_asid;
46551+ atomic_long_unchecked_t vdata_alloc;
46552+ atomic_long_unchecked_t vdata_free;
46553+ atomic_long_unchecked_t gts_alloc;
46554+ atomic_long_unchecked_t gts_free;
46555+ atomic_long_unchecked_t gms_alloc;
46556+ atomic_long_unchecked_t gms_free;
46557+ atomic_long_unchecked_t gts_double_allocate;
46558+ atomic_long_unchecked_t assign_context;
46559+ atomic_long_unchecked_t assign_context_failed;
46560+ atomic_long_unchecked_t free_context;
46561+ atomic_long_unchecked_t load_user_context;
46562+ atomic_long_unchecked_t load_kernel_context;
46563+ atomic_long_unchecked_t lock_kernel_context;
46564+ atomic_long_unchecked_t unlock_kernel_context;
46565+ atomic_long_unchecked_t steal_user_context;
46566+ atomic_long_unchecked_t steal_kernel_context;
46567+ atomic_long_unchecked_t steal_context_failed;
46568+ atomic_long_unchecked_t nopfn;
46569+ atomic_long_unchecked_t asid_new;
46570+ atomic_long_unchecked_t asid_next;
46571+ atomic_long_unchecked_t asid_wrap;
46572+ atomic_long_unchecked_t asid_reuse;
46573+ atomic_long_unchecked_t intr;
46574+ atomic_long_unchecked_t intr_cbr;
46575+ atomic_long_unchecked_t intr_tfh;
46576+ atomic_long_unchecked_t intr_spurious;
46577+ atomic_long_unchecked_t intr_mm_lock_failed;
46578+ atomic_long_unchecked_t call_os;
46579+ atomic_long_unchecked_t call_os_wait_queue;
46580+ atomic_long_unchecked_t user_flush_tlb;
46581+ atomic_long_unchecked_t user_unload_context;
46582+ atomic_long_unchecked_t user_exception;
46583+ atomic_long_unchecked_t set_context_option;
46584+ atomic_long_unchecked_t check_context_retarget_intr;
46585+ atomic_long_unchecked_t check_context_unload;
46586+ atomic_long_unchecked_t tlb_dropin;
46587+ atomic_long_unchecked_t tlb_preload_page;
46588+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46589+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46590+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46591+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46592+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46593+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46594+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46595+ atomic_long_unchecked_t tfh_stale_on_fault;
46596+ atomic_long_unchecked_t mmu_invalidate_range;
46597+ atomic_long_unchecked_t mmu_invalidate_page;
46598+ atomic_long_unchecked_t flush_tlb;
46599+ atomic_long_unchecked_t flush_tlb_gru;
46600+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46601+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46602
46603- atomic_long_t copy_gpa;
46604- atomic_long_t read_gpa;
46605+ atomic_long_unchecked_t copy_gpa;
46606+ atomic_long_unchecked_t read_gpa;
46607
46608- atomic_long_t mesq_receive;
46609- atomic_long_t mesq_receive_none;
46610- atomic_long_t mesq_send;
46611- atomic_long_t mesq_send_failed;
46612- atomic_long_t mesq_noop;
46613- atomic_long_t mesq_send_unexpected_error;
46614- atomic_long_t mesq_send_lb_overflow;
46615- atomic_long_t mesq_send_qlimit_reached;
46616- atomic_long_t mesq_send_amo_nacked;
46617- atomic_long_t mesq_send_put_nacked;
46618- atomic_long_t mesq_page_overflow;
46619- atomic_long_t mesq_qf_locked;
46620- atomic_long_t mesq_qf_noop_not_full;
46621- atomic_long_t mesq_qf_switch_head_failed;
46622- atomic_long_t mesq_qf_unexpected_error;
46623- atomic_long_t mesq_noop_unexpected_error;
46624- atomic_long_t mesq_noop_lb_overflow;
46625- atomic_long_t mesq_noop_qlimit_reached;
46626- atomic_long_t mesq_noop_amo_nacked;
46627- atomic_long_t mesq_noop_put_nacked;
46628- atomic_long_t mesq_noop_page_overflow;
46629+ atomic_long_unchecked_t mesq_receive;
46630+ atomic_long_unchecked_t mesq_receive_none;
46631+ atomic_long_unchecked_t mesq_send;
46632+ atomic_long_unchecked_t mesq_send_failed;
46633+ atomic_long_unchecked_t mesq_noop;
46634+ atomic_long_unchecked_t mesq_send_unexpected_error;
46635+ atomic_long_unchecked_t mesq_send_lb_overflow;
46636+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46637+ atomic_long_unchecked_t mesq_send_amo_nacked;
46638+ atomic_long_unchecked_t mesq_send_put_nacked;
46639+ atomic_long_unchecked_t mesq_page_overflow;
46640+ atomic_long_unchecked_t mesq_qf_locked;
46641+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46642+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46643+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46644+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46645+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46646+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46647+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46648+ atomic_long_unchecked_t mesq_noop_put_nacked;
46649+ atomic_long_unchecked_t mesq_noop_page_overflow;
46650
46651 };
46652
46653@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46654 tghop_invalidate, mcsop_last};
46655
46656 struct mcs_op_statistic {
46657- atomic_long_t count;
46658- atomic_long_t total;
46659+ atomic_long_unchecked_t count;
46660+ atomic_long_unchecked_t total;
46661 unsigned long max;
46662 };
46663
46664@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46665
46666 #define STAT(id) do { \
46667 if (gru_options & OPT_STATS) \
46668- atomic_long_inc(&gru_stats.id); \
46669+ atomic_long_inc_unchecked(&gru_stats.id); \
46670 } while (0)
46671
46672 #ifdef CONFIG_SGI_GRU_DEBUG
46673diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46674index c862cd4..0d176fe 100644
46675--- a/drivers/misc/sgi-xp/xp.h
46676+++ b/drivers/misc/sgi-xp/xp.h
46677@@ -288,7 +288,7 @@ struct xpc_interface {
46678 xpc_notify_func, void *);
46679 void (*received) (short, int, void *);
46680 enum xp_retval (*partid_to_nasids) (short, void *);
46681-};
46682+} __no_const;
46683
46684 extern struct xpc_interface xpc_interface;
46685
46686diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46687index 01be66d..e3a0c7e 100644
46688--- a/drivers/misc/sgi-xp/xp_main.c
46689+++ b/drivers/misc/sgi-xp/xp_main.c
46690@@ -78,13 +78,13 @@ xpc_notloaded(void)
46691 }
46692
46693 struct xpc_interface xpc_interface = {
46694- (void (*)(int))xpc_notloaded,
46695- (void (*)(int))xpc_notloaded,
46696- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46697- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46698+ .connect = (void (*)(int))xpc_notloaded,
46699+ .disconnect = (void (*)(int))xpc_notloaded,
46700+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46701+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46702 void *))xpc_notloaded,
46703- (void (*)(short, int, void *))xpc_notloaded,
46704- (enum xp_retval(*)(short, void *))xpc_notloaded
46705+ .received = (void (*)(short, int, void *))xpc_notloaded,
46706+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46707 };
46708 EXPORT_SYMBOL_GPL(xpc_interface);
46709
46710diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46711index b94d5f7..7f494c5 100644
46712--- a/drivers/misc/sgi-xp/xpc.h
46713+++ b/drivers/misc/sgi-xp/xpc.h
46714@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46715 void (*received_payload) (struct xpc_channel *, void *);
46716 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46717 };
46718+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46719
46720 /* struct xpc_partition act_state values (for XPC HB) */
46721
46722@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46723 /* found in xpc_main.c */
46724 extern struct device *xpc_part;
46725 extern struct device *xpc_chan;
46726-extern struct xpc_arch_operations xpc_arch_ops;
46727+extern xpc_arch_operations_no_const xpc_arch_ops;
46728 extern int xpc_disengage_timelimit;
46729 extern int xpc_disengage_timedout;
46730 extern int xpc_activate_IRQ_rcvd;
46731diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46732index 82dc574..8539ab2 100644
46733--- a/drivers/misc/sgi-xp/xpc_main.c
46734+++ b/drivers/misc/sgi-xp/xpc_main.c
46735@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46736 .notifier_call = xpc_system_die,
46737 };
46738
46739-struct xpc_arch_operations xpc_arch_ops;
46740+xpc_arch_operations_no_const xpc_arch_ops;
46741
46742 /*
46743 * Timer function to enforce the timelimit on the partition disengage.
46744@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46745
46746 if (((die_args->trapnr == X86_TRAP_MF) ||
46747 (die_args->trapnr == X86_TRAP_XF)) &&
46748- !user_mode_vm(die_args->regs))
46749+ !user_mode(die_args->regs))
46750 xpc_die_deactivate();
46751
46752 break;
46753diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46754index 4409d79..d7766d0 100644
46755--- a/drivers/mmc/card/block.c
46756+++ b/drivers/mmc/card/block.c
46757@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46758 if (idata->ic.postsleep_min_us)
46759 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46760
46761- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46762+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46763 err = -EFAULT;
46764 goto cmd_rel_host;
46765 }
46766diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46767index 0d0f7a2..45b8d60 100644
46768--- a/drivers/mmc/host/dw_mmc.h
46769+++ b/drivers/mmc/host/dw_mmc.h
46770@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
46771 int (*parse_dt)(struct dw_mci *host);
46772 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46773 struct dw_mci_tuning_data *tuning_data);
46774-};
46775+} __do_const;
46776 #endif /* _DW_MMC_H_ */
46777diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46778index 8232e9a..7776006 100644
46779--- a/drivers/mmc/host/mmci.c
46780+++ b/drivers/mmc/host/mmci.c
46781@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
46782 mmc->caps |= MMC_CAP_CMD23;
46783
46784 if (variant->busy_detect) {
46785- mmci_ops.card_busy = mmci_card_busy;
46786+ pax_open_kernel();
46787+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46788+ pax_close_kernel();
46789 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46790 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46791 mmc->max_busy_timeout = 0;
46792diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46793index 7c71dcd..74cb746 100644
46794--- a/drivers/mmc/host/omap_hsmmc.c
46795+++ b/drivers/mmc/host/omap_hsmmc.c
46796@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46797
46798 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46799 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46800- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46801+ pax_open_kernel();
46802+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46803+ pax_close_kernel();
46804 }
46805
46806 pm_runtime_enable(host->dev);
46807diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46808index af1f7c0..00d368a 100644
46809--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46810+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46811@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46812 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46813 }
46814
46815- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46816- sdhci_esdhc_ops.platform_execute_tuning =
46817+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46818+ pax_open_kernel();
46819+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
46820 esdhc_executing_tuning;
46821+ pax_close_kernel();
46822+ }
46823
46824 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
46825 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
46826diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
46827index c45b893..fba0144 100644
46828--- a/drivers/mmc/host/sdhci-s3c.c
46829+++ b/drivers/mmc/host/sdhci-s3c.c
46830@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
46831 * we can use overriding functions instead of default.
46832 */
46833 if (sc->no_divider) {
46834- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46835- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46836- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46837+ pax_open_kernel();
46838+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46839+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46840+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46841+ pax_close_kernel();
46842 }
46843
46844 /* It supports additional host capabilities if needed */
46845diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
46846index 423666b..81ff5eb 100644
46847--- a/drivers/mtd/chips/cfi_cmdset_0020.c
46848+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
46849@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
46850 size_t totlen = 0, thislen;
46851 int ret = 0;
46852 size_t buflen = 0;
46853- static char *buffer;
46854+ char *buffer;
46855
46856 if (!ECCBUF_SIZE) {
46857 /* We should fall back to a general writev implementation.
46858diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
46859index b3b7ca1..5dd4634 100644
46860--- a/drivers/mtd/nand/denali.c
46861+++ b/drivers/mtd/nand/denali.c
46862@@ -24,6 +24,7 @@
46863 #include <linux/slab.h>
46864 #include <linux/mtd/mtd.h>
46865 #include <linux/module.h>
46866+#include <linux/slab.h>
46867
46868 #include "denali.h"
46869
46870diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46871index 4f3851a..f477a23 100644
46872--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46873+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46874@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
46875
46876 /* first try to map the upper buffer directly */
46877 if (virt_addr_valid(this->upper_buf) &&
46878- !object_is_on_stack(this->upper_buf)) {
46879+ !object_starts_on_stack(this->upper_buf)) {
46880 sg_init_one(sgl, this->upper_buf, this->upper_len);
46881 ret = dma_map_sg(this->dev, sgl, 1, dr);
46882 if (ret == 0)
46883diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
46884index 51b9d6a..52af9a7 100644
46885--- a/drivers/mtd/nftlmount.c
46886+++ b/drivers/mtd/nftlmount.c
46887@@ -24,6 +24,7 @@
46888 #include <asm/errno.h>
46889 #include <linux/delay.h>
46890 #include <linux/slab.h>
46891+#include <linux/sched.h>
46892 #include <linux/mtd/mtd.h>
46893 #include <linux/mtd/nand.h>
46894 #include <linux/mtd/nftl.h>
46895diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
46896index c23184a..4115c41 100644
46897--- a/drivers/mtd/sm_ftl.c
46898+++ b/drivers/mtd/sm_ftl.c
46899@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
46900 #define SM_CIS_VENDOR_OFFSET 0x59
46901 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
46902 {
46903- struct attribute_group *attr_group;
46904+ attribute_group_no_const *attr_group;
46905 struct attribute **attributes;
46906 struct sm_sysfs_attribute *vendor_attribute;
46907 char *vendor;
46908diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
46909index 7b11243..b3278a3 100644
46910--- a/drivers/net/bonding/bond_netlink.c
46911+++ b/drivers/net/bonding/bond_netlink.c
46912@@ -585,7 +585,7 @@ nla_put_failure:
46913 return -EMSGSIZE;
46914 }
46915
46916-struct rtnl_link_ops bond_link_ops __read_mostly = {
46917+struct rtnl_link_ops bond_link_ops = {
46918 .kind = "bond",
46919 .priv_size = sizeof(struct bonding),
46920 .setup = bond_setup,
46921diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
46922index b3b922a..80bba38 100644
46923--- a/drivers/net/caif/caif_hsi.c
46924+++ b/drivers/net/caif/caif_hsi.c
46925@@ -1444,7 +1444,7 @@ err:
46926 return -ENODEV;
46927 }
46928
46929-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
46930+static struct rtnl_link_ops caif_hsi_link_ops = {
46931 .kind = "cfhsi",
46932 .priv_size = sizeof(struct cfhsi),
46933 .setup = cfhsi_setup,
46934diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
46935index 98d73aa..63ef9da 100644
46936--- a/drivers/net/can/Kconfig
46937+++ b/drivers/net/can/Kconfig
46938@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
46939
46940 config CAN_FLEXCAN
46941 tristate "Support for Freescale FLEXCAN based chips"
46942- depends on ARM || PPC
46943+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
46944 ---help---
46945 Say Y here if you want to support for Freescale FlexCAN.
46946
46947diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
46948index 847c1f8..3bed607 100644
46949--- a/drivers/net/can/dev.c
46950+++ b/drivers/net/can/dev.c
46951@@ -578,6 +578,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
46952 skb->pkt_type = PACKET_BROADCAST;
46953 skb->ip_summed = CHECKSUM_UNNECESSARY;
46954
46955+ skb_reset_mac_header(skb);
46956+ skb_reset_network_header(skb);
46957+ skb_reset_transport_header(skb);
46958+
46959 can_skb_reserve(skb);
46960 can_skb_prv(skb)->ifindex = dev->ifindex;
46961
46962@@ -602,6 +606,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
46963 skb->pkt_type = PACKET_BROADCAST;
46964 skb->ip_summed = CHECKSUM_UNNECESSARY;
46965
46966+ skb_reset_mac_header(skb);
46967+ skb_reset_network_header(skb);
46968+ skb_reset_transport_header(skb);
46969+
46970 can_skb_reserve(skb);
46971 can_skb_prv(skb)->ifindex = dev->ifindex;
46972
46973@@ -950,7 +958,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
46974 return -EOPNOTSUPP;
46975 }
46976
46977-static struct rtnl_link_ops can_link_ops __read_mostly = {
46978+static struct rtnl_link_ops can_link_ops = {
46979 .kind = "can",
46980 .maxtype = IFLA_CAN_MAX,
46981 .policy = can_policy,
46982diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
46983index 674f367..ec3a31f 100644
46984--- a/drivers/net/can/vcan.c
46985+++ b/drivers/net/can/vcan.c
46986@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
46987 dev->destructor = free_netdev;
46988 }
46989
46990-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
46991+static struct rtnl_link_ops vcan_link_ops = {
46992 .kind = "vcan",
46993 .setup = vcan_setup,
46994 };
46995diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
46996index 49adbf1..fff7ff8 100644
46997--- a/drivers/net/dummy.c
46998+++ b/drivers/net/dummy.c
46999@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
47000 return 0;
47001 }
47002
47003-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47004+static struct rtnl_link_ops dummy_link_ops = {
47005 .kind = DRV_NAME,
47006 .setup = dummy_setup,
47007 .validate = dummy_validate,
47008diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47009index 0443654..4f0aa18 100644
47010--- a/drivers/net/ethernet/8390/ax88796.c
47011+++ b/drivers/net/ethernet/8390/ax88796.c
47012@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47013 if (ax->plat->reg_offsets)
47014 ei_local->reg_offset = ax->plat->reg_offsets;
47015 else {
47016+ resource_size_t _mem_size = mem_size;
47017+ do_div(_mem_size, 0x18);
47018 ei_local->reg_offset = ax->reg_offsets;
47019 for (ret = 0; ret < 0x18; ret++)
47020- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47021+ ax->reg_offsets[ret] = _mem_size * ret;
47022 }
47023
47024 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
47025diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47026index 760c72c..a99728c 100644
47027--- a/drivers/net/ethernet/altera/altera_tse_main.c
47028+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47029@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
47030 return 0;
47031 }
47032
47033-static struct net_device_ops altera_tse_netdev_ops = {
47034+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47035 .ndo_open = tse_open,
47036 .ndo_stop = tse_shutdown,
47037 .ndo_start_xmit = tse_start_xmit,
47038@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47039 ndev->netdev_ops = &altera_tse_netdev_ops;
47040 altera_tse_set_ethtool_ops(ndev);
47041
47042+ pax_open_kernel();
47043 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47044
47045 if (priv->hash_filter)
47046 altera_tse_netdev_ops.ndo_set_rx_mode =
47047 tse_set_rx_mode_hashfilter;
47048+ pax_close_kernel();
47049
47050 /* Scatter/gather IO is not supported,
47051 * so it is turned off
47052diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47053index 29a0927..5a348e24 100644
47054--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47055+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47056@@ -1122,14 +1122,14 @@ do { \
47057 * operations, everything works on mask values.
47058 */
47059 #define XMDIO_READ(_pdata, _mmd, _reg) \
47060- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47061+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47062 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47063
47064 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47065 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47066
47067 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47068- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47069+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47070 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47071
47072 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47073diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47074index 8a50b01..39c1ad0 100644
47075--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47076+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47077@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47078
47079 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47080
47081- pdata->hw_if.config_dcb_tc(pdata);
47082+ pdata->hw_if->config_dcb_tc(pdata);
47083
47084 return 0;
47085 }
47086@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47087
47088 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47089
47090- pdata->hw_if.config_dcb_pfc(pdata);
47091+ pdata->hw_if->config_dcb_pfc(pdata);
47092
47093 return 0;
47094 }
47095diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47096index a50891f..b26fe24 100644
47097--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47098+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47099@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
47100
47101 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47102 {
47103- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47104+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47105 struct xgbe_channel *channel;
47106 struct xgbe_ring *ring;
47107 struct xgbe_ring_data *rdata;
47108@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47109
47110 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47111 {
47112- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47113+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47114 struct xgbe_channel *channel;
47115 struct xgbe_ring *ring;
47116 struct xgbe_ring_desc *rdesc;
47117@@ -624,7 +624,7 @@ err_out:
47118 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47119 {
47120 struct xgbe_prv_data *pdata = channel->pdata;
47121- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47122+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47123 struct xgbe_ring *ring = channel->rx_ring;
47124 struct xgbe_ring_data *rdata;
47125 int i;
47126@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47127 DBGPR("<--xgbe_realloc_rx_buffer\n");
47128 }
47129
47130-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47131-{
47132- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47133-
47134- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47135- desc_if->free_ring_resources = xgbe_free_ring_resources;
47136- desc_if->map_tx_skb = xgbe_map_tx_skb;
47137- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
47138- desc_if->unmap_rdata = xgbe_unmap_rdata;
47139- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47140- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47141-
47142- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47143-}
47144+const struct xgbe_desc_if default_xgbe_desc_if = {
47145+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47146+ .free_ring_resources = xgbe_free_ring_resources,
47147+ .map_tx_skb = xgbe_map_tx_skb,
47148+ .realloc_rx_buffer = xgbe_realloc_rx_buffer,
47149+ .unmap_rdata = xgbe_unmap_rdata,
47150+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47151+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47152+};
47153diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47154index 4c66cd1..1a20aab 100644
47155--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47156+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47157@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47158
47159 static int xgbe_init(struct xgbe_prv_data *pdata)
47160 {
47161- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47162+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47163 int ret;
47164
47165 DBGPR("-->xgbe_init\n");
47166@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47167 return 0;
47168 }
47169
47170-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47171-{
47172- DBGPR("-->xgbe_init_function_ptrs\n");
47173-
47174- hw_if->tx_complete = xgbe_tx_complete;
47175-
47176- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47177- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47178- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47179- hw_if->set_mac_address = xgbe_set_mac_address;
47180-
47181- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47182- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47183-
47184- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47185- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47186- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47187- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47188- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47189-
47190- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47191- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47192-
47193- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47194- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47195- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47196-
47197- hw_if->enable_tx = xgbe_enable_tx;
47198- hw_if->disable_tx = xgbe_disable_tx;
47199- hw_if->enable_rx = xgbe_enable_rx;
47200- hw_if->disable_rx = xgbe_disable_rx;
47201-
47202- hw_if->powerup_tx = xgbe_powerup_tx;
47203- hw_if->powerdown_tx = xgbe_powerdown_tx;
47204- hw_if->powerup_rx = xgbe_powerup_rx;
47205- hw_if->powerdown_rx = xgbe_powerdown_rx;
47206-
47207- hw_if->dev_xmit = xgbe_dev_xmit;
47208- hw_if->dev_read = xgbe_dev_read;
47209- hw_if->enable_int = xgbe_enable_int;
47210- hw_if->disable_int = xgbe_disable_int;
47211- hw_if->init = xgbe_init;
47212- hw_if->exit = xgbe_exit;
47213+const struct xgbe_hw_if default_xgbe_hw_if = {
47214+ .tx_complete = xgbe_tx_complete,
47215+
47216+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47217+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47218+ .add_mac_addresses = xgbe_add_mac_addresses,
47219+ .set_mac_address = xgbe_set_mac_address,
47220+
47221+ .enable_rx_csum = xgbe_enable_rx_csum,
47222+ .disable_rx_csum = xgbe_disable_rx_csum,
47223+
47224+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47225+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47226+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47227+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47228+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47229+
47230+ .read_mmd_regs = xgbe_read_mmd_regs,
47231+ .write_mmd_regs = xgbe_write_mmd_regs,
47232+
47233+ .set_gmii_speed = xgbe_set_gmii_speed,
47234+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47235+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47236+
47237+ .enable_tx = xgbe_enable_tx,
47238+ .disable_tx = xgbe_disable_tx,
47239+ .enable_rx = xgbe_enable_rx,
47240+ .disable_rx = xgbe_disable_rx,
47241+
47242+ .powerup_tx = xgbe_powerup_tx,
47243+ .powerdown_tx = xgbe_powerdown_tx,
47244+ .powerup_rx = xgbe_powerup_rx,
47245+ .powerdown_rx = xgbe_powerdown_rx,
47246+
47247+ .dev_xmit = xgbe_dev_xmit,
47248+ .dev_read = xgbe_dev_read,
47249+ .enable_int = xgbe_enable_int,
47250+ .disable_int = xgbe_disable_int,
47251+ .init = xgbe_init,
47252+ .exit = xgbe_exit,
47253
47254 /* Descriptor related Sequences have to be initialized here */
47255- hw_if->tx_desc_init = xgbe_tx_desc_init;
47256- hw_if->rx_desc_init = xgbe_rx_desc_init;
47257- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47258- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47259- hw_if->is_last_desc = xgbe_is_last_desc;
47260- hw_if->is_context_desc = xgbe_is_context_desc;
47261- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47262+ .tx_desc_init = xgbe_tx_desc_init,
47263+ .rx_desc_init = xgbe_rx_desc_init,
47264+ .tx_desc_reset = xgbe_tx_desc_reset,
47265+ .rx_desc_reset = xgbe_rx_desc_reset,
47266+ .is_last_desc = xgbe_is_last_desc,
47267+ .is_context_desc = xgbe_is_context_desc,
47268+ .tx_start_xmit = xgbe_tx_start_xmit,
47269
47270 /* For FLOW ctrl */
47271- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47272- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47273+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47274+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47275
47276 /* For RX coalescing */
47277- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47278- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47279- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47280- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47281+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47282+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47283+ .usec_to_riwt = xgbe_usec_to_riwt,
47284+ .riwt_to_usec = xgbe_riwt_to_usec,
47285
47286 /* For RX and TX threshold config */
47287- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47288- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47289+ .config_rx_threshold = xgbe_config_rx_threshold,
47290+ .config_tx_threshold = xgbe_config_tx_threshold,
47291
47292 /* For RX and TX Store and Forward Mode config */
47293- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47294- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47295+ .config_rsf_mode = xgbe_config_rsf_mode,
47296+ .config_tsf_mode = xgbe_config_tsf_mode,
47297
47298 /* For TX DMA Operating on Second Frame config */
47299- hw_if->config_osp_mode = xgbe_config_osp_mode;
47300+ .config_osp_mode = xgbe_config_osp_mode,
47301
47302 /* For RX and TX PBL config */
47303- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47304- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47305- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47306- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47307- hw_if->config_pblx8 = xgbe_config_pblx8;
47308+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47309+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47310+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47311+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47312+ .config_pblx8 = xgbe_config_pblx8,
47313
47314 /* For MMC statistics support */
47315- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47316- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47317- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47318+ .tx_mmc_int = xgbe_tx_mmc_int,
47319+ .rx_mmc_int = xgbe_rx_mmc_int,
47320+ .read_mmc_stats = xgbe_read_mmc_stats,
47321
47322 /* For PTP config */
47323- hw_if->config_tstamp = xgbe_config_tstamp;
47324- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47325- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47326- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47327- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47328+ .config_tstamp = xgbe_config_tstamp,
47329+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47330+ .set_tstamp_time = xgbe_set_tstamp_time,
47331+ .get_tstamp_time = xgbe_get_tstamp_time,
47332+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47333
47334 /* For Data Center Bridging config */
47335- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47336- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47337+ .config_dcb_tc = xgbe_config_dcb_tc,
47338+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47339
47340 /* For Receive Side Scaling */
47341- hw_if->enable_rss = xgbe_enable_rss;
47342- hw_if->disable_rss = xgbe_disable_rss;
47343- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47344- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47345-
47346- DBGPR("<--xgbe_init_function_ptrs\n");
47347-}
47348+ .enable_rss = xgbe_enable_rss,
47349+ .disable_rss = xgbe_disable_rss,
47350+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47351+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47352+};
47353diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47354index e5ffb2c..e56d30b 100644
47355--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47356+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47357@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47358 * support, tell it now
47359 */
47360 if (ring->tx.xmit_more)
47361- pdata->hw_if.tx_start_xmit(channel, ring);
47362+ pdata->hw_if->tx_start_xmit(channel, ring);
47363
47364 return NETDEV_TX_BUSY;
47365 }
47366@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47367
47368 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47369 {
47370- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47371+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47372 struct xgbe_channel *channel;
47373 enum xgbe_int int_id;
47374 unsigned int i;
47375@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47376
47377 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47378 {
47379- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47380+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47381 struct xgbe_channel *channel;
47382 enum xgbe_int int_id;
47383 unsigned int i;
47384@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47385 static irqreturn_t xgbe_isr(int irq, void *data)
47386 {
47387 struct xgbe_prv_data *pdata = data;
47388- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47389+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47390 struct xgbe_channel *channel;
47391 unsigned int dma_isr, dma_ch_isr;
47392 unsigned int mac_isr, mac_tssr;
47393@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47394
47395 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47396 {
47397- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47398+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47399
47400 DBGPR("-->xgbe_init_tx_coalesce\n");
47401
47402@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47403
47404 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47405 {
47406- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47407+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47408
47409 DBGPR("-->xgbe_init_rx_coalesce\n");
47410
47411@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47412
47413 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47414 {
47415- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47416+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47417 struct xgbe_channel *channel;
47418 struct xgbe_ring *ring;
47419 struct xgbe_ring_data *rdata;
47420@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47421
47422 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47423 {
47424- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47425+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47426 struct xgbe_channel *channel;
47427 struct xgbe_ring *ring;
47428 struct xgbe_ring_data *rdata;
47429@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47430 static void xgbe_adjust_link(struct net_device *netdev)
47431 {
47432 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47433- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47434+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47435 struct phy_device *phydev = pdata->phydev;
47436 int new_state = 0;
47437
47438@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47439 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47440 {
47441 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47442- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47443+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47444 unsigned long flags;
47445
47446 DBGPR("-->xgbe_powerdown\n");
47447@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47448 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47449 {
47450 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47451- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47452+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47453 unsigned long flags;
47454
47455 DBGPR("-->xgbe_powerup\n");
47456@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47457
47458 static int xgbe_start(struct xgbe_prv_data *pdata)
47459 {
47460- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47461+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47462 struct net_device *netdev = pdata->netdev;
47463
47464 DBGPR("-->xgbe_start\n");
47465@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47466
47467 static void xgbe_stop(struct xgbe_prv_data *pdata)
47468 {
47469- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47470+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47471 struct xgbe_channel *channel;
47472 struct net_device *netdev = pdata->netdev;
47473 struct netdev_queue *txq;
47474@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47475 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47476 {
47477 struct xgbe_channel *channel;
47478- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47479+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47480 unsigned int i;
47481
47482 DBGPR("-->xgbe_restart_dev\n");
47483@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47484 return -ERANGE;
47485 }
47486
47487- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47488+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47489
47490 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47491
47492@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47493 static int xgbe_open(struct net_device *netdev)
47494 {
47495 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47496- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47497- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47498+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47499+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47500 struct xgbe_channel *channel = NULL;
47501 unsigned int i = 0;
47502 int ret;
47503@@ -1400,8 +1400,8 @@ err_phy_init:
47504 static int xgbe_close(struct net_device *netdev)
47505 {
47506 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47507- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47508- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47509+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47510+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47511 struct xgbe_channel *channel;
47512 unsigned int i;
47513
47514@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
47515 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47516 {
47517 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47518- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47519- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47520+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47521+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47522 struct xgbe_channel *channel;
47523 struct xgbe_ring *ring;
47524 struct xgbe_packet_data *packet;
47525@@ -1518,7 +1518,7 @@ tx_netdev_return:
47526 static void xgbe_set_rx_mode(struct net_device *netdev)
47527 {
47528 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47529- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47530+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47531 unsigned int pr_mode, am_mode;
47532
47533 DBGPR("-->xgbe_set_rx_mode\n");
47534@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47535 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47536 {
47537 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47538- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47539+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47540 struct sockaddr *saddr = addr;
47541
47542 DBGPR("-->xgbe_set_mac_address\n");
47543@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47544
47545 DBGPR("-->%s\n", __func__);
47546
47547- pdata->hw_if.read_mmc_stats(pdata);
47548+ pdata->hw_if->read_mmc_stats(pdata);
47549
47550 s->rx_packets = pstats->rxframecount_gb;
47551 s->rx_bytes = pstats->rxoctetcount_gb;
47552@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47553 u16 vid)
47554 {
47555 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47556- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47557+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47558
47559 DBGPR("-->%s\n", __func__);
47560
47561@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47562 u16 vid)
47563 {
47564 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47565- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47566+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47567
47568 DBGPR("-->%s\n", __func__);
47569
47570@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
47571 netdev_features_t features)
47572 {
47573 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47574- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47575+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47576 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47577 int ret = 0;
47578
47579@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47580 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47581 {
47582 struct xgbe_prv_data *pdata = channel->pdata;
47583- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47584+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47585 struct xgbe_ring *ring = channel->rx_ring;
47586 struct xgbe_ring_data *rdata;
47587
47588@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47589 static int xgbe_tx_poll(struct xgbe_channel *channel)
47590 {
47591 struct xgbe_prv_data *pdata = channel->pdata;
47592- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47593- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47594+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47595+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47596 struct xgbe_ring *ring = channel->tx_ring;
47597 struct xgbe_ring_data *rdata;
47598 struct xgbe_ring_desc *rdesc;
47599@@ -1891,7 +1891,7 @@ unlock:
47600 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47601 {
47602 struct xgbe_prv_data *pdata = channel->pdata;
47603- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47604+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47605 struct xgbe_ring *ring = channel->rx_ring;
47606 struct xgbe_ring_data *rdata;
47607 struct xgbe_packet_data *packet;
47608diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47609index ebf4893..28108c7 100644
47610--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47611+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47612@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47613
47614 DBGPR("-->%s\n", __func__);
47615
47616- pdata->hw_if.read_mmc_stats(pdata);
47617+ pdata->hw_if->read_mmc_stats(pdata);
47618 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47619 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47620 *data++ = *(u64 *)stat;
47621@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47622 struct ethtool_coalesce *ec)
47623 {
47624 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47625- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47626+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47627 unsigned int riwt;
47628
47629 DBGPR("-->xgbe_get_coalesce\n");
47630@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47631 struct ethtool_coalesce *ec)
47632 {
47633 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47634- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47635+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47636 unsigned int rx_frames, rx_riwt, rx_usecs;
47637 unsigned int tx_frames, tx_usecs;
47638
47639diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47640index dbd3850..4e31b38 100644
47641--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47642+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47643@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47644 DBGPR("<--xgbe_default_config\n");
47645 }
47646
47647-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47648-{
47649- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47650- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47651-}
47652-
47653 static int xgbe_probe(struct platform_device *pdev)
47654 {
47655 struct xgbe_prv_data *pdata;
47656@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
47657 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
47658
47659 /* Set all the function pointers */
47660- xgbe_init_all_fptrs(pdata);
47661- hw_if = &pdata->hw_if;
47662- desc_if = &pdata->desc_if;
47663+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47664+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47665
47666 /* Issue software reset to device */
47667 hw_if->exit(pdata);
47668diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47669index 363b210..b241389 100644
47670--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47671+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47672@@ -126,7 +126,7 @@
47673 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47674 {
47675 struct xgbe_prv_data *pdata = mii->priv;
47676- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47677+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47678 int mmd_data;
47679
47680 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47681@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47682 u16 mmd_val)
47683 {
47684 struct xgbe_prv_data *pdata = mii->priv;
47685- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47686+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47687 int mmd_data = mmd_val;
47688
47689 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47690diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47691index a1bf9d1c..84adcab 100644
47692--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47693+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47694@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47695 tstamp_cc);
47696 u64 nsec;
47697
47698- nsec = pdata->hw_if.get_tstamp_time(pdata);
47699+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47700
47701 return nsec;
47702 }
47703@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47704
47705 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47706
47707- pdata->hw_if.update_tstamp_addend(pdata, addend);
47708+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47709
47710 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47711
47712diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47713index f9ec762..988c969 100644
47714--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47715+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47716@@ -668,8 +668,8 @@ struct xgbe_prv_data {
47717 int dev_irq;
47718 unsigned int per_channel_irq;
47719
47720- struct xgbe_hw_if hw_if;
47721- struct xgbe_desc_if desc_if;
47722+ const struct xgbe_hw_if *hw_if;
47723+ const struct xgbe_desc_if *desc_if;
47724
47725 /* AXI DMA settings */
47726 unsigned int axdomain;
47727@@ -787,6 +787,9 @@ struct xgbe_prv_data {
47728 #endif
47729 };
47730
47731+extern const struct xgbe_hw_if default_xgbe_hw_if;
47732+extern const struct xgbe_desc_if default_xgbe_desc_if;
47733+
47734 /* Function prototypes*/
47735
47736 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
47737diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47738index adcacda..fa6e0ae 100644
47739--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47740+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47741@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47742 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47743 {
47744 /* RX_MODE controlling object */
47745- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47746+ bnx2x_init_rx_mode_obj(bp);
47747
47748 /* multicast configuration controlling object */
47749 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47750diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47751index 07cdf9b..b08ecc7 100644
47752--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47753+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47754@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47755 return rc;
47756 }
47757
47758-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47759- struct bnx2x_rx_mode_obj *o)
47760+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47761 {
47762 if (CHIP_IS_E1x(bp)) {
47763- o->wait_comp = bnx2x_empty_rx_mode_wait;
47764- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47765+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47766+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47767 } else {
47768- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47769- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47770+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47771+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47772 }
47773 }
47774
47775diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47776index 86baecb..ff3bb46 100644
47777--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47778+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47779@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47780
47781 /********************* RX MODE ****************/
47782
47783-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47784- struct bnx2x_rx_mode_obj *o);
47785+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47786
47787 /**
47788 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
47789diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47790index 31c9f82..e65e986 100644
47791--- a/drivers/net/ethernet/broadcom/tg3.h
47792+++ b/drivers/net/ethernet/broadcom/tg3.h
47793@@ -150,6 +150,7 @@
47794 #define CHIPREV_ID_5750_A0 0x4000
47795 #define CHIPREV_ID_5750_A1 0x4001
47796 #define CHIPREV_ID_5750_A3 0x4003
47797+#define CHIPREV_ID_5750_C1 0x4201
47798 #define CHIPREV_ID_5750_C2 0x4202
47799 #define CHIPREV_ID_5752_A0_HW 0x5000
47800 #define CHIPREV_ID_5752_A0 0x6000
47801diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47802index 903466e..b285864 100644
47803--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47804+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47805@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47806 }
47807
47808 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47809- bna_cb_ioceth_enable,
47810- bna_cb_ioceth_disable,
47811- bna_cb_ioceth_hbfail,
47812- bna_cb_ioceth_reset
47813+ .enable_cbfn = bna_cb_ioceth_enable,
47814+ .disable_cbfn = bna_cb_ioceth_disable,
47815+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47816+ .reset_cbfn = bna_cb_ioceth_reset
47817 };
47818
47819 static void bna_attr_init(struct bna_ioceth *ioceth)
47820diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47821index 8cffcdf..aadf043 100644
47822--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47823+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47824@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47825 */
47826 struct l2t_skb_cb {
47827 arp_failure_handler_func arp_failure_handler;
47828-};
47829+} __no_const;
47830
47831 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47832
47833diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47834index ccf3436..b720d77 100644
47835--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47836+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47837@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47838
47839 int i;
47840 struct adapter *ap = netdev2adap(dev);
47841- static const unsigned int *reg_ranges;
47842+ const unsigned int *reg_ranges;
47843 int arr_size = 0, buf_size = 0;
47844
47845 if (is_t4(ap->params.chip)) {
47846diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
47847index badff18..e15c4ec 100644
47848--- a/drivers/net/ethernet/dec/tulip/de4x5.c
47849+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
47850@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47851 for (i=0; i<ETH_ALEN; i++) {
47852 tmp.addr[i] = dev->dev_addr[i];
47853 }
47854- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47855+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47856 break;
47857
47858 case DE4X5_SET_HWADDR: /* Set the hardware address */
47859@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47860 spin_lock_irqsave(&lp->lock, flags);
47861 memcpy(&statbuf, &lp->pktStats, ioc->len);
47862 spin_unlock_irqrestore(&lp->lock, flags);
47863- if (copy_to_user(ioc->data, &statbuf, ioc->len))
47864+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
47865 return -EFAULT;
47866 break;
47867 }
47868diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
47869index d48806b..41cd80f 100644
47870--- a/drivers/net/ethernet/emulex/benet/be_main.c
47871+++ b/drivers/net/ethernet/emulex/benet/be_main.c
47872@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
47873
47874 if (wrapped)
47875 newacc += 65536;
47876- ACCESS_ONCE(*acc) = newacc;
47877+ ACCESS_ONCE_RW(*acc) = newacc;
47878 }
47879
47880 static void populate_erx_stats(struct be_adapter *adapter,
47881diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
47882index 6d0c5d5..55be363 100644
47883--- a/drivers/net/ethernet/faraday/ftgmac100.c
47884+++ b/drivers/net/ethernet/faraday/ftgmac100.c
47885@@ -30,6 +30,8 @@
47886 #include <linux/netdevice.h>
47887 #include <linux/phy.h>
47888 #include <linux/platform_device.h>
47889+#include <linux/interrupt.h>
47890+#include <linux/irqreturn.h>
47891 #include <net/ip.h>
47892
47893 #include "ftgmac100.h"
47894diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
47895index dce5f7b..2433466 100644
47896--- a/drivers/net/ethernet/faraday/ftmac100.c
47897+++ b/drivers/net/ethernet/faraday/ftmac100.c
47898@@ -31,6 +31,8 @@
47899 #include <linux/module.h>
47900 #include <linux/netdevice.h>
47901 #include <linux/platform_device.h>
47902+#include <linux/interrupt.h>
47903+#include <linux/irqreturn.h>
47904
47905 #include "ftmac100.h"
47906
47907diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47908index 6d1ec92..4d5d97d 100644
47909--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47910+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47911@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
47912 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
47913
47914 /* Update the base adjustement value. */
47915- ACCESS_ONCE(pf->ptp_base_adj) = incval;
47916+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
47917 smp_mb(); /* Force the above update. */
47918 }
47919
47920diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47921index 5fd4b52..87aa34b 100644
47922--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47923+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47924@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
47925 }
47926
47927 /* update the base incval used to calculate frequency adjustment */
47928- ACCESS_ONCE(adapter->base_incval) = incval;
47929+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
47930 smp_mb();
47931
47932 /* need lock to prevent incorrect read while modifying cyclecounter */
47933diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47934index e3357bf..d4d5348 100644
47935--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47936+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47937@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
47938 wmb();
47939
47940 /* we want to dirty this cache line once */
47941- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
47942- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
47943+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
47944+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
47945
47946 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
47947
47948diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47949index 2bbd01f..e8baa64 100644
47950--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
47951+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47952@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47953 struct __vxge_hw_fifo *fifo;
47954 struct vxge_hw_fifo_config *config;
47955 u32 txdl_size, txdl_per_memblock;
47956- struct vxge_hw_mempool_cbs fifo_mp_callback;
47957+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
47958+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
47959+ };
47960+
47961 struct __vxge_hw_virtualpath *vpath;
47962
47963 if ((vp == NULL) || (attr == NULL)) {
47964@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47965 goto exit;
47966 }
47967
47968- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
47969-
47970 fifo->mempool =
47971 __vxge_hw_mempool_create(vpath->hldev,
47972 fifo->config->memblock_size,
47973diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47974index 2bb48d5..d1a865d 100644
47975--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47976+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47977@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
47978 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
47979 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
47980 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
47981- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47982+ pax_open_kernel();
47983+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47984+ pax_close_kernel();
47985 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47986 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
47987 max_tx_rings = QLCNIC_MAX_TX_RINGS;
47988diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47989index be7d7a6..a8983f8 100644
47990--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47991+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47992@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
47993 case QLCNIC_NON_PRIV_FUNC:
47994 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
47995 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47996- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47997+ pax_open_kernel();
47998+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47999+ pax_close_kernel();
48000 break;
48001 case QLCNIC_PRIV_FUNC:
48002 ahw->op_mode = QLCNIC_PRIV_FUNC;
48003 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48004- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48005+ pax_open_kernel();
48006+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48007+ pax_close_kernel();
48008 break;
48009 case QLCNIC_MGMT_FUNC:
48010 ahw->op_mode = QLCNIC_MGMT_FUNC;
48011 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48012- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48013+ pax_open_kernel();
48014+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48015+ pax_close_kernel();
48016 break;
48017 default:
48018 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48019diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48020index c9f57fb..208bdc1 100644
48021--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48022+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48023@@ -1285,7 +1285,7 @@ flash_temp:
48024 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48025 {
48026 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48027- static const struct qlcnic_dump_operations *fw_dump_ops;
48028+ const struct qlcnic_dump_operations *fw_dump_ops;
48029 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48030 u32 entry_offset, dump, no_entries, buf_offset = 0;
48031 int i, k, ops_cnt, ops_index, dump_size = 0;
48032diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48033index 2e2cf80..ebc796d 100644
48034--- a/drivers/net/ethernet/realtek/r8169.c
48035+++ b/drivers/net/ethernet/realtek/r8169.c
48036@@ -788,22 +788,22 @@ struct rtl8169_private {
48037 struct mdio_ops {
48038 void (*write)(struct rtl8169_private *, int, int);
48039 int (*read)(struct rtl8169_private *, int);
48040- } mdio_ops;
48041+ } __no_const mdio_ops;
48042
48043 struct pll_power_ops {
48044 void (*down)(struct rtl8169_private *);
48045 void (*up)(struct rtl8169_private *);
48046- } pll_power_ops;
48047+ } __no_const pll_power_ops;
48048
48049 struct jumbo_ops {
48050 void (*enable)(struct rtl8169_private *);
48051 void (*disable)(struct rtl8169_private *);
48052- } jumbo_ops;
48053+ } __no_const jumbo_ops;
48054
48055 struct csi_ops {
48056 void (*write)(struct rtl8169_private *, int, int);
48057 u32 (*read)(struct rtl8169_private *, int);
48058- } csi_ops;
48059+ } __no_const csi_ops;
48060
48061 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48062 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
48063diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48064index 6b861e3..204ac86 100644
48065--- a/drivers/net/ethernet/sfc/ptp.c
48066+++ b/drivers/net/ethernet/sfc/ptp.c
48067@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48068 ptp->start.dma_addr);
48069
48070 /* Clear flag that signals MC ready */
48071- ACCESS_ONCE(*start) = 0;
48072+ ACCESS_ONCE_RW(*start) = 0;
48073 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48074 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48075 EFX_BUG_ON_PARANOID(rc);
48076diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48077index 08c483b..2c4a553 100644
48078--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48079+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48080@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48081
48082 writel(value, ioaddr + MMC_CNTRL);
48083
48084- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48085- MMC_CNTRL, value);
48086+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48087+// MMC_CNTRL, value);
48088 }
48089
48090 /* To mask all all interrupts.*/
48091diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48092index 384ca4f..dd7d4f9 100644
48093--- a/drivers/net/hyperv/hyperv_net.h
48094+++ b/drivers/net/hyperv/hyperv_net.h
48095@@ -171,7 +171,7 @@ struct rndis_device {
48096 enum rndis_device_state state;
48097 bool link_state;
48098 bool link_change;
48099- atomic_t new_req_id;
48100+ atomic_unchecked_t new_req_id;
48101
48102 spinlock_t request_lock;
48103 struct list_head req_list;
48104diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48105index ec0c40a..c9e42eb 100644
48106--- a/drivers/net/hyperv/rndis_filter.c
48107+++ b/drivers/net/hyperv/rndis_filter.c
48108@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48109 * template
48110 */
48111 set = &rndis_msg->msg.set_req;
48112- set->req_id = atomic_inc_return(&dev->new_req_id);
48113+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48114
48115 /* Add to the request list */
48116 spin_lock_irqsave(&dev->request_lock, flags);
48117@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48118
48119 /* Setup the rndis set */
48120 halt = &request->request_msg.msg.halt_req;
48121- halt->req_id = atomic_inc_return(&dev->new_req_id);
48122+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48123
48124 /* Ignore return since this msg is optional. */
48125 rndis_filter_send_request(dev, request);
48126diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48127index 34f846b..4a0d5b1 100644
48128--- a/drivers/net/ifb.c
48129+++ b/drivers/net/ifb.c
48130@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48131 return 0;
48132 }
48133
48134-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48135+static struct rtnl_link_ops ifb_link_ops = {
48136 .kind = "ifb",
48137 .priv_size = sizeof(struct ifb_private),
48138 .setup = ifb_setup,
48139diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48140index 612e073..a9f5eda 100644
48141--- a/drivers/net/macvlan.c
48142+++ b/drivers/net/macvlan.c
48143@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48144 free_nskb:
48145 kfree_skb(nskb);
48146 err:
48147- atomic_long_inc(&skb->dev->rx_dropped);
48148+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48149 }
48150
48151 static void macvlan_flush_sources(struct macvlan_port *port,
48152@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48153 int macvlan_link_register(struct rtnl_link_ops *ops)
48154 {
48155 /* common fields */
48156- ops->priv_size = sizeof(struct macvlan_dev);
48157- ops->validate = macvlan_validate;
48158- ops->maxtype = IFLA_MACVLAN_MAX;
48159- ops->policy = macvlan_policy;
48160- ops->changelink = macvlan_changelink;
48161- ops->get_size = macvlan_get_size;
48162- ops->fill_info = macvlan_fill_info;
48163+ pax_open_kernel();
48164+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48165+ *(void **)&ops->validate = macvlan_validate;
48166+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48167+ *(const void **)&ops->policy = macvlan_policy;
48168+ *(void **)&ops->changelink = macvlan_changelink;
48169+ *(void **)&ops->get_size = macvlan_get_size;
48170+ *(void **)&ops->fill_info = macvlan_fill_info;
48171+ pax_close_kernel();
48172
48173 return rtnl_link_register(ops);
48174 };
48175@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48176 return NOTIFY_DONE;
48177 }
48178
48179-static struct notifier_block macvlan_notifier_block __read_mostly = {
48180+static struct notifier_block macvlan_notifier_block = {
48181 .notifier_call = macvlan_device_event,
48182 };
48183
48184diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48185index 4d050ee..012f6dd 100644
48186--- a/drivers/net/macvtap.c
48187+++ b/drivers/net/macvtap.c
48188@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48189 dev->tx_queue_len = TUN_READQ_SIZE;
48190 }
48191
48192-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48193+static struct rtnl_link_ops macvtap_link_ops = {
48194 .kind = "macvtap",
48195 .setup = macvtap_setup,
48196 .newlink = macvtap_newlink,
48197@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48198
48199 ret = 0;
48200 u = q->flags;
48201- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48202+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48203 put_user(u, &ifr->ifr_flags))
48204 ret = -EFAULT;
48205 macvtap_put_vlan(vlan);
48206@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48207 return NOTIFY_DONE;
48208 }
48209
48210-static struct notifier_block macvtap_notifier_block __read_mostly = {
48211+static struct notifier_block macvtap_notifier_block = {
48212 .notifier_call = macvtap_device_event,
48213 };
48214
48215diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48216index 34924df..a747360 100644
48217--- a/drivers/net/nlmon.c
48218+++ b/drivers/net/nlmon.c
48219@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48220 return 0;
48221 }
48222
48223-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48224+static struct rtnl_link_ops nlmon_link_ops = {
48225 .kind = "nlmon",
48226 .priv_size = sizeof(struct nlmon),
48227 .setup = nlmon_setup,
48228diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48229index 3fc91e8..6c36337 100644
48230--- a/drivers/net/phy/phy_device.c
48231+++ b/drivers/net/phy/phy_device.c
48232@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48233 * zero on success.
48234 *
48235 */
48236-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48237+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48238 struct phy_c45_device_ids *c45_ids) {
48239 int phy_reg;
48240 int i, reg_addr;
48241@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48242 * its return value is in turn returned.
48243 *
48244 */
48245-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48246+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48247 bool is_c45, struct phy_c45_device_ids *c45_ids)
48248 {
48249 int phy_reg;
48250@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48251 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48252 {
48253 struct phy_c45_device_ids c45_ids = {0};
48254- u32 phy_id = 0;
48255+ int phy_id = 0;
48256 int r;
48257
48258 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48259diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48260index af034db..1611c0b2 100644
48261--- a/drivers/net/ppp/ppp_generic.c
48262+++ b/drivers/net/ppp/ppp_generic.c
48263@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48264 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48265 struct ppp_stats stats;
48266 struct ppp_comp_stats cstats;
48267- char *vers;
48268
48269 switch (cmd) {
48270 case SIOCGPPPSTATS:
48271@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48272 break;
48273
48274 case SIOCGPPPVER:
48275- vers = PPP_VERSION;
48276- if (copy_to_user(addr, vers, strlen(vers) + 1))
48277+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48278 break;
48279 err = 0;
48280 break;
48281diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48282index 079f7ad..b2a2bfa7 100644
48283--- a/drivers/net/slip/slhc.c
48284+++ b/drivers/net/slip/slhc.c
48285@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48286 register struct tcphdr *thp;
48287 register struct iphdr *ip;
48288 register struct cstate *cs;
48289- int len, hdrlen;
48290+ long len, hdrlen;
48291 unsigned char *cp = icp;
48292
48293 /* We've got a compressed packet; read the change byte */
48294diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48295index 2c087ef..4859007 100644
48296--- a/drivers/net/team/team.c
48297+++ b/drivers/net/team/team.c
48298@@ -2103,7 +2103,7 @@ static unsigned int team_get_num_rx_queues(void)
48299 return TEAM_DEFAULT_NUM_RX_QUEUES;
48300 }
48301
48302-static struct rtnl_link_ops team_link_ops __read_mostly = {
48303+static struct rtnl_link_ops team_link_ops = {
48304 .kind = DRV_NAME,
48305 .priv_size = sizeof(struct team),
48306 .setup = team_setup,
48307@@ -2893,7 +2893,7 @@ static int team_device_event(struct notifier_block *unused,
48308 return NOTIFY_DONE;
48309 }
48310
48311-static struct notifier_block team_notifier_block __read_mostly = {
48312+static struct notifier_block team_notifier_block = {
48313 .notifier_call = team_device_event,
48314 };
48315
48316diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48317index 10f9e40..3515e7e 100644
48318--- a/drivers/net/tun.c
48319+++ b/drivers/net/tun.c
48320@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48321 return -EINVAL;
48322 }
48323
48324-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48325+static struct rtnl_link_ops tun_link_ops = {
48326 .kind = DRV_NAME,
48327 .priv_size = sizeof(struct tun_struct),
48328 .setup = tun_setup,
48329@@ -1827,7 +1827,7 @@ unlock:
48330 }
48331
48332 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48333- unsigned long arg, int ifreq_len)
48334+ unsigned long arg, size_t ifreq_len)
48335 {
48336 struct tun_file *tfile = file->private_data;
48337 struct tun_struct *tun;
48338@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48339 int le;
48340 int ret;
48341
48342+ if (ifreq_len > sizeof ifr)
48343+ return -EFAULT;
48344+
48345 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48346 if (copy_from_user(&ifr, argp, ifreq_len))
48347 return -EFAULT;
48348diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48349index 9c5aa92..8cd0405 100644
48350--- a/drivers/net/usb/hso.c
48351+++ b/drivers/net/usb/hso.c
48352@@ -71,7 +71,7 @@
48353 #include <asm/byteorder.h>
48354 #include <linux/serial_core.h>
48355 #include <linux/serial.h>
48356-
48357+#include <asm/local.h>
48358
48359 #define MOD_AUTHOR "Option Wireless"
48360 #define MOD_DESCRIPTION "USB High Speed Option driver"
48361@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48362 struct urb *urb;
48363
48364 urb = serial->rx_urb[0];
48365- if (serial->port.count > 0) {
48366+ if (atomic_read(&serial->port.count) > 0) {
48367 count = put_rxbuf_data(urb, serial);
48368 if (count == -1)
48369 return;
48370@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48371 DUMP1(urb->transfer_buffer, urb->actual_length);
48372
48373 /* Anyone listening? */
48374- if (serial->port.count == 0)
48375+ if (atomic_read(&serial->port.count) == 0)
48376 return;
48377
48378 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48379@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48380 tty_port_tty_set(&serial->port, tty);
48381
48382 /* check for port already opened, if not set the termios */
48383- serial->port.count++;
48384- if (serial->port.count == 1) {
48385+ if (atomic_inc_return(&serial->port.count) == 1) {
48386 serial->rx_state = RX_IDLE;
48387 /* Force default termio settings */
48388 _hso_serial_set_termios(tty, NULL);
48389@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48390 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48391 if (result) {
48392 hso_stop_serial_device(serial->parent);
48393- serial->port.count--;
48394+ atomic_dec(&serial->port.count);
48395 kref_put(&serial->parent->ref, hso_serial_ref_free);
48396 }
48397 } else {
48398@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48399
48400 /* reset the rts and dtr */
48401 /* do the actual close */
48402- serial->port.count--;
48403+ atomic_dec(&serial->port.count);
48404
48405- if (serial->port.count <= 0) {
48406- serial->port.count = 0;
48407+ if (atomic_read(&serial->port.count) <= 0) {
48408+ atomic_set(&serial->port.count, 0);
48409 tty_port_tty_set(&serial->port, NULL);
48410 if (!usb_gone)
48411 hso_stop_serial_device(serial->parent);
48412@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48413
48414 /* the actual setup */
48415 spin_lock_irqsave(&serial->serial_lock, flags);
48416- if (serial->port.count)
48417+ if (atomic_read(&serial->port.count))
48418 _hso_serial_set_termios(tty, old);
48419 else
48420 tty->termios = *old;
48421@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48422 D1("Pending read interrupt on port %d\n", i);
48423 spin_lock(&serial->serial_lock);
48424 if (serial->rx_state == RX_IDLE &&
48425- serial->port.count > 0) {
48426+ atomic_read(&serial->port.count) > 0) {
48427 /* Setup and send a ctrl req read on
48428 * port i */
48429 if (!serial->rx_urb_filled[0]) {
48430@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
48431 /* Start all serial ports */
48432 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48433 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48434- if (dev2ser(serial_table[i])->port.count) {
48435+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48436 result =
48437 hso_start_serial_device(serial_table[i], GFP_NOIO);
48438 hso_kick_transmit(dev2ser(serial_table[i]));
48439diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48440index bf405f1..fd847ee 100644
48441--- a/drivers/net/usb/r8152.c
48442+++ b/drivers/net/usb/r8152.c
48443@@ -571,7 +571,7 @@ struct r8152 {
48444 void (*unload)(struct r8152 *);
48445 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48446 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48447- } rtl_ops;
48448+ } __no_const rtl_ops;
48449
48450 int intr_interval;
48451 u32 saved_wolopts;
48452diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48453index a2515887..6d13233 100644
48454--- a/drivers/net/usb/sierra_net.c
48455+++ b/drivers/net/usb/sierra_net.c
48456@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48457 /* atomic counter partially included in MAC address to make sure 2 devices
48458 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48459 */
48460-static atomic_t iface_counter = ATOMIC_INIT(0);
48461+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48462
48463 /*
48464 * SYNC Timer Delay definition used to set the expiry time
48465@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48466 dev->net->netdev_ops = &sierra_net_device_ops;
48467
48468 /* change MAC addr to include, ifacenum, and to be unique */
48469- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48470+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48471 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48472
48473 /* we will have to manufacture ethernet headers, prepare template */
48474diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48475index 059fdf1..7543217 100644
48476--- a/drivers/net/virtio_net.c
48477+++ b/drivers/net/virtio_net.c
48478@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48479 #define RECEIVE_AVG_WEIGHT 64
48480
48481 /* Minimum alignment for mergeable packet buffers. */
48482-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48483+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48484
48485 #define VIRTNET_DRIVER_VERSION "1.0.0"
48486
48487diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48488index a8c755d..a988b71 100644
48489--- a/drivers/net/vxlan.c
48490+++ b/drivers/net/vxlan.c
48491@@ -2702,7 +2702,7 @@ nla_put_failure:
48492 return -EMSGSIZE;
48493 }
48494
48495-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48496+static struct rtnl_link_ops vxlan_link_ops = {
48497 .kind = "vxlan",
48498 .maxtype = IFLA_VXLAN_MAX,
48499 .policy = vxlan_policy,
48500@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48501 return NOTIFY_DONE;
48502 }
48503
48504-static struct notifier_block vxlan_notifier_block __read_mostly = {
48505+static struct notifier_block vxlan_notifier_block = {
48506 .notifier_call = vxlan_lowerdev_event,
48507 };
48508
48509diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48510index 5920c99..ff2e4a5 100644
48511--- a/drivers/net/wan/lmc/lmc_media.c
48512+++ b/drivers/net/wan/lmc/lmc_media.c
48513@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48514 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48515
48516 lmc_media_t lmc_ds3_media = {
48517- lmc_ds3_init, /* special media init stuff */
48518- lmc_ds3_default, /* reset to default state */
48519- lmc_ds3_set_status, /* reset status to state provided */
48520- lmc_dummy_set_1, /* set clock source */
48521- lmc_dummy_set2_1, /* set line speed */
48522- lmc_ds3_set_100ft, /* set cable length */
48523- lmc_ds3_set_scram, /* set scrambler */
48524- lmc_ds3_get_link_status, /* get link status */
48525- lmc_dummy_set_1, /* set link status */
48526- lmc_ds3_set_crc_length, /* set CRC length */
48527- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48528- lmc_ds3_watchdog
48529+ .init = lmc_ds3_init, /* special media init stuff */
48530+ .defaults = lmc_ds3_default, /* reset to default state */
48531+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48532+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48533+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48534+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48535+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48536+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48537+ .set_link_status = lmc_dummy_set_1, /* set link status */
48538+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48539+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48540+ .watchdog = lmc_ds3_watchdog
48541 };
48542
48543 lmc_media_t lmc_hssi_media = {
48544- lmc_hssi_init, /* special media init stuff */
48545- lmc_hssi_default, /* reset to default state */
48546- lmc_hssi_set_status, /* reset status to state provided */
48547- lmc_hssi_set_clock, /* set clock source */
48548- lmc_dummy_set2_1, /* set line speed */
48549- lmc_dummy_set_1, /* set cable length */
48550- lmc_dummy_set_1, /* set scrambler */
48551- lmc_hssi_get_link_status, /* get link status */
48552- lmc_hssi_set_link_status, /* set link status */
48553- lmc_hssi_set_crc_length, /* set CRC length */
48554- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48555- lmc_hssi_watchdog
48556+ .init = lmc_hssi_init, /* special media init stuff */
48557+ .defaults = lmc_hssi_default, /* reset to default state */
48558+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48559+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48560+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48561+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48562+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48563+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48564+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48565+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48566+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48567+ .watchdog = lmc_hssi_watchdog
48568 };
48569
48570-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48571- lmc_ssi_default, /* reset to default state */
48572- lmc_ssi_set_status, /* reset status to state provided */
48573- lmc_ssi_set_clock, /* set clock source */
48574- lmc_ssi_set_speed, /* set line speed */
48575- lmc_dummy_set_1, /* set cable length */
48576- lmc_dummy_set_1, /* set scrambler */
48577- lmc_ssi_get_link_status, /* get link status */
48578- lmc_ssi_set_link_status, /* set link status */
48579- lmc_ssi_set_crc_length, /* set CRC length */
48580- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48581- lmc_ssi_watchdog
48582+lmc_media_t lmc_ssi_media = {
48583+ .init = lmc_ssi_init, /* special media init stuff */
48584+ .defaults = lmc_ssi_default, /* reset to default state */
48585+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48586+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48587+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48588+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48589+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48590+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48591+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48592+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48593+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48594+ .watchdog = lmc_ssi_watchdog
48595 };
48596
48597 lmc_media_t lmc_t1_media = {
48598- lmc_t1_init, /* special media init stuff */
48599- lmc_t1_default, /* reset to default state */
48600- lmc_t1_set_status, /* reset status to state provided */
48601- lmc_t1_set_clock, /* set clock source */
48602- lmc_dummy_set2_1, /* set line speed */
48603- lmc_dummy_set_1, /* set cable length */
48604- lmc_dummy_set_1, /* set scrambler */
48605- lmc_t1_get_link_status, /* get link status */
48606- lmc_dummy_set_1, /* set link status */
48607- lmc_t1_set_crc_length, /* set CRC length */
48608- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48609- lmc_t1_watchdog
48610+ .init = lmc_t1_init, /* special media init stuff */
48611+ .defaults = lmc_t1_default, /* reset to default state */
48612+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48613+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48614+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48615+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48616+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48617+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48618+ .set_link_status = lmc_dummy_set_1, /* set link status */
48619+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48620+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48621+ .watchdog = lmc_t1_watchdog
48622 };
48623
48624 static void
48625diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48626index feacc3b..5bac0de 100644
48627--- a/drivers/net/wan/z85230.c
48628+++ b/drivers/net/wan/z85230.c
48629@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48630
48631 struct z8530_irqhandler z8530_sync =
48632 {
48633- z8530_rx,
48634- z8530_tx,
48635- z8530_status
48636+ .rx = z8530_rx,
48637+ .tx = z8530_tx,
48638+ .status = z8530_status
48639 };
48640
48641 EXPORT_SYMBOL(z8530_sync);
48642@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48643 }
48644
48645 static struct z8530_irqhandler z8530_dma_sync = {
48646- z8530_dma_rx,
48647- z8530_dma_tx,
48648- z8530_dma_status
48649+ .rx = z8530_dma_rx,
48650+ .tx = z8530_dma_tx,
48651+ .status = z8530_dma_status
48652 };
48653
48654 static struct z8530_irqhandler z8530_txdma_sync = {
48655- z8530_rx,
48656- z8530_dma_tx,
48657- z8530_dma_status
48658+ .rx = z8530_rx,
48659+ .tx = z8530_dma_tx,
48660+ .status = z8530_dma_status
48661 };
48662
48663 /**
48664@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48665
48666 struct z8530_irqhandler z8530_nop=
48667 {
48668- z8530_rx_clear,
48669- z8530_tx_clear,
48670- z8530_status_clear
48671+ .rx = z8530_rx_clear,
48672+ .tx = z8530_tx_clear,
48673+ .status = z8530_status_clear
48674 };
48675
48676
48677diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48678index 0b60295..b8bfa5b 100644
48679--- a/drivers/net/wimax/i2400m/rx.c
48680+++ b/drivers/net/wimax/i2400m/rx.c
48681@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48682 if (i2400m->rx_roq == NULL)
48683 goto error_roq_alloc;
48684
48685- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48686+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48687 GFP_KERNEL);
48688 if (rd == NULL) {
48689 result = -ENOMEM;
48690diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48691index e71a2ce..2268d61 100644
48692--- a/drivers/net/wireless/airo.c
48693+++ b/drivers/net/wireless/airo.c
48694@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48695 struct airo_info *ai = dev->ml_priv;
48696 int ridcode;
48697 int enabled;
48698- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48699+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48700 unsigned char *iobuf;
48701
48702 /* Only super-user can write RIDs */
48703diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48704index da92bfa..5a9001a 100644
48705--- a/drivers/net/wireless/at76c50x-usb.c
48706+++ b/drivers/net/wireless/at76c50x-usb.c
48707@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48708 }
48709
48710 /* Convert timeout from the DFU status to jiffies */
48711-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48712+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48713 {
48714 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48715 | (s->poll_timeout[1] << 8)
48716diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48717index f1946a6..cd367fb 100644
48718--- a/drivers/net/wireless/ath/ath10k/htc.c
48719+++ b/drivers/net/wireless/ath/ath10k/htc.c
48720@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48721 /* registered target arrival callback from the HIF layer */
48722 int ath10k_htc_init(struct ath10k *ar)
48723 {
48724- struct ath10k_hif_cb htc_callbacks;
48725+ static struct ath10k_hif_cb htc_callbacks = {
48726+ .rx_completion = ath10k_htc_rx_completion_handler,
48727+ .tx_completion = ath10k_htc_tx_completion_handler,
48728+ };
48729 struct ath10k_htc_ep *ep = NULL;
48730 struct ath10k_htc *htc = &ar->htc;
48731
48732@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
48733 ath10k_htc_reset_endpoint_states(htc);
48734
48735 /* setup HIF layer callbacks */
48736- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48737- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48738 htc->ar = ar;
48739
48740 /* Get HIF default pipe for HTC message exchange */
48741diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48742index 527179c..a890150 100644
48743--- a/drivers/net/wireless/ath/ath10k/htc.h
48744+++ b/drivers/net/wireless/ath/ath10k/htc.h
48745@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48746
48747 struct ath10k_htc_ops {
48748 void (*target_send_suspend_complete)(struct ath10k *ar);
48749-};
48750+} __no_const;
48751
48752 struct ath10k_htc_ep_ops {
48753 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48754 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48755 void (*ep_tx_credits)(struct ath10k *);
48756-};
48757+} __no_const;
48758
48759 /* service connection information */
48760 struct ath10k_htc_svc_conn_req {
48761diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48762index f816909..e56cd8b 100644
48763--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48764+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48765@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48766 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48767 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48768
48769- ACCESS_ONCE(ads->ds_link) = i->link;
48770- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48771+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48772+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48773
48774 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48775 ctl6 = SM(i->keytype, AR_EncrType);
48776@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48777
48778 if ((i->is_first || i->is_last) &&
48779 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48780- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48781+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48782 | set11nTries(i->rates, 1)
48783 | set11nTries(i->rates, 2)
48784 | set11nTries(i->rates, 3)
48785 | (i->dur_update ? AR_DurUpdateEna : 0)
48786 | SM(0, AR_BurstDur);
48787
48788- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48789+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48790 | set11nRate(i->rates, 1)
48791 | set11nRate(i->rates, 2)
48792 | set11nRate(i->rates, 3);
48793 } else {
48794- ACCESS_ONCE(ads->ds_ctl2) = 0;
48795- ACCESS_ONCE(ads->ds_ctl3) = 0;
48796+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48797+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48798 }
48799
48800 if (!i->is_first) {
48801- ACCESS_ONCE(ads->ds_ctl0) = 0;
48802- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48803- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48804+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48805+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48806+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48807 return;
48808 }
48809
48810@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48811 break;
48812 }
48813
48814- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48815+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48816 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48817 | SM(i->txpower[0], AR_XmitPower0)
48818 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48819@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48820 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48821 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48822
48823- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48824- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48825+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48826+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48827
48828 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48829 return;
48830
48831- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48832+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48833 | set11nPktDurRTSCTS(i->rates, 1);
48834
48835- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48836+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48837 | set11nPktDurRTSCTS(i->rates, 3);
48838
48839- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48840+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48841 | set11nRateFlags(i->rates, 1)
48842 | set11nRateFlags(i->rates, 2)
48843 | set11nRateFlags(i->rates, 3)
48844 | SM(i->rtscts_rate, AR_RTSCTSRate);
48845
48846- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48847- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48848- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48849+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48850+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48851+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48852 }
48853
48854 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
48855diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48856index da84b70..83e4978 100644
48857--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48858+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48859@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48860 (i->qcu << AR_TxQcuNum_S) | desc_len;
48861
48862 checksum += val;
48863- ACCESS_ONCE(ads->info) = val;
48864+ ACCESS_ONCE_RW(ads->info) = val;
48865
48866 checksum += i->link;
48867- ACCESS_ONCE(ads->link) = i->link;
48868+ ACCESS_ONCE_RW(ads->link) = i->link;
48869
48870 checksum += i->buf_addr[0];
48871- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
48872+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
48873 checksum += i->buf_addr[1];
48874- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
48875+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
48876 checksum += i->buf_addr[2];
48877- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
48878+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
48879 checksum += i->buf_addr[3];
48880- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
48881+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
48882
48883 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
48884- ACCESS_ONCE(ads->ctl3) = val;
48885+ ACCESS_ONCE_RW(ads->ctl3) = val;
48886 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
48887- ACCESS_ONCE(ads->ctl5) = val;
48888+ ACCESS_ONCE_RW(ads->ctl5) = val;
48889 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
48890- ACCESS_ONCE(ads->ctl7) = val;
48891+ ACCESS_ONCE_RW(ads->ctl7) = val;
48892 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
48893- ACCESS_ONCE(ads->ctl9) = val;
48894+ ACCESS_ONCE_RW(ads->ctl9) = val;
48895
48896 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
48897- ACCESS_ONCE(ads->ctl10) = checksum;
48898+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
48899
48900 if (i->is_first || i->is_last) {
48901- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
48902+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
48903 | set11nTries(i->rates, 1)
48904 | set11nTries(i->rates, 2)
48905 | set11nTries(i->rates, 3)
48906 | (i->dur_update ? AR_DurUpdateEna : 0)
48907 | SM(0, AR_BurstDur);
48908
48909- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
48910+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
48911 | set11nRate(i->rates, 1)
48912 | set11nRate(i->rates, 2)
48913 | set11nRate(i->rates, 3);
48914 } else {
48915- ACCESS_ONCE(ads->ctl13) = 0;
48916- ACCESS_ONCE(ads->ctl14) = 0;
48917+ ACCESS_ONCE_RW(ads->ctl13) = 0;
48918+ ACCESS_ONCE_RW(ads->ctl14) = 0;
48919 }
48920
48921 ads->ctl20 = 0;
48922@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48923
48924 ctl17 = SM(i->keytype, AR_EncrType);
48925 if (!i->is_first) {
48926- ACCESS_ONCE(ads->ctl11) = 0;
48927- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48928- ACCESS_ONCE(ads->ctl15) = 0;
48929- ACCESS_ONCE(ads->ctl16) = 0;
48930- ACCESS_ONCE(ads->ctl17) = ctl17;
48931- ACCESS_ONCE(ads->ctl18) = 0;
48932- ACCESS_ONCE(ads->ctl19) = 0;
48933+ ACCESS_ONCE_RW(ads->ctl11) = 0;
48934+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48935+ ACCESS_ONCE_RW(ads->ctl15) = 0;
48936+ ACCESS_ONCE_RW(ads->ctl16) = 0;
48937+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48938+ ACCESS_ONCE_RW(ads->ctl18) = 0;
48939+ ACCESS_ONCE_RW(ads->ctl19) = 0;
48940 return;
48941 }
48942
48943- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48944+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48945 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48946 | SM(i->txpower[0], AR_XmitPower0)
48947 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48948@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48949 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
48950 ctl12 |= SM(val, AR_PAPRDChainMask);
48951
48952- ACCESS_ONCE(ads->ctl12) = ctl12;
48953- ACCESS_ONCE(ads->ctl17) = ctl17;
48954+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
48955+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48956
48957- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48958+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48959 | set11nPktDurRTSCTS(i->rates, 1);
48960
48961- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48962+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48963 | set11nPktDurRTSCTS(i->rates, 3);
48964
48965- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
48966+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
48967 | set11nRateFlags(i->rates, 1)
48968 | set11nRateFlags(i->rates, 2)
48969 | set11nRateFlags(i->rates, 3)
48970 | SM(i->rtscts_rate, AR_RTSCTSRate);
48971
48972- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
48973+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
48974
48975- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48976- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48977- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48978+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48979+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48980+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48981 }
48982
48983 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
48984diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
48985index 1cbd335..27dfb40 100644
48986--- a/drivers/net/wireless/ath/ath9k/hw.h
48987+++ b/drivers/net/wireless/ath/ath9k/hw.h
48988@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
48989
48990 /* ANI */
48991 void (*ani_cache_ini_regs)(struct ath_hw *ah);
48992-};
48993+} __no_const;
48994
48995 /**
48996 * struct ath_spec_scan - parameters for Atheros spectral scan
48997@@ -716,7 +716,7 @@ struct ath_hw_ops {
48998 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
48999 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49000 #endif
49001-};
49002+} __no_const;
49003
49004 struct ath_nf_limits {
49005 s16 max;
49006diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49007index 62b0bf4..4ae094c 100644
49008--- a/drivers/net/wireless/ath/ath9k/main.c
49009+++ b/drivers/net/wireless/ath/ath9k/main.c
49010@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
49011 if (!ath9k_is_chanctx_enabled())
49012 return;
49013
49014- ath9k_ops.hw_scan = ath9k_hw_scan;
49015- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49016- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49017- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49018- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49019- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49020- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49021- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49022- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49023- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49024+ pax_open_kernel();
49025+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49026+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49027+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49028+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49029+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49030+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49031+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49032+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49033+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49034+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49035+ pax_close_kernel();
49036 }
49037
49038 #endif
49039diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49040index 058a9f2..d5cb1ba 100644
49041--- a/drivers/net/wireless/b43/phy_lp.c
49042+++ b/drivers/net/wireless/b43/phy_lp.c
49043@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49044 {
49045 struct ssb_bus *bus = dev->dev->sdev->bus;
49046
49047- static const struct b206x_channel *chandata = NULL;
49048+ const struct b206x_channel *chandata = NULL;
49049 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49050 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49051 u16 old_comm15, scale;
49052diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49053index dc1d20c..f7a4f06 100644
49054--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49055+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49056@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49057 */
49058 if (il3945_mod_params.disable_hw_scan) {
49059 D_INFO("Disabling hw_scan\n");
49060- il3945_mac_ops.hw_scan = NULL;
49061+ pax_open_kernel();
49062+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49063+ pax_close_kernel();
49064 }
49065
49066 D_INFO("*** LOAD DRIVER ***\n");
49067diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49068index 0ffb6ff..c0b7f0e 100644
49069--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49070+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49071@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49072 {
49073 struct iwl_priv *priv = file->private_data;
49074 char buf[64];
49075- int buf_size;
49076+ size_t buf_size;
49077 u32 offset, len;
49078
49079 memset(buf, 0, sizeof(buf));
49080@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49081 struct iwl_priv *priv = file->private_data;
49082
49083 char buf[8];
49084- int buf_size;
49085+ size_t buf_size;
49086 u32 reset_flag;
49087
49088 memset(buf, 0, sizeof(buf));
49089@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49090 {
49091 struct iwl_priv *priv = file->private_data;
49092 char buf[8];
49093- int buf_size;
49094+ size_t buf_size;
49095 int ht40;
49096
49097 memset(buf, 0, sizeof(buf));
49098@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49099 {
49100 struct iwl_priv *priv = file->private_data;
49101 char buf[8];
49102- int buf_size;
49103+ size_t buf_size;
49104 int value;
49105
49106 memset(buf, 0, sizeof(buf));
49107@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49108 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49109 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49110
49111-static const char *fmt_value = " %-30s %10u\n";
49112-static const char *fmt_hex = " %-30s 0x%02X\n";
49113-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49114-static const char *fmt_header =
49115+static const char fmt_value[] = " %-30s %10u\n";
49116+static const char fmt_hex[] = " %-30s 0x%02X\n";
49117+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49118+static const char fmt_header[] =
49119 "%-32s current cumulative delta max\n";
49120
49121 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49122@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49123 {
49124 struct iwl_priv *priv = file->private_data;
49125 char buf[8];
49126- int buf_size;
49127+ size_t buf_size;
49128 int clear;
49129
49130 memset(buf, 0, sizeof(buf));
49131@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49132 {
49133 struct iwl_priv *priv = file->private_data;
49134 char buf[8];
49135- int buf_size;
49136+ size_t buf_size;
49137 int trace;
49138
49139 memset(buf, 0, sizeof(buf));
49140@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49141 {
49142 struct iwl_priv *priv = file->private_data;
49143 char buf[8];
49144- int buf_size;
49145+ size_t buf_size;
49146 int missed;
49147
49148 memset(buf, 0, sizeof(buf));
49149@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49150
49151 struct iwl_priv *priv = file->private_data;
49152 char buf[8];
49153- int buf_size;
49154+ size_t buf_size;
49155 int plcp;
49156
49157 memset(buf, 0, sizeof(buf));
49158@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49159
49160 struct iwl_priv *priv = file->private_data;
49161 char buf[8];
49162- int buf_size;
49163+ size_t buf_size;
49164 int flush;
49165
49166 memset(buf, 0, sizeof(buf));
49167@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49168
49169 struct iwl_priv *priv = file->private_data;
49170 char buf[8];
49171- int buf_size;
49172+ size_t buf_size;
49173 int rts;
49174
49175 if (!priv->cfg->ht_params)
49176@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49177 {
49178 struct iwl_priv *priv = file->private_data;
49179 char buf[8];
49180- int buf_size;
49181+ size_t buf_size;
49182
49183 memset(buf, 0, sizeof(buf));
49184 buf_size = min(count, sizeof(buf) - 1);
49185@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49186 struct iwl_priv *priv = file->private_data;
49187 u32 event_log_flag;
49188 char buf[8];
49189- int buf_size;
49190+ size_t buf_size;
49191
49192 /* check that the interface is up */
49193 if (!iwl_is_ready(priv))
49194@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49195 struct iwl_priv *priv = file->private_data;
49196 char buf[8];
49197 u32 calib_disabled;
49198- int buf_size;
49199+ size_t buf_size;
49200
49201 memset(buf, 0, sizeof(buf));
49202 buf_size = min(count, sizeof(buf) - 1);
49203diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49204index 523fe0c..0d9473b 100644
49205--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49206+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49207@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49208 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49209
49210 char buf[8];
49211- int buf_size;
49212+ size_t buf_size;
49213 u32 reset_flag;
49214
49215 memset(buf, 0, sizeof(buf));
49216@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49217 {
49218 struct iwl_trans *trans = file->private_data;
49219 char buf[8];
49220- int buf_size;
49221+ size_t buf_size;
49222 int csr;
49223
49224 memset(buf, 0, sizeof(buf));
49225diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49226index ef58a88..fafa731 100644
49227--- a/drivers/net/wireless/mac80211_hwsim.c
49228+++ b/drivers/net/wireless/mac80211_hwsim.c
49229@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
49230 if (channels < 1)
49231 return -EINVAL;
49232
49233- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49234- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49235- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49236- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49237- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49238- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49239- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49240- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49241- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49242- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49243- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49244- mac80211_hwsim_assign_vif_chanctx;
49245- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49246- mac80211_hwsim_unassign_vif_chanctx;
49247+ pax_open_kernel();
49248+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49249+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49250+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49251+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49252+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49253+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49254+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49255+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49256+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49257+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49258+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49259+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49260+ pax_close_kernel();
49261
49262 spin_lock_init(&hwsim_radio_lock);
49263 INIT_LIST_HEAD(&hwsim_radios);
49264diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49265index 1a4facd..a2ecbbd 100644
49266--- a/drivers/net/wireless/rndis_wlan.c
49267+++ b/drivers/net/wireless/rndis_wlan.c
49268@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49269
49270 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49271
49272- if (rts_threshold < 0 || rts_threshold > 2347)
49273+ if (rts_threshold > 2347)
49274 rts_threshold = 2347;
49275
49276 tmp = cpu_to_le32(rts_threshold);
49277diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49278index 9bb398b..b0cc047 100644
49279--- a/drivers/net/wireless/rt2x00/rt2x00.h
49280+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49281@@ -375,7 +375,7 @@ struct rt2x00_intf {
49282 * for hardware which doesn't support hardware
49283 * sequence counting.
49284 */
49285- atomic_t seqno;
49286+ atomic_unchecked_t seqno;
49287 };
49288
49289 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49290diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49291index 66ff364..3ce34f7 100644
49292--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49293+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49294@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49295 * sequence counter given by mac80211.
49296 */
49297 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49298- seqno = atomic_add_return(0x10, &intf->seqno);
49299+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49300 else
49301- seqno = atomic_read(&intf->seqno);
49302+ seqno = atomic_read_unchecked(&intf->seqno);
49303
49304 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49305 hdr->seq_ctrl |= cpu_to_le16(seqno);
49306diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49307index b661f896..ddf7d2b 100644
49308--- a/drivers/net/wireless/ti/wl1251/sdio.c
49309+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49310@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49311
49312 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49313
49314- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49315- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49316+ pax_open_kernel();
49317+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49318+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49319+ pax_close_kernel();
49320
49321 wl1251_info("using dedicated interrupt line");
49322 } else {
49323- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49324- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49325+ pax_open_kernel();
49326+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49327+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49328+ pax_close_kernel();
49329
49330 wl1251_info("using SDIO interrupt");
49331 }
49332diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49333index d6d0d6d..60c23a0 100644
49334--- a/drivers/net/wireless/ti/wl12xx/main.c
49335+++ b/drivers/net/wireless/ti/wl12xx/main.c
49336@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49337 sizeof(wl->conf.mem));
49338
49339 /* read data preparation is only needed by wl127x */
49340- wl->ops->prepare_read = wl127x_prepare_read;
49341+ pax_open_kernel();
49342+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49343+ pax_close_kernel();
49344
49345 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49346 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49347@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49348 sizeof(wl->conf.mem));
49349
49350 /* read data preparation is only needed by wl127x */
49351- wl->ops->prepare_read = wl127x_prepare_read;
49352+ pax_open_kernel();
49353+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49354+ pax_close_kernel();
49355
49356 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49357 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49358diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49359index 8e56261..9140678 100644
49360--- a/drivers/net/wireless/ti/wl18xx/main.c
49361+++ b/drivers/net/wireless/ti/wl18xx/main.c
49362@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49363 }
49364
49365 if (!checksum_param) {
49366- wl18xx_ops.set_rx_csum = NULL;
49367- wl18xx_ops.init_vif = NULL;
49368+ pax_open_kernel();
49369+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49370+ *(void **)&wl18xx_ops.init_vif = NULL;
49371+ pax_close_kernel();
49372 }
49373
49374 /* Enable 11a Band only if we have 5G antennas */
49375diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49376index a912dc0..a8225ba 100644
49377--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49378+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49379@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49380 {
49381 struct zd_usb *usb = urb->context;
49382 struct zd_usb_interrupt *intr = &usb->intr;
49383- int len;
49384+ unsigned int len;
49385 u16 int_num;
49386
49387 ZD_ASSERT(in_interrupt());
49388diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49389index ce2e2cf..f81e500 100644
49390--- a/drivers/nfc/nfcwilink.c
49391+++ b/drivers/nfc/nfcwilink.c
49392@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49393
49394 static int nfcwilink_probe(struct platform_device *pdev)
49395 {
49396- static struct nfcwilink *drv;
49397+ struct nfcwilink *drv;
49398 int rc;
49399 __u32 protocols;
49400
49401diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49402index f2596c8..50d53af 100644
49403--- a/drivers/nfc/st21nfca/st21nfca.c
49404+++ b/drivers/nfc/st21nfca/st21nfca.c
49405@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49406 goto exit;
49407 }
49408
49409- gate = uid_skb->data;
49410+ memcpy(gate, uid_skb->data, uid_skb->len);
49411 *len = uid_skb->len;
49412 exit:
49413 kfree_skb(uid_skb);
49414diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49415index 5100742..6ad4e6d 100644
49416--- a/drivers/of/fdt.c
49417+++ b/drivers/of/fdt.c
49418@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49419 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49420 return 0;
49421 }
49422- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49423+ pax_open_kernel();
49424+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49425+ pax_close_kernel();
49426 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49427 }
49428 late_initcall(of_fdt_raw_init);
49429diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49430index d93b2b6..ae50401 100644
49431--- a/drivers/oprofile/buffer_sync.c
49432+++ b/drivers/oprofile/buffer_sync.c
49433@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49434 if (cookie == NO_COOKIE)
49435 offset = pc;
49436 if (cookie == INVALID_COOKIE) {
49437- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49438+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49439 offset = pc;
49440 }
49441 if (cookie != last_cookie) {
49442@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49443 /* add userspace sample */
49444
49445 if (!mm) {
49446- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49447+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49448 return 0;
49449 }
49450
49451 cookie = lookup_dcookie(mm, s->eip, &offset);
49452
49453 if (cookie == INVALID_COOKIE) {
49454- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49455+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49456 return 0;
49457 }
49458
49459@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49460 /* ignore backtraces if failed to add a sample */
49461 if (state == sb_bt_start) {
49462 state = sb_bt_ignore;
49463- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49464+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49465 }
49466 }
49467 release_mm(mm);
49468diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49469index c0cc4e7..44d4e54 100644
49470--- a/drivers/oprofile/event_buffer.c
49471+++ b/drivers/oprofile/event_buffer.c
49472@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49473 }
49474
49475 if (buffer_pos == buffer_size) {
49476- atomic_inc(&oprofile_stats.event_lost_overflow);
49477+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49478 return;
49479 }
49480
49481diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49482index ed2c3ec..deda85a 100644
49483--- a/drivers/oprofile/oprof.c
49484+++ b/drivers/oprofile/oprof.c
49485@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49486 if (oprofile_ops.switch_events())
49487 return;
49488
49489- atomic_inc(&oprofile_stats.multiplex_counter);
49490+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49491 start_switch_worker();
49492 }
49493
49494diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49495index ee2cfce..7f8f699 100644
49496--- a/drivers/oprofile/oprofile_files.c
49497+++ b/drivers/oprofile/oprofile_files.c
49498@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49499
49500 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49501
49502-static ssize_t timeout_read(struct file *file, char __user *buf,
49503+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49504 size_t count, loff_t *offset)
49505 {
49506 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49507diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49508index 59659ce..6c860a0 100644
49509--- a/drivers/oprofile/oprofile_stats.c
49510+++ b/drivers/oprofile/oprofile_stats.c
49511@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49512 cpu_buf->sample_invalid_eip = 0;
49513 }
49514
49515- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49516- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49517- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49518- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49519- atomic_set(&oprofile_stats.multiplex_counter, 0);
49520+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49521+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49522+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49523+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49524+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49525 }
49526
49527
49528diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49529index 1fc622b..8c48fc3 100644
49530--- a/drivers/oprofile/oprofile_stats.h
49531+++ b/drivers/oprofile/oprofile_stats.h
49532@@ -13,11 +13,11 @@
49533 #include <linux/atomic.h>
49534
49535 struct oprofile_stat_struct {
49536- atomic_t sample_lost_no_mm;
49537- atomic_t sample_lost_no_mapping;
49538- atomic_t bt_lost_no_mapping;
49539- atomic_t event_lost_overflow;
49540- atomic_t multiplex_counter;
49541+ atomic_unchecked_t sample_lost_no_mm;
49542+ atomic_unchecked_t sample_lost_no_mapping;
49543+ atomic_unchecked_t bt_lost_no_mapping;
49544+ atomic_unchecked_t event_lost_overflow;
49545+ atomic_unchecked_t multiplex_counter;
49546 };
49547
49548 extern struct oprofile_stat_struct oprofile_stats;
49549diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49550index 3f49345..c750d0b 100644
49551--- a/drivers/oprofile/oprofilefs.c
49552+++ b/drivers/oprofile/oprofilefs.c
49553@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49554
49555 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49556 {
49557- atomic_t *val = file->private_data;
49558- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49559+ atomic_unchecked_t *val = file->private_data;
49560+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49561 }
49562
49563
49564@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49565
49566
49567 int oprofilefs_create_ro_atomic(struct dentry *root,
49568- char const *name, atomic_t *val)
49569+ char const *name, atomic_unchecked_t *val)
49570 {
49571 return __oprofilefs_create_file(root, name,
49572 &atomic_ro_fops, 0444, val);
49573diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49574index bdef916..88c7dee 100644
49575--- a/drivers/oprofile/timer_int.c
49576+++ b/drivers/oprofile/timer_int.c
49577@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49578 return NOTIFY_OK;
49579 }
49580
49581-static struct notifier_block __refdata oprofile_cpu_notifier = {
49582+static struct notifier_block oprofile_cpu_notifier = {
49583 .notifier_call = oprofile_cpu_notify,
49584 };
49585
49586diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49587index 3b47080..6cd05dd 100644
49588--- a/drivers/parport/procfs.c
49589+++ b/drivers/parport/procfs.c
49590@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49591
49592 *ppos += len;
49593
49594- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49595+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49596 }
49597
49598 #ifdef CONFIG_PARPORT_1284
49599@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49600
49601 *ppos += len;
49602
49603- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49604+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49605 }
49606 #endif /* IEEE1284.3 support. */
49607
49608diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49609index 6ca2399..68d866b 100644
49610--- a/drivers/pci/hotplug/acpiphp_ibm.c
49611+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49612@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49613 goto init_cleanup;
49614 }
49615
49616- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49617+ pax_open_kernel();
49618+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49619+ pax_close_kernel();
49620 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49621
49622 return retval;
49623diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49624index 66b7bbe..26bee78 100644
49625--- a/drivers/pci/hotplug/cpcihp_generic.c
49626+++ b/drivers/pci/hotplug/cpcihp_generic.c
49627@@ -73,7 +73,6 @@ static u16 port;
49628 static unsigned int enum_bit;
49629 static u8 enum_mask;
49630
49631-static struct cpci_hp_controller_ops generic_hpc_ops;
49632 static struct cpci_hp_controller generic_hpc;
49633
49634 static int __init validate_parameters(void)
49635@@ -139,6 +138,10 @@ static int query_enum(void)
49636 return ((value & enum_mask) == enum_mask);
49637 }
49638
49639+static struct cpci_hp_controller_ops generic_hpc_ops = {
49640+ .query_enum = query_enum,
49641+};
49642+
49643 static int __init cpcihp_generic_init(void)
49644 {
49645 int status;
49646@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49647 pci_dev_put(dev);
49648
49649 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49650- generic_hpc_ops.query_enum = query_enum;
49651 generic_hpc.ops = &generic_hpc_ops;
49652
49653 status = cpci_hp_register_controller(&generic_hpc);
49654diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49655index 7ecf34e..effed62 100644
49656--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49657+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49658@@ -59,7 +59,6 @@
49659 /* local variables */
49660 static bool debug;
49661 static bool poll;
49662-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49663 static struct cpci_hp_controller zt5550_hpc;
49664
49665 /* Primary cPCI bus bridge device */
49666@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49667 return 0;
49668 }
49669
49670+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49671+ .query_enum = zt5550_hc_query_enum,
49672+};
49673+
49674 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49675 {
49676 int status;
49677@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49678 dbg("returned from zt5550_hc_config");
49679
49680 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49681- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49682 zt5550_hpc.ops = &zt5550_hpc_ops;
49683 if (!poll) {
49684 zt5550_hpc.irq = hc_dev->irq;
49685 zt5550_hpc.irq_flags = IRQF_SHARED;
49686 zt5550_hpc.dev_id = hc_dev;
49687
49688- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49689- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49690- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49691+ pax_open_kernel();
49692+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49693+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49694+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49695+ pax_close_kernel();
49696 } else {
49697 info("using ENUM# polling mode");
49698 }
49699diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49700index 1e08ff8c..3cd145f 100644
49701--- a/drivers/pci/hotplug/cpqphp_nvram.c
49702+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49703@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49704
49705 void compaq_nvram_init (void __iomem *rom_start)
49706 {
49707+#ifndef CONFIG_PAX_KERNEXEC
49708 if (rom_start)
49709 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49710+#endif
49711
49712 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49713
49714diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49715index 56d8486..f26113f 100644
49716--- a/drivers/pci/hotplug/pci_hotplug_core.c
49717+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49718@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49719 return -EINVAL;
49720 }
49721
49722- slot->ops->owner = owner;
49723- slot->ops->mod_name = mod_name;
49724+ pax_open_kernel();
49725+ *(struct module **)&slot->ops->owner = owner;
49726+ *(const char **)&slot->ops->mod_name = mod_name;
49727+ pax_close_kernel();
49728
49729 mutex_lock(&pci_hp_mutex);
49730 /*
49731diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49732index 07aa722..84514b4 100644
49733--- a/drivers/pci/hotplug/pciehp_core.c
49734+++ b/drivers/pci/hotplug/pciehp_core.c
49735@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49736 struct slot *slot = ctrl->slot;
49737 struct hotplug_slot *hotplug = NULL;
49738 struct hotplug_slot_info *info = NULL;
49739- struct hotplug_slot_ops *ops = NULL;
49740+ hotplug_slot_ops_no_const *ops = NULL;
49741 char name[SLOT_NAME_SIZE];
49742 int retval = -ENOMEM;
49743
49744diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49745index fd60806..ab6c565 100644
49746--- a/drivers/pci/msi.c
49747+++ b/drivers/pci/msi.c
49748@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49749 {
49750 struct attribute **msi_attrs;
49751 struct attribute *msi_attr;
49752- struct device_attribute *msi_dev_attr;
49753- struct attribute_group *msi_irq_group;
49754+ device_attribute_no_const *msi_dev_attr;
49755+ attribute_group_no_const *msi_irq_group;
49756 const struct attribute_group **msi_irq_groups;
49757 struct msi_desc *entry;
49758 int ret = -ENOMEM;
49759@@ -573,7 +573,7 @@ error_attrs:
49760 count = 0;
49761 msi_attr = msi_attrs[count];
49762 while (msi_attr) {
49763- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49764+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49765 kfree(msi_attr->name);
49766 kfree(msi_dev_attr);
49767 ++count;
49768diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49769index aa012fb..63fac5d 100644
49770--- a/drivers/pci/pci-sysfs.c
49771+++ b/drivers/pci/pci-sysfs.c
49772@@ -1139,7 +1139,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49773 {
49774 /* allocate attribute structure, piggyback attribute name */
49775 int name_len = write_combine ? 13 : 10;
49776- struct bin_attribute *res_attr;
49777+ bin_attribute_no_const *res_attr;
49778 int retval;
49779
49780 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49781@@ -1316,7 +1316,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49782 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49783 {
49784 int retval;
49785- struct bin_attribute *attr;
49786+ bin_attribute_no_const *attr;
49787
49788 /* If the device has VPD, try to expose it in sysfs. */
49789 if (dev->vpd) {
49790@@ -1363,7 +1363,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49791 {
49792 int retval;
49793 int rom_size = 0;
49794- struct bin_attribute *attr;
49795+ bin_attribute_no_const *attr;
49796
49797 if (!sysfs_initialized)
49798 return -EACCES;
49799diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49800index d54632a..198c84d 100644
49801--- a/drivers/pci/pci.h
49802+++ b/drivers/pci/pci.h
49803@@ -93,7 +93,7 @@ struct pci_vpd_ops {
49804 struct pci_vpd {
49805 unsigned int len;
49806 const struct pci_vpd_ops *ops;
49807- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49808+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49809 };
49810
49811 int pci_vpd_pci22_init(struct pci_dev *dev);
49812diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49813index e1e7026..d28dd33 100644
49814--- a/drivers/pci/pcie/aspm.c
49815+++ b/drivers/pci/pcie/aspm.c
49816@@ -27,9 +27,9 @@
49817 #define MODULE_PARAM_PREFIX "pcie_aspm."
49818
49819 /* Note: those are not register definitions */
49820-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49821-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49822-#define ASPM_STATE_L1 (4) /* L1 state */
49823+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49824+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49825+#define ASPM_STATE_L1 (4U) /* L1 state */
49826 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49827 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
49828
49829diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
49830index 23212f8..65e945b 100644
49831--- a/drivers/pci/probe.c
49832+++ b/drivers/pci/probe.c
49833@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
49834 u16 orig_cmd;
49835 struct pci_bus_region region, inverted_region;
49836
49837- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
49838+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
49839
49840 /* No printks while decoding is disabled! */
49841 if (!dev->mmio_always_on) {
49842diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
49843index 3f155e7..0f4b1f0 100644
49844--- a/drivers/pci/proc.c
49845+++ b/drivers/pci/proc.c
49846@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
49847 static int __init pci_proc_init(void)
49848 {
49849 struct pci_dev *dev = NULL;
49850+
49851+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49852+#ifdef CONFIG_GRKERNSEC_PROC_USER
49853+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
49854+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49855+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49856+#endif
49857+#else
49858 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
49859+#endif
49860 proc_create("devices", 0, proc_bus_pci_dir,
49861 &proc_bus_pci_dev_operations);
49862 proc_initialized = 1;
49863diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
49864index b84fdd6..b89d829 100644
49865--- a/drivers/platform/chrome/chromeos_laptop.c
49866+++ b/drivers/platform/chrome/chromeos_laptop.c
49867@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
49868 .callback = chromeos_laptop_dmi_matched, \
49869 .driver_data = (void *)&board_
49870
49871-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
49872+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
49873 {
49874 .ident = "Samsung Series 5 550",
49875 .matches = {
49876diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
49877index 1e1e594..8fe59c5 100644
49878--- a/drivers/platform/x86/alienware-wmi.c
49879+++ b/drivers/platform/x86/alienware-wmi.c
49880@@ -150,7 +150,7 @@ struct wmax_led_args {
49881 } __packed;
49882
49883 static struct platform_device *platform_device;
49884-static struct device_attribute *zone_dev_attrs;
49885+static device_attribute_no_const *zone_dev_attrs;
49886 static struct attribute **zone_attrs;
49887 static struct platform_zone *zone_data;
49888
49889@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
49890 }
49891 };
49892
49893-static struct attribute_group zone_attribute_group = {
49894+static attribute_group_no_const zone_attribute_group = {
49895 .name = "rgb_zones",
49896 };
49897
49898diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
49899index 7543a56..367ca8ed 100644
49900--- a/drivers/platform/x86/asus-wmi.c
49901+++ b/drivers/platform/x86/asus-wmi.c
49902@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
49903 int err;
49904 u32 retval = -1;
49905
49906+#ifdef CONFIG_GRKERNSEC_KMEM
49907+ return -EPERM;
49908+#endif
49909+
49910 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
49911
49912 if (err < 0)
49913@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
49914 int err;
49915 u32 retval = -1;
49916
49917+#ifdef CONFIG_GRKERNSEC_KMEM
49918+ return -EPERM;
49919+#endif
49920+
49921 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
49922 &retval);
49923
49924@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
49925 union acpi_object *obj;
49926 acpi_status status;
49927
49928+#ifdef CONFIG_GRKERNSEC_KMEM
49929+ return -EPERM;
49930+#endif
49931+
49932 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
49933 1, asus->debug.method_id,
49934 &input, &output);
49935diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
49936index 0859877..1cf7d08 100644
49937--- a/drivers/platform/x86/msi-laptop.c
49938+++ b/drivers/platform/x86/msi-laptop.c
49939@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
49940
49941 if (!quirks->ec_read_only) {
49942 /* allow userland write sysfs file */
49943- dev_attr_bluetooth.store = store_bluetooth;
49944- dev_attr_wlan.store = store_wlan;
49945- dev_attr_threeg.store = store_threeg;
49946- dev_attr_bluetooth.attr.mode |= S_IWUSR;
49947- dev_attr_wlan.attr.mode |= S_IWUSR;
49948- dev_attr_threeg.attr.mode |= S_IWUSR;
49949+ pax_open_kernel();
49950+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
49951+ *(void **)&dev_attr_wlan.store = store_wlan;
49952+ *(void **)&dev_attr_threeg.store = store_threeg;
49953+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
49954+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
49955+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
49956+ pax_close_kernel();
49957 }
49958
49959 /* disable hardware control by fn key */
49960diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
49961index 6d2bac0..ec2b029 100644
49962--- a/drivers/platform/x86/msi-wmi.c
49963+++ b/drivers/platform/x86/msi-wmi.c
49964@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
49965 static void msi_wmi_notify(u32 value, void *context)
49966 {
49967 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
49968- static struct key_entry *key;
49969+ struct key_entry *key;
49970 union acpi_object *obj;
49971 acpi_status status;
49972
49973diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
49974index 6dd1c0e..5d602c7 100644
49975--- a/drivers/platform/x86/sony-laptop.c
49976+++ b/drivers/platform/x86/sony-laptop.c
49977@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
49978 }
49979
49980 /* High speed charging function */
49981-static struct device_attribute *hsc_handle;
49982+static device_attribute_no_const *hsc_handle;
49983
49984 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
49985 struct device_attribute *attr,
49986@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
49987 }
49988
49989 /* low battery function */
49990-static struct device_attribute *lowbatt_handle;
49991+static device_attribute_no_const *lowbatt_handle;
49992
49993 static ssize_t sony_nc_lowbatt_store(struct device *dev,
49994 struct device_attribute *attr,
49995@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
49996 }
49997
49998 /* fan speed function */
49999-static struct device_attribute *fan_handle, *hsf_handle;
50000+static device_attribute_no_const *fan_handle, *hsf_handle;
50001
50002 static ssize_t sony_nc_hsfan_store(struct device *dev,
50003 struct device_attribute *attr,
50004@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50005 }
50006
50007 /* USB charge function */
50008-static struct device_attribute *uc_handle;
50009+static device_attribute_no_const *uc_handle;
50010
50011 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50012 struct device_attribute *attr,
50013@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50014 }
50015
50016 /* Panel ID function */
50017-static struct device_attribute *panel_handle;
50018+static device_attribute_no_const *panel_handle;
50019
50020 static ssize_t sony_nc_panelid_show(struct device *dev,
50021 struct device_attribute *attr, char *buffer)
50022@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50023 }
50024
50025 /* smart connect function */
50026-static struct device_attribute *sc_handle;
50027+static device_attribute_no_const *sc_handle;
50028
50029 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50030 struct device_attribute *attr,
50031diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50032index c3d11fa..f83cded 100644
50033--- a/drivers/platform/x86/thinkpad_acpi.c
50034+++ b/drivers/platform/x86/thinkpad_acpi.c
50035@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
50036 return 0;
50037 }
50038
50039-void static hotkey_mask_warn_incomplete_mask(void)
50040+static void hotkey_mask_warn_incomplete_mask(void)
50041 {
50042 /* log only what the user can fix... */
50043 const u32 wantedmask = hotkey_driver_mask &
50044@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50045 && !tp_features.bright_unkfw)
50046 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50047 }
50048+}
50049
50050 #undef TPACPI_COMPARE_KEY
50051 #undef TPACPI_MAY_SEND_KEY
50052-}
50053
50054 /*
50055 * Polling driver
50056diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50057index 438d4c7..ca8a2fb 100644
50058--- a/drivers/pnp/pnpbios/bioscalls.c
50059+++ b/drivers/pnp/pnpbios/bioscalls.c
50060@@ -59,7 +59,7 @@ do { \
50061 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50062 } while(0)
50063
50064-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50065+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50066 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50067
50068 /*
50069@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50070
50071 cpu = get_cpu();
50072 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50073+
50074+ pax_open_kernel();
50075 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50076+ pax_close_kernel();
50077
50078 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50079 spin_lock_irqsave(&pnp_bios_lock, flags);
50080@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50081 :"memory");
50082 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50083
50084+ pax_open_kernel();
50085 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50086+ pax_close_kernel();
50087+
50088 put_cpu();
50089
50090 /* If we get here and this is set then the PnP BIOS faulted on us. */
50091@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50092 return status;
50093 }
50094
50095-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50096+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50097 {
50098 int i;
50099
50100@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50101 pnp_bios_callpoint.offset = header->fields.pm16offset;
50102 pnp_bios_callpoint.segment = PNP_CS16;
50103
50104+ pax_open_kernel();
50105+
50106 for_each_possible_cpu(i) {
50107 struct desc_struct *gdt = get_cpu_gdt_table(i);
50108 if (!gdt)
50109@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50110 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50111 (unsigned long)__va(header->fields.pm16dseg));
50112 }
50113+
50114+ pax_close_kernel();
50115 }
50116diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50117index 0c52e2a..3421ab7 100644
50118--- a/drivers/power/pda_power.c
50119+++ b/drivers/power/pda_power.c
50120@@ -37,7 +37,11 @@ static int polling;
50121
50122 #if IS_ENABLED(CONFIG_USB_PHY)
50123 static struct usb_phy *transceiver;
50124-static struct notifier_block otg_nb;
50125+static int otg_handle_notification(struct notifier_block *nb,
50126+ unsigned long event, void *unused);
50127+static struct notifier_block otg_nb = {
50128+ .notifier_call = otg_handle_notification
50129+};
50130 #endif
50131
50132 static struct regulator *ac_draw;
50133@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50134
50135 #if IS_ENABLED(CONFIG_USB_PHY)
50136 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50137- otg_nb.notifier_call = otg_handle_notification;
50138 ret = usb_register_notifier(transceiver, &otg_nb);
50139 if (ret) {
50140 dev_err(dev, "failure to register otg notifier\n");
50141diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50142index cc439fd..8fa30df 100644
50143--- a/drivers/power/power_supply.h
50144+++ b/drivers/power/power_supply.h
50145@@ -16,12 +16,12 @@ struct power_supply;
50146
50147 #ifdef CONFIG_SYSFS
50148
50149-extern void power_supply_init_attrs(struct device_type *dev_type);
50150+extern void power_supply_init_attrs(void);
50151 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50152
50153 #else
50154
50155-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50156+static inline void power_supply_init_attrs(void) {}
50157 #define power_supply_uevent NULL
50158
50159 #endif /* CONFIG_SYSFS */
50160diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50161index 694e8cd..9f03483 100644
50162--- a/drivers/power/power_supply_core.c
50163+++ b/drivers/power/power_supply_core.c
50164@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50165 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50166 EXPORT_SYMBOL_GPL(power_supply_notifier);
50167
50168-static struct device_type power_supply_dev_type;
50169+extern const struct attribute_group *power_supply_attr_groups[];
50170+static struct device_type power_supply_dev_type = {
50171+ .groups = power_supply_attr_groups,
50172+};
50173
50174 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50175 struct power_supply *supply)
50176@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50177 return PTR_ERR(power_supply_class);
50178
50179 power_supply_class->dev_uevent = power_supply_uevent;
50180- power_supply_init_attrs(&power_supply_dev_type);
50181+ power_supply_init_attrs();
50182
50183 return 0;
50184 }
50185diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50186index 62653f5..d0bb485 100644
50187--- a/drivers/power/power_supply_sysfs.c
50188+++ b/drivers/power/power_supply_sysfs.c
50189@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50190 .is_visible = power_supply_attr_is_visible,
50191 };
50192
50193-static const struct attribute_group *power_supply_attr_groups[] = {
50194+const struct attribute_group *power_supply_attr_groups[] = {
50195 &power_supply_attr_group,
50196 NULL,
50197 };
50198
50199-void power_supply_init_attrs(struct device_type *dev_type)
50200+void power_supply_init_attrs(void)
50201 {
50202 int i;
50203
50204- dev_type->groups = power_supply_attr_groups;
50205-
50206 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50207 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50208 }
50209diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50210index 84419af..268ede8 100644
50211--- a/drivers/powercap/powercap_sys.c
50212+++ b/drivers/powercap/powercap_sys.c
50213@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50214 struct device_attribute name_attr;
50215 };
50216
50217+static ssize_t show_constraint_name(struct device *dev,
50218+ struct device_attribute *dev_attr,
50219+ char *buf);
50220+
50221 static struct powercap_constraint_attr
50222- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50223+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50224+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50225+ .power_limit_attr = {
50226+ .attr = {
50227+ .name = NULL,
50228+ .mode = S_IWUSR | S_IRUGO
50229+ },
50230+ .show = show_constraint_power_limit_uw,
50231+ .store = store_constraint_power_limit_uw
50232+ },
50233+
50234+ .time_window_attr = {
50235+ .attr = {
50236+ .name = NULL,
50237+ .mode = S_IWUSR | S_IRUGO
50238+ },
50239+ .show = show_constraint_time_window_us,
50240+ .store = store_constraint_time_window_us
50241+ },
50242+
50243+ .max_power_attr = {
50244+ .attr = {
50245+ .name = NULL,
50246+ .mode = S_IRUGO
50247+ },
50248+ .show = show_constraint_max_power_uw,
50249+ .store = NULL
50250+ },
50251+
50252+ .min_power_attr = {
50253+ .attr = {
50254+ .name = NULL,
50255+ .mode = S_IRUGO
50256+ },
50257+ .show = show_constraint_min_power_uw,
50258+ .store = NULL
50259+ },
50260+
50261+ .max_time_window_attr = {
50262+ .attr = {
50263+ .name = NULL,
50264+ .mode = S_IRUGO
50265+ },
50266+ .show = show_constraint_max_time_window_us,
50267+ .store = NULL
50268+ },
50269+
50270+ .min_time_window_attr = {
50271+ .attr = {
50272+ .name = NULL,
50273+ .mode = S_IRUGO
50274+ },
50275+ .show = show_constraint_min_time_window_us,
50276+ .store = NULL
50277+ },
50278+
50279+ .name_attr = {
50280+ .attr = {
50281+ .name = NULL,
50282+ .mode = S_IRUGO
50283+ },
50284+ .show = show_constraint_name,
50285+ .store = NULL
50286+ }
50287+ }
50288+};
50289
50290 /* A list of powercap control_types */
50291 static LIST_HEAD(powercap_cntrl_list);
50292@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50293 }
50294
50295 static int create_constraint_attribute(int id, const char *name,
50296- int mode,
50297- struct device_attribute *dev_attr,
50298- ssize_t (*show)(struct device *,
50299- struct device_attribute *, char *),
50300- ssize_t (*store)(struct device *,
50301- struct device_attribute *,
50302- const char *, size_t)
50303- )
50304+ struct device_attribute *dev_attr)
50305 {
50306+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50307
50308- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50309- id, name);
50310- if (!dev_attr->attr.name)
50311+ if (!name)
50312 return -ENOMEM;
50313- dev_attr->attr.mode = mode;
50314- dev_attr->show = show;
50315- dev_attr->store = store;
50316+
50317+ pax_open_kernel();
50318+ *(const char **)&dev_attr->attr.name = name;
50319+ pax_close_kernel();
50320
50321 return 0;
50322 }
50323@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50324
50325 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50326 ret = create_constraint_attribute(i, "power_limit_uw",
50327- S_IWUSR | S_IRUGO,
50328- &constraint_attrs[i].power_limit_attr,
50329- show_constraint_power_limit_uw,
50330- store_constraint_power_limit_uw);
50331+ &constraint_attrs[i].power_limit_attr);
50332 if (ret)
50333 goto err_alloc;
50334 ret = create_constraint_attribute(i, "time_window_us",
50335- S_IWUSR | S_IRUGO,
50336- &constraint_attrs[i].time_window_attr,
50337- show_constraint_time_window_us,
50338- store_constraint_time_window_us);
50339+ &constraint_attrs[i].time_window_attr);
50340 if (ret)
50341 goto err_alloc;
50342- ret = create_constraint_attribute(i, "name", S_IRUGO,
50343- &constraint_attrs[i].name_attr,
50344- show_constraint_name,
50345- NULL);
50346+ ret = create_constraint_attribute(i, "name",
50347+ &constraint_attrs[i].name_attr);
50348 if (ret)
50349 goto err_alloc;
50350- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50351- &constraint_attrs[i].max_power_attr,
50352- show_constraint_max_power_uw,
50353- NULL);
50354+ ret = create_constraint_attribute(i, "max_power_uw",
50355+ &constraint_attrs[i].max_power_attr);
50356 if (ret)
50357 goto err_alloc;
50358- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50359- &constraint_attrs[i].min_power_attr,
50360- show_constraint_min_power_uw,
50361- NULL);
50362+ ret = create_constraint_attribute(i, "min_power_uw",
50363+ &constraint_attrs[i].min_power_attr);
50364 if (ret)
50365 goto err_alloc;
50366 ret = create_constraint_attribute(i, "max_time_window_us",
50367- S_IRUGO,
50368- &constraint_attrs[i].max_time_window_attr,
50369- show_constraint_max_time_window_us,
50370- NULL);
50371+ &constraint_attrs[i].max_time_window_attr);
50372 if (ret)
50373 goto err_alloc;
50374 ret = create_constraint_attribute(i, "min_time_window_us",
50375- S_IRUGO,
50376- &constraint_attrs[i].min_time_window_attr,
50377- show_constraint_min_time_window_us,
50378- NULL);
50379+ &constraint_attrs[i].min_time_window_attr);
50380 if (ret)
50381 goto err_alloc;
50382
50383@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50384 power_zone->zone_dev_attrs[count++] =
50385 &dev_attr_max_energy_range_uj.attr;
50386 if (power_zone->ops->get_energy_uj) {
50387+ pax_open_kernel();
50388 if (power_zone->ops->reset_energy_uj)
50389- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50390+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50391 else
50392- dev_attr_energy_uj.attr.mode = S_IRUGO;
50393+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50394+ pax_close_kernel();
50395 power_zone->zone_dev_attrs[count++] =
50396 &dev_attr_energy_uj.attr;
50397 }
50398diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50399index 9c5d414..c7900ce 100644
50400--- a/drivers/ptp/ptp_private.h
50401+++ b/drivers/ptp/ptp_private.h
50402@@ -51,7 +51,7 @@ struct ptp_clock {
50403 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50404 wait_queue_head_t tsev_wq;
50405 int defunct; /* tells readers to go away when clock is being removed */
50406- struct device_attribute *pin_dev_attr;
50407+ device_attribute_no_const *pin_dev_attr;
50408 struct attribute **pin_attr;
50409 struct attribute_group pin_attr_group;
50410 };
50411diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50412index 302e626..12579af 100644
50413--- a/drivers/ptp/ptp_sysfs.c
50414+++ b/drivers/ptp/ptp_sysfs.c
50415@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50416 goto no_pin_attr;
50417
50418 for (i = 0; i < n_pins; i++) {
50419- struct device_attribute *da = &ptp->pin_dev_attr[i];
50420+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50421 sysfs_attr_init(&da->attr);
50422 da->attr.name = info->pin_config[i].name;
50423 da->attr.mode = 0644;
50424diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50425index 9c48fb3..5b494fa 100644
50426--- a/drivers/regulator/core.c
50427+++ b/drivers/regulator/core.c
50428@@ -3587,7 +3587,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50429 {
50430 const struct regulation_constraints *constraints = NULL;
50431 const struct regulator_init_data *init_data;
50432- static atomic_t regulator_no = ATOMIC_INIT(0);
50433+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50434 struct regulator_dev *rdev;
50435 struct device *dev;
50436 int ret, i;
50437@@ -3661,7 +3661,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50438 rdev->dev.class = &regulator_class;
50439 rdev->dev.parent = dev;
50440 dev_set_name(&rdev->dev, "regulator.%d",
50441- atomic_inc_return(&regulator_no) - 1);
50442+ atomic_inc_return_unchecked(&regulator_no) - 1);
50443 ret = device_register(&rdev->dev);
50444 if (ret != 0) {
50445 put_device(&rdev->dev);
50446diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50447index 7eee2ca..4024513 100644
50448--- a/drivers/regulator/max8660.c
50449+++ b/drivers/regulator/max8660.c
50450@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50451 max8660->shadow_regs[MAX8660_OVER1] = 5;
50452 } else {
50453 /* Otherwise devices can be toggled via software */
50454- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50455- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50456+ pax_open_kernel();
50457+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50458+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50459+ pax_close_kernel();
50460 }
50461
50462 /*
50463diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50464index c3d55c2..0dddfe6 100644
50465--- a/drivers/regulator/max8973-regulator.c
50466+++ b/drivers/regulator/max8973-regulator.c
50467@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50468 if (!pdata || !pdata->enable_ext_control) {
50469 max->desc.enable_reg = MAX8973_VOUT;
50470 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50471- max->ops.enable = regulator_enable_regmap;
50472- max->ops.disable = regulator_disable_regmap;
50473- max->ops.is_enabled = regulator_is_enabled_regmap;
50474+ pax_open_kernel();
50475+ *(void **)&max->ops.enable = regulator_enable_regmap;
50476+ *(void **)&max->ops.disable = regulator_disable_regmap;
50477+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50478+ pax_close_kernel();
50479 }
50480
50481 if (pdata) {
50482diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50483index 0d17c92..a29f627 100644
50484--- a/drivers/regulator/mc13892-regulator.c
50485+++ b/drivers/regulator/mc13892-regulator.c
50486@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50487 mc13xxx_unlock(mc13892);
50488
50489 /* update mc13892_vcam ops */
50490- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50491+ pax_open_kernel();
50492+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50493 sizeof(struct regulator_ops));
50494- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50495- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50496+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50497+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50498+ pax_close_kernel();
50499 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50500
50501 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50502diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50503index 5b2e761..c8c8a4a 100644
50504--- a/drivers/rtc/rtc-cmos.c
50505+++ b/drivers/rtc/rtc-cmos.c
50506@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50507 hpet_rtc_timer_init();
50508
50509 /* export at least the first block of NVRAM */
50510- nvram.size = address_space - NVRAM_OFFSET;
50511+ pax_open_kernel();
50512+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50513+ pax_close_kernel();
50514 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50515 if (retval < 0) {
50516 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50517diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50518index d049393..bb20be0 100644
50519--- a/drivers/rtc/rtc-dev.c
50520+++ b/drivers/rtc/rtc-dev.c
50521@@ -16,6 +16,7 @@
50522 #include <linux/module.h>
50523 #include <linux/rtc.h>
50524 #include <linux/sched.h>
50525+#include <linux/grsecurity.h>
50526 #include "rtc-core.h"
50527
50528 static dev_t rtc_devt;
50529@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50530 if (copy_from_user(&tm, uarg, sizeof(tm)))
50531 return -EFAULT;
50532
50533+ gr_log_timechange();
50534+
50535 return rtc_set_time(rtc, &tm);
50536
50537 case RTC_PIE_ON:
50538diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50539index 4ffabb3..1f87fca 100644
50540--- a/drivers/rtc/rtc-ds1307.c
50541+++ b/drivers/rtc/rtc-ds1307.c
50542@@ -107,7 +107,7 @@ struct ds1307 {
50543 u8 offset; /* register's offset */
50544 u8 regs[11];
50545 u16 nvram_offset;
50546- struct bin_attribute *nvram;
50547+ bin_attribute_no_const *nvram;
50548 enum ds_type type;
50549 unsigned long flags;
50550 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50551diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50552index 90abb5b..e0bf6dd 100644
50553--- a/drivers/rtc/rtc-m48t59.c
50554+++ b/drivers/rtc/rtc-m48t59.c
50555@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50556 if (IS_ERR(m48t59->rtc))
50557 return PTR_ERR(m48t59->rtc);
50558
50559- m48t59_nvram_attr.size = pdata->offset;
50560+ pax_open_kernel();
50561+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50562+ pax_close_kernel();
50563
50564 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50565 if (ret)
50566diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50567index e693af6..2e525b6 100644
50568--- a/drivers/scsi/bfa/bfa_fcpim.h
50569+++ b/drivers/scsi/bfa/bfa_fcpim.h
50570@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50571
50572 struct bfa_itn_s {
50573 bfa_isr_func_t isr;
50574-};
50575+} __no_const;
50576
50577 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50578 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50579diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50580index 0f19455..ef7adb5 100644
50581--- a/drivers/scsi/bfa/bfa_fcs.c
50582+++ b/drivers/scsi/bfa/bfa_fcs.c
50583@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50584 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50585
50586 static struct bfa_fcs_mod_s fcs_modules[] = {
50587- { bfa_fcs_port_attach, NULL, NULL },
50588- { bfa_fcs_uf_attach, NULL, NULL },
50589- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50590- bfa_fcs_fabric_modexit },
50591+ {
50592+ .attach = bfa_fcs_port_attach,
50593+ .modinit = NULL,
50594+ .modexit = NULL
50595+ },
50596+ {
50597+ .attach = bfa_fcs_uf_attach,
50598+ .modinit = NULL,
50599+ .modexit = NULL
50600+ },
50601+ {
50602+ .attach = bfa_fcs_fabric_attach,
50603+ .modinit = bfa_fcs_fabric_modinit,
50604+ .modexit = bfa_fcs_fabric_modexit
50605+ },
50606 };
50607
50608 /*
50609diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50610index ff75ef8..2dfe00a 100644
50611--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50612+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50613@@ -89,15 +89,26 @@ static struct {
50614 void (*offline) (struct bfa_fcs_lport_s *port);
50615 } __port_action[] = {
50616 {
50617- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50618- bfa_fcs_lport_unknown_offline}, {
50619- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50620- bfa_fcs_lport_fab_offline}, {
50621- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50622- bfa_fcs_lport_n2n_offline}, {
50623- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50624- bfa_fcs_lport_loop_offline},
50625- };
50626+ .init = bfa_fcs_lport_unknown_init,
50627+ .online = bfa_fcs_lport_unknown_online,
50628+ .offline = bfa_fcs_lport_unknown_offline
50629+ },
50630+ {
50631+ .init = bfa_fcs_lport_fab_init,
50632+ .online = bfa_fcs_lport_fab_online,
50633+ .offline = bfa_fcs_lport_fab_offline
50634+ },
50635+ {
50636+ .init = bfa_fcs_lport_n2n_init,
50637+ .online = bfa_fcs_lport_n2n_online,
50638+ .offline = bfa_fcs_lport_n2n_offline
50639+ },
50640+ {
50641+ .init = bfa_fcs_lport_loop_init,
50642+ .online = bfa_fcs_lport_loop_online,
50643+ .offline = bfa_fcs_lport_loop_offline
50644+ },
50645+};
50646
50647 /*
50648 * fcs_port_sm FCS logical port state machine
50649diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50650index a38aafa0..fe8f03b 100644
50651--- a/drivers/scsi/bfa/bfa_ioc.h
50652+++ b/drivers/scsi/bfa/bfa_ioc.h
50653@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50654 bfa_ioc_disable_cbfn_t disable_cbfn;
50655 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50656 bfa_ioc_reset_cbfn_t reset_cbfn;
50657-};
50658+} __no_const;
50659
50660 /*
50661 * IOC event notification mechanism.
50662@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50663 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50664 enum bfi_ioc_state fwstate);
50665 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50666-};
50667+} __no_const;
50668
50669 /*
50670 * Queue element to wait for room in request queue. FIFO order is
50671diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50672index a14c784..6de6790 100644
50673--- a/drivers/scsi/bfa/bfa_modules.h
50674+++ b/drivers/scsi/bfa/bfa_modules.h
50675@@ -78,12 +78,12 @@ enum {
50676 \
50677 extern struct bfa_module_s hal_mod_ ## __mod; \
50678 struct bfa_module_s hal_mod_ ## __mod = { \
50679- bfa_ ## __mod ## _meminfo, \
50680- bfa_ ## __mod ## _attach, \
50681- bfa_ ## __mod ## _detach, \
50682- bfa_ ## __mod ## _start, \
50683- bfa_ ## __mod ## _stop, \
50684- bfa_ ## __mod ## _iocdisable, \
50685+ .meminfo = bfa_ ## __mod ## _meminfo, \
50686+ .attach = bfa_ ## __mod ## _attach, \
50687+ .detach = bfa_ ## __mod ## _detach, \
50688+ .start = bfa_ ## __mod ## _start, \
50689+ .stop = bfa_ ## __mod ## _stop, \
50690+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50691 }
50692
50693 #define BFA_CACHELINE_SZ (256)
50694diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50695index 045c4e1..13de803 100644
50696--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50697+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50698@@ -33,8 +33,8 @@
50699 */
50700 #include "libfcoe.h"
50701
50702-static atomic_t ctlr_num;
50703-static atomic_t fcf_num;
50704+static atomic_unchecked_t ctlr_num;
50705+static atomic_unchecked_t fcf_num;
50706
50707 /*
50708 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
50709@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
50710 if (!ctlr)
50711 goto out;
50712
50713- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
50714+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
50715 ctlr->f = f;
50716 ctlr->mode = FIP_CONN_TYPE_FABRIC;
50717 INIT_LIST_HEAD(&ctlr->fcfs);
50718@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
50719 fcf->dev.parent = &ctlr->dev;
50720 fcf->dev.bus = &fcoe_bus_type;
50721 fcf->dev.type = &fcoe_fcf_device_type;
50722- fcf->id = atomic_inc_return(&fcf_num) - 1;
50723+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
50724 fcf->state = FCOE_FCF_STATE_UNKNOWN;
50725
50726 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
50727@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
50728 {
50729 int error;
50730
50731- atomic_set(&ctlr_num, 0);
50732- atomic_set(&fcf_num, 0);
50733+ atomic_set_unchecked(&ctlr_num, 0);
50734+ atomic_set_unchecked(&fcf_num, 0);
50735
50736 error = bus_register(&fcoe_bus_type);
50737 if (error)
50738diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
50739index 8bb173e..20236b4 100644
50740--- a/drivers/scsi/hosts.c
50741+++ b/drivers/scsi/hosts.c
50742@@ -42,7 +42,7 @@
50743 #include "scsi_logging.h"
50744
50745
50746-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50747+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50748
50749
50750 static void scsi_host_cls_release(struct device *dev)
50751@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
50752 * subtract one because we increment first then return, but we need to
50753 * know what the next host number was before increment
50754 */
50755- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
50756+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
50757 shost->dma_channel = 0xff;
50758
50759 /* These three are default values which can be overridden */
50760diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
50761index 6bb4611..0203251 100644
50762--- a/drivers/scsi/hpsa.c
50763+++ b/drivers/scsi/hpsa.c
50764@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
50765 struct reply_queue_buffer *rq = &h->reply_queue[q];
50766
50767 if (h->transMethod & CFGTBL_Trans_io_accel1)
50768- return h->access.command_completed(h, q);
50769+ return h->access->command_completed(h, q);
50770
50771 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
50772- return h->access.command_completed(h, q);
50773+ return h->access->command_completed(h, q);
50774
50775 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
50776 a = rq->head[rq->current_entry];
50777@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50778 while (!list_empty(&h->reqQ)) {
50779 c = list_entry(h->reqQ.next, struct CommandList, list);
50780 /* can't do anything if fifo is full */
50781- if ((h->access.fifo_full(h))) {
50782+ if ((h->access->fifo_full(h))) {
50783 h->fifo_recently_full = 1;
50784 dev_warn(&h->pdev->dev, "fifo full\n");
50785 break;
50786@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50787 atomic_inc(&h->commands_outstanding);
50788 spin_unlock_irqrestore(&h->lock, *flags);
50789 /* Tell the controller execute command */
50790- h->access.submit_command(h, c);
50791+ h->access->submit_command(h, c);
50792 spin_lock_irqsave(&h->lock, *flags);
50793 }
50794 }
50795@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
50796
50797 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
50798 {
50799- return h->access.command_completed(h, q);
50800+ return h->access->command_completed(h, q);
50801 }
50802
50803 static inline bool interrupt_pending(struct ctlr_info *h)
50804 {
50805- return h->access.intr_pending(h);
50806+ return h->access->intr_pending(h);
50807 }
50808
50809 static inline long interrupt_not_for_us(struct ctlr_info *h)
50810 {
50811- return (h->access.intr_pending(h) == 0) ||
50812+ return (h->access->intr_pending(h) == 0) ||
50813 (h->interrupts_enabled == 0);
50814 }
50815
50816@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
50817 if (prod_index < 0)
50818 return -ENODEV;
50819 h->product_name = products[prod_index].product_name;
50820- h->access = *(products[prod_index].access);
50821+ h->access = products[prod_index].access;
50822
50823 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
50824 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
50825@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
50826 unsigned long flags;
50827 u32 lockup_detected;
50828
50829- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50830+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50831 spin_lock_irqsave(&h->lock, flags);
50832 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
50833 if (!lockup_detected) {
50834@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
50835 }
50836
50837 /* make sure the board interrupts are off */
50838- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50839+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50840
50841 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
50842 goto clean2;
50843@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
50844 * fake ones to scoop up any residual completions.
50845 */
50846 spin_lock_irqsave(&h->lock, flags);
50847- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50848+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50849 spin_unlock_irqrestore(&h->lock, flags);
50850 free_irqs(h);
50851 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
50852@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
50853 dev_info(&h->pdev->dev, "Board READY.\n");
50854 dev_info(&h->pdev->dev,
50855 "Waiting for stale completions to drain.\n");
50856- h->access.set_intr_mask(h, HPSA_INTR_ON);
50857+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50858 msleep(10000);
50859- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50860+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50861
50862 rc = controller_reset_failed(h->cfgtable);
50863 if (rc)
50864@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
50865 h->drv_req_rescan = 0;
50866
50867 /* Turn the interrupts on so we can service requests */
50868- h->access.set_intr_mask(h, HPSA_INTR_ON);
50869+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50870
50871 hpsa_hba_inquiry(h);
50872 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
50873@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
50874 * To write all data in the battery backed cache to disks
50875 */
50876 hpsa_flush_cache(h);
50877- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50878+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50879 hpsa_free_irqs_and_disable_msix(h);
50880 }
50881
50882@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50883 CFGTBL_Trans_enable_directed_msix |
50884 (trans_support & (CFGTBL_Trans_io_accel1 |
50885 CFGTBL_Trans_io_accel2));
50886- struct access_method access = SA5_performant_access;
50887+ struct access_method *access = &SA5_performant_access;
50888
50889 /* This is a bit complicated. There are 8 registers on
50890 * the controller which we write to to tell it 8 different
50891@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50892 * perform the superfluous readl() after each command submission.
50893 */
50894 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
50895- access = SA5_performant_access_no_read;
50896+ access = &SA5_performant_access_no_read;
50897
50898 /* Controller spec: zero out this buffer. */
50899 for (i = 0; i < h->nreply_queues; i++)
50900@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50901 * enable outbound interrupt coalescing in accelerator mode;
50902 */
50903 if (trans_support & CFGTBL_Trans_io_accel1) {
50904- access = SA5_ioaccel_mode1_access;
50905+ access = &SA5_ioaccel_mode1_access;
50906 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50907 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50908 } else {
50909 if (trans_support & CFGTBL_Trans_io_accel2) {
50910- access = SA5_ioaccel_mode2_access;
50911+ access = &SA5_ioaccel_mode2_access;
50912 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50913 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50914 }
50915diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
50916index 8e06d9e..396e0a1 100644
50917--- a/drivers/scsi/hpsa.h
50918+++ b/drivers/scsi/hpsa.h
50919@@ -127,7 +127,7 @@ struct ctlr_info {
50920 unsigned int msix_vector;
50921 unsigned int msi_vector;
50922 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
50923- struct access_method access;
50924+ struct access_method *access;
50925 char hba_mode_enabled;
50926
50927 /* queue and queue Info */
50928@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
50929 }
50930
50931 static struct access_method SA5_access = {
50932- SA5_submit_command,
50933- SA5_intr_mask,
50934- SA5_fifo_full,
50935- SA5_intr_pending,
50936- SA5_completed,
50937+ .submit_command = SA5_submit_command,
50938+ .set_intr_mask = SA5_intr_mask,
50939+ .fifo_full = SA5_fifo_full,
50940+ .intr_pending = SA5_intr_pending,
50941+ .command_completed = SA5_completed,
50942 };
50943
50944 static struct access_method SA5_ioaccel_mode1_access = {
50945- SA5_submit_command,
50946- SA5_performant_intr_mask,
50947- SA5_fifo_full,
50948- SA5_ioaccel_mode1_intr_pending,
50949- SA5_ioaccel_mode1_completed,
50950+ .submit_command = SA5_submit_command,
50951+ .set_intr_mask = SA5_performant_intr_mask,
50952+ .fifo_full = SA5_fifo_full,
50953+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
50954+ .command_completed = SA5_ioaccel_mode1_completed,
50955 };
50956
50957 static struct access_method SA5_ioaccel_mode2_access = {
50958- SA5_submit_command_ioaccel2,
50959- SA5_performant_intr_mask,
50960- SA5_fifo_full,
50961- SA5_performant_intr_pending,
50962- SA5_performant_completed,
50963+ .submit_command = SA5_submit_command_ioaccel2,
50964+ .set_intr_mask = SA5_performant_intr_mask,
50965+ .fifo_full = SA5_fifo_full,
50966+ .intr_pending = SA5_performant_intr_pending,
50967+ .command_completed = SA5_performant_completed,
50968 };
50969
50970 static struct access_method SA5_performant_access = {
50971- SA5_submit_command,
50972- SA5_performant_intr_mask,
50973- SA5_fifo_full,
50974- SA5_performant_intr_pending,
50975- SA5_performant_completed,
50976+ .submit_command = SA5_submit_command,
50977+ .set_intr_mask = SA5_performant_intr_mask,
50978+ .fifo_full = SA5_fifo_full,
50979+ .intr_pending = SA5_performant_intr_pending,
50980+ .command_completed = SA5_performant_completed,
50981 };
50982
50983 static struct access_method SA5_performant_access_no_read = {
50984- SA5_submit_command_no_read,
50985- SA5_performant_intr_mask,
50986- SA5_fifo_full,
50987- SA5_performant_intr_pending,
50988- SA5_performant_completed,
50989+ .submit_command = SA5_submit_command_no_read,
50990+ .set_intr_mask = SA5_performant_intr_mask,
50991+ .fifo_full = SA5_fifo_full,
50992+ .intr_pending = SA5_performant_intr_pending,
50993+ .command_completed = SA5_performant_completed,
50994 };
50995
50996 struct board_type {
50997diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
50998index 1b3a094..068e683 100644
50999--- a/drivers/scsi/libfc/fc_exch.c
51000+++ b/drivers/scsi/libfc/fc_exch.c
51001@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51002 u16 pool_max_index;
51003
51004 struct {
51005- atomic_t no_free_exch;
51006- atomic_t no_free_exch_xid;
51007- atomic_t xid_not_found;
51008- atomic_t xid_busy;
51009- atomic_t seq_not_found;
51010- atomic_t non_bls_resp;
51011+ atomic_unchecked_t no_free_exch;
51012+ atomic_unchecked_t no_free_exch_xid;
51013+ atomic_unchecked_t xid_not_found;
51014+ atomic_unchecked_t xid_busy;
51015+ atomic_unchecked_t seq_not_found;
51016+ atomic_unchecked_t non_bls_resp;
51017 } stats;
51018 };
51019
51020@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51021 /* allocate memory for exchange */
51022 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51023 if (!ep) {
51024- atomic_inc(&mp->stats.no_free_exch);
51025+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51026 goto out;
51027 }
51028 memset(ep, 0, sizeof(*ep));
51029@@ -874,7 +874,7 @@ out:
51030 return ep;
51031 err:
51032 spin_unlock_bh(&pool->lock);
51033- atomic_inc(&mp->stats.no_free_exch_xid);
51034+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51035 mempool_free(ep, mp->ep_pool);
51036 return NULL;
51037 }
51038@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51039 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51040 ep = fc_exch_find(mp, xid);
51041 if (!ep) {
51042- atomic_inc(&mp->stats.xid_not_found);
51043+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51044 reject = FC_RJT_OX_ID;
51045 goto out;
51046 }
51047@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51048 ep = fc_exch_find(mp, xid);
51049 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51050 if (ep) {
51051- atomic_inc(&mp->stats.xid_busy);
51052+ atomic_inc_unchecked(&mp->stats.xid_busy);
51053 reject = FC_RJT_RX_ID;
51054 goto rel;
51055 }
51056@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51057 }
51058 xid = ep->xid; /* get our XID */
51059 } else if (!ep) {
51060- atomic_inc(&mp->stats.xid_not_found);
51061+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51062 reject = FC_RJT_RX_ID; /* XID not found */
51063 goto out;
51064 }
51065@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51066 } else {
51067 sp = &ep->seq;
51068 if (sp->id != fh->fh_seq_id) {
51069- atomic_inc(&mp->stats.seq_not_found);
51070+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51071 if (f_ctl & FC_FC_END_SEQ) {
51072 /*
51073 * Update sequence_id based on incoming last
51074@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51075
51076 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51077 if (!ep) {
51078- atomic_inc(&mp->stats.xid_not_found);
51079+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51080 goto out;
51081 }
51082 if (ep->esb_stat & ESB_ST_COMPLETE) {
51083- atomic_inc(&mp->stats.xid_not_found);
51084+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51085 goto rel;
51086 }
51087 if (ep->rxid == FC_XID_UNKNOWN)
51088 ep->rxid = ntohs(fh->fh_rx_id);
51089 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51090- atomic_inc(&mp->stats.xid_not_found);
51091+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51092 goto rel;
51093 }
51094 if (ep->did != ntoh24(fh->fh_s_id) &&
51095 ep->did != FC_FID_FLOGI) {
51096- atomic_inc(&mp->stats.xid_not_found);
51097+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51098 goto rel;
51099 }
51100 sof = fr_sof(fp);
51101@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51102 sp->ssb_stat |= SSB_ST_RESP;
51103 sp->id = fh->fh_seq_id;
51104 } else if (sp->id != fh->fh_seq_id) {
51105- atomic_inc(&mp->stats.seq_not_found);
51106+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51107 goto rel;
51108 }
51109
51110@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51111 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51112
51113 if (!sp)
51114- atomic_inc(&mp->stats.xid_not_found);
51115+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51116 else
51117- atomic_inc(&mp->stats.non_bls_resp);
51118+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51119
51120 fc_frame_free(fp);
51121 }
51122@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51123
51124 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51125 mp = ema->mp;
51126- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51127+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51128 st->fc_no_free_exch_xid +=
51129- atomic_read(&mp->stats.no_free_exch_xid);
51130- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51131- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51132- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51133- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51134+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51135+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51136+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51137+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51138+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51139 }
51140 }
51141 EXPORT_SYMBOL(fc_exch_update_stats);
51142diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51143index 932d9cc..50c7ee9 100644
51144--- a/drivers/scsi/libsas/sas_ata.c
51145+++ b/drivers/scsi/libsas/sas_ata.c
51146@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51147 .postreset = ata_std_postreset,
51148 .error_handler = ata_std_error_handler,
51149 .post_internal_cmd = sas_ata_post_internal,
51150- .qc_defer = ata_std_qc_defer,
51151+ .qc_defer = ata_std_qc_defer,
51152 .qc_prep = ata_noop_qc_prep,
51153 .qc_issue = sas_ata_qc_issue,
51154 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51155diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51156index 434e903..5a4a79b 100644
51157--- a/drivers/scsi/lpfc/lpfc.h
51158+++ b/drivers/scsi/lpfc/lpfc.h
51159@@ -430,7 +430,7 @@ struct lpfc_vport {
51160 struct dentry *debug_nodelist;
51161 struct dentry *vport_debugfs_root;
51162 struct lpfc_debugfs_trc *disc_trc;
51163- atomic_t disc_trc_cnt;
51164+ atomic_unchecked_t disc_trc_cnt;
51165 #endif
51166 uint8_t stat_data_enabled;
51167 uint8_t stat_data_blocked;
51168@@ -880,8 +880,8 @@ struct lpfc_hba {
51169 struct timer_list fabric_block_timer;
51170 unsigned long bit_flags;
51171 #define FABRIC_COMANDS_BLOCKED 0
51172- atomic_t num_rsrc_err;
51173- atomic_t num_cmd_success;
51174+ atomic_unchecked_t num_rsrc_err;
51175+ atomic_unchecked_t num_cmd_success;
51176 unsigned long last_rsrc_error_time;
51177 unsigned long last_ramp_down_time;
51178 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51179@@ -916,7 +916,7 @@ struct lpfc_hba {
51180
51181 struct dentry *debug_slow_ring_trc;
51182 struct lpfc_debugfs_trc *slow_ring_trc;
51183- atomic_t slow_ring_trc_cnt;
51184+ atomic_unchecked_t slow_ring_trc_cnt;
51185 /* iDiag debugfs sub-directory */
51186 struct dentry *idiag_root;
51187 struct dentry *idiag_pci_cfg;
51188diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51189index 5633e7d..8272114 100644
51190--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51191+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51192@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51193
51194 #include <linux/debugfs.h>
51195
51196-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51197+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51198 static unsigned long lpfc_debugfs_start_time = 0L;
51199
51200 /* iDiag */
51201@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51202 lpfc_debugfs_enable = 0;
51203
51204 len = 0;
51205- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51206+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51207 (lpfc_debugfs_max_disc_trc - 1);
51208 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51209 dtp = vport->disc_trc + i;
51210@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51211 lpfc_debugfs_enable = 0;
51212
51213 len = 0;
51214- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51215+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51216 (lpfc_debugfs_max_slow_ring_trc - 1);
51217 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51218 dtp = phba->slow_ring_trc + i;
51219@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51220 !vport || !vport->disc_trc)
51221 return;
51222
51223- index = atomic_inc_return(&vport->disc_trc_cnt) &
51224+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51225 (lpfc_debugfs_max_disc_trc - 1);
51226 dtp = vport->disc_trc + index;
51227 dtp->fmt = fmt;
51228 dtp->data1 = data1;
51229 dtp->data2 = data2;
51230 dtp->data3 = data3;
51231- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51232+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51233 dtp->jif = jiffies;
51234 #endif
51235 return;
51236@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51237 !phba || !phba->slow_ring_trc)
51238 return;
51239
51240- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51241+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51242 (lpfc_debugfs_max_slow_ring_trc - 1);
51243 dtp = phba->slow_ring_trc + index;
51244 dtp->fmt = fmt;
51245 dtp->data1 = data1;
51246 dtp->data2 = data2;
51247 dtp->data3 = data3;
51248- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51249+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51250 dtp->jif = jiffies;
51251 #endif
51252 return;
51253@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51254 "slow_ring buffer\n");
51255 goto debug_failed;
51256 }
51257- atomic_set(&phba->slow_ring_trc_cnt, 0);
51258+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51259 memset(phba->slow_ring_trc, 0,
51260 (sizeof(struct lpfc_debugfs_trc) *
51261 lpfc_debugfs_max_slow_ring_trc));
51262@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51263 "buffer\n");
51264 goto debug_failed;
51265 }
51266- atomic_set(&vport->disc_trc_cnt, 0);
51267+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51268
51269 snprintf(name, sizeof(name), "discovery_trace");
51270 vport->debug_disc_trc =
51271diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51272index 0b2c53a..aec2b45 100644
51273--- a/drivers/scsi/lpfc/lpfc_init.c
51274+++ b/drivers/scsi/lpfc/lpfc_init.c
51275@@ -11290,8 +11290,10 @@ lpfc_init(void)
51276 "misc_register returned with status %d", error);
51277
51278 if (lpfc_enable_npiv) {
51279- lpfc_transport_functions.vport_create = lpfc_vport_create;
51280- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51281+ pax_open_kernel();
51282+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51283+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51284+ pax_close_kernel();
51285 }
51286 lpfc_transport_template =
51287 fc_attach_transport(&lpfc_transport_functions);
51288diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51289index 4f9222e..f1850e3 100644
51290--- a/drivers/scsi/lpfc/lpfc_scsi.c
51291+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51292@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51293 unsigned long expires;
51294
51295 spin_lock_irqsave(&phba->hbalock, flags);
51296- atomic_inc(&phba->num_rsrc_err);
51297+ atomic_inc_unchecked(&phba->num_rsrc_err);
51298 phba->last_rsrc_error_time = jiffies;
51299
51300 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51301@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51302 unsigned long num_rsrc_err, num_cmd_success;
51303 int i;
51304
51305- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51306- num_cmd_success = atomic_read(&phba->num_cmd_success);
51307+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51308+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51309
51310 /*
51311 * The error and success command counters are global per
51312@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51313 }
51314 }
51315 lpfc_destroy_vport_work_array(phba, vports);
51316- atomic_set(&phba->num_rsrc_err, 0);
51317- atomic_set(&phba->num_cmd_success, 0);
51318+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51319+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51320 }
51321
51322 /**
51323diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51324index 6a1c036..38e0e8d 100644
51325--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51326+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51327@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
51328 {
51329 struct scsi_device *sdev = to_scsi_device(dev);
51330 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51331- static struct _raid_device *raid_device;
51332+ struct _raid_device *raid_device;
51333 unsigned long flags;
51334 Mpi2RaidVolPage0_t vol_pg0;
51335 Mpi2ConfigReply_t mpi_reply;
51336@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
51337 {
51338 struct scsi_device *sdev = to_scsi_device(dev);
51339 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51340- static struct _raid_device *raid_device;
51341+ struct _raid_device *raid_device;
51342 unsigned long flags;
51343 Mpi2RaidVolPage0_t vol_pg0;
51344 Mpi2ConfigReply_t mpi_reply;
51345@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51346 Mpi2EventDataIrOperationStatus_t *event_data =
51347 (Mpi2EventDataIrOperationStatus_t *)
51348 fw_event->event_data;
51349- static struct _raid_device *raid_device;
51350+ struct _raid_device *raid_device;
51351 unsigned long flags;
51352 u16 handle;
51353
51354@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51355 u64 sas_address;
51356 struct _sas_device *sas_device;
51357 struct _sas_node *expander_device;
51358- static struct _raid_device *raid_device;
51359+ struct _raid_device *raid_device;
51360 u8 retry_count;
51361 unsigned long flags;
51362
51363diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51364index 8c27b6a..607f56e 100644
51365--- a/drivers/scsi/pmcraid.c
51366+++ b/drivers/scsi/pmcraid.c
51367@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51368 res->scsi_dev = scsi_dev;
51369 scsi_dev->hostdata = res;
51370 res->change_detected = 0;
51371- atomic_set(&res->read_failures, 0);
51372- atomic_set(&res->write_failures, 0);
51373+ atomic_set_unchecked(&res->read_failures, 0);
51374+ atomic_set_unchecked(&res->write_failures, 0);
51375 rc = 0;
51376 }
51377 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51378@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51379
51380 /* If this was a SCSI read/write command keep count of errors */
51381 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51382- atomic_inc(&res->read_failures);
51383+ atomic_inc_unchecked(&res->read_failures);
51384 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51385- atomic_inc(&res->write_failures);
51386+ atomic_inc_unchecked(&res->write_failures);
51387
51388 if (!RES_IS_GSCSI(res->cfg_entry) &&
51389 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51390@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
51391 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51392 * hrrq_id assigned here in queuecommand
51393 */
51394- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51395+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51396 pinstance->num_hrrq;
51397 cmd->cmd_done = pmcraid_io_done;
51398
51399@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
51400 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51401 * hrrq_id assigned here in queuecommand
51402 */
51403- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51404+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51405 pinstance->num_hrrq;
51406
51407 if (request_size) {
51408@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51409
51410 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51411 /* add resources only after host is added into system */
51412- if (!atomic_read(&pinstance->expose_resources))
51413+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51414 return;
51415
51416 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51417@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51418 init_waitqueue_head(&pinstance->reset_wait_q);
51419
51420 atomic_set(&pinstance->outstanding_cmds, 0);
51421- atomic_set(&pinstance->last_message_id, 0);
51422- atomic_set(&pinstance->expose_resources, 0);
51423+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51424+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51425
51426 INIT_LIST_HEAD(&pinstance->free_res_q);
51427 INIT_LIST_HEAD(&pinstance->used_res_q);
51428@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51429 /* Schedule worker thread to handle CCN and take care of adding and
51430 * removing devices to OS
51431 */
51432- atomic_set(&pinstance->expose_resources, 1);
51433+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51434 schedule_work(&pinstance->worker_q);
51435 return rc;
51436
51437diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51438index e1d150f..6c6df44 100644
51439--- a/drivers/scsi/pmcraid.h
51440+++ b/drivers/scsi/pmcraid.h
51441@@ -748,7 +748,7 @@ struct pmcraid_instance {
51442 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51443
51444 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51445- atomic_t last_message_id;
51446+ atomic_unchecked_t last_message_id;
51447
51448 /* configuration table */
51449 struct pmcraid_config_table *cfg_table;
51450@@ -777,7 +777,7 @@ struct pmcraid_instance {
51451 atomic_t outstanding_cmds;
51452
51453 /* should add/delete resources to mid-layer now ?*/
51454- atomic_t expose_resources;
51455+ atomic_unchecked_t expose_resources;
51456
51457
51458
51459@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51460 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51461 };
51462 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51463- atomic_t read_failures; /* count of failed READ commands */
51464- atomic_t write_failures; /* count of failed WRITE commands */
51465+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51466+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51467
51468 /* To indicate add/delete/modify during CCN */
51469 u8 change_detected;
51470diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51471index 82b92c4..3178171 100644
51472--- a/drivers/scsi/qla2xxx/qla_attr.c
51473+++ b/drivers/scsi/qla2xxx/qla_attr.c
51474@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51475 return 0;
51476 }
51477
51478-struct fc_function_template qla2xxx_transport_functions = {
51479+fc_function_template_no_const qla2xxx_transport_functions = {
51480
51481 .show_host_node_name = 1,
51482 .show_host_port_name = 1,
51483@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51484 .bsg_timeout = qla24xx_bsg_timeout,
51485 };
51486
51487-struct fc_function_template qla2xxx_transport_vport_functions = {
51488+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51489
51490 .show_host_node_name = 1,
51491 .show_host_port_name = 1,
51492diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51493index 7686bfe..4710893 100644
51494--- a/drivers/scsi/qla2xxx/qla_gbl.h
51495+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51496@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51497 struct device_attribute;
51498 extern struct device_attribute *qla2x00_host_attrs[];
51499 struct fc_function_template;
51500-extern struct fc_function_template qla2xxx_transport_functions;
51501-extern struct fc_function_template qla2xxx_transport_vport_functions;
51502+extern fc_function_template_no_const qla2xxx_transport_functions;
51503+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51504 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51505 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51506 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
51507diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51508index cce1cbc..5b9f0fe 100644
51509--- a/drivers/scsi/qla2xxx/qla_os.c
51510+++ b/drivers/scsi/qla2xxx/qla_os.c
51511@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51512 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51513 /* Ok, a 64bit DMA mask is applicable. */
51514 ha->flags.enable_64bit_addressing = 1;
51515- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51516- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51517+ pax_open_kernel();
51518+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51519+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51520+ pax_close_kernel();
51521 return;
51522 }
51523 }
51524diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51525index 8f6d0fb..1b21097 100644
51526--- a/drivers/scsi/qla4xxx/ql4_def.h
51527+++ b/drivers/scsi/qla4xxx/ql4_def.h
51528@@ -305,7 +305,7 @@ struct ddb_entry {
51529 * (4000 only) */
51530 atomic_t relogin_timer; /* Max Time to wait for
51531 * relogin to complete */
51532- atomic_t relogin_retry_count; /* Num of times relogin has been
51533+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51534 * retried */
51535 uint32_t default_time2wait; /* Default Min time between
51536 * relogins (+aens) */
51537diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51538index 6d25879..3031a9f 100644
51539--- a/drivers/scsi/qla4xxx/ql4_os.c
51540+++ b/drivers/scsi/qla4xxx/ql4_os.c
51541@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51542 */
51543 if (!iscsi_is_session_online(cls_sess)) {
51544 /* Reset retry relogin timer */
51545- atomic_inc(&ddb_entry->relogin_retry_count);
51546+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51547 DEBUG2(ql4_printk(KERN_INFO, ha,
51548 "%s: index[%d] relogin timed out-retrying"
51549 " relogin (%d), retry (%d)\n", __func__,
51550 ddb_entry->fw_ddb_index,
51551- atomic_read(&ddb_entry->relogin_retry_count),
51552+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51553 ddb_entry->default_time2wait + 4));
51554 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51555 atomic_set(&ddb_entry->retry_relogin_timer,
51556@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51557
51558 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51559 atomic_set(&ddb_entry->relogin_timer, 0);
51560- atomic_set(&ddb_entry->relogin_retry_count, 0);
51561+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51562 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51563 ddb_entry->default_relogin_timeout =
51564 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51565diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51566index 17bb541..85f4508 100644
51567--- a/drivers/scsi/scsi_lib.c
51568+++ b/drivers/scsi/scsi_lib.c
51569@@ -1595,7 +1595,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51570 shost = sdev->host;
51571 scsi_init_cmd_errh(cmd);
51572 cmd->result = DID_NO_CONNECT << 16;
51573- atomic_inc(&cmd->device->iorequest_cnt);
51574+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51575
51576 /*
51577 * SCSI request completion path will do scsi_device_unbusy(),
51578@@ -1618,9 +1618,9 @@ static void scsi_softirq_done(struct request *rq)
51579
51580 INIT_LIST_HEAD(&cmd->eh_entry);
51581
51582- atomic_inc(&cmd->device->iodone_cnt);
51583+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51584 if (cmd->result)
51585- atomic_inc(&cmd->device->ioerr_cnt);
51586+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51587
51588 disposition = scsi_decide_disposition(cmd);
51589 if (disposition != SUCCESS &&
51590@@ -1661,7 +1661,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51591 struct Scsi_Host *host = cmd->device->host;
51592 int rtn = 0;
51593
51594- atomic_inc(&cmd->device->iorequest_cnt);
51595+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51596
51597 /* check if the device is still usable */
51598 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51599diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51600index 1ac38e7..6acc656 100644
51601--- a/drivers/scsi/scsi_sysfs.c
51602+++ b/drivers/scsi/scsi_sysfs.c
51603@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51604 char *buf) \
51605 { \
51606 struct scsi_device *sdev = to_scsi_device(dev); \
51607- unsigned long long count = atomic_read(&sdev->field); \
51608+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51609 return snprintf(buf, 20, "0x%llx\n", count); \
51610 } \
51611 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51612diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51613index 5d6f348..18778a6b 100644
51614--- a/drivers/scsi/scsi_transport_fc.c
51615+++ b/drivers/scsi/scsi_transport_fc.c
51616@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51617 * Netlink Infrastructure
51618 */
51619
51620-static atomic_t fc_event_seq;
51621+static atomic_unchecked_t fc_event_seq;
51622
51623 /**
51624 * fc_get_event_number - Obtain the next sequential FC event number
51625@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51626 u32
51627 fc_get_event_number(void)
51628 {
51629- return atomic_add_return(1, &fc_event_seq);
51630+ return atomic_add_return_unchecked(1, &fc_event_seq);
51631 }
51632 EXPORT_SYMBOL(fc_get_event_number);
51633
51634@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51635 {
51636 int error;
51637
51638- atomic_set(&fc_event_seq, 0);
51639+ atomic_set_unchecked(&fc_event_seq, 0);
51640
51641 error = transport_class_register(&fc_host_class);
51642 if (error)
51643@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51644 char *cp;
51645
51646 *val = simple_strtoul(buf, &cp, 0);
51647- if ((*cp && (*cp != '\n')) || (*val < 0))
51648+ if (*cp && (*cp != '\n'))
51649 return -EINVAL;
51650 /*
51651 * Check for overflow; dev_loss_tmo is u32
51652diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51653index 67d43e3..8cee73c 100644
51654--- a/drivers/scsi/scsi_transport_iscsi.c
51655+++ b/drivers/scsi/scsi_transport_iscsi.c
51656@@ -79,7 +79,7 @@ struct iscsi_internal {
51657 struct transport_container session_cont;
51658 };
51659
51660-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51661+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51662 static struct workqueue_struct *iscsi_eh_timer_workq;
51663
51664 static DEFINE_IDA(iscsi_sess_ida);
51665@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51666 int err;
51667
51668 ihost = shost->shost_data;
51669- session->sid = atomic_add_return(1, &iscsi_session_nr);
51670+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51671
51672 if (target_id == ISCSI_MAX_TARGET) {
51673 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51674@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51675 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51676 ISCSI_TRANSPORT_VERSION);
51677
51678- atomic_set(&iscsi_session_nr, 0);
51679+ atomic_set_unchecked(&iscsi_session_nr, 0);
51680
51681 err = class_register(&iscsi_transport_class);
51682 if (err)
51683diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51684index ae45bd9..c32a586 100644
51685--- a/drivers/scsi/scsi_transport_srp.c
51686+++ b/drivers/scsi/scsi_transport_srp.c
51687@@ -35,7 +35,7 @@
51688 #include "scsi_priv.h"
51689
51690 struct srp_host_attrs {
51691- atomic_t next_port_id;
51692+ atomic_unchecked_t next_port_id;
51693 };
51694 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51695
51696@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51697 struct Scsi_Host *shost = dev_to_shost(dev);
51698 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51699
51700- atomic_set(&srp_host->next_port_id, 0);
51701+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51702 return 0;
51703 }
51704
51705@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51706 rport_fast_io_fail_timedout);
51707 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51708
51709- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51710+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51711 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51712
51713 transport_setup_device(&rport->dev);
51714diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51715index 05ea0d4..5af8049 100644
51716--- a/drivers/scsi/sd.c
51717+++ b/drivers/scsi/sd.c
51718@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51719 sdkp->disk = gd;
51720 sdkp->index = index;
51721 atomic_set(&sdkp->openers, 0);
51722- atomic_set(&sdkp->device->ioerr_cnt, 0);
51723+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
51724
51725 if (!sdp->request_queue->rq_timeout) {
51726 if (sdp->type != TYPE_MOD)
51727diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
51728index dbf8e77..0d565c7 100644
51729--- a/drivers/scsi/sg.c
51730+++ b/drivers/scsi/sg.c
51731@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
51732 sdp->disk->disk_name,
51733 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
51734 NULL,
51735- (char *)arg);
51736+ (char __user *)arg);
51737 case BLKTRACESTART:
51738 return blk_trace_startstop(sdp->device->request_queue, 1);
51739 case BLKTRACESTOP:
51740diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
51741index 011a336..fb2b7a0 100644
51742--- a/drivers/soc/tegra/fuse/fuse-tegra.c
51743+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
51744@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
51745 return i;
51746 }
51747
51748-static struct bin_attribute fuse_bin_attr = {
51749+static bin_attribute_no_const fuse_bin_attr = {
51750 .attr = { .name = "fuse", .mode = S_IRUGO, },
51751 .read = fuse_read,
51752 };
51753diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51754index 66a70e9..f82cea4 100644
51755--- a/drivers/spi/spi.c
51756+++ b/drivers/spi/spi.c
51757@@ -2238,7 +2238,7 @@ int spi_bus_unlock(struct spi_master *master)
51758 EXPORT_SYMBOL_GPL(spi_bus_unlock);
51759
51760 /* portable code must never pass more than 32 bytes */
51761-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
51762+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
51763
51764 static u8 *buf;
51765
51766diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
51767index b41429f..2de5373 100644
51768--- a/drivers/staging/android/timed_output.c
51769+++ b/drivers/staging/android/timed_output.c
51770@@ -25,7 +25,7 @@
51771 #include "timed_output.h"
51772
51773 static struct class *timed_output_class;
51774-static atomic_t device_count;
51775+static atomic_unchecked_t device_count;
51776
51777 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
51778 char *buf)
51779@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
51780 timed_output_class = class_create(THIS_MODULE, "timed_output");
51781 if (IS_ERR(timed_output_class))
51782 return PTR_ERR(timed_output_class);
51783- atomic_set(&device_count, 0);
51784+ atomic_set_unchecked(&device_count, 0);
51785 timed_output_class->dev_groups = timed_output_groups;
51786 }
51787
51788@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
51789 if (ret < 0)
51790 return ret;
51791
51792- tdev->index = atomic_inc_return(&device_count);
51793+ tdev->index = atomic_inc_return_unchecked(&device_count);
51794 tdev->dev = device_create(timed_output_class, NULL,
51795 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
51796 if (IS_ERR(tdev->dev))
51797diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
51798index f143cb6..6fb8255 100644
51799--- a/drivers/staging/comedi/comedi_fops.c
51800+++ b/drivers/staging/comedi/comedi_fops.c
51801@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
51802 }
51803 cfp->last_attached = dev->attached;
51804 cfp->last_detach_count = dev->detach_count;
51805- ACCESS_ONCE(cfp->read_subdev) = read_s;
51806- ACCESS_ONCE(cfp->write_subdev) = write_s;
51807+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
51808+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
51809 }
51810
51811 static void comedi_file_check(struct file *file)
51812@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51813 !(s_old->async->cmd.flags & CMDF_WRITE))
51814 return -EBUSY;
51815
51816- ACCESS_ONCE(cfp->read_subdev) = s_new;
51817+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
51818 return 0;
51819 }
51820
51821@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51822 (s_old->async->cmd.flags & CMDF_WRITE))
51823 return -EBUSY;
51824
51825- ACCESS_ONCE(cfp->write_subdev) = s_new;
51826+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
51827 return 0;
51828 }
51829
51830diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
51831index 001348c..cfaac8a 100644
51832--- a/drivers/staging/gdm724x/gdm_tty.c
51833+++ b/drivers/staging/gdm724x/gdm_tty.c
51834@@ -44,7 +44,7 @@
51835 #define gdm_tty_send_control(n, r, v, d, l) (\
51836 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
51837
51838-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
51839+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
51840
51841 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
51842 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
51843diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
51844index 503b2d7..c904931 100644
51845--- a/drivers/staging/line6/driver.c
51846+++ b/drivers/staging/line6/driver.c
51847@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51848 {
51849 struct usb_device *usbdev = line6->usbdev;
51850 int ret;
51851- unsigned char len;
51852+ unsigned char *plen;
51853
51854 /* query the serial number: */
51855 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51856@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51857 return ret;
51858 }
51859
51860+ plen = kmalloc(1, GFP_KERNEL);
51861+ if (plen == NULL)
51862+ return -ENOMEM;
51863+
51864 /* Wait for data length. We'll get 0xff until length arrives. */
51865 do {
51866 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51867 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51868 USB_DIR_IN,
51869- 0x0012, 0x0000, &len, 1,
51870+ 0x0012, 0x0000, plen, 1,
51871 LINE6_TIMEOUT * HZ);
51872 if (ret < 0) {
51873 dev_err(line6->ifcdev,
51874 "receive length failed (error %d)\n", ret);
51875+ kfree(plen);
51876 return ret;
51877 }
51878- } while (len == 0xff);
51879+ } while (*plen == 0xff);
51880
51881- if (len != datalen) {
51882+ if (*plen != datalen) {
51883 /* should be equal or something went wrong */
51884 dev_err(line6->ifcdev,
51885 "length mismatch (expected %d, got %d)\n",
51886- (int)datalen, (int)len);
51887+ (int)datalen, (int)*plen);
51888+ kfree(plen);
51889 return -EINVAL;
51890 }
51891+ kfree(plen);
51892
51893 /* receive the result: */
51894 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51895@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51896 {
51897 struct usb_device *usbdev = line6->usbdev;
51898 int ret;
51899- unsigned char status;
51900+ unsigned char *status;
51901
51902 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51903 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
51904@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51905 return ret;
51906 }
51907
51908+ status = kmalloc(1, GFP_KERNEL);
51909+ if (status == NULL)
51910+ return -ENOMEM;
51911+
51912 do {
51913 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
51914 0x67,
51915 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51916 USB_DIR_IN,
51917 0x0012, 0x0000,
51918- &status, 1, LINE6_TIMEOUT * HZ);
51919+ status, 1, LINE6_TIMEOUT * HZ);
51920
51921 if (ret < 0) {
51922 dev_err(line6->ifcdev,
51923 "receiving status failed (error %d)\n", ret);
51924+ kfree(status);
51925 return ret;
51926 }
51927- } while (status == 0xff);
51928+ } while (*status == 0xff);
51929
51930- if (status != 0) {
51931+ if (*status != 0) {
51932 dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
51933+ kfree(status);
51934 return -EINVAL;
51935 }
51936
51937+ kfree(status);
51938+
51939 return 0;
51940 }
51941
51942diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
51943index 6943715..0a93632 100644
51944--- a/drivers/staging/line6/toneport.c
51945+++ b/drivers/staging/line6/toneport.c
51946@@ -11,6 +11,7 @@
51947 */
51948
51949 #include <linux/wait.h>
51950+#include <linux/slab.h>
51951 #include <sound/control.h>
51952
51953 #include "audio.h"
51954@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
51955 */
51956 static void toneport_setup(struct usb_line6_toneport *toneport)
51957 {
51958- int ticks;
51959+ int *ticks;
51960 struct usb_line6 *line6 = &toneport->line6;
51961 struct usb_device *usbdev = line6->usbdev;
51962 u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
51963
51964+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
51965+ if (ticks == NULL)
51966+ return;
51967+
51968 /* sync time on device with host: */
51969- ticks = (int)get_seconds();
51970- line6_write_data(line6, 0x80c6, &ticks, 4);
51971+ *ticks = (int)get_seconds();
51972+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
51973+
51974+ kfree(ticks);
51975
51976 /* enable device: */
51977 toneport_send_cmd(usbdev, 0x0301, 0x0000);
51978diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
51979index 463da07..e791ce9 100644
51980--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
51981+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
51982@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
51983 return 0;
51984 }
51985
51986-sfw_test_client_ops_t brw_test_client;
51987-void brw_init_test_client(void)
51988-{
51989- brw_test_client.tso_init = brw_client_init;
51990- brw_test_client.tso_fini = brw_client_fini;
51991- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
51992- brw_test_client.tso_done_rpc = brw_client_done_rpc;
51993+sfw_test_client_ops_t brw_test_client = {
51994+ .tso_init = brw_client_init,
51995+ .tso_fini = brw_client_fini,
51996+ .tso_prep_rpc = brw_client_prep_rpc,
51997+ .tso_done_rpc = brw_client_done_rpc,
51998 };
51999
52000 srpc_service_t brw_test_service;
52001diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52002index cc9d182..8fabce3 100644
52003--- a/drivers/staging/lustre/lnet/selftest/framework.c
52004+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52005@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
52006
52007 extern sfw_test_client_ops_t ping_test_client;
52008 extern srpc_service_t ping_test_service;
52009-extern void ping_init_test_client(void);
52010 extern void ping_init_test_service(void);
52011
52012 extern sfw_test_client_ops_t brw_test_client;
52013 extern srpc_service_t brw_test_service;
52014-extern void brw_init_test_client(void);
52015 extern void brw_init_test_service(void);
52016
52017
52018@@ -1675,12 +1673,10 @@ sfw_startup (void)
52019 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52020 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52021
52022- brw_init_test_client();
52023 brw_init_test_service();
52024 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52025 LASSERT (rc == 0);
52026
52027- ping_init_test_client();
52028 ping_init_test_service();
52029 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52030 LASSERT (rc == 0);
52031diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52032index d8c0df6..5041cbb 100644
52033--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52034+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52035@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52036 return 0;
52037 }
52038
52039-sfw_test_client_ops_t ping_test_client;
52040-void ping_init_test_client(void)
52041-{
52042- ping_test_client.tso_init = ping_client_init;
52043- ping_test_client.tso_fini = ping_client_fini;
52044- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52045- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52046-}
52047+sfw_test_client_ops_t ping_test_client = {
52048+ .tso_init = ping_client_init,
52049+ .tso_fini = ping_client_fini,
52050+ .tso_prep_rpc = ping_client_prep_rpc,
52051+ .tso_done_rpc = ping_client_done_rpc,
52052+};
52053
52054 srpc_service_t ping_test_service;
52055 void ping_init_test_service(void)
52056diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52057index 83bc0a9..12ba00a 100644
52058--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52059+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52060@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52061 ldlm_completion_callback lcs_completion;
52062 ldlm_blocking_callback lcs_blocking;
52063 ldlm_glimpse_callback lcs_glimpse;
52064-};
52065+} __no_const;
52066
52067 /* ldlm_lockd.c */
52068 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
52069diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52070index 2a88b80..62e7e5f 100644
52071--- a/drivers/staging/lustre/lustre/include/obd.h
52072+++ b/drivers/staging/lustre/lustre/include/obd.h
52073@@ -1362,7 +1362,7 @@ struct md_ops {
52074 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52075 * wrapper function in include/linux/obd_class.h.
52076 */
52077-};
52078+} __no_const;
52079
52080 struct lsm_operations {
52081 void (*lsm_free)(struct lov_stripe_md *);
52082diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52083index a4c252f..b21acac 100644
52084--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52085+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52086@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52087 int added = (mode == LCK_NL);
52088 int overlaps = 0;
52089 int splitted = 0;
52090- const struct ldlm_callback_suite null_cbs = { NULL };
52091+ const struct ldlm_callback_suite null_cbs = { };
52092
52093 CDEBUG(D_DLMTRACE,
52094 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52095diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52096index 83d3f08..b03adad 100644
52097--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52098+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52099@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52100 void __user *buffer, size_t *lenp, loff_t *ppos)
52101 {
52102 int rc, max_delay_cs;
52103- struct ctl_table dummy = *table;
52104+ ctl_table_no_const dummy = *table;
52105 long d;
52106
52107 dummy.data = &max_delay_cs;
52108@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52109 void __user *buffer, size_t *lenp, loff_t *ppos)
52110 {
52111 int rc, min_delay_cs;
52112- struct ctl_table dummy = *table;
52113+ ctl_table_no_const dummy = *table;
52114 long d;
52115
52116 dummy.data = &min_delay_cs;
52117@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52118 void __user *buffer, size_t *lenp, loff_t *ppos)
52119 {
52120 int rc, backoff;
52121- struct ctl_table dummy = *table;
52122+ ctl_table_no_const dummy = *table;
52123
52124 dummy.data = &backoff;
52125 dummy.proc_handler = &proc_dointvec;
52126diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52127index 2c4fc74..b04ca79 100644
52128--- a/drivers/staging/lustre/lustre/libcfs/module.c
52129+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52130@@ -315,11 +315,11 @@ out:
52131
52132
52133 struct cfs_psdev_ops libcfs_psdev_ops = {
52134- libcfs_psdev_open,
52135- libcfs_psdev_release,
52136- NULL,
52137- NULL,
52138- libcfs_ioctl
52139+ .p_open = libcfs_psdev_open,
52140+ .p_close = libcfs_psdev_release,
52141+ .p_read = NULL,
52142+ .p_write = NULL,
52143+ .p_ioctl = libcfs_ioctl
52144 };
52145
52146 extern int insert_proc(void);
52147diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52148index fcbe836..8a7ada4 100644
52149--- a/drivers/staging/octeon/ethernet-rx.c
52150+++ b/drivers/staging/octeon/ethernet-rx.c
52151@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52152 /* Increment RX stats for virtual ports */
52153 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52154 #ifdef CONFIG_64BIT
52155- atomic64_add(1,
52156+ atomic64_add_unchecked(1,
52157 (atomic64_t *)&priv->stats.rx_packets);
52158- atomic64_add(skb->len,
52159+ atomic64_add_unchecked(skb->len,
52160 (atomic64_t *)&priv->stats.rx_bytes);
52161 #else
52162- atomic_add(1,
52163+ atomic_add_unchecked(1,
52164 (atomic_t *)&priv->stats.rx_packets);
52165- atomic_add(skb->len,
52166+ atomic_add_unchecked(skb->len,
52167 (atomic_t *)&priv->stats.rx_bytes);
52168 #endif
52169 }
52170@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52171 dev->name);
52172 */
52173 #ifdef CONFIG_64BIT
52174- atomic64_add(1,
52175+ atomic64_add_unchecked(1,
52176 (atomic64_t *)&priv->stats.rx_dropped);
52177 #else
52178- atomic_add(1,
52179+ atomic_add_unchecked(1,
52180 (atomic_t *)&priv->stats.rx_dropped);
52181 #endif
52182 dev_kfree_skb_irq(skb);
52183diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52184index ee32149..052d1836 100644
52185--- a/drivers/staging/octeon/ethernet.c
52186+++ b/drivers/staging/octeon/ethernet.c
52187@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52188 * since the RX tasklet also increments it.
52189 */
52190 #ifdef CONFIG_64BIT
52191- atomic64_add(rx_status.dropped_packets,
52192- (atomic64_t *)&priv->stats.rx_dropped);
52193+ atomic64_add_unchecked(rx_status.dropped_packets,
52194+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52195 #else
52196- atomic_add(rx_status.dropped_packets,
52197- (atomic_t *)&priv->stats.rx_dropped);
52198+ atomic_add_unchecked(rx_status.dropped_packets,
52199+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52200 #endif
52201 }
52202
52203diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52204index 3b476d8..f522d68 100644
52205--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52206+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52207@@ -225,7 +225,7 @@ struct hal_ops {
52208
52209 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52210 void (*hal_reset_security_engine)(struct adapter *adapter);
52211-};
52212+} __no_const;
52213
52214 enum rt_eeprom_type {
52215 EEPROM_93C46,
52216diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52217index 070cc03..6806e37 100644
52218--- a/drivers/staging/rtl8712/rtl871x_io.h
52219+++ b/drivers/staging/rtl8712/rtl871x_io.h
52220@@ -108,7 +108,7 @@ struct _io_ops {
52221 u8 *pmem);
52222 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52223 u8 *pmem);
52224-};
52225+} __no_const;
52226
52227 struct io_req {
52228 struct list_head list;
52229diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52230index 46dad63..fe4acdc 100644
52231--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52232+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52233@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
52234 void (*device_resume)(ulong bus_no, ulong dev_no);
52235 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52236 ulong *max_size);
52237-};
52238+} __no_const;
52239
52240 /* These functions live inside visorchipset, and will be called to indicate
52241 * responses to specific events (by code outside of visorchipset).
52242@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
52243 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52244 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52245 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52246-};
52247+} __no_const;
52248
52249 /** Register functions (in the bus driver) to get called by visorchipset
52250 * whenever a bus or device appears for which this service partition is
52251diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52252index 9512af6..045bf5a 100644
52253--- a/drivers/target/sbp/sbp_target.c
52254+++ b/drivers/target/sbp/sbp_target.c
52255@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52256
52257 #define SESSION_MAINTENANCE_INTERVAL HZ
52258
52259-static atomic_t login_id = ATOMIC_INIT(0);
52260+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52261
52262 static void session_maintenance_work(struct work_struct *);
52263 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52264@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52265 login->lun = se_lun;
52266 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52267 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52268- login->login_id = atomic_inc_return(&login_id);
52269+ login->login_id = atomic_inc_return_unchecked(&login_id);
52270
52271 login->tgt_agt = sbp_target_agent_register(login);
52272 if (IS_ERR(login->tgt_agt)) {
52273diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52274index 58f49ff..2669604 100644
52275--- a/drivers/target/target_core_device.c
52276+++ b/drivers/target/target_core_device.c
52277@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52278 spin_lock_init(&dev->se_tmr_lock);
52279 spin_lock_init(&dev->qf_cmd_lock);
52280 sema_init(&dev->caw_sem, 1);
52281- atomic_set(&dev->dev_ordered_id, 0);
52282+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52283 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52284 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52285 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52286diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52287index 0adc0f6..7757bfe 100644
52288--- a/drivers/target/target_core_transport.c
52289+++ b/drivers/target/target_core_transport.c
52290@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52291 * Used to determine when ORDERED commands should go from
52292 * Dormant to Active status.
52293 */
52294- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52295+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52296 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52297 cmd->se_ordered_id, cmd->sam_task_attr,
52298 dev->transport->name);
52299diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52300index 65a98a9..d93d3a8 100644
52301--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52302+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52303@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52304 platform_set_drvdata(pdev, priv);
52305
52306 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52307- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52308- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52309+ pax_open_kernel();
52310+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52311+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52312+ pax_close_kernel();
52313 }
52314 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52315 priv, &int3400_thermal_ops,
52316diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52317index d717f3d..cae1cc3e 100644
52318--- a/drivers/thermal/of-thermal.c
52319+++ b/drivers/thermal/of-thermal.c
52320@@ -31,6 +31,7 @@
52321 #include <linux/export.h>
52322 #include <linux/string.h>
52323 #include <linux/thermal.h>
52324+#include <linux/mm.h>
52325
52326 #include "thermal_core.h"
52327
52328@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52329 tz->ops = ops;
52330 tz->sensor_data = data;
52331
52332- tzd->ops->get_temp = of_thermal_get_temp;
52333- tzd->ops->get_trend = of_thermal_get_trend;
52334- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52335+ pax_open_kernel();
52336+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52337+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52338+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52339+ pax_close_kernel();
52340 mutex_unlock(&tzd->lock);
52341
52342 return tzd;
52343@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52344 return;
52345
52346 mutex_lock(&tzd->lock);
52347- tzd->ops->get_temp = NULL;
52348- tzd->ops->get_trend = NULL;
52349- tzd->ops->set_emul_temp = NULL;
52350+ pax_open_kernel();
52351+ *(void **)&tzd->ops->get_temp = NULL;
52352+ *(void **)&tzd->ops->get_trend = NULL;
52353+ *(void **)&tzd->ops->set_emul_temp = NULL;
52354+ pax_close_kernel();
52355
52356 tz->ops = NULL;
52357 tz->sensor_data = NULL;
52358diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52359index fd66f57..48e6376 100644
52360--- a/drivers/tty/cyclades.c
52361+++ b/drivers/tty/cyclades.c
52362@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52363 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52364 info->port.count);
52365 #endif
52366- info->port.count++;
52367+ atomic_inc(&info->port.count);
52368 #ifdef CY_DEBUG_COUNT
52369 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52370- current->pid, info->port.count);
52371+ current->pid, atomic_read(&info->port.count));
52372 #endif
52373
52374 /*
52375@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52376 for (j = 0; j < cy_card[i].nports; j++) {
52377 info = &cy_card[i].ports[j];
52378
52379- if (info->port.count) {
52380+ if (atomic_read(&info->port.count)) {
52381 /* XXX is the ldisc num worth this? */
52382 struct tty_struct *tty;
52383 struct tty_ldisc *ld;
52384diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52385index 4fcec1d..5a036f7 100644
52386--- a/drivers/tty/hvc/hvc_console.c
52387+++ b/drivers/tty/hvc/hvc_console.c
52388@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52389
52390 spin_lock_irqsave(&hp->port.lock, flags);
52391 /* Check and then increment for fast path open. */
52392- if (hp->port.count++ > 0) {
52393+ if (atomic_inc_return(&hp->port.count) > 1) {
52394 spin_unlock_irqrestore(&hp->port.lock, flags);
52395 hvc_kick();
52396 return 0;
52397@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52398
52399 spin_lock_irqsave(&hp->port.lock, flags);
52400
52401- if (--hp->port.count == 0) {
52402+ if (atomic_dec_return(&hp->port.count) == 0) {
52403 spin_unlock_irqrestore(&hp->port.lock, flags);
52404 /* We are done with the tty pointer now. */
52405 tty_port_tty_set(&hp->port, NULL);
52406@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52407 */
52408 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52409 } else {
52410- if (hp->port.count < 0)
52411+ if (atomic_read(&hp->port.count) < 0)
52412 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52413- hp->vtermno, hp->port.count);
52414+ hp->vtermno, atomic_read(&hp->port.count));
52415 spin_unlock_irqrestore(&hp->port.lock, flags);
52416 }
52417 }
52418@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52419 * open->hangup case this can be called after the final close so prevent
52420 * that from happening for now.
52421 */
52422- if (hp->port.count <= 0) {
52423+ if (atomic_read(&hp->port.count) <= 0) {
52424 spin_unlock_irqrestore(&hp->port.lock, flags);
52425 return;
52426 }
52427
52428- hp->port.count = 0;
52429+ atomic_set(&hp->port.count, 0);
52430 spin_unlock_irqrestore(&hp->port.lock, flags);
52431 tty_port_tty_set(&hp->port, NULL);
52432
52433@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52434 return -EPIPE;
52435
52436 /* FIXME what's this (unprotected) check for? */
52437- if (hp->port.count <= 0)
52438+ if (atomic_read(&hp->port.count) <= 0)
52439 return -EIO;
52440
52441 spin_lock_irqsave(&hp->lock, flags);
52442diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52443index 81ff7e1..dfb7b71 100644
52444--- a/drivers/tty/hvc/hvcs.c
52445+++ b/drivers/tty/hvc/hvcs.c
52446@@ -83,6 +83,7 @@
52447 #include <asm/hvcserver.h>
52448 #include <asm/uaccess.h>
52449 #include <asm/vio.h>
52450+#include <asm/local.h>
52451
52452 /*
52453 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52454@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52455
52456 spin_lock_irqsave(&hvcsd->lock, flags);
52457
52458- if (hvcsd->port.count > 0) {
52459+ if (atomic_read(&hvcsd->port.count) > 0) {
52460 spin_unlock_irqrestore(&hvcsd->lock, flags);
52461 printk(KERN_INFO "HVCS: vterm state unchanged. "
52462 "The hvcs device node is still in use.\n");
52463@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52464 }
52465 }
52466
52467- hvcsd->port.count = 0;
52468+ atomic_set(&hvcsd->port.count, 0);
52469 hvcsd->port.tty = tty;
52470 tty->driver_data = hvcsd;
52471
52472@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52473 unsigned long flags;
52474
52475 spin_lock_irqsave(&hvcsd->lock, flags);
52476- hvcsd->port.count++;
52477+ atomic_inc(&hvcsd->port.count);
52478 hvcsd->todo_mask |= HVCS_SCHED_READ;
52479 spin_unlock_irqrestore(&hvcsd->lock, flags);
52480
52481@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52482 hvcsd = tty->driver_data;
52483
52484 spin_lock_irqsave(&hvcsd->lock, flags);
52485- if (--hvcsd->port.count == 0) {
52486+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52487
52488 vio_disable_interrupts(hvcsd->vdev);
52489
52490@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52491
52492 free_irq(irq, hvcsd);
52493 return;
52494- } else if (hvcsd->port.count < 0) {
52495+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52496 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52497 " is missmanaged.\n",
52498- hvcsd->vdev->unit_address, hvcsd->port.count);
52499+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52500 }
52501
52502 spin_unlock_irqrestore(&hvcsd->lock, flags);
52503@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52504
52505 spin_lock_irqsave(&hvcsd->lock, flags);
52506 /* Preserve this so that we know how many kref refs to put */
52507- temp_open_count = hvcsd->port.count;
52508+ temp_open_count = atomic_read(&hvcsd->port.count);
52509
52510 /*
52511 * Don't kref put inside the spinlock because the destruction
52512@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52513 tty->driver_data = NULL;
52514 hvcsd->port.tty = NULL;
52515
52516- hvcsd->port.count = 0;
52517+ atomic_set(&hvcsd->port.count, 0);
52518
52519 /* This will drop any buffered data on the floor which is OK in a hangup
52520 * scenario. */
52521@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52522 * the middle of a write operation? This is a crummy place to do this
52523 * but we want to keep it all in the spinlock.
52524 */
52525- if (hvcsd->port.count <= 0) {
52526+ if (atomic_read(&hvcsd->port.count) <= 0) {
52527 spin_unlock_irqrestore(&hvcsd->lock, flags);
52528 return -ENODEV;
52529 }
52530@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52531 {
52532 struct hvcs_struct *hvcsd = tty->driver_data;
52533
52534- if (!hvcsd || hvcsd->port.count <= 0)
52535+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52536 return 0;
52537
52538 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52539diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52540index 4190199..06d5bfa 100644
52541--- a/drivers/tty/hvc/hvsi.c
52542+++ b/drivers/tty/hvc/hvsi.c
52543@@ -85,7 +85,7 @@ struct hvsi_struct {
52544 int n_outbuf;
52545 uint32_t vtermno;
52546 uint32_t virq;
52547- atomic_t seqno; /* HVSI packet sequence number */
52548+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52549 uint16_t mctrl;
52550 uint8_t state; /* HVSI protocol state */
52551 uint8_t flags;
52552@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52553
52554 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52555 packet.hdr.len = sizeof(struct hvsi_query_response);
52556- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52557+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52558 packet.verb = VSV_SEND_VERSION_NUMBER;
52559 packet.u.version = HVSI_VERSION;
52560 packet.query_seqno = query_seqno+1;
52561@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52562
52563 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52564 packet.hdr.len = sizeof(struct hvsi_query);
52565- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52566+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52567 packet.verb = verb;
52568
52569 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52570@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52571 int wrote;
52572
52573 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52574- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52575+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52576 packet.hdr.len = sizeof(struct hvsi_control);
52577 packet.verb = VSV_SET_MODEM_CTL;
52578 packet.mask = HVSI_TSDTR;
52579@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52580 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52581
52582 packet.hdr.type = VS_DATA_PACKET_HEADER;
52583- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52584+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52585 packet.hdr.len = count + sizeof(struct hvsi_header);
52586 memcpy(&packet.data, buf, count);
52587
52588@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52589 struct hvsi_control packet __ALIGNED__;
52590
52591 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52592- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52593+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52594 packet.hdr.len = 6;
52595 packet.verb = VSV_CLOSE_PROTOCOL;
52596
52597@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52598
52599 tty_port_tty_set(&hp->port, tty);
52600 spin_lock_irqsave(&hp->lock, flags);
52601- hp->port.count++;
52602+ atomic_inc(&hp->port.count);
52603 atomic_set(&hp->seqno, 0);
52604 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52605 spin_unlock_irqrestore(&hp->lock, flags);
52606@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52607
52608 spin_lock_irqsave(&hp->lock, flags);
52609
52610- if (--hp->port.count == 0) {
52611+ if (atomic_dec_return(&hp->port.count) == 0) {
52612 tty_port_tty_set(&hp->port, NULL);
52613 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
52614
52615@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52616
52617 spin_lock_irqsave(&hp->lock, flags);
52618 }
52619- } else if (hp->port.count < 0)
52620+ } else if (atomic_read(&hp->port.count) < 0)
52621 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
52622- hp - hvsi_ports, hp->port.count);
52623+ hp - hvsi_ports, atomic_read(&hp->port.count));
52624
52625 spin_unlock_irqrestore(&hp->lock, flags);
52626 }
52627@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
52628 tty_port_tty_set(&hp->port, NULL);
52629
52630 spin_lock_irqsave(&hp->lock, flags);
52631- hp->port.count = 0;
52632+ atomic_set(&hp->port.count, 0);
52633 hp->n_outbuf = 0;
52634 spin_unlock_irqrestore(&hp->lock, flags);
52635 }
52636diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
52637index a270f04..7c77b5d 100644
52638--- a/drivers/tty/hvc/hvsi_lib.c
52639+++ b/drivers/tty/hvc/hvsi_lib.c
52640@@ -8,7 +8,7 @@
52641
52642 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
52643 {
52644- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
52645+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
52646
52647 /* Assumes that always succeeds, works in practice */
52648 return pv->put_chars(pv->termno, (char *)packet, packet->len);
52649@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
52650
52651 /* Reset state */
52652 pv->established = 0;
52653- atomic_set(&pv->seqno, 0);
52654+ atomic_set_unchecked(&pv->seqno, 0);
52655
52656 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
52657
52658diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
52659index 345cebb..d5a1e9e 100644
52660--- a/drivers/tty/ipwireless/tty.c
52661+++ b/drivers/tty/ipwireless/tty.c
52662@@ -28,6 +28,7 @@
52663 #include <linux/tty_driver.h>
52664 #include <linux/tty_flip.h>
52665 #include <linux/uaccess.h>
52666+#include <asm/local.h>
52667
52668 #include "tty.h"
52669 #include "network.h"
52670@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52671 return -ENODEV;
52672
52673 mutex_lock(&tty->ipw_tty_mutex);
52674- if (tty->port.count == 0)
52675+ if (atomic_read(&tty->port.count) == 0)
52676 tty->tx_bytes_queued = 0;
52677
52678- tty->port.count++;
52679+ atomic_inc(&tty->port.count);
52680
52681 tty->port.tty = linux_tty;
52682 linux_tty->driver_data = tty;
52683@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52684
52685 static void do_ipw_close(struct ipw_tty *tty)
52686 {
52687- tty->port.count--;
52688-
52689- if (tty->port.count == 0) {
52690+ if (atomic_dec_return(&tty->port.count) == 0) {
52691 struct tty_struct *linux_tty = tty->port.tty;
52692
52693 if (linux_tty != NULL) {
52694@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
52695 return;
52696
52697 mutex_lock(&tty->ipw_tty_mutex);
52698- if (tty->port.count == 0) {
52699+ if (atomic_read(&tty->port.count) == 0) {
52700 mutex_unlock(&tty->ipw_tty_mutex);
52701 return;
52702 }
52703@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
52704
52705 mutex_lock(&tty->ipw_tty_mutex);
52706
52707- if (!tty->port.count) {
52708+ if (!atomic_read(&tty->port.count)) {
52709 mutex_unlock(&tty->ipw_tty_mutex);
52710 return;
52711 }
52712@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
52713 return -ENODEV;
52714
52715 mutex_lock(&tty->ipw_tty_mutex);
52716- if (!tty->port.count) {
52717+ if (!atomic_read(&tty->port.count)) {
52718 mutex_unlock(&tty->ipw_tty_mutex);
52719 return -EINVAL;
52720 }
52721@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
52722 if (!tty)
52723 return -ENODEV;
52724
52725- if (!tty->port.count)
52726+ if (!atomic_read(&tty->port.count))
52727 return -EINVAL;
52728
52729 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
52730@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
52731 if (!tty)
52732 return 0;
52733
52734- if (!tty->port.count)
52735+ if (!atomic_read(&tty->port.count))
52736 return 0;
52737
52738 return tty->tx_bytes_queued;
52739@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
52740 if (!tty)
52741 return -ENODEV;
52742
52743- if (!tty->port.count)
52744+ if (!atomic_read(&tty->port.count))
52745 return -EINVAL;
52746
52747 return get_control_lines(tty);
52748@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
52749 if (!tty)
52750 return -ENODEV;
52751
52752- if (!tty->port.count)
52753+ if (!atomic_read(&tty->port.count))
52754 return -EINVAL;
52755
52756 return set_control_lines(tty, set, clear);
52757@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
52758 if (!tty)
52759 return -ENODEV;
52760
52761- if (!tty->port.count)
52762+ if (!atomic_read(&tty->port.count))
52763 return -EINVAL;
52764
52765 /* FIXME: Exactly how is the tty object locked here .. */
52766@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
52767 * are gone */
52768 mutex_lock(&ttyj->ipw_tty_mutex);
52769 }
52770- while (ttyj->port.count)
52771+ while (atomic_read(&ttyj->port.count))
52772 do_ipw_close(ttyj);
52773 ipwireless_disassociate_network_ttys(network,
52774 ttyj->channel_idx);
52775diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52776index 14c54e0..1efd4f2 100644
52777--- a/drivers/tty/moxa.c
52778+++ b/drivers/tty/moxa.c
52779@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
52780 }
52781
52782 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
52783- ch->port.count++;
52784+ atomic_inc(&ch->port.count);
52785 tty->driver_data = ch;
52786 tty_port_tty_set(&ch->port, tty);
52787 mutex_lock(&ch->port.mutex);
52788diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52789index c434376..114ce13 100644
52790--- a/drivers/tty/n_gsm.c
52791+++ b/drivers/tty/n_gsm.c
52792@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
52793 spin_lock_init(&dlci->lock);
52794 mutex_init(&dlci->mutex);
52795 dlci->fifo = &dlci->_fifo;
52796- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
52797+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
52798 kfree(dlci);
52799 return NULL;
52800 }
52801@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
52802 struct gsm_dlci *dlci = tty->driver_data;
52803 struct tty_port *port = &dlci->port;
52804
52805- port->count++;
52806+ atomic_inc(&port->count);
52807 tty_port_tty_set(port, tty);
52808
52809 dlci->modem_rx = 0;
52810diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
52811index 4ddfa60..1b7e112 100644
52812--- a/drivers/tty/n_tty.c
52813+++ b/drivers/tty/n_tty.c
52814@@ -115,7 +115,7 @@ struct n_tty_data {
52815 int minimum_to_wake;
52816
52817 /* consumer-published */
52818- size_t read_tail;
52819+ size_t read_tail __intentional_overflow(-1);
52820 size_t line_start;
52821
52822 /* protected by output lock */
52823@@ -2503,6 +2503,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
52824 {
52825 *ops = tty_ldisc_N_TTY;
52826 ops->owner = NULL;
52827- ops->refcount = ops->flags = 0;
52828+ atomic_set(&ops->refcount, 0);
52829+ ops->flags = 0;
52830 }
52831 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
52832diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
52833index 6e1f150..c3ba598 100644
52834--- a/drivers/tty/pty.c
52835+++ b/drivers/tty/pty.c
52836@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
52837 panic("Couldn't register Unix98 pts driver");
52838
52839 /* Now create the /dev/ptmx special device */
52840+ pax_open_kernel();
52841 tty_default_fops(&ptmx_fops);
52842- ptmx_fops.open = ptmx_open;
52843+ *(void **)&ptmx_fops.open = ptmx_open;
52844+ pax_close_kernel();
52845
52846 cdev_init(&ptmx_cdev, &ptmx_fops);
52847 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
52848diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
52849index 383c4c7..d408e21 100644
52850--- a/drivers/tty/rocket.c
52851+++ b/drivers/tty/rocket.c
52852@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52853 tty->driver_data = info;
52854 tty_port_tty_set(port, tty);
52855
52856- if (port->count++ == 0) {
52857+ if (atomic_inc_return(&port->count) == 1) {
52858 atomic_inc(&rp_num_ports_open);
52859
52860 #ifdef ROCKET_DEBUG_OPEN
52861@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52862 #endif
52863 }
52864 #ifdef ROCKET_DEBUG_OPEN
52865- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
52866+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
52867 #endif
52868
52869 /*
52870@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
52871 spin_unlock_irqrestore(&info->port.lock, flags);
52872 return;
52873 }
52874- if (info->port.count)
52875+ if (atomic_read(&info->port.count))
52876 atomic_dec(&rp_num_ports_open);
52877 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
52878 spin_unlock_irqrestore(&info->port.lock, flags);
52879diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
52880index aa28209..e08fb85 100644
52881--- a/drivers/tty/serial/ioc4_serial.c
52882+++ b/drivers/tty/serial/ioc4_serial.c
52883@@ -437,7 +437,7 @@ struct ioc4_soft {
52884 } is_intr_info[MAX_IOC4_INTR_ENTS];
52885
52886 /* Number of entries active in the above array */
52887- atomic_t is_num_intrs;
52888+ atomic_unchecked_t is_num_intrs;
52889 } is_intr_type[IOC4_NUM_INTR_TYPES];
52890
52891 /* is_ir_lock must be held while
52892@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
52893 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
52894 || (type == IOC4_OTHER_INTR_TYPE)));
52895
52896- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
52897+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
52898 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
52899
52900 /* Save off the lower level interrupt handler */
52901@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
52902
52903 soft = arg;
52904 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
52905- num_intrs = (int)atomic_read(
52906+ num_intrs = (int)atomic_read_unchecked(
52907 &soft->is_intr_type[intr_type].is_num_intrs);
52908
52909 this_mir = this_ir = pending_intrs(soft, intr_type);
52910diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
52911index 129dc5b..1da5bb8 100644
52912--- a/drivers/tty/serial/kgdb_nmi.c
52913+++ b/drivers/tty/serial/kgdb_nmi.c
52914@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
52915 * I/O utilities that messages sent to the console will automatically
52916 * be displayed on the dbg_io.
52917 */
52918- dbg_io_ops->is_console = true;
52919+ pax_open_kernel();
52920+ *(int *)&dbg_io_ops->is_console = true;
52921+ pax_close_kernel();
52922
52923 return 0;
52924 }
52925diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
52926index a260cde..6b2b5ce 100644
52927--- a/drivers/tty/serial/kgdboc.c
52928+++ b/drivers/tty/serial/kgdboc.c
52929@@ -24,8 +24,9 @@
52930 #define MAX_CONFIG_LEN 40
52931
52932 static struct kgdb_io kgdboc_io_ops;
52933+static struct kgdb_io kgdboc_io_ops_console;
52934
52935-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
52936+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
52937 static int configured = -1;
52938
52939 static char config[MAX_CONFIG_LEN];
52940@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
52941 kgdboc_unregister_kbd();
52942 if (configured == 1)
52943 kgdb_unregister_io_module(&kgdboc_io_ops);
52944+ else if (configured == 2)
52945+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
52946 }
52947
52948 static int configure_kgdboc(void)
52949@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
52950 int err;
52951 char *cptr = config;
52952 struct console *cons;
52953+ int is_console = 0;
52954
52955 err = kgdboc_option_setup(config);
52956 if (err || !strlen(config) || isspace(config[0]))
52957 goto noconfig;
52958
52959 err = -ENODEV;
52960- kgdboc_io_ops.is_console = 0;
52961 kgdb_tty_driver = NULL;
52962
52963 kgdboc_use_kms = 0;
52964@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
52965 int idx;
52966 if (cons->device && cons->device(cons, &idx) == p &&
52967 idx == tty_line) {
52968- kgdboc_io_ops.is_console = 1;
52969+ is_console = 1;
52970 break;
52971 }
52972 cons = cons->next;
52973@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
52974 kgdb_tty_line = tty_line;
52975
52976 do_register:
52977- err = kgdb_register_io_module(&kgdboc_io_ops);
52978+ if (is_console) {
52979+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
52980+ configured = 2;
52981+ } else {
52982+ err = kgdb_register_io_module(&kgdboc_io_ops);
52983+ configured = 1;
52984+ }
52985 if (err)
52986 goto noconfig;
52987
52988@@ -205,8 +214,6 @@ do_register:
52989 if (err)
52990 goto nmi_con_failed;
52991
52992- configured = 1;
52993-
52994 return 0;
52995
52996 nmi_con_failed:
52997@@ -223,7 +230,7 @@ noconfig:
52998 static int __init init_kgdboc(void)
52999 {
53000 /* Already configured? */
53001- if (configured == 1)
53002+ if (configured >= 1)
53003 return 0;
53004
53005 return configure_kgdboc();
53006@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53007 if (config[len - 1] == '\n')
53008 config[len - 1] = '\0';
53009
53010- if (configured == 1)
53011+ if (configured >= 1)
53012 cleanup_kgdboc();
53013
53014 /* Go and configure with the new params. */
53015@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53016 .post_exception = kgdboc_post_exp_handler,
53017 };
53018
53019+static struct kgdb_io kgdboc_io_ops_console = {
53020+ .name = "kgdboc",
53021+ .read_char = kgdboc_get_char,
53022+ .write_char = kgdboc_put_char,
53023+ .pre_exception = kgdboc_pre_exp_handler,
53024+ .post_exception = kgdboc_post_exp_handler,
53025+ .is_console = 1
53026+};
53027+
53028 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53029 /* This is only available if kgdboc is a built in for early debugging */
53030 static int __init kgdboc_early_init(char *opt)
53031diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53032index c88b522..e763029 100644
53033--- a/drivers/tty/serial/msm_serial.c
53034+++ b/drivers/tty/serial/msm_serial.c
53035@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
53036 .cons = MSM_CONSOLE,
53037 };
53038
53039-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53040+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53041
53042 static const struct of_device_id msm_uartdm_table[] = {
53043 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53044@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53045 line = pdev->id;
53046
53047 if (line < 0)
53048- line = atomic_inc_return(&msm_uart_next_id) - 1;
53049+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53050
53051 if (unlikely(line < 0 || line >= UART_NR))
53052 return -ENXIO;
53053diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53054index 107e807..d4a02fa 100644
53055--- a/drivers/tty/serial/samsung.c
53056+++ b/drivers/tty/serial/samsung.c
53057@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53058 }
53059 }
53060
53061+static int s3c64xx_serial_startup(struct uart_port *port);
53062 static int s3c24xx_serial_startup(struct uart_port *port)
53063 {
53064 struct s3c24xx_uart_port *ourport = to_ourport(port);
53065 int ret;
53066
53067+ /* Startup sequence is different for s3c64xx and higher SoC's */
53068+ if (s3c24xx_serial_has_interrupt_mask(port))
53069+ return s3c64xx_serial_startup(port);
53070+
53071 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53072 port, (unsigned long long)port->mapbase, port->membase);
53073
53074@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53075 /* setup info for port */
53076 port->dev = &platdev->dev;
53077
53078- /* Startup sequence is different for s3c64xx and higher SoC's */
53079- if (s3c24xx_serial_has_interrupt_mask(port))
53080- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53081-
53082 port->uartclk = 1;
53083
53084 if (cfg->uart_flags & UPF_CONS_FLOW) {
53085diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53086index 984605b..e538330 100644
53087--- a/drivers/tty/serial/serial_core.c
53088+++ b/drivers/tty/serial/serial_core.c
53089@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53090 state = drv->state + tty->index;
53091 port = &state->port;
53092 spin_lock_irq(&port->lock);
53093- --port->count;
53094+ atomic_dec(&port->count);
53095 spin_unlock_irq(&port->lock);
53096 return;
53097 }
53098@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53099
53100 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53101
53102- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53103+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53104 return;
53105
53106 /*
53107@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
53108 uart_flush_buffer(tty);
53109 uart_shutdown(tty, state);
53110 spin_lock_irqsave(&port->lock, flags);
53111- port->count = 0;
53112+ atomic_set(&port->count, 0);
53113 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53114 spin_unlock_irqrestore(&port->lock, flags);
53115 tty_port_tty_set(port, NULL);
53116@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53117 pr_debug("uart_open(%d) called\n", line);
53118
53119 spin_lock_irq(&port->lock);
53120- ++port->count;
53121+ atomic_inc(&port->count);
53122 spin_unlock_irq(&port->lock);
53123
53124 /*
53125diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53126index b799170..87dafd5 100644
53127--- a/drivers/tty/synclink.c
53128+++ b/drivers/tty/synclink.c
53129@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53130
53131 if (debug_level >= DEBUG_LEVEL_INFO)
53132 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53133- __FILE__,__LINE__, info->device_name, info->port.count);
53134+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53135
53136 if (tty_port_close_start(&info->port, tty, filp) == 0)
53137 goto cleanup;
53138@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53139 cleanup:
53140 if (debug_level >= DEBUG_LEVEL_INFO)
53141 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53142- tty->driver->name, info->port.count);
53143+ tty->driver->name, atomic_read(&info->port.count));
53144
53145 } /* end of mgsl_close() */
53146
53147@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53148
53149 mgsl_flush_buffer(tty);
53150 shutdown(info);
53151-
53152- info->port.count = 0;
53153+
53154+ atomic_set(&info->port.count, 0);
53155 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53156 info->port.tty = NULL;
53157
53158@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53159
53160 if (debug_level >= DEBUG_LEVEL_INFO)
53161 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53162- __FILE__,__LINE__, tty->driver->name, port->count );
53163+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53164
53165 spin_lock_irqsave(&info->irq_spinlock, flags);
53166- port->count--;
53167+ atomic_dec(&port->count);
53168 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53169 port->blocked_open++;
53170
53171@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53172
53173 if (debug_level >= DEBUG_LEVEL_INFO)
53174 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53175- __FILE__,__LINE__, tty->driver->name, port->count );
53176+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53177
53178 tty_unlock(tty);
53179 schedule();
53180@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53181
53182 /* FIXME: Racy on hangup during close wait */
53183 if (!tty_hung_up_p(filp))
53184- port->count++;
53185+ atomic_inc(&port->count);
53186 port->blocked_open--;
53187
53188 if (debug_level >= DEBUG_LEVEL_INFO)
53189 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53190- __FILE__,__LINE__, tty->driver->name, port->count );
53191+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53192
53193 if (!retval)
53194 port->flags |= ASYNC_NORMAL_ACTIVE;
53195@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53196
53197 if (debug_level >= DEBUG_LEVEL_INFO)
53198 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53199- __FILE__,__LINE__,tty->driver->name, info->port.count);
53200+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53201
53202 /* If port is closing, signal caller to try again */
53203 if (info->port.flags & ASYNC_CLOSING){
53204@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53205 spin_unlock_irqrestore(&info->netlock, flags);
53206 goto cleanup;
53207 }
53208- info->port.count++;
53209+ atomic_inc(&info->port.count);
53210 spin_unlock_irqrestore(&info->netlock, flags);
53211
53212- if (info->port.count == 1) {
53213+ if (atomic_read(&info->port.count) == 1) {
53214 /* 1st open on this device, init hardware */
53215 retval = startup(info);
53216 if (retval < 0)
53217@@ -3442,8 +3442,8 @@ cleanup:
53218 if (retval) {
53219 if (tty->count == 1)
53220 info->port.tty = NULL; /* tty layer will release tty struct */
53221- if(info->port.count)
53222- info->port.count--;
53223+ if (atomic_read(&info->port.count))
53224+ atomic_dec(&info->port.count);
53225 }
53226
53227 return retval;
53228@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53229 unsigned short new_crctype;
53230
53231 /* return error if TTY interface open */
53232- if (info->port.count)
53233+ if (atomic_read(&info->port.count))
53234 return -EBUSY;
53235
53236 switch (encoding)
53237@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53238
53239 /* arbitrate between network and tty opens */
53240 spin_lock_irqsave(&info->netlock, flags);
53241- if (info->port.count != 0 || info->netcount != 0) {
53242+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53243 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53244 spin_unlock_irqrestore(&info->netlock, flags);
53245 return -EBUSY;
53246@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53247 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53248
53249 /* return error if TTY interface open */
53250- if (info->port.count)
53251+ if (atomic_read(&info->port.count))
53252 return -EBUSY;
53253
53254 if (cmd != SIOCWANDEV)
53255diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53256index 0e8c39b..e0cb171 100644
53257--- a/drivers/tty/synclink_gt.c
53258+++ b/drivers/tty/synclink_gt.c
53259@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53260 tty->driver_data = info;
53261 info->port.tty = tty;
53262
53263- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53264+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53265
53266 /* If port is closing, signal caller to try again */
53267 if (info->port.flags & ASYNC_CLOSING){
53268@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53269 mutex_unlock(&info->port.mutex);
53270 goto cleanup;
53271 }
53272- info->port.count++;
53273+ atomic_inc(&info->port.count);
53274 spin_unlock_irqrestore(&info->netlock, flags);
53275
53276- if (info->port.count == 1) {
53277+ if (atomic_read(&info->port.count) == 1) {
53278 /* 1st open on this device, init hardware */
53279 retval = startup(info);
53280 if (retval < 0) {
53281@@ -715,8 +715,8 @@ cleanup:
53282 if (retval) {
53283 if (tty->count == 1)
53284 info->port.tty = NULL; /* tty layer will release tty struct */
53285- if(info->port.count)
53286- info->port.count--;
53287+ if(atomic_read(&info->port.count))
53288+ atomic_dec(&info->port.count);
53289 }
53290
53291 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53292@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53293
53294 if (sanity_check(info, tty->name, "close"))
53295 return;
53296- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53297+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53298
53299 if (tty_port_close_start(&info->port, tty, filp) == 0)
53300 goto cleanup;
53301@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53302 tty_port_close_end(&info->port, tty);
53303 info->port.tty = NULL;
53304 cleanup:
53305- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53306+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53307 }
53308
53309 static void hangup(struct tty_struct *tty)
53310@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53311 shutdown(info);
53312
53313 spin_lock_irqsave(&info->port.lock, flags);
53314- info->port.count = 0;
53315+ atomic_set(&info->port.count, 0);
53316 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53317 info->port.tty = NULL;
53318 spin_unlock_irqrestore(&info->port.lock, flags);
53319@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53320 unsigned short new_crctype;
53321
53322 /* return error if TTY interface open */
53323- if (info->port.count)
53324+ if (atomic_read(&info->port.count))
53325 return -EBUSY;
53326
53327 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53328@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53329
53330 /* arbitrate between network and tty opens */
53331 spin_lock_irqsave(&info->netlock, flags);
53332- if (info->port.count != 0 || info->netcount != 0) {
53333+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53334 DBGINFO(("%s hdlc_open busy\n", dev->name));
53335 spin_unlock_irqrestore(&info->netlock, flags);
53336 return -EBUSY;
53337@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53338 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53339
53340 /* return error if TTY interface open */
53341- if (info->port.count)
53342+ if (atomic_read(&info->port.count))
53343 return -EBUSY;
53344
53345 if (cmd != SIOCWANDEV)
53346@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53347 if (port == NULL)
53348 continue;
53349 spin_lock(&port->lock);
53350- if ((port->port.count || port->netcount) &&
53351+ if ((atomic_read(&port->port.count) || port->netcount) &&
53352 port->pending_bh && !port->bh_running &&
53353 !port->bh_requested) {
53354 DBGISR(("%s bh queued\n", port->device_name));
53355@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53356 add_wait_queue(&port->open_wait, &wait);
53357
53358 spin_lock_irqsave(&info->lock, flags);
53359- port->count--;
53360+ atomic_dec(&port->count);
53361 spin_unlock_irqrestore(&info->lock, flags);
53362 port->blocked_open++;
53363
53364@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53365 remove_wait_queue(&port->open_wait, &wait);
53366
53367 if (!tty_hung_up_p(filp))
53368- port->count++;
53369+ atomic_inc(&port->count);
53370 port->blocked_open--;
53371
53372 if (!retval)
53373diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53374index c3f9091..abe4601 100644
53375--- a/drivers/tty/synclinkmp.c
53376+++ b/drivers/tty/synclinkmp.c
53377@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53378
53379 if (debug_level >= DEBUG_LEVEL_INFO)
53380 printk("%s(%d):%s open(), old ref count = %d\n",
53381- __FILE__,__LINE__,tty->driver->name, info->port.count);
53382+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53383
53384 /* If port is closing, signal caller to try again */
53385 if (info->port.flags & ASYNC_CLOSING){
53386@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53387 spin_unlock_irqrestore(&info->netlock, flags);
53388 goto cleanup;
53389 }
53390- info->port.count++;
53391+ atomic_inc(&info->port.count);
53392 spin_unlock_irqrestore(&info->netlock, flags);
53393
53394- if (info->port.count == 1) {
53395+ if (atomic_read(&info->port.count) == 1) {
53396 /* 1st open on this device, init hardware */
53397 retval = startup(info);
53398 if (retval < 0)
53399@@ -796,8 +796,8 @@ cleanup:
53400 if (retval) {
53401 if (tty->count == 1)
53402 info->port.tty = NULL; /* tty layer will release tty struct */
53403- if(info->port.count)
53404- info->port.count--;
53405+ if(atomic_read(&info->port.count))
53406+ atomic_dec(&info->port.count);
53407 }
53408
53409 return retval;
53410@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53411
53412 if (debug_level >= DEBUG_LEVEL_INFO)
53413 printk("%s(%d):%s close() entry, count=%d\n",
53414- __FILE__,__LINE__, info->device_name, info->port.count);
53415+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53416
53417 if (tty_port_close_start(&info->port, tty, filp) == 0)
53418 goto cleanup;
53419@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53420 cleanup:
53421 if (debug_level >= DEBUG_LEVEL_INFO)
53422 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53423- tty->driver->name, info->port.count);
53424+ tty->driver->name, atomic_read(&info->port.count));
53425 }
53426
53427 /* Called by tty_hangup() when a hangup is signaled.
53428@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53429 shutdown(info);
53430
53431 spin_lock_irqsave(&info->port.lock, flags);
53432- info->port.count = 0;
53433+ atomic_set(&info->port.count, 0);
53434 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53435 info->port.tty = NULL;
53436 spin_unlock_irqrestore(&info->port.lock, flags);
53437@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53438 unsigned short new_crctype;
53439
53440 /* return error if TTY interface open */
53441- if (info->port.count)
53442+ if (atomic_read(&info->port.count))
53443 return -EBUSY;
53444
53445 switch (encoding)
53446@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53447
53448 /* arbitrate between network and tty opens */
53449 spin_lock_irqsave(&info->netlock, flags);
53450- if (info->port.count != 0 || info->netcount != 0) {
53451+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53452 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53453 spin_unlock_irqrestore(&info->netlock, flags);
53454 return -EBUSY;
53455@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53456 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53457
53458 /* return error if TTY interface open */
53459- if (info->port.count)
53460+ if (atomic_read(&info->port.count))
53461 return -EBUSY;
53462
53463 if (cmd != SIOCWANDEV)
53464@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53465 * do not request bottom half processing if the
53466 * device is not open in a normal mode.
53467 */
53468- if ( port && (port->port.count || port->netcount) &&
53469+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53470 port->pending_bh && !port->bh_running &&
53471 !port->bh_requested ) {
53472 if ( debug_level >= DEBUG_LEVEL_ISR )
53473@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53474
53475 if (debug_level >= DEBUG_LEVEL_INFO)
53476 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53477- __FILE__,__LINE__, tty->driver->name, port->count );
53478+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53479
53480 spin_lock_irqsave(&info->lock, flags);
53481- port->count--;
53482+ atomic_dec(&port->count);
53483 spin_unlock_irqrestore(&info->lock, flags);
53484 port->blocked_open++;
53485
53486@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53487
53488 if (debug_level >= DEBUG_LEVEL_INFO)
53489 printk("%s(%d):%s block_til_ready() count=%d\n",
53490- __FILE__,__LINE__, tty->driver->name, port->count );
53491+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53492
53493 tty_unlock(tty);
53494 schedule();
53495@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53496 set_current_state(TASK_RUNNING);
53497 remove_wait_queue(&port->open_wait, &wait);
53498 if (!tty_hung_up_p(filp))
53499- port->count++;
53500+ atomic_inc(&port->count);
53501 port->blocked_open--;
53502
53503 if (debug_level >= DEBUG_LEVEL_INFO)
53504 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53505- __FILE__,__LINE__, tty->driver->name, port->count );
53506+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53507
53508 if (!retval)
53509 port->flags |= ASYNC_NORMAL_ACTIVE;
53510diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53511index 42bad18..447d7a2 100644
53512--- a/drivers/tty/sysrq.c
53513+++ b/drivers/tty/sysrq.c
53514@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53515 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53516 size_t count, loff_t *ppos)
53517 {
53518- if (count) {
53519+ if (count && capable(CAP_SYS_ADMIN)) {
53520 char c;
53521
53522 if (get_user(c, buf))
53523diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53524index 2bb4dfc..a7f6e86 100644
53525--- a/drivers/tty/tty_io.c
53526+++ b/drivers/tty/tty_io.c
53527@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53528
53529 void tty_default_fops(struct file_operations *fops)
53530 {
53531- *fops = tty_fops;
53532+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53533 }
53534
53535 /*
53536diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53537index 3737f55..7cef448 100644
53538--- a/drivers/tty/tty_ldisc.c
53539+++ b/drivers/tty/tty_ldisc.c
53540@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53541 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53542 tty_ldiscs[disc] = new_ldisc;
53543 new_ldisc->num = disc;
53544- new_ldisc->refcount = 0;
53545+ atomic_set(&new_ldisc->refcount, 0);
53546 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53547
53548 return ret;
53549@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53550 return -EINVAL;
53551
53552 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53553- if (tty_ldiscs[disc]->refcount)
53554+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53555 ret = -EBUSY;
53556 else
53557 tty_ldiscs[disc] = NULL;
53558@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53559 if (ldops) {
53560 ret = ERR_PTR(-EAGAIN);
53561 if (try_module_get(ldops->owner)) {
53562- ldops->refcount++;
53563+ atomic_inc(&ldops->refcount);
53564 ret = ldops;
53565 }
53566 }
53567@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53568 unsigned long flags;
53569
53570 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53571- ldops->refcount--;
53572+ atomic_dec(&ldops->refcount);
53573 module_put(ldops->owner);
53574 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53575 }
53576diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53577index 40b31835..94d92ae 100644
53578--- a/drivers/tty/tty_port.c
53579+++ b/drivers/tty/tty_port.c
53580@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53581 unsigned long flags;
53582
53583 spin_lock_irqsave(&port->lock, flags);
53584- port->count = 0;
53585+ atomic_set(&port->count, 0);
53586 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53587 tty = port->tty;
53588 if (tty)
53589@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53590
53591 /* The port lock protects the port counts */
53592 spin_lock_irqsave(&port->lock, flags);
53593- port->count--;
53594+ atomic_dec(&port->count);
53595 port->blocked_open++;
53596 spin_unlock_irqrestore(&port->lock, flags);
53597
53598@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53599 we must not mess that up further */
53600 spin_lock_irqsave(&port->lock, flags);
53601 if (!tty_hung_up_p(filp))
53602- port->count++;
53603+ atomic_inc(&port->count);
53604 port->blocked_open--;
53605 if (retval == 0)
53606 port->flags |= ASYNC_NORMAL_ACTIVE;
53607@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53608 return 0;
53609
53610 spin_lock_irqsave(&port->lock, flags);
53611- if (tty->count == 1 && port->count != 1) {
53612+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
53613 printk(KERN_WARNING
53614 "tty_port_close_start: tty->count = 1 port count = %d.\n",
53615- port->count);
53616- port->count = 1;
53617+ atomic_read(&port->count));
53618+ atomic_set(&port->count, 1);
53619 }
53620- if (--port->count < 0) {
53621+ if (atomic_dec_return(&port->count) < 0) {
53622 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
53623- port->count);
53624- port->count = 0;
53625+ atomic_read(&port->count));
53626+ atomic_set(&port->count, 0);
53627 }
53628
53629- if (port->count) {
53630+ if (atomic_read(&port->count)) {
53631 spin_unlock_irqrestore(&port->lock, flags);
53632 return 0;
53633 }
53634@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
53635 struct file *filp)
53636 {
53637 spin_lock_irq(&port->lock);
53638- ++port->count;
53639+ atomic_inc(&port->count);
53640 spin_unlock_irq(&port->lock);
53641 tty_port_tty_set(port, tty);
53642
53643diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
53644index 8a89f6e..50b32af 100644
53645--- a/drivers/tty/vt/keyboard.c
53646+++ b/drivers/tty/vt/keyboard.c
53647@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
53648 kbd->kbdmode == VC_OFF) &&
53649 value != KVAL(K_SAK))
53650 return; /* SAK is allowed even in raw mode */
53651+
53652+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53653+ {
53654+ void *func = fn_handler[value];
53655+ if (func == fn_show_state || func == fn_show_ptregs ||
53656+ func == fn_show_mem)
53657+ return;
53658+ }
53659+#endif
53660+
53661 fn_handler[value](vc);
53662 }
53663
53664@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53665 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
53666 return -EFAULT;
53667
53668- if (!capable(CAP_SYS_TTY_CONFIG))
53669- perm = 0;
53670-
53671 switch (cmd) {
53672 case KDGKBENT:
53673 /* Ensure another thread doesn't free it under us */
53674@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53675 spin_unlock_irqrestore(&kbd_event_lock, flags);
53676 return put_user(val, &user_kbe->kb_value);
53677 case KDSKBENT:
53678+ if (!capable(CAP_SYS_TTY_CONFIG))
53679+ perm = 0;
53680+
53681 if (!perm)
53682 return -EPERM;
53683 if (!i && v == K_NOSUCHMAP) {
53684@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53685 int i, j, k;
53686 int ret;
53687
53688- if (!capable(CAP_SYS_TTY_CONFIG))
53689- perm = 0;
53690-
53691 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
53692 if (!kbs) {
53693 ret = -ENOMEM;
53694@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53695 kfree(kbs);
53696 return ((p && *p) ? -EOVERFLOW : 0);
53697 case KDSKBSENT:
53698+ if (!capable(CAP_SYS_TTY_CONFIG))
53699+ perm = 0;
53700+
53701 if (!perm) {
53702 ret = -EPERM;
53703 goto reterr;
53704diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
53705index 6276f13..84f2449 100644
53706--- a/drivers/uio/uio.c
53707+++ b/drivers/uio/uio.c
53708@@ -25,6 +25,7 @@
53709 #include <linux/kobject.h>
53710 #include <linux/cdev.h>
53711 #include <linux/uio_driver.h>
53712+#include <asm/local.h>
53713
53714 #define UIO_MAX_DEVICES (1U << MINORBITS)
53715
53716@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
53717 struct device_attribute *attr, char *buf)
53718 {
53719 struct uio_device *idev = dev_get_drvdata(dev);
53720- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
53721+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
53722 }
53723 static DEVICE_ATTR_RO(event);
53724
53725@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
53726 {
53727 struct uio_device *idev = info->uio_dev;
53728
53729- atomic_inc(&idev->event);
53730+ atomic_inc_unchecked(&idev->event);
53731 wake_up_interruptible(&idev->wait);
53732 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
53733 }
53734@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
53735 }
53736
53737 listener->dev = idev;
53738- listener->event_count = atomic_read(&idev->event);
53739+ listener->event_count = atomic_read_unchecked(&idev->event);
53740 filep->private_data = listener;
53741
53742 if (idev->info->open) {
53743@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
53744 return -EIO;
53745
53746 poll_wait(filep, &idev->wait, wait);
53747- if (listener->event_count != atomic_read(&idev->event))
53748+ if (listener->event_count != atomic_read_unchecked(&idev->event))
53749 return POLLIN | POLLRDNORM;
53750 return 0;
53751 }
53752@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
53753 do {
53754 set_current_state(TASK_INTERRUPTIBLE);
53755
53756- event_count = atomic_read(&idev->event);
53757+ event_count = atomic_read_unchecked(&idev->event);
53758 if (event_count != listener->event_count) {
53759 if (copy_to_user(buf, &event_count, count))
53760 retval = -EFAULT;
53761@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
53762 static int uio_find_mem_index(struct vm_area_struct *vma)
53763 {
53764 struct uio_device *idev = vma->vm_private_data;
53765+ unsigned long size;
53766
53767 if (vma->vm_pgoff < MAX_UIO_MAPS) {
53768- if (idev->info->mem[vma->vm_pgoff].size == 0)
53769+ size = idev->info->mem[vma->vm_pgoff].size;
53770+ if (size == 0)
53771+ return -1;
53772+ if (vma->vm_end - vma->vm_start > size)
53773 return -1;
53774 return (int)vma->vm_pgoff;
53775 }
53776@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
53777 idev->owner = owner;
53778 idev->info = info;
53779 init_waitqueue_head(&idev->wait);
53780- atomic_set(&idev->event, 0);
53781+ atomic_set_unchecked(&idev->event, 0);
53782
53783 ret = uio_get_minor(idev);
53784 if (ret)
53785diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
53786index 813d4d3..a71934f 100644
53787--- a/drivers/usb/atm/cxacru.c
53788+++ b/drivers/usb/atm/cxacru.c
53789@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
53790 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
53791 if (ret < 2)
53792 return -EINVAL;
53793- if (index < 0 || index > 0x7f)
53794+ if (index > 0x7f)
53795 return -EINVAL;
53796 pos += tmp;
53797
53798diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
53799index dada014..1d0d517 100644
53800--- a/drivers/usb/atm/usbatm.c
53801+++ b/drivers/usb/atm/usbatm.c
53802@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53803 if (printk_ratelimit())
53804 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
53805 __func__, vpi, vci);
53806- atomic_inc(&vcc->stats->rx_err);
53807+ atomic_inc_unchecked(&vcc->stats->rx_err);
53808 return;
53809 }
53810
53811@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53812 if (length > ATM_MAX_AAL5_PDU) {
53813 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
53814 __func__, length, vcc);
53815- atomic_inc(&vcc->stats->rx_err);
53816+ atomic_inc_unchecked(&vcc->stats->rx_err);
53817 goto out;
53818 }
53819
53820@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53821 if (sarb->len < pdu_length) {
53822 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
53823 __func__, pdu_length, sarb->len, vcc);
53824- atomic_inc(&vcc->stats->rx_err);
53825+ atomic_inc_unchecked(&vcc->stats->rx_err);
53826 goto out;
53827 }
53828
53829 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
53830 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
53831 __func__, vcc);
53832- atomic_inc(&vcc->stats->rx_err);
53833+ atomic_inc_unchecked(&vcc->stats->rx_err);
53834 goto out;
53835 }
53836
53837@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53838 if (printk_ratelimit())
53839 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
53840 __func__, length);
53841- atomic_inc(&vcc->stats->rx_drop);
53842+ atomic_inc_unchecked(&vcc->stats->rx_drop);
53843 goto out;
53844 }
53845
53846@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53847
53848 vcc->push(vcc, skb);
53849
53850- atomic_inc(&vcc->stats->rx);
53851+ atomic_inc_unchecked(&vcc->stats->rx);
53852 out:
53853 skb_trim(sarb, 0);
53854 }
53855@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
53856 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
53857
53858 usbatm_pop(vcc, skb);
53859- atomic_inc(&vcc->stats->tx);
53860+ atomic_inc_unchecked(&vcc->stats->tx);
53861
53862 skb = skb_dequeue(&instance->sndqueue);
53863 }
53864@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
53865 if (!left--)
53866 return sprintf(page,
53867 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
53868- atomic_read(&atm_dev->stats.aal5.tx),
53869- atomic_read(&atm_dev->stats.aal5.tx_err),
53870- atomic_read(&atm_dev->stats.aal5.rx),
53871- atomic_read(&atm_dev->stats.aal5.rx_err),
53872- atomic_read(&atm_dev->stats.aal5.rx_drop));
53873+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
53874+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
53875+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
53876+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
53877+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
53878
53879 if (!left--) {
53880 if (instance->disconnected)
53881diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
53882index 2a3bbdf..91d72cf 100644
53883--- a/drivers/usb/core/devices.c
53884+++ b/drivers/usb/core/devices.c
53885@@ -126,7 +126,7 @@ static const char format_endpt[] =
53886 * time it gets called.
53887 */
53888 static struct device_connect_event {
53889- atomic_t count;
53890+ atomic_unchecked_t count;
53891 wait_queue_head_t wait;
53892 } device_event = {
53893 .count = ATOMIC_INIT(1),
53894@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
53895
53896 void usbfs_conn_disc_event(void)
53897 {
53898- atomic_add(2, &device_event.count);
53899+ atomic_add_unchecked(2, &device_event.count);
53900 wake_up(&device_event.wait);
53901 }
53902
53903@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
53904
53905 poll_wait(file, &device_event.wait, wait);
53906
53907- event_count = atomic_read(&device_event.count);
53908+ event_count = atomic_read_unchecked(&device_event.count);
53909 if (file->f_version != event_count) {
53910 file->f_version = event_count;
53911 return POLLIN | POLLRDNORM;
53912diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
53913index e500243..401300f 100644
53914--- a/drivers/usb/core/devio.c
53915+++ b/drivers/usb/core/devio.c
53916@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53917 struct usb_dev_state *ps = file->private_data;
53918 struct usb_device *dev = ps->dev;
53919 ssize_t ret = 0;
53920- unsigned len;
53921+ size_t len;
53922 loff_t pos;
53923 int i;
53924
53925@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53926 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
53927 struct usb_config_descriptor *config =
53928 (struct usb_config_descriptor *)dev->rawdescriptors[i];
53929- unsigned int length = le16_to_cpu(config->wTotalLength);
53930+ size_t length = le16_to_cpu(config->wTotalLength);
53931
53932 if (*ppos < pos + length) {
53933
53934 /* The descriptor may claim to be longer than it
53935 * really is. Here is the actual allocated length. */
53936- unsigned alloclen =
53937+ size_t alloclen =
53938 le16_to_cpu(dev->config[i].desc.wTotalLength);
53939
53940- len = length - (*ppos - pos);
53941+ len = length + pos - *ppos;
53942 if (len > nbytes)
53943 len = nbytes;
53944
53945 /* Simply don't write (skip over) unallocated parts */
53946 if (alloclen > (*ppos - pos)) {
53947- alloclen -= (*ppos - pos);
53948+ alloclen = alloclen + pos - *ppos;
53949 if (copy_to_user(buf,
53950 dev->rawdescriptors[i] + (*ppos - pos),
53951 min(len, alloclen))) {
53952diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
53953index 45a915c..09f9735 100644
53954--- a/drivers/usb/core/hcd.c
53955+++ b/drivers/usb/core/hcd.c
53956@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53957 */
53958 usb_get_urb(urb);
53959 atomic_inc(&urb->use_count);
53960- atomic_inc(&urb->dev->urbnum);
53961+ atomic_inc_unchecked(&urb->dev->urbnum);
53962 usbmon_urb_submit(&hcd->self, urb);
53963
53964 /* NOTE requirements on root-hub callers (usbfs and the hub
53965@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53966 urb->hcpriv = NULL;
53967 INIT_LIST_HEAD(&urb->urb_list);
53968 atomic_dec(&urb->use_count);
53969- atomic_dec(&urb->dev->urbnum);
53970+ atomic_dec_unchecked(&urb->dev->urbnum);
53971 if (atomic_read(&urb->reject))
53972 wake_up(&usb_kill_urb_queue);
53973 usb_put_urb(urb);
53974diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
53975index b4bfa3a..008f926 100644
53976--- a/drivers/usb/core/hub.c
53977+++ b/drivers/usb/core/hub.c
53978@@ -26,6 +26,7 @@
53979 #include <linux/mutex.h>
53980 #include <linux/random.h>
53981 #include <linux/pm_qos.h>
53982+#include <linux/grsecurity.h>
53983
53984 #include <asm/uaccess.h>
53985 #include <asm/byteorder.h>
53986@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
53987 goto done;
53988 return;
53989 }
53990+
53991+ if (gr_handle_new_usb())
53992+ goto done;
53993+
53994 if (hub_is_superspeed(hub->hdev))
53995 unit_load = 150;
53996 else
53997diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
53998index f368d20..0c30ac5 100644
53999--- a/drivers/usb/core/message.c
54000+++ b/drivers/usb/core/message.c
54001@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54002 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54003 * error number.
54004 */
54005-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54006+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54007 __u8 requesttype, __u16 value, __u16 index, void *data,
54008 __u16 size, int timeout)
54009 {
54010@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54011 * If successful, 0. Otherwise a negative error number. The number of actual
54012 * bytes transferred will be stored in the @actual_length parameter.
54013 */
54014-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54015+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54016 void *data, int len, int *actual_length, int timeout)
54017 {
54018 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54019@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54020 * bytes transferred will be stored in the @actual_length parameter.
54021 *
54022 */
54023-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54024+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54025 void *data, int len, int *actual_length, int timeout)
54026 {
54027 struct urb *urb;
54028diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54029index d269738..7340cd7 100644
54030--- a/drivers/usb/core/sysfs.c
54031+++ b/drivers/usb/core/sysfs.c
54032@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54033 struct usb_device *udev;
54034
54035 udev = to_usb_device(dev);
54036- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54037+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54038 }
54039 static DEVICE_ATTR_RO(urbnum);
54040
54041diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54042index b1fb9ae..4224885 100644
54043--- a/drivers/usb/core/usb.c
54044+++ b/drivers/usb/core/usb.c
54045@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54046 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54047 dev->state = USB_STATE_ATTACHED;
54048 dev->lpm_disable_count = 1;
54049- atomic_set(&dev->urbnum, 0);
54050+ atomic_set_unchecked(&dev->urbnum, 0);
54051
54052 INIT_LIST_HEAD(&dev->ep0.urb_list);
54053 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54054diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54055index 8cfc319..4868255 100644
54056--- a/drivers/usb/early/ehci-dbgp.c
54057+++ b/drivers/usb/early/ehci-dbgp.c
54058@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54059
54060 #ifdef CONFIG_KGDB
54061 static struct kgdb_io kgdbdbgp_io_ops;
54062-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54063+static struct kgdb_io kgdbdbgp_io_ops_console;
54064+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54065 #else
54066 #define dbgp_kgdb_mode (0)
54067 #endif
54068@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54069 .write_char = kgdbdbgp_write_char,
54070 };
54071
54072+static struct kgdb_io kgdbdbgp_io_ops_console = {
54073+ .name = "kgdbdbgp",
54074+ .read_char = kgdbdbgp_read_char,
54075+ .write_char = kgdbdbgp_write_char,
54076+ .is_console = 1
54077+};
54078+
54079 static int kgdbdbgp_wait_time;
54080
54081 static int __init kgdbdbgp_parse_config(char *str)
54082@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54083 ptr++;
54084 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54085 }
54086- kgdb_register_io_module(&kgdbdbgp_io_ops);
54087- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54088+ if (early_dbgp_console.index != -1)
54089+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54090+ else
54091+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54092
54093 return 0;
54094 }
54095diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54096index e971584..03495ab 100644
54097--- a/drivers/usb/gadget/function/f_uac1.c
54098+++ b/drivers/usb/gadget/function/f_uac1.c
54099@@ -14,6 +14,7 @@
54100 #include <linux/module.h>
54101 #include <linux/device.h>
54102 #include <linux/atomic.h>
54103+#include <linux/module.h>
54104
54105 #include "u_uac1.h"
54106
54107diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54108index 491082a..dfd7d17 100644
54109--- a/drivers/usb/gadget/function/u_serial.c
54110+++ b/drivers/usb/gadget/function/u_serial.c
54111@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54112 spin_lock_irq(&port->port_lock);
54113
54114 /* already open? Great. */
54115- if (port->port.count) {
54116+ if (atomic_read(&port->port.count)) {
54117 status = 0;
54118- port->port.count++;
54119+ atomic_inc(&port->port.count);
54120
54121 /* currently opening/closing? wait ... */
54122 } else if (port->openclose) {
54123@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54124 tty->driver_data = port;
54125 port->port.tty = tty;
54126
54127- port->port.count = 1;
54128+ atomic_set(&port->port.count, 1);
54129 port->openclose = false;
54130
54131 /* if connected, start the I/O stream */
54132@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54133
54134 spin_lock_irq(&port->port_lock);
54135
54136- if (port->port.count != 1) {
54137- if (port->port.count == 0)
54138+ if (atomic_read(&port->port.count) != 1) {
54139+ if (atomic_read(&port->port.count) == 0)
54140 WARN_ON(1);
54141 else
54142- --port->port.count;
54143+ atomic_dec(&port->port.count);
54144 goto exit;
54145 }
54146
54147@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54148 * and sleep if necessary
54149 */
54150 port->openclose = true;
54151- port->port.count = 0;
54152+ atomic_set(&port->port.count, 0);
54153
54154 gser = port->port_usb;
54155 if (gser && gser->disconnect)
54156@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54157 int cond;
54158
54159 spin_lock_irq(&port->port_lock);
54160- cond = (port->port.count == 0) && !port->openclose;
54161+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54162 spin_unlock_irq(&port->port_lock);
54163 return cond;
54164 }
54165@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54166 /* if it's already open, start I/O ... and notify the serial
54167 * protocol about open/close status (connect/disconnect).
54168 */
54169- if (port->port.count) {
54170+ if (atomic_read(&port->port.count)) {
54171 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54172 gs_start_io(port);
54173 if (gser->connect)
54174@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54175
54176 port->port_usb = NULL;
54177 gser->ioport = NULL;
54178- if (port->port.count > 0 || port->openclose) {
54179+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54180 wake_up_interruptible(&port->drain_wait);
54181 if (port->port.tty)
54182 tty_hangup(port->port.tty);
54183@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54184
54185 /* finally, free any unused/unusable I/O buffers */
54186 spin_lock_irqsave(&port->port_lock, flags);
54187- if (port->port.count == 0 && !port->openclose)
54188+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54189 gs_buf_free(&port->port_write_buf);
54190 gs_free_requests(gser->out, &port->read_pool, NULL);
54191 gs_free_requests(gser->out, &port->read_queue, NULL);
54192diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54193index 53842a1..2bef3b6 100644
54194--- a/drivers/usb/gadget/function/u_uac1.c
54195+++ b/drivers/usb/gadget/function/u_uac1.c
54196@@ -17,6 +17,7 @@
54197 #include <linux/ctype.h>
54198 #include <linux/random.h>
54199 #include <linux/syscalls.h>
54200+#include <linux/module.h>
54201
54202 #include "u_uac1.h"
54203
54204diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54205index 118edb7..7a6415f 100644
54206--- a/drivers/usb/host/ehci-hub.c
54207+++ b/drivers/usb/host/ehci-hub.c
54208@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54209 urb->transfer_flags = URB_DIR_IN;
54210 usb_get_urb(urb);
54211 atomic_inc(&urb->use_count);
54212- atomic_inc(&urb->dev->urbnum);
54213+ atomic_inc_unchecked(&urb->dev->urbnum);
54214 urb->setup_dma = dma_map_single(
54215 hcd->self.controller,
54216 urb->setup_packet,
54217@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54218 urb->status = -EINPROGRESS;
54219 usb_get_urb(urb);
54220 atomic_inc(&urb->use_count);
54221- atomic_inc(&urb->dev->urbnum);
54222+ atomic_inc_unchecked(&urb->dev->urbnum);
54223 retval = submit_single_step_set_feature(hcd, urb, 0);
54224 if (!retval && !wait_for_completion_timeout(&done,
54225 msecs_to_jiffies(2000))) {
54226diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54227index 1db0626..4948782 100644
54228--- a/drivers/usb/host/hwa-hc.c
54229+++ b/drivers/usb/host/hwa-hc.c
54230@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54231 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54232 struct wahc *wa = &hwahc->wa;
54233 struct device *dev = &wa->usb_iface->dev;
54234- u8 mas_le[UWB_NUM_MAS/8];
54235+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54236+
54237+ if (mas_le == NULL)
54238+ return -ENOMEM;
54239
54240 /* Set the stream index */
54241 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54242@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54243 WUSB_REQ_SET_WUSB_MAS,
54244 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54245 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54246- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54247+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54248 if (result < 0)
54249 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54250 out:
54251+ kfree(mas_le);
54252+
54253 return result;
54254 }
54255
54256diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54257index b3d245e..99549ed 100644
54258--- a/drivers/usb/misc/appledisplay.c
54259+++ b/drivers/usb/misc/appledisplay.c
54260@@ -84,7 +84,7 @@ struct appledisplay {
54261 struct mutex sysfslock; /* concurrent read and write */
54262 };
54263
54264-static atomic_t count_displays = ATOMIC_INIT(0);
54265+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54266 static struct workqueue_struct *wq;
54267
54268 static void appledisplay_complete(struct urb *urb)
54269@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54270
54271 /* Register backlight device */
54272 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54273- atomic_inc_return(&count_displays) - 1);
54274+ atomic_inc_return_unchecked(&count_displays) - 1);
54275 memset(&props, 0, sizeof(struct backlight_properties));
54276 props.type = BACKLIGHT_RAW;
54277 props.max_brightness = 0xff;
54278diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54279index 29fa1c3..a57b08e 100644
54280--- a/drivers/usb/serial/console.c
54281+++ b/drivers/usb/serial/console.c
54282@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
54283
54284 info->port = port;
54285
54286- ++port->port.count;
54287+ atomic_inc(&port->port.count);
54288 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54289 if (serial->type->set_termios) {
54290 /*
54291@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
54292 }
54293 /* Now that any required fake tty operations are completed restore
54294 * the tty port count */
54295- --port->port.count;
54296+ atomic_dec(&port->port.count);
54297 /* The console is special in terms of closing the device so
54298 * indicate this port is now acting as a system console. */
54299 port->port.console = 1;
54300@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
54301 put_tty:
54302 tty_kref_put(tty);
54303 reset_open_count:
54304- port->port.count = 0;
54305+ atomic_set(&port->port.count, 0);
54306 usb_autopm_put_interface(serial->interface);
54307 error_get_interface:
54308 usb_serial_put(serial);
54309@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
54310 static void usb_console_write(struct console *co,
54311 const char *buf, unsigned count)
54312 {
54313- static struct usbcons_info *info = &usbcons_info;
54314+ struct usbcons_info *info = &usbcons_info;
54315 struct usb_serial_port *port = info->port;
54316 struct usb_serial *serial;
54317 int retval = -ENODEV;
54318diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54319index 307e339..6aa97cb 100644
54320--- a/drivers/usb/storage/usb.h
54321+++ b/drivers/usb/storage/usb.h
54322@@ -63,7 +63,7 @@ struct us_unusual_dev {
54323 __u8 useProtocol;
54324 __u8 useTransport;
54325 int (*initFunction)(struct us_data *);
54326-};
54327+} __do_const;
54328
54329
54330 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54331diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54332index a863a98..d272795 100644
54333--- a/drivers/usb/usbip/vhci.h
54334+++ b/drivers/usb/usbip/vhci.h
54335@@ -83,7 +83,7 @@ struct vhci_hcd {
54336 unsigned resuming:1;
54337 unsigned long re_timeout;
54338
54339- atomic_t seqnum;
54340+ atomic_unchecked_t seqnum;
54341
54342 /*
54343 * NOTE:
54344diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54345index 1ae9d40..c62604b 100644
54346--- a/drivers/usb/usbip/vhci_hcd.c
54347+++ b/drivers/usb/usbip/vhci_hcd.c
54348@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54349
54350 spin_lock(&vdev->priv_lock);
54351
54352- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54353+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54354 if (priv->seqnum == 0xffff)
54355 dev_info(&urb->dev->dev, "seqnum max\n");
54356
54357@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54358 return -ENOMEM;
54359 }
54360
54361- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54362+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54363 if (unlink->seqnum == 0xffff)
54364 pr_info("seqnum max\n");
54365
54366@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
54367 vdev->rhport = rhport;
54368 }
54369
54370- atomic_set(&vhci->seqnum, 0);
54371+ atomic_set_unchecked(&vhci->seqnum, 0);
54372 spin_lock_init(&vhci->lock);
54373
54374 hcd->power_budget = 0; /* no limit */
54375diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54376index 00e4a54..d676f85 100644
54377--- a/drivers/usb/usbip/vhci_rx.c
54378+++ b/drivers/usb/usbip/vhci_rx.c
54379@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54380 if (!urb) {
54381 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54382 pr_info("max seqnum %d\n",
54383- atomic_read(&the_controller->seqnum));
54384+ atomic_read_unchecked(&the_controller->seqnum));
54385 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54386 return;
54387 }
54388diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54389index edc7267..9f65ce2 100644
54390--- a/drivers/usb/wusbcore/wa-hc.h
54391+++ b/drivers/usb/wusbcore/wa-hc.h
54392@@ -240,7 +240,7 @@ struct wahc {
54393 spinlock_t xfer_list_lock;
54394 struct work_struct xfer_enqueue_work;
54395 struct work_struct xfer_error_work;
54396- atomic_t xfer_id_count;
54397+ atomic_unchecked_t xfer_id_count;
54398
54399 kernel_ulong_t quirks;
54400 };
54401@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54402 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54403 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54404 wa->dto_in_use = 0;
54405- atomic_set(&wa->xfer_id_count, 1);
54406+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54407 /* init the buf in URBs */
54408 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54409 usb_init_urb(&(wa->buf_in_urbs[index]));
54410diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54411index 69af4fd..da390d7 100644
54412--- a/drivers/usb/wusbcore/wa-xfer.c
54413+++ b/drivers/usb/wusbcore/wa-xfer.c
54414@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54415 */
54416 static void wa_xfer_id_init(struct wa_xfer *xfer)
54417 {
54418- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54419+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54420 }
54421
54422 /* Return the xfer's ID. */
54423diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54424index f018d8d..ccab63f 100644
54425--- a/drivers/vfio/vfio.c
54426+++ b/drivers/vfio/vfio.c
54427@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54428 return 0;
54429
54430 /* TODO Prevent device auto probing */
54431- WARN("Device %s added to live group %d!\n", dev_name(dev),
54432+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54433 iommu_group_id(group->iommu_group));
54434
54435 return 0;
54436diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
54437index 9484d56..d415d69 100644
54438--- a/drivers/vhost/net.c
54439+++ b/drivers/vhost/net.c
54440@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
54441 break;
54442 }
54443 /* TODO: Should check and handle checksum. */
54444-
54445- hdr.num_buffers = cpu_to_vhost16(vq, headcount);
54446 if (likely(mergeable) &&
54447- memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
54448+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
54449 offsetof(typeof(hdr), num_buffers),
54450 sizeof hdr.num_buffers)) {
54451 vq_err(vq, "Failed num_buffers write");
54452diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54453index 3bb02c6..a01ff38 100644
54454--- a/drivers/vhost/vringh.c
54455+++ b/drivers/vhost/vringh.c
54456@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54457 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54458 {
54459 __virtio16 v = 0;
54460- int rc = get_user(v, (__force __virtio16 __user *)p);
54461+ int rc = get_user(v, (__force_user __virtio16 *)p);
54462 *val = vringh16_to_cpu(vrh, v);
54463 return rc;
54464 }
54465@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54466 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54467 {
54468 __virtio16 v = cpu_to_vringh16(vrh, val);
54469- return put_user(v, (__force __virtio16 __user *)p);
54470+ return put_user(v, (__force_user __virtio16 *)p);
54471 }
54472
54473 static inline int copydesc_user(void *dst, const void *src, size_t len)
54474 {
54475- return copy_from_user(dst, (__force void __user *)src, len) ?
54476+ return copy_from_user(dst, (void __force_user *)src, len) ?
54477 -EFAULT : 0;
54478 }
54479
54480@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54481 const struct vring_used_elem *src,
54482 unsigned int num)
54483 {
54484- return copy_to_user((__force void __user *)dst, src,
54485+ return copy_to_user((void __force_user *)dst, src,
54486 sizeof(*dst) * num) ? -EFAULT : 0;
54487 }
54488
54489 static inline int xfer_from_user(void *src, void *dst, size_t len)
54490 {
54491- return copy_from_user(dst, (__force void __user *)src, len) ?
54492+ return copy_from_user(dst, (void __force_user *)src, len) ?
54493 -EFAULT : 0;
54494 }
54495
54496 static inline int xfer_to_user(void *dst, void *src, size_t len)
54497 {
54498- return copy_to_user((__force void __user *)dst, src, len) ?
54499+ return copy_to_user((void __force_user *)dst, src, len) ?
54500 -EFAULT : 0;
54501 }
54502
54503@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54504 vrh->last_used_idx = 0;
54505 vrh->vring.num = num;
54506 /* vring expects kernel addresses, but only used via accessors. */
54507- vrh->vring.desc = (__force struct vring_desc *)desc;
54508- vrh->vring.avail = (__force struct vring_avail *)avail;
54509- vrh->vring.used = (__force struct vring_used *)used;
54510+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54511+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54512+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54513 return 0;
54514 }
54515 EXPORT_SYMBOL(vringh_init_user);
54516@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54517
54518 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54519 {
54520- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54521+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54522 return 0;
54523 }
54524
54525diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54526index 84a110a..96312c3 100644
54527--- a/drivers/video/backlight/kb3886_bl.c
54528+++ b/drivers/video/backlight/kb3886_bl.c
54529@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54530 static unsigned long kb3886bl_flags;
54531 #define KB3886BL_SUSPENDED 0x01
54532
54533-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54534+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54535 {
54536 .ident = "Sahara Touch-iT",
54537 .matches = {
54538diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54539index 1b0b233..6f34c2c 100644
54540--- a/drivers/video/fbdev/arcfb.c
54541+++ b/drivers/video/fbdev/arcfb.c
54542@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54543 return -ENOSPC;
54544
54545 err = 0;
54546- if ((count + p) > fbmemlength) {
54547+ if (count > (fbmemlength - p)) {
54548 count = fbmemlength - p;
54549 err = -ENOSPC;
54550 }
54551diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54552index aedf2fb..47c9aca 100644
54553--- a/drivers/video/fbdev/aty/aty128fb.c
54554+++ b/drivers/video/fbdev/aty/aty128fb.c
54555@@ -149,7 +149,7 @@ enum {
54556 };
54557
54558 /* Must match above enum */
54559-static char * const r128_family[] = {
54560+static const char * const r128_family[] = {
54561 "AGP",
54562 "PCI",
54563 "PRO AGP",
54564diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54565index 37ec09b..98f8862 100644
54566--- a/drivers/video/fbdev/aty/atyfb_base.c
54567+++ b/drivers/video/fbdev/aty/atyfb_base.c
54568@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54569 par->accel_flags = var->accel_flags; /* hack */
54570
54571 if (var->accel_flags) {
54572- info->fbops->fb_sync = atyfb_sync;
54573+ pax_open_kernel();
54574+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54575+ pax_close_kernel();
54576 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54577 } else {
54578- info->fbops->fb_sync = NULL;
54579+ pax_open_kernel();
54580+ *(void **)&info->fbops->fb_sync = NULL;
54581+ pax_close_kernel();
54582 info->flags |= FBINFO_HWACCEL_DISABLED;
54583 }
54584
54585diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54586index 2fa0317..4983f2a 100644
54587--- a/drivers/video/fbdev/aty/mach64_cursor.c
54588+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54589@@ -8,6 +8,7 @@
54590 #include "../core/fb_draw.h"
54591
54592 #include <asm/io.h>
54593+#include <asm/pgtable.h>
54594
54595 #ifdef __sparc__
54596 #include <asm/fbio.h>
54597@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54598 info->sprite.buf_align = 16; /* and 64 lines tall. */
54599 info->sprite.flags = FB_PIXMAP_IO;
54600
54601- info->fbops->fb_cursor = atyfb_cursor;
54602+ pax_open_kernel();
54603+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54604+ pax_close_kernel();
54605
54606 return 0;
54607 }
54608diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54609index d6cab1f..112f680 100644
54610--- a/drivers/video/fbdev/core/fb_defio.c
54611+++ b/drivers/video/fbdev/core/fb_defio.c
54612@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54613
54614 BUG_ON(!fbdefio);
54615 mutex_init(&fbdefio->lock);
54616- info->fbops->fb_mmap = fb_deferred_io_mmap;
54617+ pax_open_kernel();
54618+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54619+ pax_close_kernel();
54620 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54621 INIT_LIST_HEAD(&fbdefio->pagelist);
54622 if (fbdefio->delay == 0) /* set a default of 1 s */
54623@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54624 page->mapping = NULL;
54625 }
54626
54627- info->fbops->fb_mmap = NULL;
54628+ *(void **)&info->fbops->fb_mmap = NULL;
54629 mutex_destroy(&fbdefio->lock);
54630 }
54631 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
54632diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
54633index 0705d88..d9429bf 100644
54634--- a/drivers/video/fbdev/core/fbmem.c
54635+++ b/drivers/video/fbdev/core/fbmem.c
54636@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
54637 __u32 data;
54638 int err;
54639
54640- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
54641+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
54642
54643 data = (__u32) (unsigned long) fix->smem_start;
54644 err |= put_user(data, &fix32->smem_start);
54645diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
54646index 4254336..282567e 100644
54647--- a/drivers/video/fbdev/hyperv_fb.c
54648+++ b/drivers/video/fbdev/hyperv_fb.c
54649@@ -240,7 +240,7 @@ static uint screen_fb_size;
54650 static inline int synthvid_send(struct hv_device *hdev,
54651 struct synthvid_msg *msg)
54652 {
54653- static atomic64_t request_id = ATOMIC64_INIT(0);
54654+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
54655 int ret;
54656
54657 msg->pipe_hdr.type = PIPE_MSG_DATA;
54658@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
54659
54660 ret = vmbus_sendpacket(hdev->channel, msg,
54661 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
54662- atomic64_inc_return(&request_id),
54663+ atomic64_inc_return_unchecked(&request_id),
54664 VM_PKT_DATA_INBAND, 0);
54665
54666 if (ret)
54667diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
54668index 7672d2e..b56437f 100644
54669--- a/drivers/video/fbdev/i810/i810_accel.c
54670+++ b/drivers/video/fbdev/i810/i810_accel.c
54671@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
54672 }
54673 }
54674 printk("ringbuffer lockup!!!\n");
54675+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
54676 i810_report_error(mmio);
54677 par->dev_flags |= LOCKUP;
54678 info->pixmap.scan_align = 1;
54679diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54680index a01147f..5d896f8 100644
54681--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54682+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54683@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54684
54685 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54686 struct matrox_switch matrox_mystique = {
54687- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54688+ .preinit = MGA1064_preinit,
54689+ .reset = MGA1064_reset,
54690+ .init = MGA1064_init,
54691+ .restore = MGA1064_restore,
54692 };
54693 EXPORT_SYMBOL(matrox_mystique);
54694 #endif
54695
54696 #ifdef CONFIG_FB_MATROX_G
54697 struct matrox_switch matrox_G100 = {
54698- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54699+ .preinit = MGAG100_preinit,
54700+ .reset = MGAG100_reset,
54701+ .init = MGAG100_init,
54702+ .restore = MGAG100_restore,
54703 };
54704 EXPORT_SYMBOL(matrox_G100);
54705 #endif
54706diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54707index 195ad7c..09743fc 100644
54708--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54709+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54710@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54711 }
54712
54713 struct matrox_switch matrox_millennium = {
54714- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54715+ .preinit = Ti3026_preinit,
54716+ .reset = Ti3026_reset,
54717+ .init = Ti3026_init,
54718+ .restore = Ti3026_restore
54719 };
54720 EXPORT_SYMBOL(matrox_millennium);
54721 #endif
54722diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54723index fe92eed..106e085 100644
54724--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54725+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54726@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54727 struct mb862xxfb_par *par = info->par;
54728
54729 if (info->var.bits_per_pixel == 32) {
54730- info->fbops->fb_fillrect = cfb_fillrect;
54731- info->fbops->fb_copyarea = cfb_copyarea;
54732- info->fbops->fb_imageblit = cfb_imageblit;
54733+ pax_open_kernel();
54734+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54735+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54736+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54737+ pax_close_kernel();
54738 } else {
54739 outreg(disp, GC_L0EM, 3);
54740- info->fbops->fb_fillrect = mb86290fb_fillrect;
54741- info->fbops->fb_copyarea = mb86290fb_copyarea;
54742- info->fbops->fb_imageblit = mb86290fb_imageblit;
54743+ pax_open_kernel();
54744+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54745+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54746+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54747+ pax_close_kernel();
54748 }
54749 outreg(draw, GDC_REG_DRAW_BASE, 0);
54750 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54751diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
54752index def0412..fed6529 100644
54753--- a/drivers/video/fbdev/nvidia/nvidia.c
54754+++ b/drivers/video/fbdev/nvidia/nvidia.c
54755@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54756 info->fix.line_length = (info->var.xres_virtual *
54757 info->var.bits_per_pixel) >> 3;
54758 if (info->var.accel_flags) {
54759- info->fbops->fb_imageblit = nvidiafb_imageblit;
54760- info->fbops->fb_fillrect = nvidiafb_fillrect;
54761- info->fbops->fb_copyarea = nvidiafb_copyarea;
54762- info->fbops->fb_sync = nvidiafb_sync;
54763+ pax_open_kernel();
54764+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54765+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54766+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54767+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54768+ pax_close_kernel();
54769 info->pixmap.scan_align = 4;
54770 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54771 info->flags |= FBINFO_READS_FAST;
54772 NVResetGraphics(info);
54773 } else {
54774- info->fbops->fb_imageblit = cfb_imageblit;
54775- info->fbops->fb_fillrect = cfb_fillrect;
54776- info->fbops->fb_copyarea = cfb_copyarea;
54777- info->fbops->fb_sync = NULL;
54778+ pax_open_kernel();
54779+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54780+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54781+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54782+ *(void **)&info->fbops->fb_sync = NULL;
54783+ pax_close_kernel();
54784 info->pixmap.scan_align = 1;
54785 info->flags |= FBINFO_HWACCEL_DISABLED;
54786 info->flags &= ~FBINFO_READS_FAST;
54787@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54788 info->pixmap.size = 8 * 1024;
54789 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54790
54791- if (!hwcur)
54792- info->fbops->fb_cursor = NULL;
54793+ if (!hwcur) {
54794+ pax_open_kernel();
54795+ *(void **)&info->fbops->fb_cursor = NULL;
54796+ pax_close_kernel();
54797+ }
54798
54799 info->var.accel_flags = (!noaccel);
54800
54801diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
54802index 2412a0d..294215b 100644
54803--- a/drivers/video/fbdev/omap2/dss/display.c
54804+++ b/drivers/video/fbdev/omap2/dss/display.c
54805@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54806 if (dssdev->name == NULL)
54807 dssdev->name = dssdev->alias;
54808
54809+ pax_open_kernel();
54810 if (drv && drv->get_resolution == NULL)
54811- drv->get_resolution = omapdss_default_get_resolution;
54812+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54813 if (drv && drv->get_recommended_bpp == NULL)
54814- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54815+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54816 if (drv && drv->get_timings == NULL)
54817- drv->get_timings = omapdss_default_get_timings;
54818+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54819+ pax_close_kernel();
54820
54821 mutex_lock(&panel_list_mutex);
54822 list_add_tail(&dssdev->panel_list, &panel_list);
54823diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
54824index 83433cb..71e9b98 100644
54825--- a/drivers/video/fbdev/s1d13xxxfb.c
54826+++ b/drivers/video/fbdev/s1d13xxxfb.c
54827@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54828
54829 switch(prod_id) {
54830 case S1D13506_PROD_ID: /* activate acceleration */
54831- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54832- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54833+ pax_open_kernel();
54834+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54835+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54836+ pax_close_kernel();
54837 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54838 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54839 break;
54840diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54841index d3013cd..95b8285 100644
54842--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
54843+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54844@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
54845 }
54846
54847 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
54848- lcdc_sys_write_index,
54849- lcdc_sys_write_data,
54850- lcdc_sys_read_data,
54851+ .write_index = lcdc_sys_write_index,
54852+ .write_data = lcdc_sys_write_data,
54853+ .read_data = lcdc_sys_read_data,
54854 };
54855
54856 static int sh_mobile_lcdc_sginit(struct fb_info *info,
54857diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
54858index 9279e5f..d5f5276 100644
54859--- a/drivers/video/fbdev/smscufx.c
54860+++ b/drivers/video/fbdev/smscufx.c
54861@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
54862 fb_deferred_io_cleanup(info);
54863 kfree(info->fbdefio);
54864 info->fbdefio = NULL;
54865- info->fbops->fb_mmap = ufx_ops_mmap;
54866+ pax_open_kernel();
54867+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
54868+ pax_close_kernel();
54869 }
54870
54871 pr_debug("released /dev/fb%d user=%d count=%d",
54872diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
54873index ff2b873..626a8d5 100644
54874--- a/drivers/video/fbdev/udlfb.c
54875+++ b/drivers/video/fbdev/udlfb.c
54876@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
54877 dlfb_urb_completion(urb);
54878
54879 error:
54880- atomic_add(bytes_sent, &dev->bytes_sent);
54881- atomic_add(bytes_identical, &dev->bytes_identical);
54882- atomic_add(width*height*2, &dev->bytes_rendered);
54883+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54884+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54885+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
54886 end_cycles = get_cycles();
54887- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54888+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54889 >> 10)), /* Kcycles */
54890 &dev->cpu_kcycles_used);
54891
54892@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
54893 dlfb_urb_completion(urb);
54894
54895 error:
54896- atomic_add(bytes_sent, &dev->bytes_sent);
54897- atomic_add(bytes_identical, &dev->bytes_identical);
54898- atomic_add(bytes_rendered, &dev->bytes_rendered);
54899+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54900+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54901+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
54902 end_cycles = get_cycles();
54903- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54904+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54905 >> 10)), /* Kcycles */
54906 &dev->cpu_kcycles_used);
54907 }
54908@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
54909 fb_deferred_io_cleanup(info);
54910 kfree(info->fbdefio);
54911 info->fbdefio = NULL;
54912- info->fbops->fb_mmap = dlfb_ops_mmap;
54913+ pax_open_kernel();
54914+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
54915+ pax_close_kernel();
54916 }
54917
54918 pr_warn("released /dev/fb%d user=%d count=%d\n",
54919@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
54920 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54921 struct dlfb_data *dev = fb_info->par;
54922 return snprintf(buf, PAGE_SIZE, "%u\n",
54923- atomic_read(&dev->bytes_rendered));
54924+ atomic_read_unchecked(&dev->bytes_rendered));
54925 }
54926
54927 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54928@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54929 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54930 struct dlfb_data *dev = fb_info->par;
54931 return snprintf(buf, PAGE_SIZE, "%u\n",
54932- atomic_read(&dev->bytes_identical));
54933+ atomic_read_unchecked(&dev->bytes_identical));
54934 }
54935
54936 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54937@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54938 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54939 struct dlfb_data *dev = fb_info->par;
54940 return snprintf(buf, PAGE_SIZE, "%u\n",
54941- atomic_read(&dev->bytes_sent));
54942+ atomic_read_unchecked(&dev->bytes_sent));
54943 }
54944
54945 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54946@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54947 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54948 struct dlfb_data *dev = fb_info->par;
54949 return snprintf(buf, PAGE_SIZE, "%u\n",
54950- atomic_read(&dev->cpu_kcycles_used));
54951+ atomic_read_unchecked(&dev->cpu_kcycles_used));
54952 }
54953
54954 static ssize_t edid_show(
54955@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
54956 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54957 struct dlfb_data *dev = fb_info->par;
54958
54959- atomic_set(&dev->bytes_rendered, 0);
54960- atomic_set(&dev->bytes_identical, 0);
54961- atomic_set(&dev->bytes_sent, 0);
54962- atomic_set(&dev->cpu_kcycles_used, 0);
54963+ atomic_set_unchecked(&dev->bytes_rendered, 0);
54964+ atomic_set_unchecked(&dev->bytes_identical, 0);
54965+ atomic_set_unchecked(&dev->bytes_sent, 0);
54966+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
54967
54968 return count;
54969 }
54970diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
54971index d32d1c4..46722e6 100644
54972--- a/drivers/video/fbdev/uvesafb.c
54973+++ b/drivers/video/fbdev/uvesafb.c
54974@@ -19,6 +19,7 @@
54975 #include <linux/io.h>
54976 #include <linux/mutex.h>
54977 #include <linux/slab.h>
54978+#include <linux/moduleloader.h>
54979 #include <video/edid.h>
54980 #include <video/uvesafb.h>
54981 #ifdef CONFIG_X86
54982@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
54983 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
54984 par->pmi_setpal = par->ypan = 0;
54985 } else {
54986+
54987+#ifdef CONFIG_PAX_KERNEXEC
54988+#ifdef CONFIG_MODULES
54989+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
54990+#endif
54991+ if (!par->pmi_code) {
54992+ par->pmi_setpal = par->ypan = 0;
54993+ return 0;
54994+ }
54995+#endif
54996+
54997 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
54998 + task->t.regs.edi);
54999+
55000+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55001+ pax_open_kernel();
55002+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55003+ pax_close_kernel();
55004+
55005+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55006+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55007+#else
55008 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55009 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55010+#endif
55011+
55012 printk(KERN_INFO "uvesafb: protected mode interface info at "
55013 "%04x:%04x\n",
55014 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55015@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55016 par->ypan = ypan;
55017
55018 if (par->pmi_setpal || par->ypan) {
55019+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55020 if (__supported_pte_mask & _PAGE_NX) {
55021 par->pmi_setpal = par->ypan = 0;
55022 printk(KERN_WARNING "uvesafb: NX protection is active, "
55023 "better not use the PMI.\n");
55024- } else {
55025+ } else
55026+#endif
55027 uvesafb_vbe_getpmi(task, par);
55028- }
55029 }
55030 #else
55031 /* The protected mode interface is not available on non-x86. */
55032@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55033 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55034
55035 /* Disable blanking if the user requested so. */
55036- if (!blank)
55037- info->fbops->fb_blank = NULL;
55038+ if (!blank) {
55039+ pax_open_kernel();
55040+ *(void **)&info->fbops->fb_blank = NULL;
55041+ pax_close_kernel();
55042+ }
55043
55044 /*
55045 * Find out how much IO memory is required for the mode with
55046@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55047 info->flags = FBINFO_FLAG_DEFAULT |
55048 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55049
55050- if (!par->ypan)
55051- info->fbops->fb_pan_display = NULL;
55052+ if (!par->ypan) {
55053+ pax_open_kernel();
55054+ *(void **)&info->fbops->fb_pan_display = NULL;
55055+ pax_close_kernel();
55056+ }
55057 }
55058
55059 static void uvesafb_init_mtrr(struct fb_info *info)
55060@@ -1786,6 +1816,11 @@ out_mode:
55061 out:
55062 kfree(par->vbe_modes);
55063
55064+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55065+ if (par->pmi_code)
55066+ module_memfree_exec(par->pmi_code);
55067+#endif
55068+
55069 framebuffer_release(info);
55070 return err;
55071 }
55072@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55073 kfree(par->vbe_state_orig);
55074 kfree(par->vbe_state_saved);
55075
55076+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55077+ if (par->pmi_code)
55078+ module_memfree_exec(par->pmi_code);
55079+#endif
55080+
55081 framebuffer_release(info);
55082 }
55083 return 0;
55084diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55085index d79a0ac..2d0c3d4 100644
55086--- a/drivers/video/fbdev/vesafb.c
55087+++ b/drivers/video/fbdev/vesafb.c
55088@@ -9,6 +9,7 @@
55089 */
55090
55091 #include <linux/module.h>
55092+#include <linux/moduleloader.h>
55093 #include <linux/kernel.h>
55094 #include <linux/errno.h>
55095 #include <linux/string.h>
55096@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55097 static int vram_total; /* Set total amount of memory */
55098 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55099 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55100-static void (*pmi_start)(void) __read_mostly;
55101-static void (*pmi_pal) (void) __read_mostly;
55102+static void (*pmi_start)(void) __read_only;
55103+static void (*pmi_pal) (void) __read_only;
55104 static int depth __read_mostly;
55105 static int vga_compat __read_mostly;
55106 /* --------------------------------------------------------------------- */
55107@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55108 unsigned int size_remap;
55109 unsigned int size_total;
55110 char *option = NULL;
55111+ void *pmi_code = NULL;
55112
55113 /* ignore error return of fb_get_options */
55114 fb_get_options("vesafb", &option);
55115@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55116 size_remap = size_total;
55117 vesafb_fix.smem_len = size_remap;
55118
55119-#ifndef __i386__
55120- screen_info.vesapm_seg = 0;
55121-#endif
55122-
55123 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55124 printk(KERN_WARNING
55125 "vesafb: cannot reserve video memory at 0x%lx\n",
55126@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55127 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55128 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55129
55130+#ifdef __i386__
55131+
55132+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55133+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55134+ if (!pmi_code)
55135+#elif !defined(CONFIG_PAX_KERNEXEC)
55136+ if (0)
55137+#endif
55138+
55139+#endif
55140+ screen_info.vesapm_seg = 0;
55141+
55142 if (screen_info.vesapm_seg) {
55143- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55144- screen_info.vesapm_seg,screen_info.vesapm_off);
55145+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55146+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55147 }
55148
55149 if (screen_info.vesapm_seg < 0xc000)
55150@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55151
55152 if (ypan || pmi_setpal) {
55153 unsigned short *pmi_base;
55154+
55155 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55156- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55157- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55158+
55159+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55160+ pax_open_kernel();
55161+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55162+#else
55163+ pmi_code = pmi_base;
55164+#endif
55165+
55166+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55167+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55168+
55169+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55170+ pmi_start = ktva_ktla(pmi_start);
55171+ pmi_pal = ktva_ktla(pmi_pal);
55172+ pax_close_kernel();
55173+#endif
55174+
55175 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55176 if (pmi_base[3]) {
55177 printk(KERN_INFO "vesafb: pmi: ports = ");
55178@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55179 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55180 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55181
55182- if (!ypan)
55183- info->fbops->fb_pan_display = NULL;
55184+ if (!ypan) {
55185+ pax_open_kernel();
55186+ *(void **)&info->fbops->fb_pan_display = NULL;
55187+ pax_close_kernel();
55188+ }
55189
55190 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55191 err = -ENOMEM;
55192@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55193 fb_info(info, "%s frame buffer device\n", info->fix.id);
55194 return 0;
55195 err:
55196+
55197+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55198+ module_memfree_exec(pmi_code);
55199+#endif
55200+
55201 if (info->screen_base)
55202 iounmap(info->screen_base);
55203 framebuffer_release(info);
55204diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55205index 88714ae..16c2e11 100644
55206--- a/drivers/video/fbdev/via/via_clock.h
55207+++ b/drivers/video/fbdev/via/via_clock.h
55208@@ -56,7 +56,7 @@ struct via_clock {
55209
55210 void (*set_engine_pll_state)(u8 state);
55211 void (*set_engine_pll)(struct via_pll_config config);
55212-};
55213+} __no_const;
55214
55215
55216 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55217diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55218index 3c14e43..2630570 100644
55219--- a/drivers/video/logo/logo_linux_clut224.ppm
55220+++ b/drivers/video/logo/logo_linux_clut224.ppm
55221@@ -2,1603 +2,1123 @@ P3
55222 # Standard 224-color Linux logo
55223 80 80
55224 255
55225- 0 0 0 0 0 0 0 0 0 0 0 0
55226- 0 0 0 0 0 0 0 0 0 0 0 0
55227- 0 0 0 0 0 0 0 0 0 0 0 0
55228- 0 0 0 0 0 0 0 0 0 0 0 0
55229- 0 0 0 0 0 0 0 0 0 0 0 0
55230- 0 0 0 0 0 0 0 0 0 0 0 0
55231- 0 0 0 0 0 0 0 0 0 0 0 0
55232- 0 0 0 0 0 0 0 0 0 0 0 0
55233- 0 0 0 0 0 0 0 0 0 0 0 0
55234- 6 6 6 6 6 6 10 10 10 10 10 10
55235- 10 10 10 6 6 6 6 6 6 6 6 6
55236- 0 0 0 0 0 0 0 0 0 0 0 0
55237- 0 0 0 0 0 0 0 0 0 0 0 0
55238- 0 0 0 0 0 0 0 0 0 0 0 0
55239- 0 0 0 0 0 0 0 0 0 0 0 0
55240- 0 0 0 0 0 0 0 0 0 0 0 0
55241- 0 0 0 0 0 0 0 0 0 0 0 0
55242- 0 0 0 0 0 0 0 0 0 0 0 0
55243- 0 0 0 0 0 0 0 0 0 0 0 0
55244- 0 0 0 0 0 0 0 0 0 0 0 0
55245- 0 0 0 0 0 0 0 0 0 0 0 0
55246- 0 0 0 0 0 0 0 0 0 0 0 0
55247- 0 0 0 0 0 0 0 0 0 0 0 0
55248- 0 0 0 0 0 0 0 0 0 0 0 0
55249- 0 0 0 0 0 0 0 0 0 0 0 0
55250- 0 0 0 0 0 0 0 0 0 0 0 0
55251- 0 0 0 0 0 0 0 0 0 0 0 0
55252- 0 0 0 0 0 0 0 0 0 0 0 0
55253- 0 0 0 6 6 6 10 10 10 14 14 14
55254- 22 22 22 26 26 26 30 30 30 34 34 34
55255- 30 30 30 30 30 30 26 26 26 18 18 18
55256- 14 14 14 10 10 10 6 6 6 0 0 0
55257- 0 0 0 0 0 0 0 0 0 0 0 0
55258- 0 0 0 0 0 0 0 0 0 0 0 0
55259- 0 0 0 0 0 0 0 0 0 0 0 0
55260- 0 0 0 0 0 0 0 0 0 0 0 0
55261- 0 0 0 0 0 0 0 0 0 0 0 0
55262- 0 0 0 0 0 0 0 0 0 0 0 0
55263- 0 0 0 0 0 0 0 0 0 0 0 0
55264- 0 0 0 0 0 0 0 0 0 0 0 0
55265- 0 0 0 0 0 0 0 0 0 0 0 0
55266- 0 0 0 0 0 1 0 0 1 0 0 0
55267- 0 0 0 0 0 0 0 0 0 0 0 0
55268- 0 0 0 0 0 0 0 0 0 0 0 0
55269- 0 0 0 0 0 0 0 0 0 0 0 0
55270- 0 0 0 0 0 0 0 0 0 0 0 0
55271- 0 0 0 0 0 0 0 0 0 0 0 0
55272- 0 0 0 0 0 0 0 0 0 0 0 0
55273- 6 6 6 14 14 14 26 26 26 42 42 42
55274- 54 54 54 66 66 66 78 78 78 78 78 78
55275- 78 78 78 74 74 74 66 66 66 54 54 54
55276- 42 42 42 26 26 26 18 18 18 10 10 10
55277- 6 6 6 0 0 0 0 0 0 0 0 0
55278- 0 0 0 0 0 0 0 0 0 0 0 0
55279- 0 0 0 0 0 0 0 0 0 0 0 0
55280- 0 0 0 0 0 0 0 0 0 0 0 0
55281- 0 0 0 0 0 0 0 0 0 0 0 0
55282- 0 0 0 0 0 0 0 0 0 0 0 0
55283- 0 0 0 0 0 0 0 0 0 0 0 0
55284- 0 0 0 0 0 0 0 0 0 0 0 0
55285- 0 0 0 0 0 0 0 0 0 0 0 0
55286- 0 0 1 0 0 0 0 0 0 0 0 0
55287- 0 0 0 0 0 0 0 0 0 0 0 0
55288- 0 0 0 0 0 0 0 0 0 0 0 0
55289- 0 0 0 0 0 0 0 0 0 0 0 0
55290- 0 0 0 0 0 0 0 0 0 0 0 0
55291- 0 0 0 0 0 0 0 0 0 0 0 0
55292- 0 0 0 0 0 0 0 0 0 10 10 10
55293- 22 22 22 42 42 42 66 66 66 86 86 86
55294- 66 66 66 38 38 38 38 38 38 22 22 22
55295- 26 26 26 34 34 34 54 54 54 66 66 66
55296- 86 86 86 70 70 70 46 46 46 26 26 26
55297- 14 14 14 6 6 6 0 0 0 0 0 0
55298- 0 0 0 0 0 0 0 0 0 0 0 0
55299- 0 0 0 0 0 0 0 0 0 0 0 0
55300- 0 0 0 0 0 0 0 0 0 0 0 0
55301- 0 0 0 0 0 0 0 0 0 0 0 0
55302- 0 0 0 0 0 0 0 0 0 0 0 0
55303- 0 0 0 0 0 0 0 0 0 0 0 0
55304- 0 0 0 0 0 0 0 0 0 0 0 0
55305- 0 0 0 0 0 0 0 0 0 0 0 0
55306- 0 0 1 0 0 1 0 0 1 0 0 0
55307- 0 0 0 0 0 0 0 0 0 0 0 0
55308- 0 0 0 0 0 0 0 0 0 0 0 0
55309- 0 0 0 0 0 0 0 0 0 0 0 0
55310- 0 0 0 0 0 0 0 0 0 0 0 0
55311- 0 0 0 0 0 0 0 0 0 0 0 0
55312- 0 0 0 0 0 0 10 10 10 26 26 26
55313- 50 50 50 82 82 82 58 58 58 6 6 6
55314- 2 2 6 2 2 6 2 2 6 2 2 6
55315- 2 2 6 2 2 6 2 2 6 2 2 6
55316- 6 6 6 54 54 54 86 86 86 66 66 66
55317- 38 38 38 18 18 18 6 6 6 0 0 0
55318- 0 0 0 0 0 0 0 0 0 0 0 0
55319- 0 0 0 0 0 0 0 0 0 0 0 0
55320- 0 0 0 0 0 0 0 0 0 0 0 0
55321- 0 0 0 0 0 0 0 0 0 0 0 0
55322- 0 0 0 0 0 0 0 0 0 0 0 0
55323- 0 0 0 0 0 0 0 0 0 0 0 0
55324- 0 0 0 0 0 0 0 0 0 0 0 0
55325- 0 0 0 0 0 0 0 0 0 0 0 0
55326- 0 0 0 0 0 0 0 0 0 0 0 0
55327- 0 0 0 0 0 0 0 0 0 0 0 0
55328- 0 0 0 0 0 0 0 0 0 0 0 0
55329- 0 0 0 0 0 0 0 0 0 0 0 0
55330- 0 0 0 0 0 0 0 0 0 0 0 0
55331- 0 0 0 0 0 0 0 0 0 0 0 0
55332- 0 0 0 6 6 6 22 22 22 50 50 50
55333- 78 78 78 34 34 34 2 2 6 2 2 6
55334- 2 2 6 2 2 6 2 2 6 2 2 6
55335- 2 2 6 2 2 6 2 2 6 2 2 6
55336- 2 2 6 2 2 6 6 6 6 70 70 70
55337- 78 78 78 46 46 46 22 22 22 6 6 6
55338- 0 0 0 0 0 0 0 0 0 0 0 0
55339- 0 0 0 0 0 0 0 0 0 0 0 0
55340- 0 0 0 0 0 0 0 0 0 0 0 0
55341- 0 0 0 0 0 0 0 0 0 0 0 0
55342- 0 0 0 0 0 0 0 0 0 0 0 0
55343- 0 0 0 0 0 0 0 0 0 0 0 0
55344- 0 0 0 0 0 0 0 0 0 0 0 0
55345- 0 0 0 0 0 0 0 0 0 0 0 0
55346- 0 0 1 0 0 1 0 0 1 0 0 0
55347- 0 0 0 0 0 0 0 0 0 0 0 0
55348- 0 0 0 0 0 0 0 0 0 0 0 0
55349- 0 0 0 0 0 0 0 0 0 0 0 0
55350- 0 0 0 0 0 0 0 0 0 0 0 0
55351- 0 0 0 0 0 0 0 0 0 0 0 0
55352- 6 6 6 18 18 18 42 42 42 82 82 82
55353- 26 26 26 2 2 6 2 2 6 2 2 6
55354- 2 2 6 2 2 6 2 2 6 2 2 6
55355- 2 2 6 2 2 6 2 2 6 14 14 14
55356- 46 46 46 34 34 34 6 6 6 2 2 6
55357- 42 42 42 78 78 78 42 42 42 18 18 18
55358- 6 6 6 0 0 0 0 0 0 0 0 0
55359- 0 0 0 0 0 0 0 0 0 0 0 0
55360- 0 0 0 0 0 0 0 0 0 0 0 0
55361- 0 0 0 0 0 0 0 0 0 0 0 0
55362- 0 0 0 0 0 0 0 0 0 0 0 0
55363- 0 0 0 0 0 0 0 0 0 0 0 0
55364- 0 0 0 0 0 0 0 0 0 0 0 0
55365- 0 0 0 0 0 0 0 0 0 0 0 0
55366- 0 0 1 0 0 0 0 0 1 0 0 0
55367- 0 0 0 0 0 0 0 0 0 0 0 0
55368- 0 0 0 0 0 0 0 0 0 0 0 0
55369- 0 0 0 0 0 0 0 0 0 0 0 0
55370- 0 0 0 0 0 0 0 0 0 0 0 0
55371- 0 0 0 0 0 0 0 0 0 0 0 0
55372- 10 10 10 30 30 30 66 66 66 58 58 58
55373- 2 2 6 2 2 6 2 2 6 2 2 6
55374- 2 2 6 2 2 6 2 2 6 2 2 6
55375- 2 2 6 2 2 6 2 2 6 26 26 26
55376- 86 86 86 101 101 101 46 46 46 10 10 10
55377- 2 2 6 58 58 58 70 70 70 34 34 34
55378- 10 10 10 0 0 0 0 0 0 0 0 0
55379- 0 0 0 0 0 0 0 0 0 0 0 0
55380- 0 0 0 0 0 0 0 0 0 0 0 0
55381- 0 0 0 0 0 0 0 0 0 0 0 0
55382- 0 0 0 0 0 0 0 0 0 0 0 0
55383- 0 0 0 0 0 0 0 0 0 0 0 0
55384- 0 0 0 0 0 0 0 0 0 0 0 0
55385- 0 0 0 0 0 0 0 0 0 0 0 0
55386- 0 0 1 0 0 1 0 0 1 0 0 0
55387- 0 0 0 0 0 0 0 0 0 0 0 0
55388- 0 0 0 0 0 0 0 0 0 0 0 0
55389- 0 0 0 0 0 0 0 0 0 0 0 0
55390- 0 0 0 0 0 0 0 0 0 0 0 0
55391- 0 0 0 0 0 0 0 0 0 0 0 0
55392- 14 14 14 42 42 42 86 86 86 10 10 10
55393- 2 2 6 2 2 6 2 2 6 2 2 6
55394- 2 2 6 2 2 6 2 2 6 2 2 6
55395- 2 2 6 2 2 6 2 2 6 30 30 30
55396- 94 94 94 94 94 94 58 58 58 26 26 26
55397- 2 2 6 6 6 6 78 78 78 54 54 54
55398- 22 22 22 6 6 6 0 0 0 0 0 0
55399- 0 0 0 0 0 0 0 0 0 0 0 0
55400- 0 0 0 0 0 0 0 0 0 0 0 0
55401- 0 0 0 0 0 0 0 0 0 0 0 0
55402- 0 0 0 0 0 0 0 0 0 0 0 0
55403- 0 0 0 0 0 0 0 0 0 0 0 0
55404- 0 0 0 0 0 0 0 0 0 0 0 0
55405- 0 0 0 0 0 0 0 0 0 0 0 0
55406- 0 0 0 0 0 0 0 0 0 0 0 0
55407- 0 0 0 0 0 0 0 0 0 0 0 0
55408- 0 0 0 0 0 0 0 0 0 0 0 0
55409- 0 0 0 0 0 0 0 0 0 0 0 0
55410- 0 0 0 0 0 0 0 0 0 0 0 0
55411- 0 0 0 0 0 0 0 0 0 6 6 6
55412- 22 22 22 62 62 62 62 62 62 2 2 6
55413- 2 2 6 2 2 6 2 2 6 2 2 6
55414- 2 2 6 2 2 6 2 2 6 2 2 6
55415- 2 2 6 2 2 6 2 2 6 26 26 26
55416- 54 54 54 38 38 38 18 18 18 10 10 10
55417- 2 2 6 2 2 6 34 34 34 82 82 82
55418- 38 38 38 14 14 14 0 0 0 0 0 0
55419- 0 0 0 0 0 0 0 0 0 0 0 0
55420- 0 0 0 0 0 0 0 0 0 0 0 0
55421- 0 0 0 0 0 0 0 0 0 0 0 0
55422- 0 0 0 0 0 0 0 0 0 0 0 0
55423- 0 0 0 0 0 0 0 0 0 0 0 0
55424- 0 0 0 0 0 0 0 0 0 0 0 0
55425- 0 0 0 0 0 0 0 0 0 0 0 0
55426- 0 0 0 0 0 1 0 0 1 0 0 0
55427- 0 0 0 0 0 0 0 0 0 0 0 0
55428- 0 0 0 0 0 0 0 0 0 0 0 0
55429- 0 0 0 0 0 0 0 0 0 0 0 0
55430- 0 0 0 0 0 0 0 0 0 0 0 0
55431- 0 0 0 0 0 0 0 0 0 6 6 6
55432- 30 30 30 78 78 78 30 30 30 2 2 6
55433- 2 2 6 2 2 6 2 2 6 2 2 6
55434- 2 2 6 2 2 6 2 2 6 2 2 6
55435- 2 2 6 2 2 6 2 2 6 10 10 10
55436- 10 10 10 2 2 6 2 2 6 2 2 6
55437- 2 2 6 2 2 6 2 2 6 78 78 78
55438- 50 50 50 18 18 18 6 6 6 0 0 0
55439- 0 0 0 0 0 0 0 0 0 0 0 0
55440- 0 0 0 0 0 0 0 0 0 0 0 0
55441- 0 0 0 0 0 0 0 0 0 0 0 0
55442- 0 0 0 0 0 0 0 0 0 0 0 0
55443- 0 0 0 0 0 0 0 0 0 0 0 0
55444- 0 0 0 0 0 0 0 0 0 0 0 0
55445- 0 0 0 0 0 0 0 0 0 0 0 0
55446- 0 0 1 0 0 0 0 0 0 0 0 0
55447- 0 0 0 0 0 0 0 0 0 0 0 0
55448- 0 0 0 0 0 0 0 0 0 0 0 0
55449- 0 0 0 0 0 0 0 0 0 0 0 0
55450- 0 0 0 0 0 0 0 0 0 0 0 0
55451- 0 0 0 0 0 0 0 0 0 10 10 10
55452- 38 38 38 86 86 86 14 14 14 2 2 6
55453- 2 2 6 2 2 6 2 2 6 2 2 6
55454- 2 2 6 2 2 6 2 2 6 2 2 6
55455- 2 2 6 2 2 6 2 2 6 2 2 6
55456- 2 2 6 2 2 6 2 2 6 2 2 6
55457- 2 2 6 2 2 6 2 2 6 54 54 54
55458- 66 66 66 26 26 26 6 6 6 0 0 0
55459- 0 0 0 0 0 0 0 0 0 0 0 0
55460- 0 0 0 0 0 0 0 0 0 0 0 0
55461- 0 0 0 0 0 0 0 0 0 0 0 0
55462- 0 0 0 0 0 0 0 0 0 0 0 0
55463- 0 0 0 0 0 0 0 0 0 0 0 0
55464- 0 0 0 0 0 0 0 0 0 0 0 0
55465- 0 0 0 0 0 0 0 0 0 0 0 0
55466- 0 0 0 0 0 1 0 0 1 0 0 0
55467- 0 0 0 0 0 0 0 0 0 0 0 0
55468- 0 0 0 0 0 0 0 0 0 0 0 0
55469- 0 0 0 0 0 0 0 0 0 0 0 0
55470- 0 0 0 0 0 0 0 0 0 0 0 0
55471- 0 0 0 0 0 0 0 0 0 14 14 14
55472- 42 42 42 82 82 82 2 2 6 2 2 6
55473- 2 2 6 6 6 6 10 10 10 2 2 6
55474- 2 2 6 2 2 6 2 2 6 2 2 6
55475- 2 2 6 2 2 6 2 2 6 6 6 6
55476- 14 14 14 10 10 10 2 2 6 2 2 6
55477- 2 2 6 2 2 6 2 2 6 18 18 18
55478- 82 82 82 34 34 34 10 10 10 0 0 0
55479- 0 0 0 0 0 0 0 0 0 0 0 0
55480- 0 0 0 0 0 0 0 0 0 0 0 0
55481- 0 0 0 0 0 0 0 0 0 0 0 0
55482- 0 0 0 0 0 0 0 0 0 0 0 0
55483- 0 0 0 0 0 0 0 0 0 0 0 0
55484- 0 0 0 0 0 0 0 0 0 0 0 0
55485- 0 0 0 0 0 0 0 0 0 0 0 0
55486- 0 0 1 0 0 0 0 0 0 0 0 0
55487- 0 0 0 0 0 0 0 0 0 0 0 0
55488- 0 0 0 0 0 0 0 0 0 0 0 0
55489- 0 0 0 0 0 0 0 0 0 0 0 0
55490- 0 0 0 0 0 0 0 0 0 0 0 0
55491- 0 0 0 0 0 0 0 0 0 14 14 14
55492- 46 46 46 86 86 86 2 2 6 2 2 6
55493- 6 6 6 6 6 6 22 22 22 34 34 34
55494- 6 6 6 2 2 6 2 2 6 2 2 6
55495- 2 2 6 2 2 6 18 18 18 34 34 34
55496- 10 10 10 50 50 50 22 22 22 2 2 6
55497- 2 2 6 2 2 6 2 2 6 10 10 10
55498- 86 86 86 42 42 42 14 14 14 0 0 0
55499- 0 0 0 0 0 0 0 0 0 0 0 0
55500- 0 0 0 0 0 0 0 0 0 0 0 0
55501- 0 0 0 0 0 0 0 0 0 0 0 0
55502- 0 0 0 0 0 0 0 0 0 0 0 0
55503- 0 0 0 0 0 0 0 0 0 0 0 0
55504- 0 0 0 0 0 0 0 0 0 0 0 0
55505- 0 0 0 0 0 0 0 0 0 0 0 0
55506- 0 0 1 0 0 1 0 0 1 0 0 0
55507- 0 0 0 0 0 0 0 0 0 0 0 0
55508- 0 0 0 0 0 0 0 0 0 0 0 0
55509- 0 0 0 0 0 0 0 0 0 0 0 0
55510- 0 0 0 0 0 0 0 0 0 0 0 0
55511- 0 0 0 0 0 0 0 0 0 14 14 14
55512- 46 46 46 86 86 86 2 2 6 2 2 6
55513- 38 38 38 116 116 116 94 94 94 22 22 22
55514- 22 22 22 2 2 6 2 2 6 2 2 6
55515- 14 14 14 86 86 86 138 138 138 162 162 162
55516-154 154 154 38 38 38 26 26 26 6 6 6
55517- 2 2 6 2 2 6 2 2 6 2 2 6
55518- 86 86 86 46 46 46 14 14 14 0 0 0
55519- 0 0 0 0 0 0 0 0 0 0 0 0
55520- 0 0 0 0 0 0 0 0 0 0 0 0
55521- 0 0 0 0 0 0 0 0 0 0 0 0
55522- 0 0 0 0 0 0 0 0 0 0 0 0
55523- 0 0 0 0 0 0 0 0 0 0 0 0
55524- 0 0 0 0 0 0 0 0 0 0 0 0
55525- 0 0 0 0 0 0 0 0 0 0 0 0
55526- 0 0 0 0 0 0 0 0 0 0 0 0
55527- 0 0 0 0 0 0 0 0 0 0 0 0
55528- 0 0 0 0 0 0 0 0 0 0 0 0
55529- 0 0 0 0 0 0 0 0 0 0 0 0
55530- 0 0 0 0 0 0 0 0 0 0 0 0
55531- 0 0 0 0 0 0 0 0 0 14 14 14
55532- 46 46 46 86 86 86 2 2 6 14 14 14
55533-134 134 134 198 198 198 195 195 195 116 116 116
55534- 10 10 10 2 2 6 2 2 6 6 6 6
55535-101 98 89 187 187 187 210 210 210 218 218 218
55536-214 214 214 134 134 134 14 14 14 6 6 6
55537- 2 2 6 2 2 6 2 2 6 2 2 6
55538- 86 86 86 50 50 50 18 18 18 6 6 6
55539- 0 0 0 0 0 0 0 0 0 0 0 0
55540- 0 0 0 0 0 0 0 0 0 0 0 0
55541- 0 0 0 0 0 0 0 0 0 0 0 0
55542- 0 0 0 0 0 0 0 0 0 0 0 0
55543- 0 0 0 0 0 0 0 0 0 0 0 0
55544- 0 0 0 0 0 0 0 0 0 0 0 0
55545- 0 0 0 0 0 0 0 0 1 0 0 0
55546- 0 0 1 0 0 1 0 0 1 0 0 0
55547- 0 0 0 0 0 0 0 0 0 0 0 0
55548- 0 0 0 0 0 0 0 0 0 0 0 0
55549- 0 0 0 0 0 0 0 0 0 0 0 0
55550- 0 0 0 0 0 0 0 0 0 0 0 0
55551- 0 0 0 0 0 0 0 0 0 14 14 14
55552- 46 46 46 86 86 86 2 2 6 54 54 54
55553-218 218 218 195 195 195 226 226 226 246 246 246
55554- 58 58 58 2 2 6 2 2 6 30 30 30
55555-210 210 210 253 253 253 174 174 174 123 123 123
55556-221 221 221 234 234 234 74 74 74 2 2 6
55557- 2 2 6 2 2 6 2 2 6 2 2 6
55558- 70 70 70 58 58 58 22 22 22 6 6 6
55559- 0 0 0 0 0 0 0 0 0 0 0 0
55560- 0 0 0 0 0 0 0 0 0 0 0 0
55561- 0 0 0 0 0 0 0 0 0 0 0 0
55562- 0 0 0 0 0 0 0 0 0 0 0 0
55563- 0 0 0 0 0 0 0 0 0 0 0 0
55564- 0 0 0 0 0 0 0 0 0 0 0 0
55565- 0 0 0 0 0 0 0 0 0 0 0 0
55566- 0 0 0 0 0 0 0 0 0 0 0 0
55567- 0 0 0 0 0 0 0 0 0 0 0 0
55568- 0 0 0 0 0 0 0 0 0 0 0 0
55569- 0 0 0 0 0 0 0 0 0 0 0 0
55570- 0 0 0 0 0 0 0 0 0 0 0 0
55571- 0 0 0 0 0 0 0 0 0 14 14 14
55572- 46 46 46 82 82 82 2 2 6 106 106 106
55573-170 170 170 26 26 26 86 86 86 226 226 226
55574-123 123 123 10 10 10 14 14 14 46 46 46
55575-231 231 231 190 190 190 6 6 6 70 70 70
55576- 90 90 90 238 238 238 158 158 158 2 2 6
55577- 2 2 6 2 2 6 2 2 6 2 2 6
55578- 70 70 70 58 58 58 22 22 22 6 6 6
55579- 0 0 0 0 0 0 0 0 0 0 0 0
55580- 0 0 0 0 0 0 0 0 0 0 0 0
55581- 0 0 0 0 0 0 0 0 0 0 0 0
55582- 0 0 0 0 0 0 0 0 0 0 0 0
55583- 0 0 0 0 0 0 0 0 0 0 0 0
55584- 0 0 0 0 0 0 0 0 0 0 0 0
55585- 0 0 0 0 0 0 0 0 1 0 0 0
55586- 0 0 1 0 0 1 0 0 1 0 0 0
55587- 0 0 0 0 0 0 0 0 0 0 0 0
55588- 0 0 0 0 0 0 0 0 0 0 0 0
55589- 0 0 0 0 0 0 0 0 0 0 0 0
55590- 0 0 0 0 0 0 0 0 0 0 0 0
55591- 0 0 0 0 0 0 0 0 0 14 14 14
55592- 42 42 42 86 86 86 6 6 6 116 116 116
55593-106 106 106 6 6 6 70 70 70 149 149 149
55594-128 128 128 18 18 18 38 38 38 54 54 54
55595-221 221 221 106 106 106 2 2 6 14 14 14
55596- 46 46 46 190 190 190 198 198 198 2 2 6
55597- 2 2 6 2 2 6 2 2 6 2 2 6
55598- 74 74 74 62 62 62 22 22 22 6 6 6
55599- 0 0 0 0 0 0 0 0 0 0 0 0
55600- 0 0 0 0 0 0 0 0 0 0 0 0
55601- 0 0 0 0 0 0 0 0 0 0 0 0
55602- 0 0 0 0 0 0 0 0 0 0 0 0
55603- 0 0 0 0 0 0 0 0 0 0 0 0
55604- 0 0 0 0 0 0 0 0 0 0 0 0
55605- 0 0 0 0 0 0 0 0 1 0 0 0
55606- 0 0 1 0 0 0 0 0 1 0 0 0
55607- 0 0 0 0 0 0 0 0 0 0 0 0
55608- 0 0 0 0 0 0 0 0 0 0 0 0
55609- 0 0 0 0 0 0 0 0 0 0 0 0
55610- 0 0 0 0 0 0 0 0 0 0 0 0
55611- 0 0 0 0 0 0 0 0 0 14 14 14
55612- 42 42 42 94 94 94 14 14 14 101 101 101
55613-128 128 128 2 2 6 18 18 18 116 116 116
55614-118 98 46 121 92 8 121 92 8 98 78 10
55615-162 162 162 106 106 106 2 2 6 2 2 6
55616- 2 2 6 195 195 195 195 195 195 6 6 6
55617- 2 2 6 2 2 6 2 2 6 2 2 6
55618- 74 74 74 62 62 62 22 22 22 6 6 6
55619- 0 0 0 0 0 0 0 0 0 0 0 0
55620- 0 0 0 0 0 0 0 0 0 0 0 0
55621- 0 0 0 0 0 0 0 0 0 0 0 0
55622- 0 0 0 0 0 0 0 0 0 0 0 0
55623- 0 0 0 0 0 0 0 0 0 0 0 0
55624- 0 0 0 0 0 0 0 0 0 0 0 0
55625- 0 0 0 0 0 0 0 0 1 0 0 1
55626- 0 0 1 0 0 0 0 0 1 0 0 0
55627- 0 0 0 0 0 0 0 0 0 0 0 0
55628- 0 0 0 0 0 0 0 0 0 0 0 0
55629- 0 0 0 0 0 0 0 0 0 0 0 0
55630- 0 0 0 0 0 0 0 0 0 0 0 0
55631- 0 0 0 0 0 0 0 0 0 10 10 10
55632- 38 38 38 90 90 90 14 14 14 58 58 58
55633-210 210 210 26 26 26 54 38 6 154 114 10
55634-226 170 11 236 186 11 225 175 15 184 144 12
55635-215 174 15 175 146 61 37 26 9 2 2 6
55636- 70 70 70 246 246 246 138 138 138 2 2 6
55637- 2 2 6 2 2 6 2 2 6 2 2 6
55638- 70 70 70 66 66 66 26 26 26 6 6 6
55639- 0 0 0 0 0 0 0 0 0 0 0 0
55640- 0 0 0 0 0 0 0 0 0 0 0 0
55641- 0 0 0 0 0 0 0 0 0 0 0 0
55642- 0 0 0 0 0 0 0 0 0 0 0 0
55643- 0 0 0 0 0 0 0 0 0 0 0 0
55644- 0 0 0 0 0 0 0 0 0 0 0 0
55645- 0 0 0 0 0 0 0 0 0 0 0 0
55646- 0 0 0 0 0 0 0 0 0 0 0 0
55647- 0 0 0 0 0 0 0 0 0 0 0 0
55648- 0 0 0 0 0 0 0 0 0 0 0 0
55649- 0 0 0 0 0 0 0 0 0 0 0 0
55650- 0 0 0 0 0 0 0 0 0 0 0 0
55651- 0 0 0 0 0 0 0 0 0 10 10 10
55652- 38 38 38 86 86 86 14 14 14 10 10 10
55653-195 195 195 188 164 115 192 133 9 225 175 15
55654-239 182 13 234 190 10 232 195 16 232 200 30
55655-245 207 45 241 208 19 232 195 16 184 144 12
55656-218 194 134 211 206 186 42 42 42 2 2 6
55657- 2 2 6 2 2 6 2 2 6 2 2 6
55658- 50 50 50 74 74 74 30 30 30 6 6 6
55659- 0 0 0 0 0 0 0 0 0 0 0 0
55660- 0 0 0 0 0 0 0 0 0 0 0 0
55661- 0 0 0 0 0 0 0 0 0 0 0 0
55662- 0 0 0 0 0 0 0 0 0 0 0 0
55663- 0 0 0 0 0 0 0 0 0 0 0 0
55664- 0 0 0 0 0 0 0 0 0 0 0 0
55665- 0 0 0 0 0 0 0 0 0 0 0 0
55666- 0 0 0 0 0 0 0 0 0 0 0 0
55667- 0 0 0 0 0 0 0 0 0 0 0 0
55668- 0 0 0 0 0 0 0 0 0 0 0 0
55669- 0 0 0 0 0 0 0 0 0 0 0 0
55670- 0 0 0 0 0 0 0 0 0 0 0 0
55671- 0 0 0 0 0 0 0 0 0 10 10 10
55672- 34 34 34 86 86 86 14 14 14 2 2 6
55673-121 87 25 192 133 9 219 162 10 239 182 13
55674-236 186 11 232 195 16 241 208 19 244 214 54
55675-246 218 60 246 218 38 246 215 20 241 208 19
55676-241 208 19 226 184 13 121 87 25 2 2 6
55677- 2 2 6 2 2 6 2 2 6 2 2 6
55678- 50 50 50 82 82 82 34 34 34 10 10 10
55679- 0 0 0 0 0 0 0 0 0 0 0 0
55680- 0 0 0 0 0 0 0 0 0 0 0 0
55681- 0 0 0 0 0 0 0 0 0 0 0 0
55682- 0 0 0 0 0 0 0 0 0 0 0 0
55683- 0 0 0 0 0 0 0 0 0 0 0 0
55684- 0 0 0 0 0 0 0 0 0 0 0 0
55685- 0 0 0 0 0 0 0 0 0 0 0 0
55686- 0 0 0 0 0 0 0 0 0 0 0 0
55687- 0 0 0 0 0 0 0 0 0 0 0 0
55688- 0 0 0 0 0 0 0 0 0 0 0 0
55689- 0 0 0 0 0 0 0 0 0 0 0 0
55690- 0 0 0 0 0 0 0 0 0 0 0 0
55691- 0 0 0 0 0 0 0 0 0 10 10 10
55692- 34 34 34 82 82 82 30 30 30 61 42 6
55693-180 123 7 206 145 10 230 174 11 239 182 13
55694-234 190 10 238 202 15 241 208 19 246 218 74
55695-246 218 38 246 215 20 246 215 20 246 215 20
55696-226 184 13 215 174 15 184 144 12 6 6 6
55697- 2 2 6 2 2 6 2 2 6 2 2 6
55698- 26 26 26 94 94 94 42 42 42 14 14 14
55699- 0 0 0 0 0 0 0 0 0 0 0 0
55700- 0 0 0 0 0 0 0 0 0 0 0 0
55701- 0 0 0 0 0 0 0 0 0 0 0 0
55702- 0 0 0 0 0 0 0 0 0 0 0 0
55703- 0 0 0 0 0 0 0 0 0 0 0 0
55704- 0 0 0 0 0 0 0 0 0 0 0 0
55705- 0 0 0 0 0 0 0 0 0 0 0 0
55706- 0 0 0 0 0 0 0 0 0 0 0 0
55707- 0 0 0 0 0 0 0 0 0 0 0 0
55708- 0 0 0 0 0 0 0 0 0 0 0 0
55709- 0 0 0 0 0 0 0 0 0 0 0 0
55710- 0 0 0 0 0 0 0 0 0 0 0 0
55711- 0 0 0 0 0 0 0 0 0 10 10 10
55712- 30 30 30 78 78 78 50 50 50 104 69 6
55713-192 133 9 216 158 10 236 178 12 236 186 11
55714-232 195 16 241 208 19 244 214 54 245 215 43
55715-246 215 20 246 215 20 241 208 19 198 155 10
55716-200 144 11 216 158 10 156 118 10 2 2 6
55717- 2 2 6 2 2 6 2 2 6 2 2 6
55718- 6 6 6 90 90 90 54 54 54 18 18 18
55719- 6 6 6 0 0 0 0 0 0 0 0 0
55720- 0 0 0 0 0 0 0 0 0 0 0 0
55721- 0 0 0 0 0 0 0 0 0 0 0 0
55722- 0 0 0 0 0 0 0 0 0 0 0 0
55723- 0 0 0 0 0 0 0 0 0 0 0 0
55724- 0 0 0 0 0 0 0 0 0 0 0 0
55725- 0 0 0 0 0 0 0 0 0 0 0 0
55726- 0 0 0 0 0 0 0 0 0 0 0 0
55727- 0 0 0 0 0 0 0 0 0 0 0 0
55728- 0 0 0 0 0 0 0 0 0 0 0 0
55729- 0 0 0 0 0 0 0 0 0 0 0 0
55730- 0 0 0 0 0 0 0 0 0 0 0 0
55731- 0 0 0 0 0 0 0 0 0 10 10 10
55732- 30 30 30 78 78 78 46 46 46 22 22 22
55733-137 92 6 210 162 10 239 182 13 238 190 10
55734-238 202 15 241 208 19 246 215 20 246 215 20
55735-241 208 19 203 166 17 185 133 11 210 150 10
55736-216 158 10 210 150 10 102 78 10 2 2 6
55737- 6 6 6 54 54 54 14 14 14 2 2 6
55738- 2 2 6 62 62 62 74 74 74 30 30 30
55739- 10 10 10 0 0 0 0 0 0 0 0 0
55740- 0 0 0 0 0 0 0 0 0 0 0 0
55741- 0 0 0 0 0 0 0 0 0 0 0 0
55742- 0 0 0 0 0 0 0 0 0 0 0 0
55743- 0 0 0 0 0 0 0 0 0 0 0 0
55744- 0 0 0 0 0 0 0 0 0 0 0 0
55745- 0 0 0 0 0 0 0 0 0 0 0 0
55746- 0 0 0 0 0 0 0 0 0 0 0 0
55747- 0 0 0 0 0 0 0 0 0 0 0 0
55748- 0 0 0 0 0 0 0 0 0 0 0 0
55749- 0 0 0 0 0 0 0 0 0 0 0 0
55750- 0 0 0 0 0 0 0 0 0 0 0 0
55751- 0 0 0 0 0 0 0 0 0 10 10 10
55752- 34 34 34 78 78 78 50 50 50 6 6 6
55753- 94 70 30 139 102 15 190 146 13 226 184 13
55754-232 200 30 232 195 16 215 174 15 190 146 13
55755-168 122 10 192 133 9 210 150 10 213 154 11
55756-202 150 34 182 157 106 101 98 89 2 2 6
55757- 2 2 6 78 78 78 116 116 116 58 58 58
55758- 2 2 6 22 22 22 90 90 90 46 46 46
55759- 18 18 18 6 6 6 0 0 0 0 0 0
55760- 0 0 0 0 0 0 0 0 0 0 0 0
55761- 0 0 0 0 0 0 0 0 0 0 0 0
55762- 0 0 0 0 0 0 0 0 0 0 0 0
55763- 0 0 0 0 0 0 0 0 0 0 0 0
55764- 0 0 0 0 0 0 0 0 0 0 0 0
55765- 0 0 0 0 0 0 0 0 0 0 0 0
55766- 0 0 0 0 0 0 0 0 0 0 0 0
55767- 0 0 0 0 0 0 0 0 0 0 0 0
55768- 0 0 0 0 0 0 0 0 0 0 0 0
55769- 0 0 0 0 0 0 0 0 0 0 0 0
55770- 0 0 0 0 0 0 0 0 0 0 0 0
55771- 0 0 0 0 0 0 0 0 0 10 10 10
55772- 38 38 38 86 86 86 50 50 50 6 6 6
55773-128 128 128 174 154 114 156 107 11 168 122 10
55774-198 155 10 184 144 12 197 138 11 200 144 11
55775-206 145 10 206 145 10 197 138 11 188 164 115
55776-195 195 195 198 198 198 174 174 174 14 14 14
55777- 2 2 6 22 22 22 116 116 116 116 116 116
55778- 22 22 22 2 2 6 74 74 74 70 70 70
55779- 30 30 30 10 10 10 0 0 0 0 0 0
55780- 0 0 0 0 0 0 0 0 0 0 0 0
55781- 0 0 0 0 0 0 0 0 0 0 0 0
55782- 0 0 0 0 0 0 0 0 0 0 0 0
55783- 0 0 0 0 0 0 0 0 0 0 0 0
55784- 0 0 0 0 0 0 0 0 0 0 0 0
55785- 0 0 0 0 0 0 0 0 0 0 0 0
55786- 0 0 0 0 0 0 0 0 0 0 0 0
55787- 0 0 0 0 0 0 0 0 0 0 0 0
55788- 0 0 0 0 0 0 0 0 0 0 0 0
55789- 0 0 0 0 0 0 0 0 0 0 0 0
55790- 0 0 0 0 0 0 0 0 0 0 0 0
55791- 0 0 0 0 0 0 6 6 6 18 18 18
55792- 50 50 50 101 101 101 26 26 26 10 10 10
55793-138 138 138 190 190 190 174 154 114 156 107 11
55794-197 138 11 200 144 11 197 138 11 192 133 9
55795-180 123 7 190 142 34 190 178 144 187 187 187
55796-202 202 202 221 221 221 214 214 214 66 66 66
55797- 2 2 6 2 2 6 50 50 50 62 62 62
55798- 6 6 6 2 2 6 10 10 10 90 90 90
55799- 50 50 50 18 18 18 6 6 6 0 0 0
55800- 0 0 0 0 0 0 0 0 0 0 0 0
55801- 0 0 0 0 0 0 0 0 0 0 0 0
55802- 0 0 0 0 0 0 0 0 0 0 0 0
55803- 0 0 0 0 0 0 0 0 0 0 0 0
55804- 0 0 0 0 0 0 0 0 0 0 0 0
55805- 0 0 0 0 0 0 0 0 0 0 0 0
55806- 0 0 0 0 0 0 0 0 0 0 0 0
55807- 0 0 0 0 0 0 0 0 0 0 0 0
55808- 0 0 0 0 0 0 0 0 0 0 0 0
55809- 0 0 0 0 0 0 0 0 0 0 0 0
55810- 0 0 0 0 0 0 0 0 0 0 0 0
55811- 0 0 0 0 0 0 10 10 10 34 34 34
55812- 74 74 74 74 74 74 2 2 6 6 6 6
55813-144 144 144 198 198 198 190 190 190 178 166 146
55814-154 121 60 156 107 11 156 107 11 168 124 44
55815-174 154 114 187 187 187 190 190 190 210 210 210
55816-246 246 246 253 253 253 253 253 253 182 182 182
55817- 6 6 6 2 2 6 2 2 6 2 2 6
55818- 2 2 6 2 2 6 2 2 6 62 62 62
55819- 74 74 74 34 34 34 14 14 14 0 0 0
55820- 0 0 0 0 0 0 0 0 0 0 0 0
55821- 0 0 0 0 0 0 0 0 0 0 0 0
55822- 0 0 0 0 0 0 0 0 0 0 0 0
55823- 0 0 0 0 0 0 0 0 0 0 0 0
55824- 0 0 0 0 0 0 0 0 0 0 0 0
55825- 0 0 0 0 0 0 0 0 0 0 0 0
55826- 0 0 0 0 0 0 0 0 0 0 0 0
55827- 0 0 0 0 0 0 0 0 0 0 0 0
55828- 0 0 0 0 0 0 0 0 0 0 0 0
55829- 0 0 0 0 0 0 0 0 0 0 0 0
55830- 0 0 0 0 0 0 0 0 0 0 0 0
55831- 0 0 0 10 10 10 22 22 22 54 54 54
55832- 94 94 94 18 18 18 2 2 6 46 46 46
55833-234 234 234 221 221 221 190 190 190 190 190 190
55834-190 190 190 187 187 187 187 187 187 190 190 190
55835-190 190 190 195 195 195 214 214 214 242 242 242
55836-253 253 253 253 253 253 253 253 253 253 253 253
55837- 82 82 82 2 2 6 2 2 6 2 2 6
55838- 2 2 6 2 2 6 2 2 6 14 14 14
55839- 86 86 86 54 54 54 22 22 22 6 6 6
55840- 0 0 0 0 0 0 0 0 0 0 0 0
55841- 0 0 0 0 0 0 0 0 0 0 0 0
55842- 0 0 0 0 0 0 0 0 0 0 0 0
55843- 0 0 0 0 0 0 0 0 0 0 0 0
55844- 0 0 0 0 0 0 0 0 0 0 0 0
55845- 0 0 0 0 0 0 0 0 0 0 0 0
55846- 0 0 0 0 0 0 0 0 0 0 0 0
55847- 0 0 0 0 0 0 0 0 0 0 0 0
55848- 0 0 0 0 0 0 0 0 0 0 0 0
55849- 0 0 0 0 0 0 0 0 0 0 0 0
55850- 0 0 0 0 0 0 0 0 0 0 0 0
55851- 6 6 6 18 18 18 46 46 46 90 90 90
55852- 46 46 46 18 18 18 6 6 6 182 182 182
55853-253 253 253 246 246 246 206 206 206 190 190 190
55854-190 190 190 190 190 190 190 190 190 190 190 190
55855-206 206 206 231 231 231 250 250 250 253 253 253
55856-253 253 253 253 253 253 253 253 253 253 253 253
55857-202 202 202 14 14 14 2 2 6 2 2 6
55858- 2 2 6 2 2 6 2 2 6 2 2 6
55859- 42 42 42 86 86 86 42 42 42 18 18 18
55860- 6 6 6 0 0 0 0 0 0 0 0 0
55861- 0 0 0 0 0 0 0 0 0 0 0 0
55862- 0 0 0 0 0 0 0 0 0 0 0 0
55863- 0 0 0 0 0 0 0 0 0 0 0 0
55864- 0 0 0 0 0 0 0 0 0 0 0 0
55865- 0 0 0 0 0 0 0 0 0 0 0 0
55866- 0 0 0 0 0 0 0 0 0 0 0 0
55867- 0 0 0 0 0 0 0 0 0 0 0 0
55868- 0 0 0 0 0 0 0 0 0 0 0 0
55869- 0 0 0 0 0 0 0 0 0 0 0 0
55870- 0 0 0 0 0 0 0 0 0 6 6 6
55871- 14 14 14 38 38 38 74 74 74 66 66 66
55872- 2 2 6 6 6 6 90 90 90 250 250 250
55873-253 253 253 253 253 253 238 238 238 198 198 198
55874-190 190 190 190 190 190 195 195 195 221 221 221
55875-246 246 246 253 253 253 253 253 253 253 253 253
55876-253 253 253 253 253 253 253 253 253 253 253 253
55877-253 253 253 82 82 82 2 2 6 2 2 6
55878- 2 2 6 2 2 6 2 2 6 2 2 6
55879- 2 2 6 78 78 78 70 70 70 34 34 34
55880- 14 14 14 6 6 6 0 0 0 0 0 0
55881- 0 0 0 0 0 0 0 0 0 0 0 0
55882- 0 0 0 0 0 0 0 0 0 0 0 0
55883- 0 0 0 0 0 0 0 0 0 0 0 0
55884- 0 0 0 0 0 0 0 0 0 0 0 0
55885- 0 0 0 0 0 0 0 0 0 0 0 0
55886- 0 0 0 0 0 0 0 0 0 0 0 0
55887- 0 0 0 0 0 0 0 0 0 0 0 0
55888- 0 0 0 0 0 0 0 0 0 0 0 0
55889- 0 0 0 0 0 0 0 0 0 0 0 0
55890- 0 0 0 0 0 0 0 0 0 14 14 14
55891- 34 34 34 66 66 66 78 78 78 6 6 6
55892- 2 2 6 18 18 18 218 218 218 253 253 253
55893-253 253 253 253 253 253 253 253 253 246 246 246
55894-226 226 226 231 231 231 246 246 246 253 253 253
55895-253 253 253 253 253 253 253 253 253 253 253 253
55896-253 253 253 253 253 253 253 253 253 253 253 253
55897-253 253 253 178 178 178 2 2 6 2 2 6
55898- 2 2 6 2 2 6 2 2 6 2 2 6
55899- 2 2 6 18 18 18 90 90 90 62 62 62
55900- 30 30 30 10 10 10 0 0 0 0 0 0
55901- 0 0 0 0 0 0 0 0 0 0 0 0
55902- 0 0 0 0 0 0 0 0 0 0 0 0
55903- 0 0 0 0 0 0 0 0 0 0 0 0
55904- 0 0 0 0 0 0 0 0 0 0 0 0
55905- 0 0 0 0 0 0 0 0 0 0 0 0
55906- 0 0 0 0 0 0 0 0 0 0 0 0
55907- 0 0 0 0 0 0 0 0 0 0 0 0
55908- 0 0 0 0 0 0 0 0 0 0 0 0
55909- 0 0 0 0 0 0 0 0 0 0 0 0
55910- 0 0 0 0 0 0 10 10 10 26 26 26
55911- 58 58 58 90 90 90 18 18 18 2 2 6
55912- 2 2 6 110 110 110 253 253 253 253 253 253
55913-253 253 253 253 253 253 253 253 253 253 253 253
55914-250 250 250 253 253 253 253 253 253 253 253 253
55915-253 253 253 253 253 253 253 253 253 253 253 253
55916-253 253 253 253 253 253 253 253 253 253 253 253
55917-253 253 253 231 231 231 18 18 18 2 2 6
55918- 2 2 6 2 2 6 2 2 6 2 2 6
55919- 2 2 6 2 2 6 18 18 18 94 94 94
55920- 54 54 54 26 26 26 10 10 10 0 0 0
55921- 0 0 0 0 0 0 0 0 0 0 0 0
55922- 0 0 0 0 0 0 0 0 0 0 0 0
55923- 0 0 0 0 0 0 0 0 0 0 0 0
55924- 0 0 0 0 0 0 0 0 0 0 0 0
55925- 0 0 0 0 0 0 0 0 0 0 0 0
55926- 0 0 0 0 0 0 0 0 0 0 0 0
55927- 0 0 0 0 0 0 0 0 0 0 0 0
55928- 0 0 0 0 0 0 0 0 0 0 0 0
55929- 0 0 0 0 0 0 0 0 0 0 0 0
55930- 0 0 0 6 6 6 22 22 22 50 50 50
55931- 90 90 90 26 26 26 2 2 6 2 2 6
55932- 14 14 14 195 195 195 250 250 250 253 253 253
55933-253 253 253 253 253 253 253 253 253 253 253 253
55934-253 253 253 253 253 253 253 253 253 253 253 253
55935-253 253 253 253 253 253 253 253 253 253 253 253
55936-253 253 253 253 253 253 253 253 253 253 253 253
55937-250 250 250 242 242 242 54 54 54 2 2 6
55938- 2 2 6 2 2 6 2 2 6 2 2 6
55939- 2 2 6 2 2 6 2 2 6 38 38 38
55940- 86 86 86 50 50 50 22 22 22 6 6 6
55941- 0 0 0 0 0 0 0 0 0 0 0 0
55942- 0 0 0 0 0 0 0 0 0 0 0 0
55943- 0 0 0 0 0 0 0 0 0 0 0 0
55944- 0 0 0 0 0 0 0 0 0 0 0 0
55945- 0 0 0 0 0 0 0 0 0 0 0 0
55946- 0 0 0 0 0 0 0 0 0 0 0 0
55947- 0 0 0 0 0 0 0 0 0 0 0 0
55948- 0 0 0 0 0 0 0 0 0 0 0 0
55949- 0 0 0 0 0 0 0 0 0 0 0 0
55950- 6 6 6 14 14 14 38 38 38 82 82 82
55951- 34 34 34 2 2 6 2 2 6 2 2 6
55952- 42 42 42 195 195 195 246 246 246 253 253 253
55953-253 253 253 253 253 253 253 253 253 250 250 250
55954-242 242 242 242 242 242 250 250 250 253 253 253
55955-253 253 253 253 253 253 253 253 253 253 253 253
55956-253 253 253 250 250 250 246 246 246 238 238 238
55957-226 226 226 231 231 231 101 101 101 6 6 6
55958- 2 2 6 2 2 6 2 2 6 2 2 6
55959- 2 2 6 2 2 6 2 2 6 2 2 6
55960- 38 38 38 82 82 82 42 42 42 14 14 14
55961- 6 6 6 0 0 0 0 0 0 0 0 0
55962- 0 0 0 0 0 0 0 0 0 0 0 0
55963- 0 0 0 0 0 0 0 0 0 0 0 0
55964- 0 0 0 0 0 0 0 0 0 0 0 0
55965- 0 0 0 0 0 0 0 0 0 0 0 0
55966- 0 0 0 0 0 0 0 0 0 0 0 0
55967- 0 0 0 0 0 0 0 0 0 0 0 0
55968- 0 0 0 0 0 0 0 0 0 0 0 0
55969- 0 0 0 0 0 0 0 0 0 0 0 0
55970- 10 10 10 26 26 26 62 62 62 66 66 66
55971- 2 2 6 2 2 6 2 2 6 6 6 6
55972- 70 70 70 170 170 170 206 206 206 234 234 234
55973-246 246 246 250 250 250 250 250 250 238 238 238
55974-226 226 226 231 231 231 238 238 238 250 250 250
55975-250 250 250 250 250 250 246 246 246 231 231 231
55976-214 214 214 206 206 206 202 202 202 202 202 202
55977-198 198 198 202 202 202 182 182 182 18 18 18
55978- 2 2 6 2 2 6 2 2 6 2 2 6
55979- 2 2 6 2 2 6 2 2 6 2 2 6
55980- 2 2 6 62 62 62 66 66 66 30 30 30
55981- 10 10 10 0 0 0 0 0 0 0 0 0
55982- 0 0 0 0 0 0 0 0 0 0 0 0
55983- 0 0 0 0 0 0 0 0 0 0 0 0
55984- 0 0 0 0 0 0 0 0 0 0 0 0
55985- 0 0 0 0 0 0 0 0 0 0 0 0
55986- 0 0 0 0 0 0 0 0 0 0 0 0
55987- 0 0 0 0 0 0 0 0 0 0 0 0
55988- 0 0 0 0 0 0 0 0 0 0 0 0
55989- 0 0 0 0 0 0 0 0 0 0 0 0
55990- 14 14 14 42 42 42 82 82 82 18 18 18
55991- 2 2 6 2 2 6 2 2 6 10 10 10
55992- 94 94 94 182 182 182 218 218 218 242 242 242
55993-250 250 250 253 253 253 253 253 253 250 250 250
55994-234 234 234 253 253 253 253 253 253 253 253 253
55995-253 253 253 253 253 253 253 253 253 246 246 246
55996-238 238 238 226 226 226 210 210 210 202 202 202
55997-195 195 195 195 195 195 210 210 210 158 158 158
55998- 6 6 6 14 14 14 50 50 50 14 14 14
55999- 2 2 6 2 2 6 2 2 6 2 2 6
56000- 2 2 6 6 6 6 86 86 86 46 46 46
56001- 18 18 18 6 6 6 0 0 0 0 0 0
56002- 0 0 0 0 0 0 0 0 0 0 0 0
56003- 0 0 0 0 0 0 0 0 0 0 0 0
56004- 0 0 0 0 0 0 0 0 0 0 0 0
56005- 0 0 0 0 0 0 0 0 0 0 0 0
56006- 0 0 0 0 0 0 0 0 0 0 0 0
56007- 0 0 0 0 0 0 0 0 0 0 0 0
56008- 0 0 0 0 0 0 0 0 0 0 0 0
56009- 0 0 0 0 0 0 0 0 0 6 6 6
56010- 22 22 22 54 54 54 70 70 70 2 2 6
56011- 2 2 6 10 10 10 2 2 6 22 22 22
56012-166 166 166 231 231 231 250 250 250 253 253 253
56013-253 253 253 253 253 253 253 253 253 250 250 250
56014-242 242 242 253 253 253 253 253 253 253 253 253
56015-253 253 253 253 253 253 253 253 253 253 253 253
56016-253 253 253 253 253 253 253 253 253 246 246 246
56017-231 231 231 206 206 206 198 198 198 226 226 226
56018- 94 94 94 2 2 6 6 6 6 38 38 38
56019- 30 30 30 2 2 6 2 2 6 2 2 6
56020- 2 2 6 2 2 6 62 62 62 66 66 66
56021- 26 26 26 10 10 10 0 0 0 0 0 0
56022- 0 0 0 0 0 0 0 0 0 0 0 0
56023- 0 0 0 0 0 0 0 0 0 0 0 0
56024- 0 0 0 0 0 0 0 0 0 0 0 0
56025- 0 0 0 0 0 0 0 0 0 0 0 0
56026- 0 0 0 0 0 0 0 0 0 0 0 0
56027- 0 0 0 0 0 0 0 0 0 0 0 0
56028- 0 0 0 0 0 0 0 0 0 0 0 0
56029- 0 0 0 0 0 0 0 0 0 10 10 10
56030- 30 30 30 74 74 74 50 50 50 2 2 6
56031- 26 26 26 26 26 26 2 2 6 106 106 106
56032-238 238 238 253 253 253 253 253 253 253 253 253
56033-253 253 253 253 253 253 253 253 253 253 253 253
56034-253 253 253 253 253 253 253 253 253 253 253 253
56035-253 253 253 253 253 253 253 253 253 253 253 253
56036-253 253 253 253 253 253 253 253 253 253 253 253
56037-253 253 253 246 246 246 218 218 218 202 202 202
56038-210 210 210 14 14 14 2 2 6 2 2 6
56039- 30 30 30 22 22 22 2 2 6 2 2 6
56040- 2 2 6 2 2 6 18 18 18 86 86 86
56041- 42 42 42 14 14 14 0 0 0 0 0 0
56042- 0 0 0 0 0 0 0 0 0 0 0 0
56043- 0 0 0 0 0 0 0 0 0 0 0 0
56044- 0 0 0 0 0 0 0 0 0 0 0 0
56045- 0 0 0 0 0 0 0 0 0 0 0 0
56046- 0 0 0 0 0 0 0 0 0 0 0 0
56047- 0 0 0 0 0 0 0 0 0 0 0 0
56048- 0 0 0 0 0 0 0 0 0 0 0 0
56049- 0 0 0 0 0 0 0 0 0 14 14 14
56050- 42 42 42 90 90 90 22 22 22 2 2 6
56051- 42 42 42 2 2 6 18 18 18 218 218 218
56052-253 253 253 253 253 253 253 253 253 253 253 253
56053-253 253 253 253 253 253 253 253 253 253 253 253
56054-253 253 253 253 253 253 253 253 253 253 253 253
56055-253 253 253 253 253 253 253 253 253 253 253 253
56056-253 253 253 253 253 253 253 253 253 253 253 253
56057-253 253 253 253 253 253 250 250 250 221 221 221
56058-218 218 218 101 101 101 2 2 6 14 14 14
56059- 18 18 18 38 38 38 10 10 10 2 2 6
56060- 2 2 6 2 2 6 2 2 6 78 78 78
56061- 58 58 58 22 22 22 6 6 6 0 0 0
56062- 0 0 0 0 0 0 0 0 0 0 0 0
56063- 0 0 0 0 0 0 0 0 0 0 0 0
56064- 0 0 0 0 0 0 0 0 0 0 0 0
56065- 0 0 0 0 0 0 0 0 0 0 0 0
56066- 0 0 0 0 0 0 0 0 0 0 0 0
56067- 0 0 0 0 0 0 0 0 0 0 0 0
56068- 0 0 0 0 0 0 0 0 0 0 0 0
56069- 0 0 0 0 0 0 6 6 6 18 18 18
56070- 54 54 54 82 82 82 2 2 6 26 26 26
56071- 22 22 22 2 2 6 123 123 123 253 253 253
56072-253 253 253 253 253 253 253 253 253 253 253 253
56073-253 253 253 253 253 253 253 253 253 253 253 253
56074-253 253 253 253 253 253 253 253 253 253 253 253
56075-253 253 253 253 253 253 253 253 253 253 253 253
56076-253 253 253 253 253 253 253 253 253 253 253 253
56077-253 253 253 253 253 253 253 253 253 250 250 250
56078-238 238 238 198 198 198 6 6 6 38 38 38
56079- 58 58 58 26 26 26 38 38 38 2 2 6
56080- 2 2 6 2 2 6 2 2 6 46 46 46
56081- 78 78 78 30 30 30 10 10 10 0 0 0
56082- 0 0 0 0 0 0 0 0 0 0 0 0
56083- 0 0 0 0 0 0 0 0 0 0 0 0
56084- 0 0 0 0 0 0 0 0 0 0 0 0
56085- 0 0 0 0 0 0 0 0 0 0 0 0
56086- 0 0 0 0 0 0 0 0 0 0 0 0
56087- 0 0 0 0 0 0 0 0 0 0 0 0
56088- 0 0 0 0 0 0 0 0 0 0 0 0
56089- 0 0 0 0 0 0 10 10 10 30 30 30
56090- 74 74 74 58 58 58 2 2 6 42 42 42
56091- 2 2 6 22 22 22 231 231 231 253 253 253
56092-253 253 253 253 253 253 253 253 253 253 253 253
56093-253 253 253 253 253 253 253 253 253 250 250 250
56094-253 253 253 253 253 253 253 253 253 253 253 253
56095-253 253 253 253 253 253 253 253 253 253 253 253
56096-253 253 253 253 253 253 253 253 253 253 253 253
56097-253 253 253 253 253 253 253 253 253 253 253 253
56098-253 253 253 246 246 246 46 46 46 38 38 38
56099- 42 42 42 14 14 14 38 38 38 14 14 14
56100- 2 2 6 2 2 6 2 2 6 6 6 6
56101- 86 86 86 46 46 46 14 14 14 0 0 0
56102- 0 0 0 0 0 0 0 0 0 0 0 0
56103- 0 0 0 0 0 0 0 0 0 0 0 0
56104- 0 0 0 0 0 0 0 0 0 0 0 0
56105- 0 0 0 0 0 0 0 0 0 0 0 0
56106- 0 0 0 0 0 0 0 0 0 0 0 0
56107- 0 0 0 0 0 0 0 0 0 0 0 0
56108- 0 0 0 0 0 0 0 0 0 0 0 0
56109- 0 0 0 6 6 6 14 14 14 42 42 42
56110- 90 90 90 18 18 18 18 18 18 26 26 26
56111- 2 2 6 116 116 116 253 253 253 253 253 253
56112-253 253 253 253 253 253 253 253 253 253 253 253
56113-253 253 253 253 253 253 250 250 250 238 238 238
56114-253 253 253 253 253 253 253 253 253 253 253 253
56115-253 253 253 253 253 253 253 253 253 253 253 253
56116-253 253 253 253 253 253 253 253 253 253 253 253
56117-253 253 253 253 253 253 253 253 253 253 253 253
56118-253 253 253 253 253 253 94 94 94 6 6 6
56119- 2 2 6 2 2 6 10 10 10 34 34 34
56120- 2 2 6 2 2 6 2 2 6 2 2 6
56121- 74 74 74 58 58 58 22 22 22 6 6 6
56122- 0 0 0 0 0 0 0 0 0 0 0 0
56123- 0 0 0 0 0 0 0 0 0 0 0 0
56124- 0 0 0 0 0 0 0 0 0 0 0 0
56125- 0 0 0 0 0 0 0 0 0 0 0 0
56126- 0 0 0 0 0 0 0 0 0 0 0 0
56127- 0 0 0 0 0 0 0 0 0 0 0 0
56128- 0 0 0 0 0 0 0 0 0 0 0 0
56129- 0 0 0 10 10 10 26 26 26 66 66 66
56130- 82 82 82 2 2 6 38 38 38 6 6 6
56131- 14 14 14 210 210 210 253 253 253 253 253 253
56132-253 253 253 253 253 253 253 253 253 253 253 253
56133-253 253 253 253 253 253 246 246 246 242 242 242
56134-253 253 253 253 253 253 253 253 253 253 253 253
56135-253 253 253 253 253 253 253 253 253 253 253 253
56136-253 253 253 253 253 253 253 253 253 253 253 253
56137-253 253 253 253 253 253 253 253 253 253 253 253
56138-253 253 253 253 253 253 144 144 144 2 2 6
56139- 2 2 6 2 2 6 2 2 6 46 46 46
56140- 2 2 6 2 2 6 2 2 6 2 2 6
56141- 42 42 42 74 74 74 30 30 30 10 10 10
56142- 0 0 0 0 0 0 0 0 0 0 0 0
56143- 0 0 0 0 0 0 0 0 0 0 0 0
56144- 0 0 0 0 0 0 0 0 0 0 0 0
56145- 0 0 0 0 0 0 0 0 0 0 0 0
56146- 0 0 0 0 0 0 0 0 0 0 0 0
56147- 0 0 0 0 0 0 0 0 0 0 0 0
56148- 0 0 0 0 0 0 0 0 0 0 0 0
56149- 6 6 6 14 14 14 42 42 42 90 90 90
56150- 26 26 26 6 6 6 42 42 42 2 2 6
56151- 74 74 74 250 250 250 253 253 253 253 253 253
56152-253 253 253 253 253 253 253 253 253 253 253 253
56153-253 253 253 253 253 253 242 242 242 242 242 242
56154-253 253 253 253 253 253 253 253 253 253 253 253
56155-253 253 253 253 253 253 253 253 253 253 253 253
56156-253 253 253 253 253 253 253 253 253 253 253 253
56157-253 253 253 253 253 253 253 253 253 253 253 253
56158-253 253 253 253 253 253 182 182 182 2 2 6
56159- 2 2 6 2 2 6 2 2 6 46 46 46
56160- 2 2 6 2 2 6 2 2 6 2 2 6
56161- 10 10 10 86 86 86 38 38 38 10 10 10
56162- 0 0 0 0 0 0 0 0 0 0 0 0
56163- 0 0 0 0 0 0 0 0 0 0 0 0
56164- 0 0 0 0 0 0 0 0 0 0 0 0
56165- 0 0 0 0 0 0 0 0 0 0 0 0
56166- 0 0 0 0 0 0 0 0 0 0 0 0
56167- 0 0 0 0 0 0 0 0 0 0 0 0
56168- 0 0 0 0 0 0 0 0 0 0 0 0
56169- 10 10 10 26 26 26 66 66 66 82 82 82
56170- 2 2 6 22 22 22 18 18 18 2 2 6
56171-149 149 149 253 253 253 253 253 253 253 253 253
56172-253 253 253 253 253 253 253 253 253 253 253 253
56173-253 253 253 253 253 253 234 234 234 242 242 242
56174-253 253 253 253 253 253 253 253 253 253 253 253
56175-253 253 253 253 253 253 253 253 253 253 253 253
56176-253 253 253 253 253 253 253 253 253 253 253 253
56177-253 253 253 253 253 253 253 253 253 253 253 253
56178-253 253 253 253 253 253 206 206 206 2 2 6
56179- 2 2 6 2 2 6 2 2 6 38 38 38
56180- 2 2 6 2 2 6 2 2 6 2 2 6
56181- 6 6 6 86 86 86 46 46 46 14 14 14
56182- 0 0 0 0 0 0 0 0 0 0 0 0
56183- 0 0 0 0 0 0 0 0 0 0 0 0
56184- 0 0 0 0 0 0 0 0 0 0 0 0
56185- 0 0 0 0 0 0 0 0 0 0 0 0
56186- 0 0 0 0 0 0 0 0 0 0 0 0
56187- 0 0 0 0 0 0 0 0 0 0 0 0
56188- 0 0 0 0 0 0 0 0 0 6 6 6
56189- 18 18 18 46 46 46 86 86 86 18 18 18
56190- 2 2 6 34 34 34 10 10 10 6 6 6
56191-210 210 210 253 253 253 253 253 253 253 253 253
56192-253 253 253 253 253 253 253 253 253 253 253 253
56193-253 253 253 253 253 253 234 234 234 242 242 242
56194-253 253 253 253 253 253 253 253 253 253 253 253
56195-253 253 253 253 253 253 253 253 253 253 253 253
56196-253 253 253 253 253 253 253 253 253 253 253 253
56197-253 253 253 253 253 253 253 253 253 253 253 253
56198-253 253 253 253 253 253 221 221 221 6 6 6
56199- 2 2 6 2 2 6 6 6 6 30 30 30
56200- 2 2 6 2 2 6 2 2 6 2 2 6
56201- 2 2 6 82 82 82 54 54 54 18 18 18
56202- 6 6 6 0 0 0 0 0 0 0 0 0
56203- 0 0 0 0 0 0 0 0 0 0 0 0
56204- 0 0 0 0 0 0 0 0 0 0 0 0
56205- 0 0 0 0 0 0 0 0 0 0 0 0
56206- 0 0 0 0 0 0 0 0 0 0 0 0
56207- 0 0 0 0 0 0 0 0 0 0 0 0
56208- 0 0 0 0 0 0 0 0 0 10 10 10
56209- 26 26 26 66 66 66 62 62 62 2 2 6
56210- 2 2 6 38 38 38 10 10 10 26 26 26
56211-238 238 238 253 253 253 253 253 253 253 253 253
56212-253 253 253 253 253 253 253 253 253 253 253 253
56213-253 253 253 253 253 253 231 231 231 238 238 238
56214-253 253 253 253 253 253 253 253 253 253 253 253
56215-253 253 253 253 253 253 253 253 253 253 253 253
56216-253 253 253 253 253 253 253 253 253 253 253 253
56217-253 253 253 253 253 253 253 253 253 253 253 253
56218-253 253 253 253 253 253 231 231 231 6 6 6
56219- 2 2 6 2 2 6 10 10 10 30 30 30
56220- 2 2 6 2 2 6 2 2 6 2 2 6
56221- 2 2 6 66 66 66 58 58 58 22 22 22
56222- 6 6 6 0 0 0 0 0 0 0 0 0
56223- 0 0 0 0 0 0 0 0 0 0 0 0
56224- 0 0 0 0 0 0 0 0 0 0 0 0
56225- 0 0 0 0 0 0 0 0 0 0 0 0
56226- 0 0 0 0 0 0 0 0 0 0 0 0
56227- 0 0 0 0 0 0 0 0 0 0 0 0
56228- 0 0 0 0 0 0 0 0 0 10 10 10
56229- 38 38 38 78 78 78 6 6 6 2 2 6
56230- 2 2 6 46 46 46 14 14 14 42 42 42
56231-246 246 246 253 253 253 253 253 253 253 253 253
56232-253 253 253 253 253 253 253 253 253 253 253 253
56233-253 253 253 253 253 253 231 231 231 242 242 242
56234-253 253 253 253 253 253 253 253 253 253 253 253
56235-253 253 253 253 253 253 253 253 253 253 253 253
56236-253 253 253 253 253 253 253 253 253 253 253 253
56237-253 253 253 253 253 253 253 253 253 253 253 253
56238-253 253 253 253 253 253 234 234 234 10 10 10
56239- 2 2 6 2 2 6 22 22 22 14 14 14
56240- 2 2 6 2 2 6 2 2 6 2 2 6
56241- 2 2 6 66 66 66 62 62 62 22 22 22
56242- 6 6 6 0 0 0 0 0 0 0 0 0
56243- 0 0 0 0 0 0 0 0 0 0 0 0
56244- 0 0 0 0 0 0 0 0 0 0 0 0
56245- 0 0 0 0 0 0 0 0 0 0 0 0
56246- 0 0 0 0 0 0 0 0 0 0 0 0
56247- 0 0 0 0 0 0 0 0 0 0 0 0
56248- 0 0 0 0 0 0 6 6 6 18 18 18
56249- 50 50 50 74 74 74 2 2 6 2 2 6
56250- 14 14 14 70 70 70 34 34 34 62 62 62
56251-250 250 250 253 253 253 253 253 253 253 253 253
56252-253 253 253 253 253 253 253 253 253 253 253 253
56253-253 253 253 253 253 253 231 231 231 246 246 246
56254-253 253 253 253 253 253 253 253 253 253 253 253
56255-253 253 253 253 253 253 253 253 253 253 253 253
56256-253 253 253 253 253 253 253 253 253 253 253 253
56257-253 253 253 253 253 253 253 253 253 253 253 253
56258-253 253 253 253 253 253 234 234 234 14 14 14
56259- 2 2 6 2 2 6 30 30 30 2 2 6
56260- 2 2 6 2 2 6 2 2 6 2 2 6
56261- 2 2 6 66 66 66 62 62 62 22 22 22
56262- 6 6 6 0 0 0 0 0 0 0 0 0
56263- 0 0 0 0 0 0 0 0 0 0 0 0
56264- 0 0 0 0 0 0 0 0 0 0 0 0
56265- 0 0 0 0 0 0 0 0 0 0 0 0
56266- 0 0 0 0 0 0 0 0 0 0 0 0
56267- 0 0 0 0 0 0 0 0 0 0 0 0
56268- 0 0 0 0 0 0 6 6 6 18 18 18
56269- 54 54 54 62 62 62 2 2 6 2 2 6
56270- 2 2 6 30 30 30 46 46 46 70 70 70
56271-250 250 250 253 253 253 253 253 253 253 253 253
56272-253 253 253 253 253 253 253 253 253 253 253 253
56273-253 253 253 253 253 253 231 231 231 246 246 246
56274-253 253 253 253 253 253 253 253 253 253 253 253
56275-253 253 253 253 253 253 253 253 253 253 253 253
56276-253 253 253 253 253 253 253 253 253 253 253 253
56277-253 253 253 253 253 253 253 253 253 253 253 253
56278-253 253 253 253 253 253 226 226 226 10 10 10
56279- 2 2 6 6 6 6 30 30 30 2 2 6
56280- 2 2 6 2 2 6 2 2 6 2 2 6
56281- 2 2 6 66 66 66 58 58 58 22 22 22
56282- 6 6 6 0 0 0 0 0 0 0 0 0
56283- 0 0 0 0 0 0 0 0 0 0 0 0
56284- 0 0 0 0 0 0 0 0 0 0 0 0
56285- 0 0 0 0 0 0 0 0 0 0 0 0
56286- 0 0 0 0 0 0 0 0 0 0 0 0
56287- 0 0 0 0 0 0 0 0 0 0 0 0
56288- 0 0 0 0 0 0 6 6 6 22 22 22
56289- 58 58 58 62 62 62 2 2 6 2 2 6
56290- 2 2 6 2 2 6 30 30 30 78 78 78
56291-250 250 250 253 253 253 253 253 253 253 253 253
56292-253 253 253 253 253 253 253 253 253 253 253 253
56293-253 253 253 253 253 253 231 231 231 246 246 246
56294-253 253 253 253 253 253 253 253 253 253 253 253
56295-253 253 253 253 253 253 253 253 253 253 253 253
56296-253 253 253 253 253 253 253 253 253 253 253 253
56297-253 253 253 253 253 253 253 253 253 253 253 253
56298-253 253 253 253 253 253 206 206 206 2 2 6
56299- 22 22 22 34 34 34 18 14 6 22 22 22
56300- 26 26 26 18 18 18 6 6 6 2 2 6
56301- 2 2 6 82 82 82 54 54 54 18 18 18
56302- 6 6 6 0 0 0 0 0 0 0 0 0
56303- 0 0 0 0 0 0 0 0 0 0 0 0
56304- 0 0 0 0 0 0 0 0 0 0 0 0
56305- 0 0 0 0 0 0 0 0 0 0 0 0
56306- 0 0 0 0 0 0 0 0 0 0 0 0
56307- 0 0 0 0 0 0 0 0 0 0 0 0
56308- 0 0 0 0 0 0 6 6 6 26 26 26
56309- 62 62 62 106 106 106 74 54 14 185 133 11
56310-210 162 10 121 92 8 6 6 6 62 62 62
56311-238 238 238 253 253 253 253 253 253 253 253 253
56312-253 253 253 253 253 253 253 253 253 253 253 253
56313-253 253 253 253 253 253 231 231 231 246 246 246
56314-253 253 253 253 253 253 253 253 253 253 253 253
56315-253 253 253 253 253 253 253 253 253 253 253 253
56316-253 253 253 253 253 253 253 253 253 253 253 253
56317-253 253 253 253 253 253 253 253 253 253 253 253
56318-253 253 253 253 253 253 158 158 158 18 18 18
56319- 14 14 14 2 2 6 2 2 6 2 2 6
56320- 6 6 6 18 18 18 66 66 66 38 38 38
56321- 6 6 6 94 94 94 50 50 50 18 18 18
56322- 6 6 6 0 0 0 0 0 0 0 0 0
56323- 0 0 0 0 0 0 0 0 0 0 0 0
56324- 0 0 0 0 0 0 0 0 0 0 0 0
56325- 0 0 0 0 0 0 0 0 0 0 0 0
56326- 0 0 0 0 0 0 0 0 0 0 0 0
56327- 0 0 0 0 0 0 0 0 0 6 6 6
56328- 10 10 10 10 10 10 18 18 18 38 38 38
56329- 78 78 78 142 134 106 216 158 10 242 186 14
56330-246 190 14 246 190 14 156 118 10 10 10 10
56331- 90 90 90 238 238 238 253 253 253 253 253 253
56332-253 253 253 253 253 253 253 253 253 253 253 253
56333-253 253 253 253 253 253 231 231 231 250 250 250
56334-253 253 253 253 253 253 253 253 253 253 253 253
56335-253 253 253 253 253 253 253 253 253 253 253 253
56336-253 253 253 253 253 253 253 253 253 253 253 253
56337-253 253 253 253 253 253 253 253 253 246 230 190
56338-238 204 91 238 204 91 181 142 44 37 26 9
56339- 2 2 6 2 2 6 2 2 6 2 2 6
56340- 2 2 6 2 2 6 38 38 38 46 46 46
56341- 26 26 26 106 106 106 54 54 54 18 18 18
56342- 6 6 6 0 0 0 0 0 0 0 0 0
56343- 0 0 0 0 0 0 0 0 0 0 0 0
56344- 0 0 0 0 0 0 0 0 0 0 0 0
56345- 0 0 0 0 0 0 0 0 0 0 0 0
56346- 0 0 0 0 0 0 0 0 0 0 0 0
56347- 0 0 0 6 6 6 14 14 14 22 22 22
56348- 30 30 30 38 38 38 50 50 50 70 70 70
56349-106 106 106 190 142 34 226 170 11 242 186 14
56350-246 190 14 246 190 14 246 190 14 154 114 10
56351- 6 6 6 74 74 74 226 226 226 253 253 253
56352-253 253 253 253 253 253 253 253 253 253 253 253
56353-253 253 253 253 253 253 231 231 231 250 250 250
56354-253 253 253 253 253 253 253 253 253 253 253 253
56355-253 253 253 253 253 253 253 253 253 253 253 253
56356-253 253 253 253 253 253 253 253 253 253 253 253
56357-253 253 253 253 253 253 253 253 253 228 184 62
56358-241 196 14 241 208 19 232 195 16 38 30 10
56359- 2 2 6 2 2 6 2 2 6 2 2 6
56360- 2 2 6 6 6 6 30 30 30 26 26 26
56361-203 166 17 154 142 90 66 66 66 26 26 26
56362- 6 6 6 0 0 0 0 0 0 0 0 0
56363- 0 0 0 0 0 0 0 0 0 0 0 0
56364- 0 0 0 0 0 0 0 0 0 0 0 0
56365- 0 0 0 0 0 0 0 0 0 0 0 0
56366- 0 0 0 0 0 0 0 0 0 0 0 0
56367- 6 6 6 18 18 18 38 38 38 58 58 58
56368- 78 78 78 86 86 86 101 101 101 123 123 123
56369-175 146 61 210 150 10 234 174 13 246 186 14
56370-246 190 14 246 190 14 246 190 14 238 190 10
56371-102 78 10 2 2 6 46 46 46 198 198 198
56372-253 253 253 253 253 253 253 253 253 253 253 253
56373-253 253 253 253 253 253 234 234 234 242 242 242
56374-253 253 253 253 253 253 253 253 253 253 253 253
56375-253 253 253 253 253 253 253 253 253 253 253 253
56376-253 253 253 253 253 253 253 253 253 253 253 253
56377-253 253 253 253 253 253 253 253 253 224 178 62
56378-242 186 14 241 196 14 210 166 10 22 18 6
56379- 2 2 6 2 2 6 2 2 6 2 2 6
56380- 2 2 6 2 2 6 6 6 6 121 92 8
56381-238 202 15 232 195 16 82 82 82 34 34 34
56382- 10 10 10 0 0 0 0 0 0 0 0 0
56383- 0 0 0 0 0 0 0 0 0 0 0 0
56384- 0 0 0 0 0 0 0 0 0 0 0 0
56385- 0 0 0 0 0 0 0 0 0 0 0 0
56386- 0 0 0 0 0 0 0 0 0 0 0 0
56387- 14 14 14 38 38 38 70 70 70 154 122 46
56388-190 142 34 200 144 11 197 138 11 197 138 11
56389-213 154 11 226 170 11 242 186 14 246 190 14
56390-246 190 14 246 190 14 246 190 14 246 190 14
56391-225 175 15 46 32 6 2 2 6 22 22 22
56392-158 158 158 250 250 250 253 253 253 253 253 253
56393-253 253 253 253 253 253 253 253 253 253 253 253
56394-253 253 253 253 253 253 253 253 253 253 253 253
56395-253 253 253 253 253 253 253 253 253 253 253 253
56396-253 253 253 253 253 253 253 253 253 253 253 253
56397-253 253 253 250 250 250 242 242 242 224 178 62
56398-239 182 13 236 186 11 213 154 11 46 32 6
56399- 2 2 6 2 2 6 2 2 6 2 2 6
56400- 2 2 6 2 2 6 61 42 6 225 175 15
56401-238 190 10 236 186 11 112 100 78 42 42 42
56402- 14 14 14 0 0 0 0 0 0 0 0 0
56403- 0 0 0 0 0 0 0 0 0 0 0 0
56404- 0 0 0 0 0 0 0 0 0 0 0 0
56405- 0 0 0 0 0 0 0 0 0 0 0 0
56406- 0 0 0 0 0 0 0 0 0 6 6 6
56407- 22 22 22 54 54 54 154 122 46 213 154 11
56408-226 170 11 230 174 11 226 170 11 226 170 11
56409-236 178 12 242 186 14 246 190 14 246 190 14
56410-246 190 14 246 190 14 246 190 14 246 190 14
56411-241 196 14 184 144 12 10 10 10 2 2 6
56412- 6 6 6 116 116 116 242 242 242 253 253 253
56413-253 253 253 253 253 253 253 253 253 253 253 253
56414-253 253 253 253 253 253 253 253 253 253 253 253
56415-253 253 253 253 253 253 253 253 253 253 253 253
56416-253 253 253 253 253 253 253 253 253 253 253 253
56417-253 253 253 231 231 231 198 198 198 214 170 54
56418-236 178 12 236 178 12 210 150 10 137 92 6
56419- 18 14 6 2 2 6 2 2 6 2 2 6
56420- 6 6 6 70 47 6 200 144 11 236 178 12
56421-239 182 13 239 182 13 124 112 88 58 58 58
56422- 22 22 22 6 6 6 0 0 0 0 0 0
56423- 0 0 0 0 0 0 0 0 0 0 0 0
56424- 0 0 0 0 0 0 0 0 0 0 0 0
56425- 0 0 0 0 0 0 0 0 0 0 0 0
56426- 0 0 0 0 0 0 0 0 0 10 10 10
56427- 30 30 30 70 70 70 180 133 36 226 170 11
56428-239 182 13 242 186 14 242 186 14 246 186 14
56429-246 190 14 246 190 14 246 190 14 246 190 14
56430-246 190 14 246 190 14 246 190 14 246 190 14
56431-246 190 14 232 195 16 98 70 6 2 2 6
56432- 2 2 6 2 2 6 66 66 66 221 221 221
56433-253 253 253 253 253 253 253 253 253 253 253 253
56434-253 253 253 253 253 253 253 253 253 253 253 253
56435-253 253 253 253 253 253 253 253 253 253 253 253
56436-253 253 253 253 253 253 253 253 253 253 253 253
56437-253 253 253 206 206 206 198 198 198 214 166 58
56438-230 174 11 230 174 11 216 158 10 192 133 9
56439-163 110 8 116 81 8 102 78 10 116 81 8
56440-167 114 7 197 138 11 226 170 11 239 182 13
56441-242 186 14 242 186 14 162 146 94 78 78 78
56442- 34 34 34 14 14 14 6 6 6 0 0 0
56443- 0 0 0 0 0 0 0 0 0 0 0 0
56444- 0 0 0 0 0 0 0 0 0 0 0 0
56445- 0 0 0 0 0 0 0 0 0 0 0 0
56446- 0 0 0 0 0 0 0 0 0 6 6 6
56447- 30 30 30 78 78 78 190 142 34 226 170 11
56448-239 182 13 246 190 14 246 190 14 246 190 14
56449-246 190 14 246 190 14 246 190 14 246 190 14
56450-246 190 14 246 190 14 246 190 14 246 190 14
56451-246 190 14 241 196 14 203 166 17 22 18 6
56452- 2 2 6 2 2 6 2 2 6 38 38 38
56453-218 218 218 253 253 253 253 253 253 253 253 253
56454-253 253 253 253 253 253 253 253 253 253 253 253
56455-253 253 253 253 253 253 253 253 253 253 253 253
56456-253 253 253 253 253 253 253 253 253 253 253 253
56457-250 250 250 206 206 206 198 198 198 202 162 69
56458-226 170 11 236 178 12 224 166 10 210 150 10
56459-200 144 11 197 138 11 192 133 9 197 138 11
56460-210 150 10 226 170 11 242 186 14 246 190 14
56461-246 190 14 246 186 14 225 175 15 124 112 88
56462- 62 62 62 30 30 30 14 14 14 6 6 6
56463- 0 0 0 0 0 0 0 0 0 0 0 0
56464- 0 0 0 0 0 0 0 0 0 0 0 0
56465- 0 0 0 0 0 0 0 0 0 0 0 0
56466- 0 0 0 0 0 0 0 0 0 10 10 10
56467- 30 30 30 78 78 78 174 135 50 224 166 10
56468-239 182 13 246 190 14 246 190 14 246 190 14
56469-246 190 14 246 190 14 246 190 14 246 190 14
56470-246 190 14 246 190 14 246 190 14 246 190 14
56471-246 190 14 246 190 14 241 196 14 139 102 15
56472- 2 2 6 2 2 6 2 2 6 2 2 6
56473- 78 78 78 250 250 250 253 253 253 253 253 253
56474-253 253 253 253 253 253 253 253 253 253 253 253
56475-253 253 253 253 253 253 253 253 253 253 253 253
56476-253 253 253 253 253 253 253 253 253 253 253 253
56477-250 250 250 214 214 214 198 198 198 190 150 46
56478-219 162 10 236 178 12 234 174 13 224 166 10
56479-216 158 10 213 154 11 213 154 11 216 158 10
56480-226 170 11 239 182 13 246 190 14 246 190 14
56481-246 190 14 246 190 14 242 186 14 206 162 42
56482-101 101 101 58 58 58 30 30 30 14 14 14
56483- 6 6 6 0 0 0 0 0 0 0 0 0
56484- 0 0 0 0 0 0 0 0 0 0 0 0
56485- 0 0 0 0 0 0 0 0 0 0 0 0
56486- 0 0 0 0 0 0 0 0 0 10 10 10
56487- 30 30 30 74 74 74 174 135 50 216 158 10
56488-236 178 12 246 190 14 246 190 14 246 190 14
56489-246 190 14 246 190 14 246 190 14 246 190 14
56490-246 190 14 246 190 14 246 190 14 246 190 14
56491-246 190 14 246 190 14 241 196 14 226 184 13
56492- 61 42 6 2 2 6 2 2 6 2 2 6
56493- 22 22 22 238 238 238 253 253 253 253 253 253
56494-253 253 253 253 253 253 253 253 253 253 253 253
56495-253 253 253 253 253 253 253 253 253 253 253 253
56496-253 253 253 253 253 253 253 253 253 253 253 253
56497-253 253 253 226 226 226 187 187 187 180 133 36
56498-216 158 10 236 178 12 239 182 13 236 178 12
56499-230 174 11 226 170 11 226 170 11 230 174 11
56500-236 178 12 242 186 14 246 190 14 246 190 14
56501-246 190 14 246 190 14 246 186 14 239 182 13
56502-206 162 42 106 106 106 66 66 66 34 34 34
56503- 14 14 14 6 6 6 0 0 0 0 0 0
56504- 0 0 0 0 0 0 0 0 0 0 0 0
56505- 0 0 0 0 0 0 0 0 0 0 0 0
56506- 0 0 0 0 0 0 0 0 0 6 6 6
56507- 26 26 26 70 70 70 163 133 67 213 154 11
56508-236 178 12 246 190 14 246 190 14 246 190 14
56509-246 190 14 246 190 14 246 190 14 246 190 14
56510-246 190 14 246 190 14 246 190 14 246 190 14
56511-246 190 14 246 190 14 246 190 14 241 196 14
56512-190 146 13 18 14 6 2 2 6 2 2 6
56513- 46 46 46 246 246 246 253 253 253 253 253 253
56514-253 253 253 253 253 253 253 253 253 253 253 253
56515-253 253 253 253 253 253 253 253 253 253 253 253
56516-253 253 253 253 253 253 253 253 253 253 253 253
56517-253 253 253 221 221 221 86 86 86 156 107 11
56518-216 158 10 236 178 12 242 186 14 246 186 14
56519-242 186 14 239 182 13 239 182 13 242 186 14
56520-242 186 14 246 186 14 246 190 14 246 190 14
56521-246 190 14 246 190 14 246 190 14 246 190 14
56522-242 186 14 225 175 15 142 122 72 66 66 66
56523- 30 30 30 10 10 10 0 0 0 0 0 0
56524- 0 0 0 0 0 0 0 0 0 0 0 0
56525- 0 0 0 0 0 0 0 0 0 0 0 0
56526- 0 0 0 0 0 0 0 0 0 6 6 6
56527- 26 26 26 70 70 70 163 133 67 210 150 10
56528-236 178 12 246 190 14 246 190 14 246 190 14
56529-246 190 14 246 190 14 246 190 14 246 190 14
56530-246 190 14 246 190 14 246 190 14 246 190 14
56531-246 190 14 246 190 14 246 190 14 246 190 14
56532-232 195 16 121 92 8 34 34 34 106 106 106
56533-221 221 221 253 253 253 253 253 253 253 253 253
56534-253 253 253 253 253 253 253 253 253 253 253 253
56535-253 253 253 253 253 253 253 253 253 253 253 253
56536-253 253 253 253 253 253 253 253 253 253 253 253
56537-242 242 242 82 82 82 18 14 6 163 110 8
56538-216 158 10 236 178 12 242 186 14 246 190 14
56539-246 190 14 246 190 14 246 190 14 246 190 14
56540-246 190 14 246 190 14 246 190 14 246 190 14
56541-246 190 14 246 190 14 246 190 14 246 190 14
56542-246 190 14 246 190 14 242 186 14 163 133 67
56543- 46 46 46 18 18 18 6 6 6 0 0 0
56544- 0 0 0 0 0 0 0 0 0 0 0 0
56545- 0 0 0 0 0 0 0 0 0 0 0 0
56546- 0 0 0 0 0 0 0 0 0 10 10 10
56547- 30 30 30 78 78 78 163 133 67 210 150 10
56548-236 178 12 246 186 14 246 190 14 246 190 14
56549-246 190 14 246 190 14 246 190 14 246 190 14
56550-246 190 14 246 190 14 246 190 14 246 190 14
56551-246 190 14 246 190 14 246 190 14 246 190 14
56552-241 196 14 215 174 15 190 178 144 253 253 253
56553-253 253 253 253 253 253 253 253 253 253 253 253
56554-253 253 253 253 253 253 253 253 253 253 253 253
56555-253 253 253 253 253 253 253 253 253 253 253 253
56556-253 253 253 253 253 253 253 253 253 218 218 218
56557- 58 58 58 2 2 6 22 18 6 167 114 7
56558-216 158 10 236 178 12 246 186 14 246 190 14
56559-246 190 14 246 190 14 246 190 14 246 190 14
56560-246 190 14 246 190 14 246 190 14 246 190 14
56561-246 190 14 246 190 14 246 190 14 246 190 14
56562-246 190 14 246 186 14 242 186 14 190 150 46
56563- 54 54 54 22 22 22 6 6 6 0 0 0
56564- 0 0 0 0 0 0 0 0 0 0 0 0
56565- 0 0 0 0 0 0 0 0 0 0 0 0
56566- 0 0 0 0 0 0 0 0 0 14 14 14
56567- 38 38 38 86 86 86 180 133 36 213 154 11
56568-236 178 12 246 186 14 246 190 14 246 190 14
56569-246 190 14 246 190 14 246 190 14 246 190 14
56570-246 190 14 246 190 14 246 190 14 246 190 14
56571-246 190 14 246 190 14 246 190 14 246 190 14
56572-246 190 14 232 195 16 190 146 13 214 214 214
56573-253 253 253 253 253 253 253 253 253 253 253 253
56574-253 253 253 253 253 253 253 253 253 253 253 253
56575-253 253 253 253 253 253 253 253 253 253 253 253
56576-253 253 253 250 250 250 170 170 170 26 26 26
56577- 2 2 6 2 2 6 37 26 9 163 110 8
56578-219 162 10 239 182 13 246 186 14 246 190 14
56579-246 190 14 246 190 14 246 190 14 246 190 14
56580-246 190 14 246 190 14 246 190 14 246 190 14
56581-246 190 14 246 190 14 246 190 14 246 190 14
56582-246 186 14 236 178 12 224 166 10 142 122 72
56583- 46 46 46 18 18 18 6 6 6 0 0 0
56584- 0 0 0 0 0 0 0 0 0 0 0 0
56585- 0 0 0 0 0 0 0 0 0 0 0 0
56586- 0 0 0 0 0 0 6 6 6 18 18 18
56587- 50 50 50 109 106 95 192 133 9 224 166 10
56588-242 186 14 246 190 14 246 190 14 246 190 14
56589-246 190 14 246 190 14 246 190 14 246 190 14
56590-246 190 14 246 190 14 246 190 14 246 190 14
56591-246 190 14 246 190 14 246 190 14 246 190 14
56592-242 186 14 226 184 13 210 162 10 142 110 46
56593-226 226 226 253 253 253 253 253 253 253 253 253
56594-253 253 253 253 253 253 253 253 253 253 253 253
56595-253 253 253 253 253 253 253 253 253 253 253 253
56596-198 198 198 66 66 66 2 2 6 2 2 6
56597- 2 2 6 2 2 6 50 34 6 156 107 11
56598-219 162 10 239 182 13 246 186 14 246 190 14
56599-246 190 14 246 190 14 246 190 14 246 190 14
56600-246 190 14 246 190 14 246 190 14 246 190 14
56601-246 190 14 246 190 14 246 190 14 242 186 14
56602-234 174 13 213 154 11 154 122 46 66 66 66
56603- 30 30 30 10 10 10 0 0 0 0 0 0
56604- 0 0 0 0 0 0 0 0 0 0 0 0
56605- 0 0 0 0 0 0 0 0 0 0 0 0
56606- 0 0 0 0 0 0 6 6 6 22 22 22
56607- 58 58 58 154 121 60 206 145 10 234 174 13
56608-242 186 14 246 186 14 246 190 14 246 190 14
56609-246 190 14 246 190 14 246 190 14 246 190 14
56610-246 190 14 246 190 14 246 190 14 246 190 14
56611-246 190 14 246 190 14 246 190 14 246 190 14
56612-246 186 14 236 178 12 210 162 10 163 110 8
56613- 61 42 6 138 138 138 218 218 218 250 250 250
56614-253 253 253 253 253 253 253 253 253 250 250 250
56615-242 242 242 210 210 210 144 144 144 66 66 66
56616- 6 6 6 2 2 6 2 2 6 2 2 6
56617- 2 2 6 2 2 6 61 42 6 163 110 8
56618-216 158 10 236 178 12 246 190 14 246 190 14
56619-246 190 14 246 190 14 246 190 14 246 190 14
56620-246 190 14 246 190 14 246 190 14 246 190 14
56621-246 190 14 239 182 13 230 174 11 216 158 10
56622-190 142 34 124 112 88 70 70 70 38 38 38
56623- 18 18 18 6 6 6 0 0 0 0 0 0
56624- 0 0 0 0 0 0 0 0 0 0 0 0
56625- 0 0 0 0 0 0 0 0 0 0 0 0
56626- 0 0 0 0 0 0 6 6 6 22 22 22
56627- 62 62 62 168 124 44 206 145 10 224 166 10
56628-236 178 12 239 182 13 242 186 14 242 186 14
56629-246 186 14 246 190 14 246 190 14 246 190 14
56630-246 190 14 246 190 14 246 190 14 246 190 14
56631-246 190 14 246 190 14 246 190 14 246 190 14
56632-246 190 14 236 178 12 216 158 10 175 118 6
56633- 80 54 7 2 2 6 6 6 6 30 30 30
56634- 54 54 54 62 62 62 50 50 50 38 38 38
56635- 14 14 14 2 2 6 2 2 6 2 2 6
56636- 2 2 6 2 2 6 2 2 6 2 2 6
56637- 2 2 6 6 6 6 80 54 7 167 114 7
56638-213 154 11 236 178 12 246 190 14 246 190 14
56639-246 190 14 246 190 14 246 190 14 246 190 14
56640-246 190 14 242 186 14 239 182 13 239 182 13
56641-230 174 11 210 150 10 174 135 50 124 112 88
56642- 82 82 82 54 54 54 34 34 34 18 18 18
56643- 6 6 6 0 0 0 0 0 0 0 0 0
56644- 0 0 0 0 0 0 0 0 0 0 0 0
56645- 0 0 0 0 0 0 0 0 0 0 0 0
56646- 0 0 0 0 0 0 6 6 6 18 18 18
56647- 50 50 50 158 118 36 192 133 9 200 144 11
56648-216 158 10 219 162 10 224 166 10 226 170 11
56649-230 174 11 236 178 12 239 182 13 239 182 13
56650-242 186 14 246 186 14 246 190 14 246 190 14
56651-246 190 14 246 190 14 246 190 14 246 190 14
56652-246 186 14 230 174 11 210 150 10 163 110 8
56653-104 69 6 10 10 10 2 2 6 2 2 6
56654- 2 2 6 2 2 6 2 2 6 2 2 6
56655- 2 2 6 2 2 6 2 2 6 2 2 6
56656- 2 2 6 2 2 6 2 2 6 2 2 6
56657- 2 2 6 6 6 6 91 60 6 167 114 7
56658-206 145 10 230 174 11 242 186 14 246 190 14
56659-246 190 14 246 190 14 246 186 14 242 186 14
56660-239 182 13 230 174 11 224 166 10 213 154 11
56661-180 133 36 124 112 88 86 86 86 58 58 58
56662- 38 38 38 22 22 22 10 10 10 6 6 6
56663- 0 0 0 0 0 0 0 0 0 0 0 0
56664- 0 0 0 0 0 0 0 0 0 0 0 0
56665- 0 0 0 0 0 0 0 0 0 0 0 0
56666- 0 0 0 0 0 0 0 0 0 14 14 14
56667- 34 34 34 70 70 70 138 110 50 158 118 36
56668-167 114 7 180 123 7 192 133 9 197 138 11
56669-200 144 11 206 145 10 213 154 11 219 162 10
56670-224 166 10 230 174 11 239 182 13 242 186 14
56671-246 186 14 246 186 14 246 186 14 246 186 14
56672-239 182 13 216 158 10 185 133 11 152 99 6
56673-104 69 6 18 14 6 2 2 6 2 2 6
56674- 2 2 6 2 2 6 2 2 6 2 2 6
56675- 2 2 6 2 2 6 2 2 6 2 2 6
56676- 2 2 6 2 2 6 2 2 6 2 2 6
56677- 2 2 6 6 6 6 80 54 7 152 99 6
56678-192 133 9 219 162 10 236 178 12 239 182 13
56679-246 186 14 242 186 14 239 182 13 236 178 12
56680-224 166 10 206 145 10 192 133 9 154 121 60
56681- 94 94 94 62 62 62 42 42 42 22 22 22
56682- 14 14 14 6 6 6 0 0 0 0 0 0
56683- 0 0 0 0 0 0 0 0 0 0 0 0
56684- 0 0 0 0 0 0 0 0 0 0 0 0
56685- 0 0 0 0 0 0 0 0 0 0 0 0
56686- 0 0 0 0 0 0 0 0 0 6 6 6
56687- 18 18 18 34 34 34 58 58 58 78 78 78
56688-101 98 89 124 112 88 142 110 46 156 107 11
56689-163 110 8 167 114 7 175 118 6 180 123 7
56690-185 133 11 197 138 11 210 150 10 219 162 10
56691-226 170 11 236 178 12 236 178 12 234 174 13
56692-219 162 10 197 138 11 163 110 8 130 83 6
56693- 91 60 6 10 10 10 2 2 6 2 2 6
56694- 18 18 18 38 38 38 38 38 38 38 38 38
56695- 38 38 38 38 38 38 38 38 38 38 38 38
56696- 38 38 38 38 38 38 26 26 26 2 2 6
56697- 2 2 6 6 6 6 70 47 6 137 92 6
56698-175 118 6 200 144 11 219 162 10 230 174 11
56699-234 174 13 230 174 11 219 162 10 210 150 10
56700-192 133 9 163 110 8 124 112 88 82 82 82
56701- 50 50 50 30 30 30 14 14 14 6 6 6
56702- 0 0 0 0 0 0 0 0 0 0 0 0
56703- 0 0 0 0 0 0 0 0 0 0 0 0
56704- 0 0 0 0 0 0 0 0 0 0 0 0
56705- 0 0 0 0 0 0 0 0 0 0 0 0
56706- 0 0 0 0 0 0 0 0 0 0 0 0
56707- 6 6 6 14 14 14 22 22 22 34 34 34
56708- 42 42 42 58 58 58 74 74 74 86 86 86
56709-101 98 89 122 102 70 130 98 46 121 87 25
56710-137 92 6 152 99 6 163 110 8 180 123 7
56711-185 133 11 197 138 11 206 145 10 200 144 11
56712-180 123 7 156 107 11 130 83 6 104 69 6
56713- 50 34 6 54 54 54 110 110 110 101 98 89
56714- 86 86 86 82 82 82 78 78 78 78 78 78
56715- 78 78 78 78 78 78 78 78 78 78 78 78
56716- 78 78 78 82 82 82 86 86 86 94 94 94
56717-106 106 106 101 101 101 86 66 34 124 80 6
56718-156 107 11 180 123 7 192 133 9 200 144 11
56719-206 145 10 200 144 11 192 133 9 175 118 6
56720-139 102 15 109 106 95 70 70 70 42 42 42
56721- 22 22 22 10 10 10 0 0 0 0 0 0
56722- 0 0 0 0 0 0 0 0 0 0 0 0
56723- 0 0 0 0 0 0 0 0 0 0 0 0
56724- 0 0 0 0 0 0 0 0 0 0 0 0
56725- 0 0 0 0 0 0 0 0 0 0 0 0
56726- 0 0 0 0 0 0 0 0 0 0 0 0
56727- 0 0 0 0 0 0 6 6 6 10 10 10
56728- 14 14 14 22 22 22 30 30 30 38 38 38
56729- 50 50 50 62 62 62 74 74 74 90 90 90
56730-101 98 89 112 100 78 121 87 25 124 80 6
56731-137 92 6 152 99 6 152 99 6 152 99 6
56732-138 86 6 124 80 6 98 70 6 86 66 30
56733-101 98 89 82 82 82 58 58 58 46 46 46
56734- 38 38 38 34 34 34 34 34 34 34 34 34
56735- 34 34 34 34 34 34 34 34 34 34 34 34
56736- 34 34 34 34 34 34 38 38 38 42 42 42
56737- 54 54 54 82 82 82 94 86 76 91 60 6
56738-134 86 6 156 107 11 167 114 7 175 118 6
56739-175 118 6 167 114 7 152 99 6 121 87 25
56740-101 98 89 62 62 62 34 34 34 18 18 18
56741- 6 6 6 0 0 0 0 0 0 0 0 0
56742- 0 0 0 0 0 0 0 0 0 0 0 0
56743- 0 0 0 0 0 0 0 0 0 0 0 0
56744- 0 0 0 0 0 0 0 0 0 0 0 0
56745- 0 0 0 0 0 0 0 0 0 0 0 0
56746- 0 0 0 0 0 0 0 0 0 0 0 0
56747- 0 0 0 0 0 0 0 0 0 0 0 0
56748- 0 0 0 6 6 6 6 6 6 10 10 10
56749- 18 18 18 22 22 22 30 30 30 42 42 42
56750- 50 50 50 66 66 66 86 86 86 101 98 89
56751-106 86 58 98 70 6 104 69 6 104 69 6
56752-104 69 6 91 60 6 82 62 34 90 90 90
56753- 62 62 62 38 38 38 22 22 22 14 14 14
56754- 10 10 10 10 10 10 10 10 10 10 10 10
56755- 10 10 10 10 10 10 6 6 6 10 10 10
56756- 10 10 10 10 10 10 10 10 10 14 14 14
56757- 22 22 22 42 42 42 70 70 70 89 81 66
56758- 80 54 7 104 69 6 124 80 6 137 92 6
56759-134 86 6 116 81 8 100 82 52 86 86 86
56760- 58 58 58 30 30 30 14 14 14 6 6 6
56761- 0 0 0 0 0 0 0 0 0 0 0 0
56762- 0 0 0 0 0 0 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 0 0 0 0 0 0 0 0 0 0 0 0
56766- 0 0 0 0 0 0 0 0 0 0 0 0
56767- 0 0 0 0 0 0 0 0 0 0 0 0
56768- 0 0 0 0 0 0 0 0 0 0 0 0
56769- 0 0 0 6 6 6 10 10 10 14 14 14
56770- 18 18 18 26 26 26 38 38 38 54 54 54
56771- 70 70 70 86 86 86 94 86 76 89 81 66
56772- 89 81 66 86 86 86 74 74 74 50 50 50
56773- 30 30 30 14 14 14 6 6 6 0 0 0
56774- 0 0 0 0 0 0 0 0 0 0 0 0
56775- 0 0 0 0 0 0 0 0 0 0 0 0
56776- 0 0 0 0 0 0 0 0 0 0 0 0
56777- 6 6 6 18 18 18 34 34 34 58 58 58
56778- 82 82 82 89 81 66 89 81 66 89 81 66
56779- 94 86 66 94 86 76 74 74 74 50 50 50
56780- 26 26 26 14 14 14 6 6 6 0 0 0
56781- 0 0 0 0 0 0 0 0 0 0 0 0
56782- 0 0 0 0 0 0 0 0 0 0 0 0
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 0 0 0
56785- 0 0 0 0 0 0 0 0 0 0 0 0
56786- 0 0 0 0 0 0 0 0 0 0 0 0
56787- 0 0 0 0 0 0 0 0 0 0 0 0
56788- 0 0 0 0 0 0 0 0 0 0 0 0
56789- 0 0 0 0 0 0 0 0 0 0 0 0
56790- 6 6 6 6 6 6 14 14 14 18 18 18
56791- 30 30 30 38 38 38 46 46 46 54 54 54
56792- 50 50 50 42 42 42 30 30 30 18 18 18
56793- 10 10 10 0 0 0 0 0 0 0 0 0
56794- 0 0 0 0 0 0 0 0 0 0 0 0
56795- 0 0 0 0 0 0 0 0 0 0 0 0
56796- 0 0 0 0 0 0 0 0 0 0 0 0
56797- 0 0 0 6 6 6 14 14 14 26 26 26
56798- 38 38 38 50 50 50 58 58 58 58 58 58
56799- 54 54 54 42 42 42 30 30 30 18 18 18
56800- 10 10 10 0 0 0 0 0 0 0 0 0
56801- 0 0 0 0 0 0 0 0 0 0 0 0
56802- 0 0 0 0 0 0 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 0 0 0
56805- 0 0 0 0 0 0 0 0 0 0 0 0
56806- 0 0 0 0 0 0 0 0 0 0 0 0
56807- 0 0 0 0 0 0 0 0 0 0 0 0
56808- 0 0 0 0 0 0 0 0 0 0 0 0
56809- 0 0 0 0 0 0 0 0 0 0 0 0
56810- 0 0 0 0 0 0 0 0 0 6 6 6
56811- 6 6 6 10 10 10 14 14 14 18 18 18
56812- 18 18 18 14 14 14 10 10 10 6 6 6
56813- 0 0 0 0 0 0 0 0 0 0 0 0
56814- 0 0 0 0 0 0 0 0 0 0 0 0
56815- 0 0 0 0 0 0 0 0 0 0 0 0
56816- 0 0 0 0 0 0 0 0 0 0 0 0
56817- 0 0 0 0 0 0 0 0 0 6 6 6
56818- 14 14 14 18 18 18 22 22 22 22 22 22
56819- 18 18 18 14 14 14 10 10 10 6 6 6
56820- 0 0 0 0 0 0 0 0 0 0 0 0
56821- 0 0 0 0 0 0 0 0 0 0 0 0
56822- 0 0 0 0 0 0 0 0 0 0 0 0
56823- 0 0 0 0 0 0 0 0 0 0 0 0
56824- 0 0 0 0 0 0 0 0 0 0 0 0
56825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56838+4 4 4 4 4 4
56839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56852+4 4 4 4 4 4
56853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56866+4 4 4 4 4 4
56867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56880+4 4 4 4 4 4
56881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56894+4 4 4 4 4 4
56895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56908+4 4 4 4 4 4
56909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56913+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
56914+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
56915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56918+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
56919+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
56920+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
56921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56922+4 4 4 4 4 4
56923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56927+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
56928+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
56929+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56932+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
56933+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
56934+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
56935+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56936+4 4 4 4 4 4
56937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56941+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
56942+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
56943+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
56944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56946+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
56947+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
56948+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
56949+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
56950+4 4 4 4 4 4
56951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56954+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
56955+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
56956+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
56957+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
56958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56959+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
56960+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
56961+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
56962+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
56963+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
56964+4 4 4 4 4 4
56965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56968+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
56969+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
56970+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
56971+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
56972+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
56973+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
56974+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
56975+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
56976+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
56977+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
56978+4 4 4 4 4 4
56979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
56982+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
56983+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
56984+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
56985+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
56986+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
56987+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
56988+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
56989+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
56990+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
56991+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
56992+4 4 4 4 4 4
56993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56995+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
56996+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
56997+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
56998+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
56999+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
57000+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
57001+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
57002+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
57003+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
57004+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
57005+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
57006+4 4 4 4 4 4
57007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57009+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
57010+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
57011+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
57012+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
57013+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
57014+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
57015+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
57016+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
57017+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
57018+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
57019+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
57020+4 4 4 4 4 4
57021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57023+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
57024+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
57025+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
57026+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
57027+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
57028+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
57029+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
57030+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
57031+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
57032+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
57033+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57034+4 4 4 4 4 4
57035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57037+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
57038+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
57039+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
57040+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
57041+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
57042+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
57043+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
57044+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
57045+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
57046+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
57047+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
57048+4 4 4 4 4 4
57049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57050+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
57051+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
57052+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
57053+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
57054+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
57055+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
57056+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
57057+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
57058+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
57059+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
57060+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
57061+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
57062+4 4 4 4 4 4
57063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57064+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
57065+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
57066+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
57067+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57068+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
57069+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
57070+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
57071+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
57072+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
57073+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
57074+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
57075+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
57076+0 0 0 4 4 4
57077+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57078+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
57079+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
57080+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
57081+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
57082+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
57083+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
57084+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
57085+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
57086+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
57087+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
57088+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
57089+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
57090+2 0 0 0 0 0
57091+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
57092+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
57093+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
57094+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
57095+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
57096+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
57097+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
57098+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
57099+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
57100+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
57101+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
57102+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
57103+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
57104+37 38 37 0 0 0
57105+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57106+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
57107+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
57108+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
57109+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
57110+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
57111+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
57112+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
57113+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
57114+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
57115+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
57116+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
57117+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
57118+85 115 134 4 0 0
57119+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
57120+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
57121+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
57122+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
57123+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
57124+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
57125+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
57126+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
57127+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
57128+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
57129+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
57130+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
57131+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
57132+60 73 81 4 0 0
57133+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
57134+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
57135+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
57136+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
57137+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
57138+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
57139+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
57140+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
57141+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
57142+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
57143+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
57144+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
57145+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
57146+16 19 21 4 0 0
57147+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
57148+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
57149+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
57150+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
57151+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
57152+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
57153+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
57154+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
57155+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
57156+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
57157+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
57158+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
57159+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
57160+4 0 0 4 3 3
57161+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
57162+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
57163+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
57164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
57165+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
57166+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
57167+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
57168+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
57169+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
57170+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
57171+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
57172+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
57173+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
57174+3 2 2 4 4 4
57175+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
57176+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
57177+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
57178+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57179+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
57180+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
57181+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
57182+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
57183+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
57184+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
57185+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
57186+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
57187+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
57188+4 4 4 4 4 4
57189+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
57190+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
57191+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
57192+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
57193+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
57194+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
57195+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
57196+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
57197+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
57198+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
57199+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
57200+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
57201+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
57202+4 4 4 4 4 4
57203+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
57204+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
57205+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
57206+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
57207+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
57208+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57209+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
57210+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
57211+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
57212+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
57213+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
57214+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
57215+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
57216+5 5 5 5 5 5
57217+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
57218+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
57219+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
57220+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
57221+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
57222+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57223+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
57224+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
57225+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
57226+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
57227+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
57228+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
57229+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57230+5 5 5 4 4 4
57231+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
57232+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
57233+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
57234+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
57235+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57236+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
57237+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
57238+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
57239+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
57240+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
57241+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
57242+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57244+4 4 4 4 4 4
57245+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
57246+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
57247+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
57248+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
57249+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
57250+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57251+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57252+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
57253+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
57254+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
57255+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
57256+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
57257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57258+4 4 4 4 4 4
57259+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
57260+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
57261+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
57262+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
57263+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57264+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
57265+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
57266+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
57267+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
57268+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
57269+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
57270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57272+4 4 4 4 4 4
57273+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
57274+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
57275+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
57276+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
57277+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57278+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57279+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57280+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
57281+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
57282+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
57283+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
57284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57286+4 4 4 4 4 4
57287+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
57288+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
57289+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
57290+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
57291+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57292+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
57293+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57294+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
57295+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
57296+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
57297+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57300+4 4 4 4 4 4
57301+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
57302+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
57303+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
57304+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
57305+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57306+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
57307+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
57308+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
57309+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
57310+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
57311+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
57312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57314+4 4 4 4 4 4
57315+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
57316+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
57317+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
57318+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
57319+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57320+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
57321+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
57322+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
57323+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
57324+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
57325+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
57326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57328+4 4 4 4 4 4
57329+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
57330+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
57331+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
57332+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57333+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
57334+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
57335+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
57336+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
57337+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
57338+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
57339+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57342+4 4 4 4 4 4
57343+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
57344+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
57345+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
57346+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57347+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57348+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
57349+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
57350+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
57351+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
57352+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
57353+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57356+4 4 4 4 4 4
57357+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
57358+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
57359+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57360+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57361+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57362+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
57363+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
57364+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
57365+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
57366+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
57367+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57370+4 4 4 4 4 4
57371+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
57372+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
57373+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57374+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57375+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57376+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
57377+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
57378+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
57379+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57380+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57381+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57384+4 4 4 4 4 4
57385+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57386+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
57387+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57388+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
57389+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
57390+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
57391+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
57392+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
57393+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57394+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57395+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57398+4 4 4 4 4 4
57399+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57400+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
57401+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57402+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
57403+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57404+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
57405+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
57406+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
57407+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57408+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57409+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57412+4 4 4 4 4 4
57413+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
57414+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
57415+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57416+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
57417+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
57418+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
57419+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
57420+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
57421+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57422+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57423+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57426+4 4 4 4 4 4
57427+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
57428+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
57429+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57430+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
57431+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
57432+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
57433+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
57434+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
57435+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57436+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57437+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57440+4 4 4 4 4 4
57441+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57442+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
57443+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57444+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
57445+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
57446+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
57447+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
57448+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
57449+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57450+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57451+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57454+4 4 4 4 4 4
57455+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
57456+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
57457+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57458+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
57459+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
57460+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
57461+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
57462+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
57463+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
57464+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57465+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57468+4 4 4 4 4 4
57469+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57470+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
57471+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
57472+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
57473+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
57474+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
57475+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
57476+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
57477+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57478+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57479+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57482+4 4 4 4 4 4
57483+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57484+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
57485+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57486+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
57487+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
57488+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
57489+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
57490+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
57491+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57492+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57493+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57496+4 4 4 4 4 4
57497+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57498+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
57499+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
57500+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
57501+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
57502+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
57503+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57504+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
57505+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57506+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57507+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57510+4 4 4 4 4 4
57511+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57512+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
57513+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
57514+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57515+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
57516+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
57517+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57518+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
57519+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57520+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57521+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57524+4 4 4 4 4 4
57525+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57526+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
57527+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
57528+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
57529+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
57530+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
57531+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
57532+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
57533+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
57534+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57535+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57538+4 4 4 4 4 4
57539+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57540+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
57541+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
57542+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
57543+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
57544+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
57545+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
57546+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
57547+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
57548+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57549+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57552+4 4 4 4 4 4
57553+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
57554+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
57555+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
57556+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
57557+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57558+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
57559+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
57560+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
57561+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
57562+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57563+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57566+4 4 4 4 4 4
57567+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57568+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
57569+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
57570+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
57571+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
57572+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
57573+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
57574+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
57575+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
57576+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57577+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57580+4 4 4 4 4 4
57581+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
57582+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
57583+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
57584+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
57585+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
57586+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
57587+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
57588+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
57589+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
57590+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
57591+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57594+4 4 4 4 4 4
57595+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
57596+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57597+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
57598+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
57599+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
57600+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
57601+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
57602+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
57603+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
57604+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
57605+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57608+4 4 4 4 4 4
57609+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
57610+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57611+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
57612+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
57613+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
57614+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
57615+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57616+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
57617+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
57618+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
57619+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57622+4 4 4 4 4 4
57623+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
57624+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
57625+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
57626+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
57627+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
57628+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
57629+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
57630+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
57631+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
57632+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
57633+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57636+4 4 4 4 4 4
57637+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
57638+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
57639+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
57640+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
57641+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
57642+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
57643+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
57644+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
57645+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
57646+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
57647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57650+4 4 4 4 4 4
57651+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57652+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
57653+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
57654+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
57655+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
57656+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
57657+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
57658+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
57659+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
57660+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
57661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57664+4 4 4 4 4 4
57665+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
57666+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
57667+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
57668+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
57669+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
57670+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
57671+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
57672+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
57673+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
57674+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57678+4 4 4 4 4 4
57679+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
57680+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
57681+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
57682+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
57683+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
57684+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
57685+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
57686+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
57687+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57688+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57692+4 4 4 4 4 4
57693+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
57694+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57695+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
57696+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
57697+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
57698+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
57699+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
57700+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
57701+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
57702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57706+4 4 4 4 4 4
57707+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
57708+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
57709+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
57710+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
57711+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
57712+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
57713+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
57714+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
57715+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
57716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57720+4 4 4 4 4 4
57721+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
57722+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
57723+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
57724+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
57725+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
57726+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
57727+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
57728+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
57729+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57734+4 4 4 4 4 4
57735+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
57736+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
57737+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
57738+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
57739+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
57740+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
57741+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
57742+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
57743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57748+4 4 4 4 4 4
57749+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
57750+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
57751+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
57752+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
57753+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
57754+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
57755+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
57756+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
57757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57762+4 4 4 4 4 4
57763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57764+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
57765+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57766+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
57767+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
57768+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
57769+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
57770+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
57771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57776+4 4 4 4 4 4
57777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57778+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
57779+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
57780+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
57781+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
57782+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
57783+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
57784+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
57785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57790+4 4 4 4 4 4
57791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57792+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57793+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
57794+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
57795+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
57796+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
57797+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
57798+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57804+4 4 4 4 4 4
57805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57807+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
57808+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
57809+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
57810+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
57811+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
57812+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57818+4 4 4 4 4 4
57819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57822+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57823+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
57824+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
57825+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
57826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57832+4 4 4 4 4 4
57833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57836+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
57837+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
57838+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
57839+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
57840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57846+4 4 4 4 4 4
57847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57850+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
57851+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57852+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
57853+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
57854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57860+4 4 4 4 4 4
57861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57864+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
57865+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
57866+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
57867+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
57868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57874+4 4 4 4 4 4
57875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57879+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
57880+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57881+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57888+4 4 4 4 4 4
57889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57893+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
57894+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
57895+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
57896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57902+4 4 4 4 4 4
57903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57907+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
57908+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
57909+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57916+4 4 4 4 4 4
57917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57921+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
57922+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
57923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57930+4 4 4 4 4 4
57931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57935+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
57936+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
57937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57944+4 4 4 4 4 4
57945diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
57946index fef20db..d28b1ab 100644
57947--- a/drivers/xen/xenfs/xenstored.c
57948+++ b/drivers/xen/xenfs/xenstored.c
57949@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
57950 static int xsd_kva_open(struct inode *inode, struct file *file)
57951 {
57952 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
57953+#ifdef CONFIG_GRKERNSEC_HIDESYM
57954+ NULL);
57955+#else
57956 xen_store_interface);
57957+#endif
57958+
57959 if (!file->private_data)
57960 return -ENOMEM;
57961 return 0;
57962diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
57963index eb14e05..5156de7 100644
57964--- a/fs/9p/vfs_addr.c
57965+++ b/fs/9p/vfs_addr.c
57966@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
57967
57968 retval = v9fs_file_write_internal(inode,
57969 v9inode->writeback_fid,
57970- (__force const char __user *)buffer,
57971+ (const char __force_user *)buffer,
57972 len, &offset, 0);
57973 if (retval > 0)
57974 retval = 0;
57975diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
57976index 9ee5343..5165e3c 100644
57977--- a/fs/9p/vfs_inode.c
57978+++ b/fs/9p/vfs_inode.c
57979@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
57980 void
57981 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
57982 {
57983- char *s = nd_get_link(nd);
57984+ const char *s = nd_get_link(nd);
57985
57986 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
57987 dentry, IS_ERR(s) ? "<error>" : s);
57988diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
57989index c055d56e..a46f4f5 100644
57990--- a/fs/Kconfig.binfmt
57991+++ b/fs/Kconfig.binfmt
57992@@ -106,7 +106,7 @@ config HAVE_AOUT
57993
57994 config BINFMT_AOUT
57995 tristate "Kernel support for a.out and ECOFF binaries"
57996- depends on HAVE_AOUT
57997+ depends on HAVE_AOUT && BROKEN
57998 ---help---
57999 A.out (Assembler.OUTput) is a set of formats for libraries and
58000 executables used in the earliest versions of UNIX. Linux used
58001diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58002index 8a1d38e..300a14e 100644
58003--- a/fs/afs/inode.c
58004+++ b/fs/afs/inode.c
58005@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58006 struct afs_vnode *vnode;
58007 struct super_block *sb;
58008 struct inode *inode;
58009- static atomic_t afs_autocell_ino;
58010+ static atomic_unchecked_t afs_autocell_ino;
58011
58012 _enter("{%x:%u},%*.*s,",
58013 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58014@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58015 data.fid.unique = 0;
58016 data.fid.vnode = 0;
58017
58018- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58019+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58020 afs_iget5_autocell_test, afs_iget5_set,
58021 &data);
58022 if (!inode) {
58023diff --git a/fs/aio.c b/fs/aio.c
58024index c428871..3f3041b 100644
58025--- a/fs/aio.c
58026+++ b/fs/aio.c
58027@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58028 size += sizeof(struct io_event) * nr_events;
58029
58030 nr_pages = PFN_UP(size);
58031- if (nr_pages < 0)
58032+ if (nr_pages <= 0)
58033 return -EINVAL;
58034
58035 file = aio_private_file(ctx, nr_pages);
58036diff --git a/fs/attr.c b/fs/attr.c
58037index 6530ced..4a827e2 100644
58038--- a/fs/attr.c
58039+++ b/fs/attr.c
58040@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58041 unsigned long limit;
58042
58043 limit = rlimit(RLIMIT_FSIZE);
58044+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58045 if (limit != RLIM_INFINITY && offset > limit)
58046 goto out_sig;
58047 if (offset > inode->i_sb->s_maxbytes)
58048diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58049index 116fd38..c04182da 100644
58050--- a/fs/autofs4/waitq.c
58051+++ b/fs/autofs4/waitq.c
58052@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58053 {
58054 unsigned long sigpipe, flags;
58055 mm_segment_t fs;
58056- const char *data = (const char *)addr;
58057+ const char __user *data = (const char __force_user *)addr;
58058 ssize_t wr = 0;
58059
58060 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58061@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58062 return 1;
58063 }
58064
58065+#ifdef CONFIG_GRKERNSEC_HIDESYM
58066+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58067+#endif
58068+
58069 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58070 enum autofs_notify notify)
58071 {
58072@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58073
58074 /* If this is a direct mount request create a dummy name */
58075 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58076+#ifdef CONFIG_GRKERNSEC_HIDESYM
58077+ /* this name does get written to userland via autofs4_write() */
58078+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58079+#else
58080 qstr.len = sprintf(name, "%p", dentry);
58081+#endif
58082 else {
58083 qstr.len = autofs4_getpath(sbi, dentry, &name);
58084 if (!qstr.len) {
58085diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58086index 2722387..56059b5 100644
58087--- a/fs/befs/endian.h
58088+++ b/fs/befs/endian.h
58089@@ -11,7 +11,7 @@
58090
58091 #include <asm/byteorder.h>
58092
58093-static inline u64
58094+static inline u64 __intentional_overflow(-1)
58095 fs64_to_cpu(const struct super_block *sb, fs64 n)
58096 {
58097 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58098@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58099 return (__force fs64)cpu_to_be64(n);
58100 }
58101
58102-static inline u32
58103+static inline u32 __intentional_overflow(-1)
58104 fs32_to_cpu(const struct super_block *sb, fs32 n)
58105 {
58106 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58107@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58108 return (__force fs32)cpu_to_be32(n);
58109 }
58110
58111-static inline u16
58112+static inline u16 __intentional_overflow(-1)
58113 fs16_to_cpu(const struct super_block *sb, fs16 n)
58114 {
58115 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58116diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58117index 4c55668..eeae150 100644
58118--- a/fs/binfmt_aout.c
58119+++ b/fs/binfmt_aout.c
58120@@ -16,6 +16,7 @@
58121 #include <linux/string.h>
58122 #include <linux/fs.h>
58123 #include <linux/file.h>
58124+#include <linux/security.h>
58125 #include <linux/stat.h>
58126 #include <linux/fcntl.h>
58127 #include <linux/ptrace.h>
58128@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58129 #endif
58130 # define START_STACK(u) ((void __user *)u.start_stack)
58131
58132+ memset(&dump, 0, sizeof(dump));
58133+
58134 fs = get_fs();
58135 set_fs(KERNEL_DS);
58136 has_dumped = 1;
58137@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58138
58139 /* If the size of the dump file exceeds the rlimit, then see what would happen
58140 if we wrote the stack, but not the data area. */
58141+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58142 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58143 dump.u_dsize = 0;
58144
58145 /* Make sure we have enough room to write the stack and data areas. */
58146+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58147 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58148 dump.u_ssize = 0;
58149
58150@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58151 rlim = rlimit(RLIMIT_DATA);
58152 if (rlim >= RLIM_INFINITY)
58153 rlim = ~0;
58154+
58155+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58156 if (ex.a_data + ex.a_bss > rlim)
58157 return -ENOMEM;
58158
58159@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58160
58161 install_exec_creds(bprm);
58162
58163+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58164+ current->mm->pax_flags = 0UL;
58165+#endif
58166+
58167+#ifdef CONFIG_PAX_PAGEEXEC
58168+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58169+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58170+
58171+#ifdef CONFIG_PAX_EMUTRAMP
58172+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58173+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58174+#endif
58175+
58176+#ifdef CONFIG_PAX_MPROTECT
58177+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58178+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58179+#endif
58180+
58181+ }
58182+#endif
58183+
58184 if (N_MAGIC(ex) == OMAGIC) {
58185 unsigned long text_addr, map_size;
58186 loff_t pos;
58187@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58188 return error;
58189
58190 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58191- PROT_READ | PROT_WRITE | PROT_EXEC,
58192+ PROT_READ | PROT_WRITE,
58193 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58194 fd_offset + ex.a_text);
58195 if (error != N_DATADDR(ex))
58196diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58197index 995986b..dcc4ef2 100644
58198--- a/fs/binfmt_elf.c
58199+++ b/fs/binfmt_elf.c
58200@@ -34,6 +34,7 @@
58201 #include <linux/utsname.h>
58202 #include <linux/coredump.h>
58203 #include <linux/sched.h>
58204+#include <linux/xattr.h>
58205 #include <asm/uaccess.h>
58206 #include <asm/param.h>
58207 #include <asm/page.h>
58208@@ -47,7 +48,7 @@
58209
58210 static int load_elf_binary(struct linux_binprm *bprm);
58211 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58212- int, int, unsigned long);
58213+ int, int, unsigned long) __intentional_overflow(-1);
58214
58215 #ifdef CONFIG_USELIB
58216 static int load_elf_library(struct file *);
58217@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58218 #define elf_core_dump NULL
58219 #endif
58220
58221+#ifdef CONFIG_PAX_MPROTECT
58222+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58223+#endif
58224+
58225+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58226+static void elf_handle_mmap(struct file *file);
58227+#endif
58228+
58229 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58230 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58231 #else
58232@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58233 .load_binary = load_elf_binary,
58234 .load_shlib = load_elf_library,
58235 .core_dump = elf_core_dump,
58236+
58237+#ifdef CONFIG_PAX_MPROTECT
58238+ .handle_mprotect= elf_handle_mprotect,
58239+#endif
58240+
58241+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58242+ .handle_mmap = elf_handle_mmap,
58243+#endif
58244+
58245 .min_coredump = ELF_EXEC_PAGESIZE,
58246 };
58247
58248@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58249
58250 static int set_brk(unsigned long start, unsigned long end)
58251 {
58252+ unsigned long e = end;
58253+
58254 start = ELF_PAGEALIGN(start);
58255 end = ELF_PAGEALIGN(end);
58256 if (end > start) {
58257@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58258 if (BAD_ADDR(addr))
58259 return addr;
58260 }
58261- current->mm->start_brk = current->mm->brk = end;
58262+ current->mm->start_brk = current->mm->brk = e;
58263 return 0;
58264 }
58265
58266@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58267 elf_addr_t __user *u_rand_bytes;
58268 const char *k_platform = ELF_PLATFORM;
58269 const char *k_base_platform = ELF_BASE_PLATFORM;
58270- unsigned char k_rand_bytes[16];
58271+ u32 k_rand_bytes[4];
58272 int items;
58273 elf_addr_t *elf_info;
58274 int ei_index = 0;
58275 const struct cred *cred = current_cred();
58276 struct vm_area_struct *vma;
58277+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58278
58279 /*
58280 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58281@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58282 * Generate 16 random bytes for userspace PRNG seeding.
58283 */
58284 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58285- u_rand_bytes = (elf_addr_t __user *)
58286- STACK_ALLOC(p, sizeof(k_rand_bytes));
58287+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58288+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58289+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58290+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58291+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58292+ u_rand_bytes = (elf_addr_t __user *) p;
58293 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58294 return -EFAULT;
58295
58296@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58297 return -EFAULT;
58298 current->mm->env_end = p;
58299
58300+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58301+
58302 /* Put the elf_info on the stack in the right place. */
58303 sp = (elf_addr_t __user *)envp + 1;
58304- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58305+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58306 return -EFAULT;
58307 return 0;
58308 }
58309@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58310 an ELF header */
58311
58312 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58313- struct file *interpreter, unsigned long *interp_map_addr,
58314+ struct file *interpreter,
58315 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58316 {
58317 struct elf_phdr *eppnt;
58318- unsigned long load_addr = 0;
58319+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58320 int load_addr_set = 0;
58321 unsigned long last_bss = 0, elf_bss = 0;
58322- unsigned long error = ~0UL;
58323+ unsigned long error = -EINVAL;
58324 unsigned long total_size;
58325 int i;
58326
58327@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58328 goto out;
58329 }
58330
58331+#ifdef CONFIG_PAX_SEGMEXEC
58332+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58333+ pax_task_size = SEGMEXEC_TASK_SIZE;
58334+#endif
58335+
58336 eppnt = interp_elf_phdata;
58337 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58338 if (eppnt->p_type == PT_LOAD) {
58339@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58340 map_addr = elf_map(interpreter, load_addr + vaddr,
58341 eppnt, elf_prot, elf_type, total_size);
58342 total_size = 0;
58343- if (!*interp_map_addr)
58344- *interp_map_addr = map_addr;
58345 error = map_addr;
58346 if (BAD_ADDR(map_addr))
58347 goto out;
58348@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58349 k = load_addr + eppnt->p_vaddr;
58350 if (BAD_ADDR(k) ||
58351 eppnt->p_filesz > eppnt->p_memsz ||
58352- eppnt->p_memsz > TASK_SIZE ||
58353- TASK_SIZE - eppnt->p_memsz < k) {
58354+ eppnt->p_memsz > pax_task_size ||
58355+ pax_task_size - eppnt->p_memsz < k) {
58356 error = -ENOMEM;
58357 goto out;
58358 }
58359@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58360 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58361
58362 /* Map the last of the bss segment */
58363- error = vm_brk(elf_bss, last_bss - elf_bss);
58364- if (BAD_ADDR(error))
58365- goto out;
58366+ if (last_bss > elf_bss) {
58367+ error = vm_brk(elf_bss, last_bss - elf_bss);
58368+ if (BAD_ADDR(error))
58369+ goto out;
58370+ }
58371 }
58372
58373 error = load_addr;
58374@@ -634,6 +666,336 @@ out:
58375 return error;
58376 }
58377
58378+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58379+#ifdef CONFIG_PAX_SOFTMODE
58380+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58381+{
58382+ unsigned long pax_flags = 0UL;
58383+
58384+#ifdef CONFIG_PAX_PAGEEXEC
58385+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58386+ pax_flags |= MF_PAX_PAGEEXEC;
58387+#endif
58388+
58389+#ifdef CONFIG_PAX_SEGMEXEC
58390+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58391+ pax_flags |= MF_PAX_SEGMEXEC;
58392+#endif
58393+
58394+#ifdef CONFIG_PAX_EMUTRAMP
58395+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58396+ pax_flags |= MF_PAX_EMUTRAMP;
58397+#endif
58398+
58399+#ifdef CONFIG_PAX_MPROTECT
58400+ if (elf_phdata->p_flags & PF_MPROTECT)
58401+ pax_flags |= MF_PAX_MPROTECT;
58402+#endif
58403+
58404+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58405+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58406+ pax_flags |= MF_PAX_RANDMMAP;
58407+#endif
58408+
58409+ return pax_flags;
58410+}
58411+#endif
58412+
58413+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58414+{
58415+ unsigned long pax_flags = 0UL;
58416+
58417+#ifdef CONFIG_PAX_PAGEEXEC
58418+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58419+ pax_flags |= MF_PAX_PAGEEXEC;
58420+#endif
58421+
58422+#ifdef CONFIG_PAX_SEGMEXEC
58423+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58424+ pax_flags |= MF_PAX_SEGMEXEC;
58425+#endif
58426+
58427+#ifdef CONFIG_PAX_EMUTRAMP
58428+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58429+ pax_flags |= MF_PAX_EMUTRAMP;
58430+#endif
58431+
58432+#ifdef CONFIG_PAX_MPROTECT
58433+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58434+ pax_flags |= MF_PAX_MPROTECT;
58435+#endif
58436+
58437+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58438+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58439+ pax_flags |= MF_PAX_RANDMMAP;
58440+#endif
58441+
58442+ return pax_flags;
58443+}
58444+#endif
58445+
58446+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58447+#ifdef CONFIG_PAX_SOFTMODE
58448+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58449+{
58450+ unsigned long pax_flags = 0UL;
58451+
58452+#ifdef CONFIG_PAX_PAGEEXEC
58453+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58454+ pax_flags |= MF_PAX_PAGEEXEC;
58455+#endif
58456+
58457+#ifdef CONFIG_PAX_SEGMEXEC
58458+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58459+ pax_flags |= MF_PAX_SEGMEXEC;
58460+#endif
58461+
58462+#ifdef CONFIG_PAX_EMUTRAMP
58463+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58464+ pax_flags |= MF_PAX_EMUTRAMP;
58465+#endif
58466+
58467+#ifdef CONFIG_PAX_MPROTECT
58468+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58469+ pax_flags |= MF_PAX_MPROTECT;
58470+#endif
58471+
58472+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58473+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58474+ pax_flags |= MF_PAX_RANDMMAP;
58475+#endif
58476+
58477+ return pax_flags;
58478+}
58479+#endif
58480+
58481+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58482+{
58483+ unsigned long pax_flags = 0UL;
58484+
58485+#ifdef CONFIG_PAX_PAGEEXEC
58486+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58487+ pax_flags |= MF_PAX_PAGEEXEC;
58488+#endif
58489+
58490+#ifdef CONFIG_PAX_SEGMEXEC
58491+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58492+ pax_flags |= MF_PAX_SEGMEXEC;
58493+#endif
58494+
58495+#ifdef CONFIG_PAX_EMUTRAMP
58496+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58497+ pax_flags |= MF_PAX_EMUTRAMP;
58498+#endif
58499+
58500+#ifdef CONFIG_PAX_MPROTECT
58501+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58502+ pax_flags |= MF_PAX_MPROTECT;
58503+#endif
58504+
58505+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58506+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58507+ pax_flags |= MF_PAX_RANDMMAP;
58508+#endif
58509+
58510+ return pax_flags;
58511+}
58512+#endif
58513+
58514+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58515+static unsigned long pax_parse_defaults(void)
58516+{
58517+ unsigned long pax_flags = 0UL;
58518+
58519+#ifdef CONFIG_PAX_SOFTMODE
58520+ if (pax_softmode)
58521+ return pax_flags;
58522+#endif
58523+
58524+#ifdef CONFIG_PAX_PAGEEXEC
58525+ pax_flags |= MF_PAX_PAGEEXEC;
58526+#endif
58527+
58528+#ifdef CONFIG_PAX_SEGMEXEC
58529+ pax_flags |= MF_PAX_SEGMEXEC;
58530+#endif
58531+
58532+#ifdef CONFIG_PAX_MPROTECT
58533+ pax_flags |= MF_PAX_MPROTECT;
58534+#endif
58535+
58536+#ifdef CONFIG_PAX_RANDMMAP
58537+ if (randomize_va_space)
58538+ pax_flags |= MF_PAX_RANDMMAP;
58539+#endif
58540+
58541+ return pax_flags;
58542+}
58543+
58544+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58545+{
58546+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58547+
58548+#ifdef CONFIG_PAX_EI_PAX
58549+
58550+#ifdef CONFIG_PAX_SOFTMODE
58551+ if (pax_softmode)
58552+ return pax_flags;
58553+#endif
58554+
58555+ pax_flags = 0UL;
58556+
58557+#ifdef CONFIG_PAX_PAGEEXEC
58558+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58559+ pax_flags |= MF_PAX_PAGEEXEC;
58560+#endif
58561+
58562+#ifdef CONFIG_PAX_SEGMEXEC
58563+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58564+ pax_flags |= MF_PAX_SEGMEXEC;
58565+#endif
58566+
58567+#ifdef CONFIG_PAX_EMUTRAMP
58568+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58569+ pax_flags |= MF_PAX_EMUTRAMP;
58570+#endif
58571+
58572+#ifdef CONFIG_PAX_MPROTECT
58573+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58574+ pax_flags |= MF_PAX_MPROTECT;
58575+#endif
58576+
58577+#ifdef CONFIG_PAX_ASLR
58578+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58579+ pax_flags |= MF_PAX_RANDMMAP;
58580+#endif
58581+
58582+#endif
58583+
58584+ return pax_flags;
58585+
58586+}
58587+
58588+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58589+{
58590+
58591+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58592+ unsigned long i;
58593+
58594+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58595+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58596+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58597+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58598+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58599+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58600+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
58601+ return PAX_PARSE_FLAGS_FALLBACK;
58602+
58603+#ifdef CONFIG_PAX_SOFTMODE
58604+ if (pax_softmode)
58605+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58606+ else
58607+#endif
58608+
58609+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58610+ break;
58611+ }
58612+#endif
58613+
58614+ return PAX_PARSE_FLAGS_FALLBACK;
58615+}
58616+
58617+static unsigned long pax_parse_xattr_pax(struct file * const file)
58618+{
58619+
58620+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58621+ ssize_t xattr_size, i;
58622+ unsigned char xattr_value[sizeof("pemrs") - 1];
58623+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
58624+
58625+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
58626+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
58627+ return PAX_PARSE_FLAGS_FALLBACK;
58628+
58629+ for (i = 0; i < xattr_size; i++)
58630+ switch (xattr_value[i]) {
58631+ default:
58632+ return PAX_PARSE_FLAGS_FALLBACK;
58633+
58634+#define parse_flag(option1, option2, flag) \
58635+ case option1: \
58636+ if (pax_flags_hardmode & MF_PAX_##flag) \
58637+ return PAX_PARSE_FLAGS_FALLBACK;\
58638+ pax_flags_hardmode |= MF_PAX_##flag; \
58639+ break; \
58640+ case option2: \
58641+ if (pax_flags_softmode & MF_PAX_##flag) \
58642+ return PAX_PARSE_FLAGS_FALLBACK;\
58643+ pax_flags_softmode |= MF_PAX_##flag; \
58644+ break;
58645+
58646+ parse_flag('p', 'P', PAGEEXEC);
58647+ parse_flag('e', 'E', EMUTRAMP);
58648+ parse_flag('m', 'M', MPROTECT);
58649+ parse_flag('r', 'R', RANDMMAP);
58650+ parse_flag('s', 'S', SEGMEXEC);
58651+
58652+#undef parse_flag
58653+ }
58654+
58655+ if (pax_flags_hardmode & pax_flags_softmode)
58656+ return PAX_PARSE_FLAGS_FALLBACK;
58657+
58658+#ifdef CONFIG_PAX_SOFTMODE
58659+ if (pax_softmode)
58660+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
58661+ else
58662+#endif
58663+
58664+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
58665+#else
58666+ return PAX_PARSE_FLAGS_FALLBACK;
58667+#endif
58668+
58669+}
58670+
58671+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
58672+{
58673+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
58674+
58675+ pax_flags = pax_parse_defaults();
58676+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
58677+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
58678+ xattr_pax_flags = pax_parse_xattr_pax(file);
58679+
58680+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58681+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58682+ pt_pax_flags != xattr_pax_flags)
58683+ return -EINVAL;
58684+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58685+ pax_flags = xattr_pax_flags;
58686+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58687+ pax_flags = pt_pax_flags;
58688+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58689+ pax_flags = ei_pax_flags;
58690+
58691+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
58692+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58693+ if ((__supported_pte_mask & _PAGE_NX))
58694+ pax_flags &= ~MF_PAX_SEGMEXEC;
58695+ else
58696+ pax_flags &= ~MF_PAX_PAGEEXEC;
58697+ }
58698+#endif
58699+
58700+ if (0 > pax_check_flags(&pax_flags))
58701+ return -EINVAL;
58702+
58703+ current->mm->pax_flags = pax_flags;
58704+ return 0;
58705+}
58706+#endif
58707+
58708 /*
58709 * These are the functions used to load ELF style executables and shared
58710 * libraries. There is no binary dependent code anywhere else.
58711@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58712 {
58713 unsigned long random_variable = 0;
58714
58715+#ifdef CONFIG_PAX_RANDUSTACK
58716+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
58717+ return stack_top - current->mm->delta_stack;
58718+#endif
58719+
58720 if ((current->flags & PF_RANDOMIZE) &&
58721 !(current->personality & ADDR_NO_RANDOMIZE)) {
58722 random_variable = (unsigned long) get_random_int();
58723@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58724 unsigned long load_addr = 0, load_bias = 0;
58725 int load_addr_set = 0;
58726 char * elf_interpreter = NULL;
58727- unsigned long error;
58728+ unsigned long error = 0;
58729 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
58730 unsigned long elf_bss, elf_brk;
58731 int retval, i;
58732@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58733 struct elfhdr interp_elf_ex;
58734 } *loc;
58735 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
58736+ unsigned long pax_task_size;
58737
58738 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
58739 if (!loc) {
58740@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
58741 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
58742 may depend on the personality. */
58743 SET_PERSONALITY2(loc->elf_ex, &arch_state);
58744+
58745+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58746+ current->mm->pax_flags = 0UL;
58747+#endif
58748+
58749+#ifdef CONFIG_PAX_DLRESOLVE
58750+ current->mm->call_dl_resolve = 0UL;
58751+#endif
58752+
58753+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
58754+ current->mm->call_syscall = 0UL;
58755+#endif
58756+
58757+#ifdef CONFIG_PAX_ASLR
58758+ current->mm->delta_mmap = 0UL;
58759+ current->mm->delta_stack = 0UL;
58760+#endif
58761+
58762+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58763+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58764+ send_sig(SIGKILL, current, 0);
58765+ goto out_free_dentry;
58766+ }
58767+#endif
58768+
58769+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58770+ pax_set_initial_flags(bprm);
58771+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58772+ if (pax_set_initial_flags_func)
58773+ (pax_set_initial_flags_func)(bprm);
58774+#endif
58775+
58776+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58777+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58778+ current->mm->context.user_cs_limit = PAGE_SIZE;
58779+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
58780+ }
58781+#endif
58782+
58783+#ifdef CONFIG_PAX_SEGMEXEC
58784+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
58785+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
58786+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
58787+ pax_task_size = SEGMEXEC_TASK_SIZE;
58788+ current->mm->def_flags |= VM_NOHUGEPAGE;
58789+ } else
58790+#endif
58791+
58792+ pax_task_size = TASK_SIZE;
58793+
58794+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
58795+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58796+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
58797+ put_cpu();
58798+ }
58799+#endif
58800+
58801+#ifdef CONFIG_PAX_ASLR
58802+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58803+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
58804+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
58805+ }
58806+#endif
58807+
58808+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58809+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58810+ executable_stack = EXSTACK_DISABLE_X;
58811+ current->personality &= ~READ_IMPLIES_EXEC;
58812+ } else
58813+#endif
58814+
58815 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
58816 current->personality |= READ_IMPLIES_EXEC;
58817
58818@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
58819 #else
58820 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
58821 #endif
58822+
58823+#ifdef CONFIG_PAX_RANDMMAP
58824+ /* PaX: randomize base address at the default exe base if requested */
58825+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
58826+#ifdef CONFIG_SPARC64
58827+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
58828+#else
58829+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
58830+#endif
58831+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
58832+ elf_flags |= MAP_FIXED;
58833+ }
58834+#endif
58835+
58836 }
58837
58838 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
58839@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
58840 * allowed task size. Note that p_filesz must always be
58841 * <= p_memsz so it is only necessary to check p_memsz.
58842 */
58843- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58844- elf_ppnt->p_memsz > TASK_SIZE ||
58845- TASK_SIZE - elf_ppnt->p_memsz < k) {
58846+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58847+ elf_ppnt->p_memsz > pax_task_size ||
58848+ pax_task_size - elf_ppnt->p_memsz < k) {
58849 /* set_brk can never work. Avoid overflows. */
58850 retval = -EINVAL;
58851 goto out_free_dentry;
58852@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
58853 if (retval)
58854 goto out_free_dentry;
58855 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
58856- retval = -EFAULT; /* Nobody gets to see this, but.. */
58857- goto out_free_dentry;
58858+ /*
58859+ * This bss-zeroing can fail if the ELF
58860+ * file specifies odd protections. So
58861+ * we don't check the return value
58862+ */
58863 }
58864
58865+#ifdef CONFIG_PAX_RANDMMAP
58866+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58867+ unsigned long start, size, flags;
58868+ vm_flags_t vm_flags;
58869+
58870+ start = ELF_PAGEALIGN(elf_brk);
58871+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
58872+ flags = MAP_FIXED | MAP_PRIVATE;
58873+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
58874+
58875+ down_write(&current->mm->mmap_sem);
58876+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
58877+ retval = -ENOMEM;
58878+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
58879+// if (current->personality & ADDR_NO_RANDOMIZE)
58880+// vm_flags |= VM_READ | VM_MAYREAD;
58881+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
58882+ retval = IS_ERR_VALUE(start) ? start : 0;
58883+ }
58884+ up_write(&current->mm->mmap_sem);
58885+ if (retval == 0)
58886+ retval = set_brk(start + size, start + size + PAGE_SIZE);
58887+ if (retval < 0)
58888+ goto out_free_dentry;
58889+ }
58890+#endif
58891+
58892 if (elf_interpreter) {
58893- unsigned long interp_map_addr = 0;
58894-
58895 elf_entry = load_elf_interp(&loc->interp_elf_ex,
58896 interpreter,
58897- &interp_map_addr,
58898 load_bias, interp_elf_phdata);
58899 if (!IS_ERR((void *)elf_entry)) {
58900 /*
58901@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
58902 * Decide what to dump of a segment, part, all or none.
58903 */
58904 static unsigned long vma_dump_size(struct vm_area_struct *vma,
58905- unsigned long mm_flags)
58906+ unsigned long mm_flags, long signr)
58907 {
58908 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
58909
58910@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
58911 if (vma->vm_file == NULL)
58912 return 0;
58913
58914- if (FILTER(MAPPED_PRIVATE))
58915+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
58916 goto whole;
58917
58918 /*
58919@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
58920 {
58921 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
58922 int i = 0;
58923- do
58924+ do {
58925 i += 2;
58926- while (auxv[i - 2] != AT_NULL);
58927+ } while (auxv[i - 2] != AT_NULL);
58928 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
58929 }
58930
58931@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
58932 {
58933 mm_segment_t old_fs = get_fs();
58934 set_fs(KERNEL_DS);
58935- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
58936+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
58937 set_fs(old_fs);
58938 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
58939 }
58940@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
58941 vma = next_vma(vma, gate_vma)) {
58942 unsigned long dump_size;
58943
58944- dump_size = vma_dump_size(vma, cprm->mm_flags);
58945+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
58946 vma_filesz[i++] = dump_size;
58947 vma_data_size += dump_size;
58948 }
58949@@ -2314,6 +2794,167 @@ out:
58950
58951 #endif /* CONFIG_ELF_CORE */
58952
58953+#ifdef CONFIG_PAX_MPROTECT
58954+/* PaX: non-PIC ELF libraries need relocations on their executable segments
58955+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
58956+ * we'll remove VM_MAYWRITE for good on RELRO segments.
58957+ *
58958+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
58959+ * basis because we want to allow the common case and not the special ones.
58960+ */
58961+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
58962+{
58963+ struct elfhdr elf_h;
58964+ struct elf_phdr elf_p;
58965+ unsigned long i;
58966+ unsigned long oldflags;
58967+ bool is_textrel_rw, is_textrel_rx, is_relro;
58968+
58969+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
58970+ return;
58971+
58972+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
58973+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
58974+
58975+#ifdef CONFIG_PAX_ELFRELOCS
58976+ /* possible TEXTREL */
58977+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
58978+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
58979+#else
58980+ is_textrel_rw = false;
58981+ is_textrel_rx = false;
58982+#endif
58983+
58984+ /* possible RELRO */
58985+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
58986+
58987+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
58988+ return;
58989+
58990+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
58991+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
58992+
58993+#ifdef CONFIG_PAX_ETEXECRELOCS
58994+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
58995+#else
58996+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
58997+#endif
58998+
58999+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59000+ !elf_check_arch(&elf_h) ||
59001+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59002+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59003+ return;
59004+
59005+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59006+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59007+ return;
59008+ switch (elf_p.p_type) {
59009+ case PT_DYNAMIC:
59010+ if (!is_textrel_rw && !is_textrel_rx)
59011+ continue;
59012+ i = 0UL;
59013+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59014+ elf_dyn dyn;
59015+
59016+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59017+ break;
59018+ if (dyn.d_tag == DT_NULL)
59019+ break;
59020+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59021+ gr_log_textrel(vma);
59022+ if (is_textrel_rw)
59023+ vma->vm_flags |= VM_MAYWRITE;
59024+ else
59025+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
59026+ vma->vm_flags &= ~VM_MAYWRITE;
59027+ break;
59028+ }
59029+ i++;
59030+ }
59031+ is_textrel_rw = false;
59032+ is_textrel_rx = false;
59033+ continue;
59034+
59035+ case PT_GNU_RELRO:
59036+ if (!is_relro)
59037+ continue;
59038+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59039+ vma->vm_flags &= ~VM_MAYWRITE;
59040+ is_relro = false;
59041+ continue;
59042+
59043+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59044+ case PT_PAX_FLAGS: {
59045+ const char *msg_mprotect = "", *msg_emutramp = "";
59046+ char *buffer_lib, *buffer_exe;
59047+
59048+ if (elf_p.p_flags & PF_NOMPROTECT)
59049+ msg_mprotect = "MPROTECT disabled";
59050+
59051+#ifdef CONFIG_PAX_EMUTRAMP
59052+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59053+ msg_emutramp = "EMUTRAMP enabled";
59054+#endif
59055+
59056+ if (!msg_mprotect[0] && !msg_emutramp[0])
59057+ continue;
59058+
59059+ if (!printk_ratelimit())
59060+ continue;
59061+
59062+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59063+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59064+ if (buffer_lib && buffer_exe) {
59065+ char *path_lib, *path_exe;
59066+
59067+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59068+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59069+
59070+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59071+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59072+
59073+ }
59074+ free_page((unsigned long)buffer_exe);
59075+ free_page((unsigned long)buffer_lib);
59076+ continue;
59077+ }
59078+#endif
59079+
59080+ }
59081+ }
59082+}
59083+#endif
59084+
59085+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59086+
59087+extern int grsec_enable_log_rwxmaps;
59088+
59089+static void elf_handle_mmap(struct file *file)
59090+{
59091+ struct elfhdr elf_h;
59092+ struct elf_phdr elf_p;
59093+ unsigned long i;
59094+
59095+ if (!grsec_enable_log_rwxmaps)
59096+ return;
59097+
59098+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59099+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59100+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59101+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59102+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59103+ return;
59104+
59105+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59106+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59107+ return;
59108+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59109+ gr_log_ptgnustack(file);
59110+ }
59111+}
59112+#endif
59113+
59114 static int __init init_elf_binfmt(void)
59115 {
59116 register_binfmt(&elf_format);
59117diff --git a/fs/block_dev.c b/fs/block_dev.c
59118index b48c41b..e070416 100644
59119--- a/fs/block_dev.c
59120+++ b/fs/block_dev.c
59121@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59122 else if (bdev->bd_contains == bdev)
59123 return true; /* is a whole device which isn't held */
59124
59125- else if (whole->bd_holder == bd_may_claim)
59126+ else if (whole->bd_holder == (void *)bd_may_claim)
59127 return true; /* is a partition of a device that is being partitioned */
59128 else if (whole->bd_holder != NULL)
59129 return false; /* is a partition of a held device */
59130diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59131index f54511d..58acdec 100644
59132--- a/fs/btrfs/ctree.c
59133+++ b/fs/btrfs/ctree.c
59134@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59135 free_extent_buffer(buf);
59136 add_root_to_dirty_list(root);
59137 } else {
59138- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59139- parent_start = parent->start;
59140- else
59141+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59142+ if (parent)
59143+ parent_start = parent->start;
59144+ else
59145+ parent_start = 0;
59146+ } else
59147 parent_start = 0;
59148
59149 WARN_ON(trans->transid != btrfs_header_generation(parent));
59150diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59151index de4e70f..b41dc45 100644
59152--- a/fs/btrfs/delayed-inode.c
59153+++ b/fs/btrfs/delayed-inode.c
59154@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59155
59156 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59157 {
59158- int seq = atomic_inc_return(&delayed_root->items_seq);
59159+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59160 if ((atomic_dec_return(&delayed_root->items) <
59161 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59162 waitqueue_active(&delayed_root->wait))
59163@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59164
59165 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59166 {
59167- int val = atomic_read(&delayed_root->items_seq);
59168+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59169
59170 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59171 return 1;
59172@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59173 int seq;
59174 int ret;
59175
59176- seq = atomic_read(&delayed_root->items_seq);
59177+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59178
59179 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59180 if (ret)
59181diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59182index f70119f..ab5894d 100644
59183--- a/fs/btrfs/delayed-inode.h
59184+++ b/fs/btrfs/delayed-inode.h
59185@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59186 */
59187 struct list_head prepare_list;
59188 atomic_t items; /* for delayed items */
59189- atomic_t items_seq; /* for delayed items */
59190+ atomic_unchecked_t items_seq; /* for delayed items */
59191 int nodes; /* for delayed nodes */
59192 wait_queue_head_t wait;
59193 };
59194@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59195 struct btrfs_delayed_root *delayed_root)
59196 {
59197 atomic_set(&delayed_root->items, 0);
59198- atomic_set(&delayed_root->items_seq, 0);
59199+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59200 delayed_root->nodes = 0;
59201 spin_lock_init(&delayed_root->lock);
59202 init_waitqueue_head(&delayed_root->wait);
59203diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59204index 6f49b28..483410f 100644
59205--- a/fs/btrfs/super.c
59206+++ b/fs/btrfs/super.c
59207@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59208 function, line, errstr);
59209 return;
59210 }
59211- ACCESS_ONCE(trans->transaction->aborted) = errno;
59212+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59213 /* Wake up anybody who may be waiting on this transaction */
59214 wake_up(&root->fs_info->transaction_wait);
59215 wake_up(&root->fs_info->transaction_blocked_wait);
59216diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59217index 92db3f6..898a561 100644
59218--- a/fs/btrfs/sysfs.c
59219+++ b/fs/btrfs/sysfs.c
59220@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59221 for (set = 0; set < FEAT_MAX; set++) {
59222 int i;
59223 struct attribute *attrs[2];
59224- struct attribute_group agroup = {
59225+ attribute_group_no_const agroup = {
59226 .name = "features",
59227 .attrs = attrs,
59228 };
59229diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59230index 2299bfd..4098e72 100644
59231--- a/fs/btrfs/tests/free-space-tests.c
59232+++ b/fs/btrfs/tests/free-space-tests.c
59233@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59234 * extent entry.
59235 */
59236 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59237- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59238+ pax_open_kernel();
59239+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59240+ pax_close_kernel();
59241
59242 /*
59243 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59244@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59245 if (ret)
59246 return ret;
59247
59248- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59249+ pax_open_kernel();
59250+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59251+ pax_close_kernel();
59252 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59253
59254 return 0;
59255diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59256index 154990c..d0cf699 100644
59257--- a/fs/btrfs/tree-log.h
59258+++ b/fs/btrfs/tree-log.h
59259@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59260 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59261 struct btrfs_trans_handle *trans)
59262 {
59263- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59264+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59265 }
59266
59267 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59268diff --git a/fs/buffer.c b/fs/buffer.c
59269index 20805db..2e8fc69 100644
59270--- a/fs/buffer.c
59271+++ b/fs/buffer.c
59272@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59273 bh_cachep = kmem_cache_create("buffer_head",
59274 sizeof(struct buffer_head), 0,
59275 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59276- SLAB_MEM_SPREAD),
59277+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59278 NULL);
59279
59280 /*
59281diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59282index fbb08e9..0fda764 100644
59283--- a/fs/cachefiles/bind.c
59284+++ b/fs/cachefiles/bind.c
59285@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59286 args);
59287
59288 /* start by checking things over */
59289- ASSERT(cache->fstop_percent >= 0 &&
59290- cache->fstop_percent < cache->fcull_percent &&
59291+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59292 cache->fcull_percent < cache->frun_percent &&
59293 cache->frun_percent < 100);
59294
59295- ASSERT(cache->bstop_percent >= 0 &&
59296- cache->bstop_percent < cache->bcull_percent &&
59297+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59298 cache->bcull_percent < cache->brun_percent &&
59299 cache->brun_percent < 100);
59300
59301diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59302index ce1b115..4a6852c 100644
59303--- a/fs/cachefiles/daemon.c
59304+++ b/fs/cachefiles/daemon.c
59305@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59306 if (n > buflen)
59307 return -EMSGSIZE;
59308
59309- if (copy_to_user(_buffer, buffer, n) != 0)
59310+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59311 return -EFAULT;
59312
59313 return n;
59314@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59315 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59316 return -EIO;
59317
59318- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59319+ if (datalen > PAGE_SIZE - 1)
59320 return -EOPNOTSUPP;
59321
59322 /* drag the command string into the kernel so we can parse it */
59323@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59324 if (args[0] != '%' || args[1] != '\0')
59325 return -EINVAL;
59326
59327- if (fstop < 0 || fstop >= cache->fcull_percent)
59328+ if (fstop >= cache->fcull_percent)
59329 return cachefiles_daemon_range_error(cache, args);
59330
59331 cache->fstop_percent = fstop;
59332@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59333 if (args[0] != '%' || args[1] != '\0')
59334 return -EINVAL;
59335
59336- if (bstop < 0 || bstop >= cache->bcull_percent)
59337+ if (bstop >= cache->bcull_percent)
59338 return cachefiles_daemon_range_error(cache, args);
59339
59340 cache->bstop_percent = bstop;
59341diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59342index 8c52472..c4e3a69 100644
59343--- a/fs/cachefiles/internal.h
59344+++ b/fs/cachefiles/internal.h
59345@@ -66,7 +66,7 @@ struct cachefiles_cache {
59346 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59347 struct rb_root active_nodes; /* active nodes (can't be culled) */
59348 rwlock_t active_lock; /* lock for active_nodes */
59349- atomic_t gravecounter; /* graveyard uniquifier */
59350+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59351 unsigned frun_percent; /* when to stop culling (% files) */
59352 unsigned fcull_percent; /* when to start culling (% files) */
59353 unsigned fstop_percent; /* when to stop allocating (% files) */
59354@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59355 * proc.c
59356 */
59357 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59358-extern atomic_t cachefiles_lookup_histogram[HZ];
59359-extern atomic_t cachefiles_mkdir_histogram[HZ];
59360-extern atomic_t cachefiles_create_histogram[HZ];
59361+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59362+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59363+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59364
59365 extern int __init cachefiles_proc_init(void);
59366 extern void cachefiles_proc_cleanup(void);
59367 static inline
59368-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59369+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59370 {
59371 unsigned long jif = jiffies - start_jif;
59372 if (jif >= HZ)
59373 jif = HZ - 1;
59374- atomic_inc(&histogram[jif]);
59375+ atomic_inc_unchecked(&histogram[jif]);
59376 }
59377
59378 #else
59379diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59380index 7f8e83f..8951aa4 100644
59381--- a/fs/cachefiles/namei.c
59382+++ b/fs/cachefiles/namei.c
59383@@ -309,7 +309,7 @@ try_again:
59384 /* first step is to make up a grave dentry in the graveyard */
59385 sprintf(nbuffer, "%08x%08x",
59386 (uint32_t) get_seconds(),
59387- (uint32_t) atomic_inc_return(&cache->gravecounter));
59388+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59389
59390 /* do the multiway lock magic */
59391 trap = lock_rename(cache->graveyard, dir);
59392diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59393index eccd339..4c1d995 100644
59394--- a/fs/cachefiles/proc.c
59395+++ b/fs/cachefiles/proc.c
59396@@ -14,9 +14,9 @@
59397 #include <linux/seq_file.h>
59398 #include "internal.h"
59399
59400-atomic_t cachefiles_lookup_histogram[HZ];
59401-atomic_t cachefiles_mkdir_histogram[HZ];
59402-atomic_t cachefiles_create_histogram[HZ];
59403+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59404+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59405+atomic_unchecked_t cachefiles_create_histogram[HZ];
59406
59407 /*
59408 * display the latency histogram
59409@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59410 return 0;
59411 default:
59412 index = (unsigned long) v - 3;
59413- x = atomic_read(&cachefiles_lookup_histogram[index]);
59414- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59415- z = atomic_read(&cachefiles_create_histogram[index]);
59416+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59417+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59418+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59419 if (x == 0 && y == 0 && z == 0)
59420 return 0;
59421
59422diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59423index c241603..56bae60 100644
59424--- a/fs/ceph/dir.c
59425+++ b/fs/ceph/dir.c
59426@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59427 struct dentry *dentry, *last;
59428 struct ceph_dentry_info *di;
59429 int err = 0;
59430+ char d_name[DNAME_INLINE_LEN];
59431+ const unsigned char *name;
59432
59433 /* claim ref on last dentry we returned */
59434 last = fi->dentry;
59435@@ -192,7 +194,12 @@ more:
59436
59437 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59438 dentry, dentry, dentry->d_inode);
59439- if (!dir_emit(ctx, dentry->d_name.name,
59440+ name = dentry->d_name.name;
59441+ if (name == dentry->d_iname) {
59442+ memcpy(d_name, name, dentry->d_name.len);
59443+ name = d_name;
59444+ }
59445+ if (!dir_emit(ctx, name,
59446 dentry->d_name.len,
59447 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59448 dentry->d_inode->i_mode >> 12)) {
59449@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59450 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59451 struct ceph_mds_client *mdsc = fsc->mdsc;
59452 unsigned frag = fpos_frag(ctx->pos);
59453- int off = fpos_off(ctx->pos);
59454+ unsigned int off = fpos_off(ctx->pos);
59455 int err;
59456 u32 ftype;
59457 struct ceph_mds_reply_info_parsed *rinfo;
59458diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59459index 50f06cd..c7eba3e 100644
59460--- a/fs/ceph/super.c
59461+++ b/fs/ceph/super.c
59462@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59463 /*
59464 * construct our own bdi so we can control readahead, etc.
59465 */
59466-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59467+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59468
59469 static int ceph_register_bdi(struct super_block *sb,
59470 struct ceph_fs_client *fsc)
59471@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
59472 default_backing_dev_info.ra_pages;
59473
59474 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59475- atomic_long_inc_return(&bdi_seq));
59476+ atomic_long_inc_return_unchecked(&bdi_seq));
59477 if (!err)
59478 sb->s_bdi = &fsc->backing_dev_info;
59479 return err;
59480diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59481index 7febcf2..62a5721 100644
59482--- a/fs/cifs/cifs_debug.c
59483+++ b/fs/cifs/cifs_debug.c
59484@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59485
59486 if (strtobool(&c, &bv) == 0) {
59487 #ifdef CONFIG_CIFS_STATS2
59488- atomic_set(&totBufAllocCount, 0);
59489- atomic_set(&totSmBufAllocCount, 0);
59490+ atomic_set_unchecked(&totBufAllocCount, 0);
59491+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59492 #endif /* CONFIG_CIFS_STATS2 */
59493 spin_lock(&cifs_tcp_ses_lock);
59494 list_for_each(tmp1, &cifs_tcp_ses_list) {
59495@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59496 tcon = list_entry(tmp3,
59497 struct cifs_tcon,
59498 tcon_list);
59499- atomic_set(&tcon->num_smbs_sent, 0);
59500+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59501 if (server->ops->clear_stats)
59502 server->ops->clear_stats(tcon);
59503 }
59504@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59505 smBufAllocCount.counter, cifs_min_small);
59506 #ifdef CONFIG_CIFS_STATS2
59507 seq_printf(m, "Total Large %d Small %d Allocations\n",
59508- atomic_read(&totBufAllocCount),
59509- atomic_read(&totSmBufAllocCount));
59510+ atomic_read_unchecked(&totBufAllocCount),
59511+ atomic_read_unchecked(&totSmBufAllocCount));
59512 #endif /* CONFIG_CIFS_STATS2 */
59513
59514 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59515@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59516 if (tcon->need_reconnect)
59517 seq_puts(m, "\tDISCONNECTED ");
59518 seq_printf(m, "\nSMBs: %d",
59519- atomic_read(&tcon->num_smbs_sent));
59520+ atomic_read_unchecked(&tcon->num_smbs_sent));
59521 if (server->ops->print_stats)
59522 server->ops->print_stats(m, tcon);
59523 }
59524diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59525index d72fe37..ded5511 100644
59526--- a/fs/cifs/cifsfs.c
59527+++ b/fs/cifs/cifsfs.c
59528@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59529 */
59530 cifs_req_cachep = kmem_cache_create("cifs_request",
59531 CIFSMaxBufSize + max_hdr_size, 0,
59532- SLAB_HWCACHE_ALIGN, NULL);
59533+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59534 if (cifs_req_cachep == NULL)
59535 return -ENOMEM;
59536
59537@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59538 efficient to alloc 1 per page off the slab compared to 17K (5page)
59539 alloc of large cifs buffers even when page debugging is on */
59540 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59541- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59542+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59543 NULL);
59544 if (cifs_sm_req_cachep == NULL) {
59545 mempool_destroy(cifs_req_poolp);
59546@@ -1204,8 +1204,8 @@ init_cifs(void)
59547 atomic_set(&bufAllocCount, 0);
59548 atomic_set(&smBufAllocCount, 0);
59549 #ifdef CONFIG_CIFS_STATS2
59550- atomic_set(&totBufAllocCount, 0);
59551- atomic_set(&totSmBufAllocCount, 0);
59552+ atomic_set_unchecked(&totBufAllocCount, 0);
59553+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59554 #endif /* CONFIG_CIFS_STATS2 */
59555
59556 atomic_set(&midCount, 0);
59557diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59558index 22b289a..bbbba08 100644
59559--- a/fs/cifs/cifsglob.h
59560+++ b/fs/cifs/cifsglob.h
59561@@ -823,35 +823,35 @@ struct cifs_tcon {
59562 __u16 Flags; /* optional support bits */
59563 enum statusEnum tidStatus;
59564 #ifdef CONFIG_CIFS_STATS
59565- atomic_t num_smbs_sent;
59566+ atomic_unchecked_t num_smbs_sent;
59567 union {
59568 struct {
59569- atomic_t num_writes;
59570- atomic_t num_reads;
59571- atomic_t num_flushes;
59572- atomic_t num_oplock_brks;
59573- atomic_t num_opens;
59574- atomic_t num_closes;
59575- atomic_t num_deletes;
59576- atomic_t num_mkdirs;
59577- atomic_t num_posixopens;
59578- atomic_t num_posixmkdirs;
59579- atomic_t num_rmdirs;
59580- atomic_t num_renames;
59581- atomic_t num_t2renames;
59582- atomic_t num_ffirst;
59583- atomic_t num_fnext;
59584- atomic_t num_fclose;
59585- atomic_t num_hardlinks;
59586- atomic_t num_symlinks;
59587- atomic_t num_locks;
59588- atomic_t num_acl_get;
59589- atomic_t num_acl_set;
59590+ atomic_unchecked_t num_writes;
59591+ atomic_unchecked_t num_reads;
59592+ atomic_unchecked_t num_flushes;
59593+ atomic_unchecked_t num_oplock_brks;
59594+ atomic_unchecked_t num_opens;
59595+ atomic_unchecked_t num_closes;
59596+ atomic_unchecked_t num_deletes;
59597+ atomic_unchecked_t num_mkdirs;
59598+ atomic_unchecked_t num_posixopens;
59599+ atomic_unchecked_t num_posixmkdirs;
59600+ atomic_unchecked_t num_rmdirs;
59601+ atomic_unchecked_t num_renames;
59602+ atomic_unchecked_t num_t2renames;
59603+ atomic_unchecked_t num_ffirst;
59604+ atomic_unchecked_t num_fnext;
59605+ atomic_unchecked_t num_fclose;
59606+ atomic_unchecked_t num_hardlinks;
59607+ atomic_unchecked_t num_symlinks;
59608+ atomic_unchecked_t num_locks;
59609+ atomic_unchecked_t num_acl_get;
59610+ atomic_unchecked_t num_acl_set;
59611 } cifs_stats;
59612 #ifdef CONFIG_CIFS_SMB2
59613 struct {
59614- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59615- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59616+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59617+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59618 } smb2_stats;
59619 #endif /* CONFIG_CIFS_SMB2 */
59620 } stats;
59621@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
59622 }
59623
59624 #ifdef CONFIG_CIFS_STATS
59625-#define cifs_stats_inc atomic_inc
59626+#define cifs_stats_inc atomic_inc_unchecked
59627
59628 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
59629 unsigned int bytes)
59630@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
59631 /* Various Debug counters */
59632 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
59633 #ifdef CONFIG_CIFS_STATS2
59634-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
59635-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
59636+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
59637+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
59638 #endif
59639 GLOBAL_EXTERN atomic_t smBufAllocCount;
59640 GLOBAL_EXTERN atomic_t midCount;
59641diff --git a/fs/cifs/file.c b/fs/cifs/file.c
59642index 74f1287..7ef0237 100644
59643--- a/fs/cifs/file.c
59644+++ b/fs/cifs/file.c
59645@@ -2060,10 +2060,14 @@ static int cifs_writepages(struct address_space *mapping,
59646 index = mapping->writeback_index; /* Start from prev offset */
59647 end = -1;
59648 } else {
59649- index = wbc->range_start >> PAGE_CACHE_SHIFT;
59650- end = wbc->range_end >> PAGE_CACHE_SHIFT;
59651- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
59652+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
59653 range_whole = true;
59654+ index = 0;
59655+ end = ULONG_MAX;
59656+ } else {
59657+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
59658+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
59659+ }
59660 scanned = true;
59661 }
59662 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
59663diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
59664index 3379463..3af418a 100644
59665--- a/fs/cifs/misc.c
59666+++ b/fs/cifs/misc.c
59667@@ -170,7 +170,7 @@ cifs_buf_get(void)
59668 memset(ret_buf, 0, buf_size + 3);
59669 atomic_inc(&bufAllocCount);
59670 #ifdef CONFIG_CIFS_STATS2
59671- atomic_inc(&totBufAllocCount);
59672+ atomic_inc_unchecked(&totBufAllocCount);
59673 #endif /* CONFIG_CIFS_STATS2 */
59674 }
59675
59676@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
59677 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
59678 atomic_inc(&smBufAllocCount);
59679 #ifdef CONFIG_CIFS_STATS2
59680- atomic_inc(&totSmBufAllocCount);
59681+ atomic_inc_unchecked(&totSmBufAllocCount);
59682 #endif /* CONFIG_CIFS_STATS2 */
59683
59684 }
59685diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
59686index d297903..1cb7516 100644
59687--- a/fs/cifs/smb1ops.c
59688+++ b/fs/cifs/smb1ops.c
59689@@ -622,27 +622,27 @@ static void
59690 cifs_clear_stats(struct cifs_tcon *tcon)
59691 {
59692 #ifdef CONFIG_CIFS_STATS
59693- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
59694- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
59695- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
59696- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59697- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
59698- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
59699- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59700- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
59701- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
59702- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
59703- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
59704- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
59705- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
59706- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
59707- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
59708- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
59709- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
59710- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
59711- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
59712- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
59713- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
59714+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
59715+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
59716+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
59717+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59718+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
59719+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
59720+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59721+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
59722+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
59723+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
59724+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
59725+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
59726+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
59727+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
59728+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
59729+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
59730+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
59731+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
59732+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
59733+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
59734+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
59735 #endif
59736 }
59737
59738@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59739 {
59740 #ifdef CONFIG_CIFS_STATS
59741 seq_printf(m, " Oplocks breaks: %d",
59742- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
59743+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
59744 seq_printf(m, "\nReads: %d Bytes: %llu",
59745- atomic_read(&tcon->stats.cifs_stats.num_reads),
59746+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
59747 (long long)(tcon->bytes_read));
59748 seq_printf(m, "\nWrites: %d Bytes: %llu",
59749- atomic_read(&tcon->stats.cifs_stats.num_writes),
59750+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
59751 (long long)(tcon->bytes_written));
59752 seq_printf(m, "\nFlushes: %d",
59753- atomic_read(&tcon->stats.cifs_stats.num_flushes));
59754+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
59755 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
59756- atomic_read(&tcon->stats.cifs_stats.num_locks),
59757- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
59758- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
59759+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
59760+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
59761+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
59762 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
59763- atomic_read(&tcon->stats.cifs_stats.num_opens),
59764- atomic_read(&tcon->stats.cifs_stats.num_closes),
59765- atomic_read(&tcon->stats.cifs_stats.num_deletes));
59766+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
59767+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
59768+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
59769 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
59770- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
59771- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
59772+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
59773+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
59774 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
59775- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
59776- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
59777+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
59778+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
59779 seq_printf(m, "\nRenames: %d T2 Renames %d",
59780- atomic_read(&tcon->stats.cifs_stats.num_renames),
59781- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
59782+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
59783+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
59784 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
59785- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
59786- atomic_read(&tcon->stats.cifs_stats.num_fnext),
59787- atomic_read(&tcon->stats.cifs_stats.num_fclose));
59788+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
59789+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
59790+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
59791 #endif
59792 }
59793
59794diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59795index 96b5d40..e5db0c1 100644
59796--- a/fs/cifs/smb2ops.c
59797+++ b/fs/cifs/smb2ops.c
59798@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
59799 #ifdef CONFIG_CIFS_STATS
59800 int i;
59801 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
59802- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59803- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59804+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59805+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59806 }
59807 #endif
59808 }
59809@@ -459,65 +459,65 @@ static void
59810 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59811 {
59812 #ifdef CONFIG_CIFS_STATS
59813- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59814- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59815+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59816+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59817 seq_printf(m, "\nNegotiates: %d sent %d failed",
59818- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
59819- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
59820+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
59821+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
59822 seq_printf(m, "\nSessionSetups: %d sent %d failed",
59823- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
59824- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
59825+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
59826+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
59827 seq_printf(m, "\nLogoffs: %d sent %d failed",
59828- atomic_read(&sent[SMB2_LOGOFF_HE]),
59829- atomic_read(&failed[SMB2_LOGOFF_HE]));
59830+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
59831+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
59832 seq_printf(m, "\nTreeConnects: %d sent %d failed",
59833- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
59834- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
59835+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
59836+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
59837 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
59838- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
59839- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
59840+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
59841+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
59842 seq_printf(m, "\nCreates: %d sent %d failed",
59843- atomic_read(&sent[SMB2_CREATE_HE]),
59844- atomic_read(&failed[SMB2_CREATE_HE]));
59845+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
59846+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
59847 seq_printf(m, "\nCloses: %d sent %d failed",
59848- atomic_read(&sent[SMB2_CLOSE_HE]),
59849- atomic_read(&failed[SMB2_CLOSE_HE]));
59850+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
59851+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
59852 seq_printf(m, "\nFlushes: %d sent %d failed",
59853- atomic_read(&sent[SMB2_FLUSH_HE]),
59854- atomic_read(&failed[SMB2_FLUSH_HE]));
59855+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
59856+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
59857 seq_printf(m, "\nReads: %d sent %d failed",
59858- atomic_read(&sent[SMB2_READ_HE]),
59859- atomic_read(&failed[SMB2_READ_HE]));
59860+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
59861+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
59862 seq_printf(m, "\nWrites: %d sent %d failed",
59863- atomic_read(&sent[SMB2_WRITE_HE]),
59864- atomic_read(&failed[SMB2_WRITE_HE]));
59865+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
59866+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
59867 seq_printf(m, "\nLocks: %d sent %d failed",
59868- atomic_read(&sent[SMB2_LOCK_HE]),
59869- atomic_read(&failed[SMB2_LOCK_HE]));
59870+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
59871+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
59872 seq_printf(m, "\nIOCTLs: %d sent %d failed",
59873- atomic_read(&sent[SMB2_IOCTL_HE]),
59874- atomic_read(&failed[SMB2_IOCTL_HE]));
59875+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
59876+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
59877 seq_printf(m, "\nCancels: %d sent %d failed",
59878- atomic_read(&sent[SMB2_CANCEL_HE]),
59879- atomic_read(&failed[SMB2_CANCEL_HE]));
59880+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
59881+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
59882 seq_printf(m, "\nEchos: %d sent %d failed",
59883- atomic_read(&sent[SMB2_ECHO_HE]),
59884- atomic_read(&failed[SMB2_ECHO_HE]));
59885+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
59886+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
59887 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
59888- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
59889- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
59890+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
59891+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
59892 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
59893- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
59894- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
59895+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
59896+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
59897 seq_printf(m, "\nQueryInfos: %d sent %d failed",
59898- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
59899- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
59900+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
59901+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
59902 seq_printf(m, "\nSetInfos: %d sent %d failed",
59903- atomic_read(&sent[SMB2_SET_INFO_HE]),
59904- atomic_read(&failed[SMB2_SET_INFO_HE]));
59905+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
59906+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
59907 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
59908- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
59909- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
59910+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
59911+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
59912 #endif
59913 }
59914
59915diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
59916index 3417340..b942390 100644
59917--- a/fs/cifs/smb2pdu.c
59918+++ b/fs/cifs/smb2pdu.c
59919@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
59920 default:
59921 cifs_dbg(VFS, "info level %u isn't supported\n",
59922 srch_inf->info_level);
59923- rc = -EINVAL;
59924- goto qdir_exit;
59925+ return -EINVAL;
59926 }
59927
59928 req->FileIndex = cpu_to_le32(index);
59929diff --git a/fs/coda/cache.c b/fs/coda/cache.c
59930index 46ee6f2..89a9e7f 100644
59931--- a/fs/coda/cache.c
59932+++ b/fs/coda/cache.c
59933@@ -24,7 +24,7 @@
59934 #include "coda_linux.h"
59935 #include "coda_cache.h"
59936
59937-static atomic_t permission_epoch = ATOMIC_INIT(0);
59938+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
59939
59940 /* replace or extend an acl cache hit */
59941 void coda_cache_enter(struct inode *inode, int mask)
59942@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
59943 struct coda_inode_info *cii = ITOC(inode);
59944
59945 spin_lock(&cii->c_lock);
59946- cii->c_cached_epoch = atomic_read(&permission_epoch);
59947+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
59948 if (!uid_eq(cii->c_uid, current_fsuid())) {
59949 cii->c_uid = current_fsuid();
59950 cii->c_cached_perm = mask;
59951@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
59952 {
59953 struct coda_inode_info *cii = ITOC(inode);
59954 spin_lock(&cii->c_lock);
59955- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
59956+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
59957 spin_unlock(&cii->c_lock);
59958 }
59959
59960 /* remove all acl caches */
59961 void coda_cache_clear_all(struct super_block *sb)
59962 {
59963- atomic_inc(&permission_epoch);
59964+ atomic_inc_unchecked(&permission_epoch);
59965 }
59966
59967
59968@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
59969 spin_lock(&cii->c_lock);
59970 hit = (mask & cii->c_cached_perm) == mask &&
59971 uid_eq(cii->c_uid, current_fsuid()) &&
59972- cii->c_cached_epoch == atomic_read(&permission_epoch);
59973+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
59974 spin_unlock(&cii->c_lock);
59975
59976 return hit;
59977diff --git a/fs/compat.c b/fs/compat.c
59978index 6fd272d..dd34ba2 100644
59979--- a/fs/compat.c
59980+++ b/fs/compat.c
59981@@ -54,7 +54,7 @@
59982 #include <asm/ioctls.h>
59983 #include "internal.h"
59984
59985-int compat_log = 1;
59986+int compat_log = 0;
59987
59988 int compat_printk(const char *fmt, ...)
59989 {
59990@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
59991
59992 set_fs(KERNEL_DS);
59993 /* The __user pointer cast is valid because of the set_fs() */
59994- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
59995+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
59996 set_fs(oldfs);
59997 /* truncating is ok because it's a user address */
59998 if (!ret)
59999@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60000 goto out;
60001
60002 ret = -EINVAL;
60003- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60004+ if (nr_segs > UIO_MAXIOV)
60005 goto out;
60006 if (nr_segs > fast_segs) {
60007 ret = -ENOMEM;
60008@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60009 struct compat_readdir_callback {
60010 struct dir_context ctx;
60011 struct compat_old_linux_dirent __user *dirent;
60012+ struct file * file;
60013 int result;
60014 };
60015
60016@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60017 buf->result = -EOVERFLOW;
60018 return -EOVERFLOW;
60019 }
60020+
60021+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60022+ return 0;
60023+
60024 buf->result++;
60025 dirent = buf->dirent;
60026 if (!access_ok(VERIFY_WRITE, dirent,
60027@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60028 if (!f.file)
60029 return -EBADF;
60030
60031+ buf.file = f.file;
60032 error = iterate_dir(f.file, &buf.ctx);
60033 if (buf.result)
60034 error = buf.result;
60035@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60036 struct dir_context ctx;
60037 struct compat_linux_dirent __user *current_dir;
60038 struct compat_linux_dirent __user *previous;
60039+ struct file * file;
60040 int count;
60041 int error;
60042 };
60043@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60044 buf->error = -EOVERFLOW;
60045 return -EOVERFLOW;
60046 }
60047+
60048+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60049+ return 0;
60050+
60051 dirent = buf->previous;
60052 if (dirent) {
60053 if (__put_user(offset, &dirent->d_off))
60054@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60055 if (!f.file)
60056 return -EBADF;
60057
60058+ buf.file = f.file;
60059 error = iterate_dir(f.file, &buf.ctx);
60060 if (error >= 0)
60061 error = buf.error;
60062@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60063 struct dir_context ctx;
60064 struct linux_dirent64 __user *current_dir;
60065 struct linux_dirent64 __user *previous;
60066+ struct file * file;
60067 int count;
60068 int error;
60069 };
60070@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60071 buf->error = -EINVAL; /* only used if we fail.. */
60072 if (reclen > buf->count)
60073 return -EINVAL;
60074+
60075+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60076+ return 0;
60077+
60078 dirent = buf->previous;
60079
60080 if (dirent) {
60081@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60082 if (!f.file)
60083 return -EBADF;
60084
60085+ buf.file = f.file;
60086 error = iterate_dir(f.file, &buf.ctx);
60087 if (error >= 0)
60088 error = buf.error;
60089diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60090index 4d24d17..4f8c09e 100644
60091--- a/fs/compat_binfmt_elf.c
60092+++ b/fs/compat_binfmt_elf.c
60093@@ -30,11 +30,13 @@
60094 #undef elf_phdr
60095 #undef elf_shdr
60096 #undef elf_note
60097+#undef elf_dyn
60098 #undef elf_addr_t
60099 #define elfhdr elf32_hdr
60100 #define elf_phdr elf32_phdr
60101 #define elf_shdr elf32_shdr
60102 #define elf_note elf32_note
60103+#define elf_dyn Elf32_Dyn
60104 #define elf_addr_t Elf32_Addr
60105
60106 /*
60107diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60108index afec645..9c65620 100644
60109--- a/fs/compat_ioctl.c
60110+++ b/fs/compat_ioctl.c
60111@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60112 return -EFAULT;
60113 if (__get_user(udata, &ss32->iomem_base))
60114 return -EFAULT;
60115- ss.iomem_base = compat_ptr(udata);
60116+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60117 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60118 __get_user(ss.port_high, &ss32->port_high))
60119 return -EFAULT;
60120@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60121 for (i = 0; i < nmsgs; i++) {
60122 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60123 return -EFAULT;
60124- if (get_user(datap, &umsgs[i].buf) ||
60125- put_user(compat_ptr(datap), &tmsgs[i].buf))
60126+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60127+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60128 return -EFAULT;
60129 }
60130 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60131@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60132 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60133 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60134 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60135- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60136+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60137 return -EFAULT;
60138
60139 return ioctl_preallocate(file, p);
60140@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60141 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60142 {
60143 unsigned int a, b;
60144- a = *(unsigned int *)p;
60145- b = *(unsigned int *)q;
60146+ a = *(const unsigned int *)p;
60147+ b = *(const unsigned int *)q;
60148 if (a > b)
60149 return 1;
60150 if (a < b)
60151diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60152index c9c298b..544d100 100644
60153--- a/fs/configfs/dir.c
60154+++ b/fs/configfs/dir.c
60155@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60156 }
60157 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60158 struct configfs_dirent *next;
60159- const char *name;
60160+ const unsigned char * name;
60161+ char d_name[sizeof(next->s_dentry->d_iname)];
60162 int len;
60163 struct inode *inode = NULL;
60164
60165@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60166 continue;
60167
60168 name = configfs_get_name(next);
60169- len = strlen(name);
60170+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60171+ len = next->s_dentry->d_name.len;
60172+ memcpy(d_name, name, len);
60173+ name = d_name;
60174+ } else
60175+ len = strlen(name);
60176
60177 /*
60178 * We'll have a dentry and an inode for
60179diff --git a/fs/coredump.c b/fs/coredump.c
60180index b5c86ff..0dac262 100644
60181--- a/fs/coredump.c
60182+++ b/fs/coredump.c
60183@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60184 struct pipe_inode_info *pipe = file->private_data;
60185
60186 pipe_lock(pipe);
60187- pipe->readers++;
60188- pipe->writers--;
60189+ atomic_inc(&pipe->readers);
60190+ atomic_dec(&pipe->writers);
60191 wake_up_interruptible_sync(&pipe->wait);
60192 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60193 pipe_unlock(pipe);
60194@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60195 * We actually want wait_event_freezable() but then we need
60196 * to clear TIF_SIGPENDING and improve dump_interrupted().
60197 */
60198- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60199+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60200
60201 pipe_lock(pipe);
60202- pipe->readers--;
60203- pipe->writers++;
60204+ atomic_dec(&pipe->readers);
60205+ atomic_inc(&pipe->writers);
60206 pipe_unlock(pipe);
60207 }
60208
60209@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60210 struct files_struct *displaced;
60211 bool need_nonrelative = false;
60212 bool core_dumped = false;
60213- static atomic_t core_dump_count = ATOMIC_INIT(0);
60214+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60215+ long signr = siginfo->si_signo;
60216+ int dumpable;
60217 struct coredump_params cprm = {
60218 .siginfo = siginfo,
60219 .regs = signal_pt_regs(),
60220@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60221 .mm_flags = mm->flags,
60222 };
60223
60224- audit_core_dumps(siginfo->si_signo);
60225+ audit_core_dumps(signr);
60226+
60227+ dumpable = __get_dumpable(cprm.mm_flags);
60228+
60229+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60230+ gr_handle_brute_attach(dumpable);
60231
60232 binfmt = mm->binfmt;
60233 if (!binfmt || !binfmt->core_dump)
60234 goto fail;
60235- if (!__get_dumpable(cprm.mm_flags))
60236+ if (!dumpable)
60237 goto fail;
60238
60239 cred = prepare_creds();
60240@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60241 need_nonrelative = true;
60242 }
60243
60244- retval = coredump_wait(siginfo->si_signo, &core_state);
60245+ retval = coredump_wait(signr, &core_state);
60246 if (retval < 0)
60247 goto fail_creds;
60248
60249@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60250 }
60251 cprm.limit = RLIM_INFINITY;
60252
60253- dump_count = atomic_inc_return(&core_dump_count);
60254+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60255 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60256 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60257 task_tgid_vnr(current), current->comm);
60258@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60259 } else {
60260 struct inode *inode;
60261
60262+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60263+
60264 if (cprm.limit < binfmt->min_coredump)
60265 goto fail_unlock;
60266
60267@@ -681,7 +690,7 @@ close_fail:
60268 filp_close(cprm.file, NULL);
60269 fail_dropcount:
60270 if (ispipe)
60271- atomic_dec(&core_dump_count);
60272+ atomic_dec_unchecked(&core_dump_count);
60273 fail_unlock:
60274 kfree(cn.corename);
60275 coredump_finish(mm, core_dumped);
60276@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60277 struct file *file = cprm->file;
60278 loff_t pos = file->f_pos;
60279 ssize_t n;
60280+
60281+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60282 if (cprm->written + nr > cprm->limit)
60283 return 0;
60284 while (nr) {
60285diff --git a/fs/dcache.c b/fs/dcache.c
60286index e368d4f..b40ba59 100644
60287--- a/fs/dcache.c
60288+++ b/fs/dcache.c
60289@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
60290 * dentry_iput drops the locks, at which point nobody (except
60291 * transient RCU lookups) can reach this dentry.
60292 */
60293- BUG_ON((int)dentry->d_lockref.count > 0);
60294+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60295 this_cpu_dec(nr_dentry);
60296 if (dentry->d_op && dentry->d_op->d_release)
60297 dentry->d_op->d_release(dentry);
60298@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60299 struct dentry *parent = dentry->d_parent;
60300 if (IS_ROOT(dentry))
60301 return NULL;
60302- if (unlikely((int)dentry->d_lockref.count < 0))
60303+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60304 return NULL;
60305 if (likely(spin_trylock(&parent->d_lock)))
60306 return parent;
60307@@ -638,7 +638,7 @@ repeat:
60308 dentry->d_flags |= DCACHE_REFERENCED;
60309 dentry_lru_add(dentry);
60310
60311- dentry->d_lockref.count--;
60312+ __lockref_dec(&dentry->d_lockref);
60313 spin_unlock(&dentry->d_lock);
60314 return;
60315
60316@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
60317 /* This must be called with d_lock held */
60318 static inline void __dget_dlock(struct dentry *dentry)
60319 {
60320- dentry->d_lockref.count++;
60321+ __lockref_inc(&dentry->d_lockref);
60322 }
60323
60324 static inline void __dget(struct dentry *dentry)
60325@@ -694,8 +694,8 @@ repeat:
60326 goto repeat;
60327 }
60328 rcu_read_unlock();
60329- BUG_ON(!ret->d_lockref.count);
60330- ret->d_lockref.count++;
60331+ BUG_ON(!__lockref_read(&ret->d_lockref));
60332+ __lockref_inc(&ret->d_lockref);
60333 spin_unlock(&ret->d_lock);
60334 return ret;
60335 }
60336@@ -773,9 +773,9 @@ restart:
60337 spin_lock(&inode->i_lock);
60338 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60339 spin_lock(&dentry->d_lock);
60340- if (!dentry->d_lockref.count) {
60341+ if (!__lockref_read(&dentry->d_lockref)) {
60342 struct dentry *parent = lock_parent(dentry);
60343- if (likely(!dentry->d_lockref.count)) {
60344+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60345 __dentry_kill(dentry);
60346 dput(parent);
60347 goto restart;
60348@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
60349 * We found an inuse dentry which was not removed from
60350 * the LRU because of laziness during lookup. Do not free it.
60351 */
60352- if ((int)dentry->d_lockref.count > 0) {
60353+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60354 spin_unlock(&dentry->d_lock);
60355 if (parent)
60356 spin_unlock(&parent->d_lock);
60357@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
60358 dentry = parent;
60359 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60360 parent = lock_parent(dentry);
60361- if (dentry->d_lockref.count != 1) {
60362- dentry->d_lockref.count--;
60363+ if (__lockref_read(&dentry->d_lockref) != 1) {
60364+ __lockref_inc(&dentry->d_lockref);
60365 spin_unlock(&dentry->d_lock);
60366 if (parent)
60367 spin_unlock(&parent->d_lock);
60368@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60369 * counts, just remove them from the LRU. Otherwise give them
60370 * another pass through the LRU.
60371 */
60372- if (dentry->d_lockref.count) {
60373+ if (__lockref_read(&dentry->d_lockref) > 0) {
60374 d_lru_isolate(dentry);
60375 spin_unlock(&dentry->d_lock);
60376 return LRU_REMOVED;
60377@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60378 } else {
60379 if (dentry->d_flags & DCACHE_LRU_LIST)
60380 d_lru_del(dentry);
60381- if (!dentry->d_lockref.count) {
60382+ if (!__lockref_read(&dentry->d_lockref)) {
60383 d_shrink_add(dentry, &data->dispose);
60384 data->found++;
60385 }
60386@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60387 return D_WALK_CONTINUE;
60388
60389 /* root with refcount 1 is fine */
60390- if (dentry == _data && dentry->d_lockref.count == 1)
60391+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60392 return D_WALK_CONTINUE;
60393
60394 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60395@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60396 dentry->d_inode ?
60397 dentry->d_inode->i_ino : 0UL,
60398 dentry,
60399- dentry->d_lockref.count,
60400+ __lockref_read(&dentry->d_lockref),
60401 dentry->d_sb->s_type->name,
60402 dentry->d_sb->s_id);
60403 WARN_ON(1);
60404@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60405 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60406 if (name->len > DNAME_INLINE_LEN-1) {
60407 size_t size = offsetof(struct external_name, name[1]);
60408- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60409+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60410 if (!p) {
60411 kmem_cache_free(dentry_cache, dentry);
60412 return NULL;
60413@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60414 smp_wmb();
60415 dentry->d_name.name = dname;
60416
60417- dentry->d_lockref.count = 1;
60418+ __lockref_set(&dentry->d_lockref, 1);
60419 dentry->d_flags = 0;
60420 spin_lock_init(&dentry->d_lock);
60421 seqcount_init(&dentry->d_seq);
60422@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60423 dentry->d_sb = sb;
60424 dentry->d_op = NULL;
60425 dentry->d_fsdata = NULL;
60426+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60427+ atomic_set(&dentry->chroot_refcnt, 0);
60428+#endif
60429 INIT_HLIST_BL_NODE(&dentry->d_hash);
60430 INIT_LIST_HEAD(&dentry->d_lru);
60431 INIT_LIST_HEAD(&dentry->d_subdirs);
60432@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60433 goto next;
60434 }
60435
60436- dentry->d_lockref.count++;
60437+ __lockref_inc(&dentry->d_lockref);
60438 found = dentry;
60439 spin_unlock(&dentry->d_lock);
60440 break;
60441@@ -2250,7 +2253,7 @@ again:
60442 spin_lock(&dentry->d_lock);
60443 inode = dentry->d_inode;
60444 isdir = S_ISDIR(inode->i_mode);
60445- if (dentry->d_lockref.count == 1) {
60446+ if (__lockref_read(&dentry->d_lockref) == 1) {
60447 if (!spin_trylock(&inode->i_lock)) {
60448 spin_unlock(&dentry->d_lock);
60449 cpu_relax();
60450@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60451
60452 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60453 dentry->d_flags |= DCACHE_GENOCIDE;
60454- dentry->d_lockref.count--;
60455+ __lockref_dec(&dentry->d_lockref);
60456 }
60457 }
60458 return D_WALK_CONTINUE;
60459@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
60460 mempages -= reserve;
60461
60462 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60463- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60464+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60465+ SLAB_NO_SANITIZE, NULL);
60466
60467 dcache_init();
60468 inode_init();
60469diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60470index 6f0ce53..780f4f8 100644
60471--- a/fs/debugfs/inode.c
60472+++ b/fs/debugfs/inode.c
60473@@ -425,7 +425,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60474 */
60475 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60476 {
60477+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60478+ return __create_file(name, S_IFDIR | S_IRWXU,
60479+#else
60480 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60481+#endif
60482 parent, NULL, NULL);
60483 }
60484 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60485diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60486index 1686dc2..9611c50 100644
60487--- a/fs/ecryptfs/inode.c
60488+++ b/fs/ecryptfs/inode.c
60489@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60490 old_fs = get_fs();
60491 set_fs(get_ds());
60492 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60493- (char __user *)lower_buf,
60494+ (char __force_user *)lower_buf,
60495 PATH_MAX);
60496 set_fs(old_fs);
60497 if (rc < 0)
60498diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60499index e4141f2..d8263e8 100644
60500--- a/fs/ecryptfs/miscdev.c
60501+++ b/fs/ecryptfs/miscdev.c
60502@@ -304,7 +304,7 @@ check_list:
60503 goto out_unlock_msg_ctx;
60504 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60505 if (msg_ctx->msg) {
60506- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60507+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60508 goto out_unlock_msg_ctx;
60509 i += packet_length_size;
60510 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
60511diff --git a/fs/exec.c b/fs/exec.c
60512index ad8798e..5f872c9 100644
60513--- a/fs/exec.c
60514+++ b/fs/exec.c
60515@@ -56,8 +56,20 @@
60516 #include <linux/pipe_fs_i.h>
60517 #include <linux/oom.h>
60518 #include <linux/compat.h>
60519+#include <linux/random.h>
60520+#include <linux/seq_file.h>
60521+#include <linux/coredump.h>
60522+#include <linux/mman.h>
60523+
60524+#ifdef CONFIG_PAX_REFCOUNT
60525+#include <linux/kallsyms.h>
60526+#include <linux/kdebug.h>
60527+#endif
60528+
60529+#include <trace/events/fs.h>
60530
60531 #include <asm/uaccess.h>
60532+#include <asm/sections.h>
60533 #include <asm/mmu_context.h>
60534 #include <asm/tlb.h>
60535
60536@@ -66,19 +78,34 @@
60537
60538 #include <trace/events/sched.h>
60539
60540+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60541+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60542+{
60543+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
60544+}
60545+#endif
60546+
60547+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
60548+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60549+EXPORT_SYMBOL(pax_set_initial_flags_func);
60550+#endif
60551+
60552 int suid_dumpable = 0;
60553
60554 static LIST_HEAD(formats);
60555 static DEFINE_RWLOCK(binfmt_lock);
60556
60557+extern int gr_process_kernel_exec_ban(void);
60558+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
60559+
60560 void __register_binfmt(struct linux_binfmt * fmt, int insert)
60561 {
60562 BUG_ON(!fmt);
60563 if (WARN_ON(!fmt->load_binary))
60564 return;
60565 write_lock(&binfmt_lock);
60566- insert ? list_add(&fmt->lh, &formats) :
60567- list_add_tail(&fmt->lh, &formats);
60568+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
60569+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
60570 write_unlock(&binfmt_lock);
60571 }
60572
60573@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
60574 void unregister_binfmt(struct linux_binfmt * fmt)
60575 {
60576 write_lock(&binfmt_lock);
60577- list_del(&fmt->lh);
60578+ pax_list_del((struct list_head *)&fmt->lh);
60579 write_unlock(&binfmt_lock);
60580 }
60581
60582@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60583 int write)
60584 {
60585 struct page *page;
60586- int ret;
60587
60588-#ifdef CONFIG_STACK_GROWSUP
60589- if (write) {
60590- ret = expand_downwards(bprm->vma, pos);
60591- if (ret < 0)
60592- return NULL;
60593- }
60594-#endif
60595- ret = get_user_pages(current, bprm->mm, pos,
60596- 1, write, 1, &page, NULL);
60597- if (ret <= 0)
60598+ if (0 > expand_downwards(bprm->vma, pos))
60599+ return NULL;
60600+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
60601 return NULL;
60602
60603 if (write) {
60604@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60605 if (size <= ARG_MAX)
60606 return page;
60607
60608+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60609+ // only allow 512KB for argv+env on suid/sgid binaries
60610+ // to prevent easy ASLR exhaustion
60611+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
60612+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
60613+ (size > (512 * 1024))) {
60614+ put_page(page);
60615+ return NULL;
60616+ }
60617+#endif
60618+
60619 /*
60620 * Limit to 1/4-th the stack size for the argv+env strings.
60621 * This ensures that:
60622@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60623 vma->vm_end = STACK_TOP_MAX;
60624 vma->vm_start = vma->vm_end - PAGE_SIZE;
60625 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
60626+
60627+#ifdef CONFIG_PAX_SEGMEXEC
60628+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
60629+#endif
60630+
60631 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
60632 INIT_LIST_HEAD(&vma->anon_vma_chain);
60633
60634@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60635 arch_bprm_mm_init(mm, vma);
60636 up_write(&mm->mmap_sem);
60637 bprm->p = vma->vm_end - sizeof(void *);
60638+
60639+#ifdef CONFIG_PAX_RANDUSTACK
60640+ if (randomize_va_space)
60641+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
60642+#endif
60643+
60644 return 0;
60645 err:
60646 up_write(&mm->mmap_sem);
60647@@ -396,7 +437,7 @@ struct user_arg_ptr {
60648 } ptr;
60649 };
60650
60651-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60652+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60653 {
60654 const char __user *native;
60655
60656@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60657 compat_uptr_t compat;
60658
60659 if (get_user(compat, argv.ptr.compat + nr))
60660- return ERR_PTR(-EFAULT);
60661+ return (const char __force_user *)ERR_PTR(-EFAULT);
60662
60663 return compat_ptr(compat);
60664 }
60665 #endif
60666
60667 if (get_user(native, argv.ptr.native + nr))
60668- return ERR_PTR(-EFAULT);
60669+ return (const char __force_user *)ERR_PTR(-EFAULT);
60670
60671 return native;
60672 }
60673@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
60674 if (!p)
60675 break;
60676
60677- if (IS_ERR(p))
60678+ if (IS_ERR((const char __force_kernel *)p))
60679 return -EFAULT;
60680
60681 if (i >= max)
60682@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
60683
60684 ret = -EFAULT;
60685 str = get_user_arg_ptr(argv, argc);
60686- if (IS_ERR(str))
60687+ if (IS_ERR((const char __force_kernel *)str))
60688 goto out;
60689
60690 len = strnlen_user(str, MAX_ARG_STRLEN);
60691@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
60692 int r;
60693 mm_segment_t oldfs = get_fs();
60694 struct user_arg_ptr argv = {
60695- .ptr.native = (const char __user *const __user *)__argv,
60696+ .ptr.native = (const char __user * const __force_user *)__argv,
60697 };
60698
60699 set_fs(KERNEL_DS);
60700@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60701 unsigned long new_end = old_end - shift;
60702 struct mmu_gather tlb;
60703
60704- BUG_ON(new_start > new_end);
60705+ if (new_start >= new_end || new_start < mmap_min_addr)
60706+ return -ENOMEM;
60707
60708 /*
60709 * ensure there are no vmas between where we want to go
60710@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60711 if (vma != find_vma(mm, new_start))
60712 return -EFAULT;
60713
60714+#ifdef CONFIG_PAX_SEGMEXEC
60715+ BUG_ON(pax_find_mirror_vma(vma));
60716+#endif
60717+
60718 /*
60719 * cover the whole range: [new_start, old_end)
60720 */
60721@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60722 stack_top = arch_align_stack(stack_top);
60723 stack_top = PAGE_ALIGN(stack_top);
60724
60725- if (unlikely(stack_top < mmap_min_addr) ||
60726- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
60727- return -ENOMEM;
60728-
60729 stack_shift = vma->vm_end - stack_top;
60730
60731 bprm->p -= stack_shift;
60732@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
60733 bprm->exec -= stack_shift;
60734
60735 down_write(&mm->mmap_sem);
60736+
60737+ /* Move stack pages down in memory. */
60738+ if (stack_shift) {
60739+ ret = shift_arg_pages(vma, stack_shift);
60740+ if (ret)
60741+ goto out_unlock;
60742+ }
60743+
60744 vm_flags = VM_STACK_FLAGS;
60745
60746+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60747+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60748+ vm_flags &= ~VM_EXEC;
60749+
60750+#ifdef CONFIG_PAX_MPROTECT
60751+ if (mm->pax_flags & MF_PAX_MPROTECT)
60752+ vm_flags &= ~VM_MAYEXEC;
60753+#endif
60754+
60755+ }
60756+#endif
60757+
60758 /*
60759 * Adjust stack execute permissions; explicitly enable for
60760 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
60761@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60762 goto out_unlock;
60763 BUG_ON(prev != vma);
60764
60765- /* Move stack pages down in memory. */
60766- if (stack_shift) {
60767- ret = shift_arg_pages(vma, stack_shift);
60768- if (ret)
60769- goto out_unlock;
60770- }
60771-
60772 /* mprotect_fixup is overkill to remove the temporary stack flags */
60773 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
60774
60775@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
60776 #endif
60777 current->mm->start_stack = bprm->p;
60778 ret = expand_stack(vma, stack_base);
60779+
60780+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
60781+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
60782+ unsigned long size;
60783+ vm_flags_t vm_flags;
60784+
60785+ size = STACK_TOP - vma->vm_end;
60786+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
60787+
60788+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
60789+
60790+#ifdef CONFIG_X86
60791+ if (!ret) {
60792+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
60793+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
60794+ }
60795+#endif
60796+
60797+ }
60798+#endif
60799+
60800 if (ret)
60801 ret = -EFAULT;
60802
60803@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
60804 if (err)
60805 goto exit;
60806
60807- if (name->name[0] != '\0')
60808+ if (name->name[0] != '\0') {
60809 fsnotify_open(file);
60810+ trace_open_exec(name->name);
60811+ }
60812
60813 out:
60814 return file;
60815@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
60816 old_fs = get_fs();
60817 set_fs(get_ds());
60818 /* The cast to a user pointer is valid due to the set_fs() */
60819- result = vfs_read(file, (void __user *)addr, count, &pos);
60820+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
60821 set_fs(old_fs);
60822 return result;
60823 }
60824@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
60825 tsk->mm = mm;
60826 tsk->active_mm = mm;
60827 activate_mm(active_mm, mm);
60828+ populate_stack();
60829 tsk->mm->vmacache_seqnum = 0;
60830 vmacache_flush(tsk);
60831 task_unlock(tsk);
60832@@ -1252,7 +1331,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
60833 }
60834 rcu_read_unlock();
60835
60836- if (p->fs->users > n_fs)
60837+ if (atomic_read(&p->fs->users) > n_fs)
60838 bprm->unsafe |= LSM_UNSAFE_SHARE;
60839 else
60840 p->fs->in_exec = 1;
60841@@ -1433,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
60842 return ret;
60843 }
60844
60845+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60846+static DEFINE_PER_CPU(u64, exec_counter);
60847+static int __init init_exec_counters(void)
60848+{
60849+ unsigned int cpu;
60850+
60851+ for_each_possible_cpu(cpu) {
60852+ per_cpu(exec_counter, cpu) = (u64)cpu;
60853+ }
60854+
60855+ return 0;
60856+}
60857+early_initcall(init_exec_counters);
60858+static inline void increment_exec_counter(void)
60859+{
60860+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
60861+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
60862+}
60863+#else
60864+static inline void increment_exec_counter(void) {}
60865+#endif
60866+
60867+extern void gr_handle_exec_args(struct linux_binprm *bprm,
60868+ struct user_arg_ptr argv);
60869+
60870 /*
60871 * sys_execve() executes a new program.
60872 */
60873@@ -1441,6 +1545,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60874 struct user_arg_ptr envp,
60875 int flags)
60876 {
60877+#ifdef CONFIG_GRKERNSEC
60878+ struct file *old_exec_file;
60879+ struct acl_subject_label *old_acl;
60880+ struct rlimit old_rlim[RLIM_NLIMITS];
60881+#endif
60882 char *pathbuf = NULL;
60883 struct linux_binprm *bprm;
60884 struct file *file;
60885@@ -1450,6 +1559,8 @@ static int do_execveat_common(int fd, struct filename *filename,
60886 if (IS_ERR(filename))
60887 return PTR_ERR(filename);
60888
60889+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
60890+
60891 /*
60892 * We move the actual failure in case of RLIMIT_NPROC excess from
60893 * set*uid() to execve() because too many poorly written programs
60894@@ -1487,6 +1598,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60895 if (IS_ERR(file))
60896 goto out_unmark;
60897
60898+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
60899+ retval = -EPERM;
60900+ goto out_unmark;
60901+ }
60902+
60903 sched_exec();
60904
60905 bprm->file = file;
60906@@ -1513,6 +1629,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60907 }
60908 bprm->interp = bprm->filename;
60909
60910+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
60911+ retval = -EACCES;
60912+ goto out_unmark;
60913+ }
60914+
60915 retval = bprm_mm_init(bprm);
60916 if (retval)
60917 goto out_unmark;
60918@@ -1529,24 +1650,70 @@ static int do_execveat_common(int fd, struct filename *filename,
60919 if (retval < 0)
60920 goto out;
60921
60922+#ifdef CONFIG_GRKERNSEC
60923+ old_acl = current->acl;
60924+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
60925+ old_exec_file = current->exec_file;
60926+ get_file(file);
60927+ current->exec_file = file;
60928+#endif
60929+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60930+ /* limit suid stack to 8MB
60931+ * we saved the old limits above and will restore them if this exec fails
60932+ */
60933+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
60934+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
60935+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
60936+#endif
60937+
60938+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
60939+ retval = -EPERM;
60940+ goto out_fail;
60941+ }
60942+
60943+ if (!gr_tpe_allow(file)) {
60944+ retval = -EACCES;
60945+ goto out_fail;
60946+ }
60947+
60948+ if (gr_check_crash_exec(file)) {
60949+ retval = -EACCES;
60950+ goto out_fail;
60951+ }
60952+
60953+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
60954+ bprm->unsafe);
60955+ if (retval < 0)
60956+ goto out_fail;
60957+
60958 retval = copy_strings_kernel(1, &bprm->filename, bprm);
60959 if (retval < 0)
60960- goto out;
60961+ goto out_fail;
60962
60963 bprm->exec = bprm->p;
60964 retval = copy_strings(bprm->envc, envp, bprm);
60965 if (retval < 0)
60966- goto out;
60967+ goto out_fail;
60968
60969 retval = copy_strings(bprm->argc, argv, bprm);
60970 if (retval < 0)
60971- goto out;
60972+ goto out_fail;
60973+
60974+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
60975+
60976+ gr_handle_exec_args(bprm, argv);
60977
60978 retval = exec_binprm(bprm);
60979 if (retval < 0)
60980- goto out;
60981+ goto out_fail;
60982+#ifdef CONFIG_GRKERNSEC
60983+ if (old_exec_file)
60984+ fput(old_exec_file);
60985+#endif
60986
60987 /* execve succeeded */
60988+
60989+ increment_exec_counter();
60990 current->fs->in_exec = 0;
60991 current->in_execve = 0;
60992 acct_update_integrals(current);
60993@@ -1558,6 +1725,14 @@ static int do_execveat_common(int fd, struct filename *filename,
60994 put_files_struct(displaced);
60995 return retval;
60996
60997+out_fail:
60998+#ifdef CONFIG_GRKERNSEC
60999+ current->acl = old_acl;
61000+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61001+ fput(current->exec_file);
61002+ current->exec_file = old_exec_file;
61003+#endif
61004+
61005 out:
61006 if (bprm->mm) {
61007 acct_arg_size(bprm, 0);
61008@@ -1704,3 +1879,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61009 argv, envp, flags);
61010 }
61011 #endif
61012+
61013+int pax_check_flags(unsigned long *flags)
61014+{
61015+ int retval = 0;
61016+
61017+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61018+ if (*flags & MF_PAX_SEGMEXEC)
61019+ {
61020+ *flags &= ~MF_PAX_SEGMEXEC;
61021+ retval = -EINVAL;
61022+ }
61023+#endif
61024+
61025+ if ((*flags & MF_PAX_PAGEEXEC)
61026+
61027+#ifdef CONFIG_PAX_PAGEEXEC
61028+ && (*flags & MF_PAX_SEGMEXEC)
61029+#endif
61030+
61031+ )
61032+ {
61033+ *flags &= ~MF_PAX_PAGEEXEC;
61034+ retval = -EINVAL;
61035+ }
61036+
61037+ if ((*flags & MF_PAX_MPROTECT)
61038+
61039+#ifdef CONFIG_PAX_MPROTECT
61040+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61041+#endif
61042+
61043+ )
61044+ {
61045+ *flags &= ~MF_PAX_MPROTECT;
61046+ retval = -EINVAL;
61047+ }
61048+
61049+ if ((*flags & MF_PAX_EMUTRAMP)
61050+
61051+#ifdef CONFIG_PAX_EMUTRAMP
61052+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61053+#endif
61054+
61055+ )
61056+ {
61057+ *flags &= ~MF_PAX_EMUTRAMP;
61058+ retval = -EINVAL;
61059+ }
61060+
61061+ return retval;
61062+}
61063+
61064+EXPORT_SYMBOL(pax_check_flags);
61065+
61066+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61067+char *pax_get_path(const struct path *path, char *buf, int buflen)
61068+{
61069+ char *pathname = d_path(path, buf, buflen);
61070+
61071+ if (IS_ERR(pathname))
61072+ goto toolong;
61073+
61074+ pathname = mangle_path(buf, pathname, "\t\n\\");
61075+ if (!pathname)
61076+ goto toolong;
61077+
61078+ *pathname = 0;
61079+ return buf;
61080+
61081+toolong:
61082+ return "<path too long>";
61083+}
61084+EXPORT_SYMBOL(pax_get_path);
61085+
61086+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61087+{
61088+ struct task_struct *tsk = current;
61089+ struct mm_struct *mm = current->mm;
61090+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61091+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61092+ char *path_exec = NULL;
61093+ char *path_fault = NULL;
61094+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61095+ siginfo_t info = { };
61096+
61097+ if (buffer_exec && buffer_fault) {
61098+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61099+
61100+ down_read(&mm->mmap_sem);
61101+ vma = mm->mmap;
61102+ while (vma && (!vma_exec || !vma_fault)) {
61103+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61104+ vma_exec = vma;
61105+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61106+ vma_fault = vma;
61107+ vma = vma->vm_next;
61108+ }
61109+ if (vma_exec)
61110+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61111+ if (vma_fault) {
61112+ start = vma_fault->vm_start;
61113+ end = vma_fault->vm_end;
61114+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61115+ if (vma_fault->vm_file)
61116+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61117+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61118+ path_fault = "<heap>";
61119+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61120+ path_fault = "<stack>";
61121+ else
61122+ path_fault = "<anonymous mapping>";
61123+ }
61124+ up_read(&mm->mmap_sem);
61125+ }
61126+ if (tsk->signal->curr_ip)
61127+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61128+ else
61129+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61130+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61131+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61132+ free_page((unsigned long)buffer_exec);
61133+ free_page((unsigned long)buffer_fault);
61134+ pax_report_insns(regs, pc, sp);
61135+ info.si_signo = SIGKILL;
61136+ info.si_errno = 0;
61137+ info.si_code = SI_KERNEL;
61138+ info.si_pid = 0;
61139+ info.si_uid = 0;
61140+ do_coredump(&info);
61141+}
61142+#endif
61143+
61144+#ifdef CONFIG_PAX_REFCOUNT
61145+void pax_report_refcount_overflow(struct pt_regs *regs)
61146+{
61147+ if (current->signal->curr_ip)
61148+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61149+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61150+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61151+ else
61152+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61153+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61154+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
61155+ preempt_disable();
61156+ show_regs(regs);
61157+ preempt_enable();
61158+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61159+}
61160+#endif
61161+
61162+#ifdef CONFIG_PAX_USERCOPY
61163+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61164+static noinline int check_stack_object(const void *obj, unsigned long len)
61165+{
61166+ const void * const stack = task_stack_page(current);
61167+ const void * const stackend = stack + THREAD_SIZE;
61168+
61169+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61170+ const void *frame = NULL;
61171+ const void *oldframe;
61172+#endif
61173+
61174+ if (obj + len < obj)
61175+ return -1;
61176+
61177+ if (obj + len <= stack || stackend <= obj)
61178+ return 0;
61179+
61180+ if (obj < stack || stackend < obj + len)
61181+ return -1;
61182+
61183+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61184+ oldframe = __builtin_frame_address(1);
61185+ if (oldframe)
61186+ frame = __builtin_frame_address(2);
61187+ /*
61188+ low ----------------------------------------------> high
61189+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61190+ ^----------------^
61191+ allow copies only within here
61192+ */
61193+ while (stack <= frame && frame < stackend) {
61194+ /* if obj + len extends past the last frame, this
61195+ check won't pass and the next frame will be 0,
61196+ causing us to bail out and correctly report
61197+ the copy as invalid
61198+ */
61199+ if (obj + len <= frame)
61200+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61201+ oldframe = frame;
61202+ frame = *(const void * const *)frame;
61203+ }
61204+ return -1;
61205+#else
61206+ return 1;
61207+#endif
61208+}
61209+
61210+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61211+{
61212+ if (current->signal->curr_ip)
61213+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61214+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61215+ else
61216+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61217+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61218+ dump_stack();
61219+ gr_handle_kernel_exploit();
61220+ do_group_exit(SIGKILL);
61221+}
61222+#endif
61223+
61224+#ifdef CONFIG_PAX_USERCOPY
61225+
61226+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61227+{
61228+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61229+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61230+#ifdef CONFIG_MODULES
61231+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61232+#else
61233+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61234+#endif
61235+
61236+#else
61237+ unsigned long textlow = (unsigned long)_stext;
61238+ unsigned long texthigh = (unsigned long)_etext;
61239+
61240+#ifdef CONFIG_X86_64
61241+ /* check against linear mapping as well */
61242+ if (high > (unsigned long)__va(__pa(textlow)) &&
61243+ low < (unsigned long)__va(__pa(texthigh)))
61244+ return true;
61245+#endif
61246+
61247+#endif
61248+
61249+ if (high <= textlow || low >= texthigh)
61250+ return false;
61251+ else
61252+ return true;
61253+}
61254+#endif
61255+
61256+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61257+{
61258+#ifdef CONFIG_PAX_USERCOPY
61259+ const char *type;
61260+#endif
61261+
61262+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61263+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61264+ unsigned long currentsp = (unsigned long)&stackstart;
61265+ if (unlikely((currentsp < stackstart + 512 ||
61266+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61267+ BUG();
61268+#endif
61269+
61270+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61271+ if (const_size)
61272+ return;
61273+#endif
61274+
61275+#ifdef CONFIG_PAX_USERCOPY
61276+ if (!n)
61277+ return;
61278+
61279+ type = check_heap_object(ptr, n);
61280+ if (!type) {
61281+ int ret = check_stack_object(ptr, n);
61282+ if (ret == 1 || ret == 2)
61283+ return;
61284+ if (ret == 0) {
61285+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61286+ type = "<kernel text>";
61287+ else
61288+ return;
61289+ } else
61290+ type = "<process stack>";
61291+ }
61292+
61293+ pax_report_usercopy(ptr, n, to_user, type);
61294+#endif
61295+
61296+}
61297+EXPORT_SYMBOL(__check_object_size);
61298+
61299+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61300+void pax_track_stack(void)
61301+{
61302+ unsigned long sp = (unsigned long)&sp;
61303+ if (sp < current_thread_info()->lowest_stack &&
61304+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61305+ current_thread_info()->lowest_stack = sp;
61306+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61307+ BUG();
61308+}
61309+EXPORT_SYMBOL(pax_track_stack);
61310+#endif
61311+
61312+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61313+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61314+{
61315+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
61316+ dump_stack();
61317+ do_group_exit(SIGKILL);
61318+}
61319+EXPORT_SYMBOL(report_size_overflow);
61320+#endif
61321diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61322index 9f9992b..8b59411 100644
61323--- a/fs/ext2/balloc.c
61324+++ b/fs/ext2/balloc.c
61325@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61326
61327 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61328 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61329- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61330+ if (free_blocks < root_blocks + 1 &&
61331 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61332 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61333- !in_group_p (sbi->s_resgid))) {
61334+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61335 return 0;
61336 }
61337 return 1;
61338diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61339index ae55fdd..5e64c27 100644
61340--- a/fs/ext2/super.c
61341+++ b/fs/ext2/super.c
61342@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61343 #ifdef CONFIG_EXT2_FS_XATTR
61344 if (test_opt(sb, XATTR_USER))
61345 seq_puts(seq, ",user_xattr");
61346- if (!test_opt(sb, XATTR_USER) &&
61347- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61348+ if (!test_opt(sb, XATTR_USER))
61349 seq_puts(seq, ",nouser_xattr");
61350- }
61351 #endif
61352
61353 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61354@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61355 if (def_mount_opts & EXT2_DEFM_UID16)
61356 set_opt(sbi->s_mount_opt, NO_UID32);
61357 #ifdef CONFIG_EXT2_FS_XATTR
61358- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61359- set_opt(sbi->s_mount_opt, XATTR_USER);
61360+ /* always enable user xattrs */
61361+ set_opt(sbi->s_mount_opt, XATTR_USER);
61362 #endif
61363 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61364 if (def_mount_opts & EXT2_DEFM_ACL)
61365diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61366index 9142614..97484fa 100644
61367--- a/fs/ext2/xattr.c
61368+++ b/fs/ext2/xattr.c
61369@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61370 struct buffer_head *bh = NULL;
61371 struct ext2_xattr_entry *entry;
61372 char *end;
61373- size_t rest = buffer_size;
61374+ size_t rest = buffer_size, total_size = 0;
61375 int error;
61376
61377 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61378@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61379 buffer += size;
61380 }
61381 rest -= size;
61382+ total_size += size;
61383 }
61384 }
61385- error = buffer_size - rest; /* total size */
61386+ error = total_size;
61387
61388 cleanup:
61389 brelse(bh);
61390diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61391index 158b5d4..2432610 100644
61392--- a/fs/ext3/balloc.c
61393+++ b/fs/ext3/balloc.c
61394@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61395
61396 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61397 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61398- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61399+ if (free_blocks < root_blocks + 1 &&
61400 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61401 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61402- !in_group_p (sbi->s_resgid))) {
61403+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61404 return 0;
61405 }
61406 return 1;
61407diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61408index 9b4e7d7..048d025 100644
61409--- a/fs/ext3/super.c
61410+++ b/fs/ext3/super.c
61411@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61412 #ifdef CONFIG_EXT3_FS_XATTR
61413 if (test_opt(sb, XATTR_USER))
61414 seq_puts(seq, ",user_xattr");
61415- if (!test_opt(sb, XATTR_USER) &&
61416- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61417+ if (!test_opt(sb, XATTR_USER))
61418 seq_puts(seq, ",nouser_xattr");
61419- }
61420 #endif
61421 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61422 if (test_opt(sb, POSIX_ACL))
61423@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61424 if (def_mount_opts & EXT3_DEFM_UID16)
61425 set_opt(sbi->s_mount_opt, NO_UID32);
61426 #ifdef CONFIG_EXT3_FS_XATTR
61427- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61428- set_opt(sbi->s_mount_opt, XATTR_USER);
61429+ /* always enable user xattrs */
61430+ set_opt(sbi->s_mount_opt, XATTR_USER);
61431 #endif
61432 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61433 if (def_mount_opts & EXT3_DEFM_ACL)
61434diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61435index c6874be..f8a6ae8 100644
61436--- a/fs/ext3/xattr.c
61437+++ b/fs/ext3/xattr.c
61438@@ -330,7 +330,7 @@ static int
61439 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61440 char *buffer, size_t buffer_size)
61441 {
61442- size_t rest = buffer_size;
61443+ size_t rest = buffer_size, total_size = 0;
61444
61445 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61446 const struct xattr_handler *handler =
61447@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61448 buffer += size;
61449 }
61450 rest -= size;
61451+ total_size += size;
61452 }
61453 }
61454- return buffer_size - rest;
61455+ return total_size;
61456 }
61457
61458 static int
61459diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61460index 83a6f49..d4e4d03 100644
61461--- a/fs/ext4/balloc.c
61462+++ b/fs/ext4/balloc.c
61463@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61464 /* Hm, nope. Are (enough) root reserved clusters available? */
61465 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61466 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61467- capable(CAP_SYS_RESOURCE) ||
61468- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61469+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61470+ capable_nolog(CAP_SYS_RESOURCE)) {
61471
61472 if (free_clusters >= (nclusters + dirty_clusters +
61473 resv_clusters))
61474diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61475index a75fba6..8235fca 100644
61476--- a/fs/ext4/ext4.h
61477+++ b/fs/ext4/ext4.h
61478@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
61479 unsigned long s_mb_last_start;
61480
61481 /* stats for buddy allocator */
61482- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61483- atomic_t s_bal_success; /* we found long enough chunks */
61484- atomic_t s_bal_allocated; /* in blocks */
61485- atomic_t s_bal_ex_scanned; /* total extents scanned */
61486- atomic_t s_bal_goals; /* goal hits */
61487- atomic_t s_bal_breaks; /* too long searches */
61488- atomic_t s_bal_2orders; /* 2^order hits */
61489+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61490+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61491+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61492+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61493+ atomic_unchecked_t s_bal_goals; /* goal hits */
61494+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61495+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61496 spinlock_t s_bal_lock;
61497 unsigned long s_mb_buddies_generated;
61498 unsigned long long s_mb_generation_time;
61499- atomic_t s_mb_lost_chunks;
61500- atomic_t s_mb_preallocated;
61501- atomic_t s_mb_discarded;
61502+ atomic_unchecked_t s_mb_lost_chunks;
61503+ atomic_unchecked_t s_mb_preallocated;
61504+ atomic_unchecked_t s_mb_discarded;
61505 atomic_t s_lock_busy;
61506
61507 /* locality groups */
61508diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61509index 8d1e602..abf497b 100644
61510--- a/fs/ext4/mballoc.c
61511+++ b/fs/ext4/mballoc.c
61512@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61513 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61514
61515 if (EXT4_SB(sb)->s_mb_stats)
61516- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61517+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61518
61519 break;
61520 }
61521@@ -2211,7 +2211,7 @@ repeat:
61522 ac->ac_status = AC_STATUS_CONTINUE;
61523 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61524 cr = 3;
61525- atomic_inc(&sbi->s_mb_lost_chunks);
61526+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61527 goto repeat;
61528 }
61529 }
61530@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
61531 if (sbi->s_mb_stats) {
61532 ext4_msg(sb, KERN_INFO,
61533 "mballoc: %u blocks %u reqs (%u success)",
61534- atomic_read(&sbi->s_bal_allocated),
61535- atomic_read(&sbi->s_bal_reqs),
61536- atomic_read(&sbi->s_bal_success));
61537+ atomic_read_unchecked(&sbi->s_bal_allocated),
61538+ atomic_read_unchecked(&sbi->s_bal_reqs),
61539+ atomic_read_unchecked(&sbi->s_bal_success));
61540 ext4_msg(sb, KERN_INFO,
61541 "mballoc: %u extents scanned, %u goal hits, "
61542 "%u 2^N hits, %u breaks, %u lost",
61543- atomic_read(&sbi->s_bal_ex_scanned),
61544- atomic_read(&sbi->s_bal_goals),
61545- atomic_read(&sbi->s_bal_2orders),
61546- atomic_read(&sbi->s_bal_breaks),
61547- atomic_read(&sbi->s_mb_lost_chunks));
61548+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61549+ atomic_read_unchecked(&sbi->s_bal_goals),
61550+ atomic_read_unchecked(&sbi->s_bal_2orders),
61551+ atomic_read_unchecked(&sbi->s_bal_breaks),
61552+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61553 ext4_msg(sb, KERN_INFO,
61554 "mballoc: %lu generated and it took %Lu",
61555 sbi->s_mb_buddies_generated,
61556 sbi->s_mb_generation_time);
61557 ext4_msg(sb, KERN_INFO,
61558 "mballoc: %u preallocated, %u discarded",
61559- atomic_read(&sbi->s_mb_preallocated),
61560- atomic_read(&sbi->s_mb_discarded));
61561+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61562+ atomic_read_unchecked(&sbi->s_mb_discarded));
61563 }
61564
61565 free_percpu(sbi->s_locality_groups);
61566@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61567 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61568
61569 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61570- atomic_inc(&sbi->s_bal_reqs);
61571- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61572+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61573+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61574 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61575- atomic_inc(&sbi->s_bal_success);
61576- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61577+ atomic_inc_unchecked(&sbi->s_bal_success);
61578+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61579 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61580 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61581- atomic_inc(&sbi->s_bal_goals);
61582+ atomic_inc_unchecked(&sbi->s_bal_goals);
61583 if (ac->ac_found > sbi->s_mb_max_to_scan)
61584- atomic_inc(&sbi->s_bal_breaks);
61585+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61586 }
61587
61588 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61589@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
61590 trace_ext4_mb_new_inode_pa(ac, pa);
61591
61592 ext4_mb_use_inode_pa(ac, pa);
61593- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
61594+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
61595
61596 ei = EXT4_I(ac->ac_inode);
61597 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61598@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
61599 trace_ext4_mb_new_group_pa(ac, pa);
61600
61601 ext4_mb_use_group_pa(ac, pa);
61602- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61603+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61604
61605 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61606 lg = ac->ac_lg;
61607@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
61608 * from the bitmap and continue.
61609 */
61610 }
61611- atomic_add(free, &sbi->s_mb_discarded);
61612+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
61613
61614 return err;
61615 }
61616@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
61617 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
61618 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
61619 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
61620- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61621+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61622 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
61623
61624 return 0;
61625diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
61626index 8313ca3..8a37d08 100644
61627--- a/fs/ext4/mmp.c
61628+++ b/fs/ext4/mmp.c
61629@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
61630 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
61631 const char *function, unsigned int line, const char *msg)
61632 {
61633- __ext4_warning(sb, function, line, msg);
61634+ __ext4_warning(sb, function, line, "%s", msg);
61635 __ext4_warning(sb, function, line,
61636 "MMP failure info: last update time: %llu, last update "
61637 "node: %s, last update device: %s\n",
61638diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
61639index 8a8ec62..1b02de5 100644
61640--- a/fs/ext4/resize.c
61641+++ b/fs/ext4/resize.c
61642@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61643
61644 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
61645 for (count2 = count; count > 0; count -= count2, block += count2) {
61646- ext4_fsblk_t start;
61647+ ext4_fsblk_t start, diff;
61648 struct buffer_head *bh;
61649 ext4_group_t group;
61650 int err;
61651@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61652 start = ext4_group_first_block_no(sb, group);
61653 group -= flex_gd->groups[0].group;
61654
61655- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
61656- if (count2 > count)
61657- count2 = count;
61658-
61659 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
61660 BUG_ON(flex_gd->count > 1);
61661 continue;
61662@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61663 err = ext4_journal_get_write_access(handle, bh);
61664 if (err)
61665 return err;
61666+
61667+ diff = block - start;
61668+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
61669+ if (count2 > count)
61670+ count2 = count;
61671+
61672 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
61673- block - start, count2);
61674- ext4_set_bits(bh->b_data, block - start, count2);
61675+ diff, count2);
61676+ ext4_set_bits(bh->b_data, diff, count2);
61677
61678 err = ext4_handle_dirty_metadata(handle, NULL, bh);
61679 if (unlikely(err))
61680diff --git a/fs/ext4/super.c b/fs/ext4/super.c
61681index fc29b2c..6c8b255 100644
61682--- a/fs/ext4/super.c
61683+++ b/fs/ext4/super.c
61684@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
61685 }
61686
61687 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
61688-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61689+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61690 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
61691
61692 #ifdef CONFIG_QUOTA
61693@@ -2440,7 +2440,7 @@ struct ext4_attr {
61694 int offset;
61695 int deprecated_val;
61696 } u;
61697-};
61698+} __do_const;
61699
61700 static int parse_strtoull(const char *buf,
61701 unsigned long long max, unsigned long long *value)
61702diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
61703index 1e09fc7..0400dd4 100644
61704--- a/fs/ext4/xattr.c
61705+++ b/fs/ext4/xattr.c
61706@@ -399,7 +399,7 @@ static int
61707 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61708 char *buffer, size_t buffer_size)
61709 {
61710- size_t rest = buffer_size;
61711+ size_t rest = buffer_size, total_size = 0;
61712
61713 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
61714 const struct xattr_handler *handler =
61715@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61716 buffer += size;
61717 }
61718 rest -= size;
61719+ total_size += size;
61720 }
61721 }
61722- return buffer_size - rest;
61723+ return total_size;
61724 }
61725
61726 static int
61727diff --git a/fs/fcntl.c b/fs/fcntl.c
61728index ee85cd4..9dd0d20 100644
61729--- a/fs/fcntl.c
61730+++ b/fs/fcntl.c
61731@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
61732 int force)
61733 {
61734 security_file_set_fowner(filp);
61735+ if (gr_handle_chroot_fowner(pid, type))
61736+ return;
61737+ if (gr_check_protected_task_fowner(pid, type))
61738+ return;
61739 f_modown(filp, pid, type, force);
61740 }
61741 EXPORT_SYMBOL(__f_setown);
61742diff --git a/fs/fhandle.c b/fs/fhandle.c
61743index 999ff5c..2281df9 100644
61744--- a/fs/fhandle.c
61745+++ b/fs/fhandle.c
61746@@ -8,6 +8,7 @@
61747 #include <linux/fs_struct.h>
61748 #include <linux/fsnotify.h>
61749 #include <linux/personality.h>
61750+#include <linux/grsecurity.h>
61751 #include <asm/uaccess.h>
61752 #include "internal.h"
61753 #include "mount.h"
61754@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
61755 } else
61756 retval = 0;
61757 /* copy the mount id */
61758- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
61759- sizeof(*mnt_id)) ||
61760+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
61761 copy_to_user(ufh, handle,
61762 sizeof(struct file_handle) + handle_bytes))
61763 retval = -EFAULT;
61764@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61765 * the directory. Ideally we would like CAP_DAC_SEARCH.
61766 * But we don't have that
61767 */
61768- if (!capable(CAP_DAC_READ_SEARCH)) {
61769+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
61770 retval = -EPERM;
61771 goto out_err;
61772 }
61773@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61774 goto out_err;
61775 }
61776 /* copy the full handle */
61777- if (copy_from_user(handle, ufh,
61778- sizeof(struct file_handle) +
61779+ *handle = f_handle;
61780+ if (copy_from_user(&handle->f_handle,
61781+ &ufh->f_handle,
61782 f_handle.handle_bytes)) {
61783 retval = -EFAULT;
61784 goto out_handle;
61785diff --git a/fs/file.c b/fs/file.c
61786index ee738ea..f6c1562 100644
61787--- a/fs/file.c
61788+++ b/fs/file.c
61789@@ -16,6 +16,7 @@
61790 #include <linux/slab.h>
61791 #include <linux/vmalloc.h>
61792 #include <linux/file.h>
61793+#include <linux/security.h>
61794 #include <linux/fdtable.h>
61795 #include <linux/bitops.h>
61796 #include <linux/interrupt.h>
61797@@ -139,7 +140,7 @@ out:
61798 * Return <0 error code on error; 1 on successful completion.
61799 * The files->file_lock should be held on entry, and will be held on exit.
61800 */
61801-static int expand_fdtable(struct files_struct *files, int nr)
61802+static int expand_fdtable(struct files_struct *files, unsigned int nr)
61803 __releases(files->file_lock)
61804 __acquires(files->file_lock)
61805 {
61806@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
61807 * expanded and execution may have blocked.
61808 * The files->file_lock should be held on entry, and will be held on exit.
61809 */
61810-static int expand_files(struct files_struct *files, int nr)
61811+static int expand_files(struct files_struct *files, unsigned int nr)
61812 {
61813 struct fdtable *fdt;
61814
61815@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
61816 if (!file)
61817 return __close_fd(files, fd);
61818
61819+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
61820 if (fd >= rlimit(RLIMIT_NOFILE))
61821 return -EBADF;
61822
61823@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
61824 if (unlikely(oldfd == newfd))
61825 return -EINVAL;
61826
61827+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
61828 if (newfd >= rlimit(RLIMIT_NOFILE))
61829 return -EBADF;
61830
61831@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
61832 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
61833 {
61834 int err;
61835+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
61836 if (from >= rlimit(RLIMIT_NOFILE))
61837 return -EINVAL;
61838 err = alloc_fd(from, flags);
61839diff --git a/fs/filesystems.c b/fs/filesystems.c
61840index 5797d45..7d7d79a 100644
61841--- a/fs/filesystems.c
61842+++ b/fs/filesystems.c
61843@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
61844 int len = dot ? dot - name : strlen(name);
61845
61846 fs = __get_fs_type(name, len);
61847+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61848+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
61849+#else
61850 if (!fs && (request_module("fs-%.*s", len, name) == 0))
61851+#endif
61852 fs = __get_fs_type(name, len);
61853
61854 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
61855diff --git a/fs/fs_struct.c b/fs/fs_struct.c
61856index 7dca743..2f2786d 100644
61857--- a/fs/fs_struct.c
61858+++ b/fs/fs_struct.c
61859@@ -4,6 +4,7 @@
61860 #include <linux/path.h>
61861 #include <linux/slab.h>
61862 #include <linux/fs_struct.h>
61863+#include <linux/grsecurity.h>
61864 #include "internal.h"
61865
61866 /*
61867@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
61868 struct path old_root;
61869
61870 path_get(path);
61871+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
61872 spin_lock(&fs->lock);
61873 write_seqcount_begin(&fs->seq);
61874 old_root = fs->root;
61875 fs->root = *path;
61876+ gr_set_chroot_entries(current, path);
61877 write_seqcount_end(&fs->seq);
61878 spin_unlock(&fs->lock);
61879- if (old_root.dentry)
61880+ if (old_root.dentry) {
61881+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
61882 path_put(&old_root);
61883+ }
61884 }
61885
61886 /*
61887@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61888 int hits = 0;
61889 spin_lock(&fs->lock);
61890 write_seqcount_begin(&fs->seq);
61891+ /* this root replacement is only done by pivot_root,
61892+ leave grsec's chroot tagging alone for this task
61893+ so that a pivoted root isn't treated as a chroot
61894+ */
61895 hits += replace_path(&fs->root, old_root, new_root);
61896 hits += replace_path(&fs->pwd, old_root, new_root);
61897 write_seqcount_end(&fs->seq);
61898@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61899
61900 void free_fs_struct(struct fs_struct *fs)
61901 {
61902+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
61903 path_put(&fs->root);
61904 path_put(&fs->pwd);
61905 kmem_cache_free(fs_cachep, fs);
61906@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
61907 task_lock(tsk);
61908 spin_lock(&fs->lock);
61909 tsk->fs = NULL;
61910- kill = !--fs->users;
61911+ gr_clear_chroot_entries(tsk);
61912+ kill = !atomic_dec_return(&fs->users);
61913 spin_unlock(&fs->lock);
61914 task_unlock(tsk);
61915 if (kill)
61916@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
61917 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
61918 /* We don't need to lock fs - think why ;-) */
61919 if (fs) {
61920- fs->users = 1;
61921+ atomic_set(&fs->users, 1);
61922 fs->in_exec = 0;
61923 spin_lock_init(&fs->lock);
61924 seqcount_init(&fs->seq);
61925@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
61926 spin_lock(&old->lock);
61927 fs->root = old->root;
61928 path_get(&fs->root);
61929+ /* instead of calling gr_set_chroot_entries here,
61930+ we call it from every caller of this function
61931+ */
61932 fs->pwd = old->pwd;
61933 path_get(&fs->pwd);
61934 spin_unlock(&old->lock);
61935@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
61936
61937 task_lock(current);
61938 spin_lock(&fs->lock);
61939- kill = !--fs->users;
61940+ kill = !atomic_dec_return(&fs->users);
61941 current->fs = new_fs;
61942+ gr_set_chroot_entries(current, &new_fs->root);
61943 spin_unlock(&fs->lock);
61944 task_unlock(current);
61945
61946@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
61947
61948 int current_umask(void)
61949 {
61950- return current->fs->umask;
61951+ return current->fs->umask | gr_acl_umask();
61952 }
61953 EXPORT_SYMBOL(current_umask);
61954
61955 /* to be mentioned only in INIT_TASK */
61956 struct fs_struct init_fs = {
61957- .users = 1,
61958+ .users = ATOMIC_INIT(1),
61959 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
61960 .seq = SEQCNT_ZERO(init_fs.seq),
61961 .umask = 0022,
61962diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
61963index 89acec7..a575262 100644
61964--- a/fs/fscache/cookie.c
61965+++ b/fs/fscache/cookie.c
61966@@ -19,7 +19,7 @@
61967
61968 struct kmem_cache *fscache_cookie_jar;
61969
61970-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
61971+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
61972
61973 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
61974 static int fscache_alloc_object(struct fscache_cache *cache,
61975@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
61976 parent ? (char *) parent->def->name : "<no-parent>",
61977 def->name, netfs_data, enable);
61978
61979- fscache_stat(&fscache_n_acquires);
61980+ fscache_stat_unchecked(&fscache_n_acquires);
61981
61982 /* if there's no parent cookie, then we don't create one here either */
61983 if (!parent) {
61984- fscache_stat(&fscache_n_acquires_null);
61985+ fscache_stat_unchecked(&fscache_n_acquires_null);
61986 _leave(" [no parent]");
61987 return NULL;
61988 }
61989@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
61990 /* allocate and initialise a cookie */
61991 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
61992 if (!cookie) {
61993- fscache_stat(&fscache_n_acquires_oom);
61994+ fscache_stat_unchecked(&fscache_n_acquires_oom);
61995 _leave(" [ENOMEM]");
61996 return NULL;
61997 }
61998@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
61999
62000 switch (cookie->def->type) {
62001 case FSCACHE_COOKIE_TYPE_INDEX:
62002- fscache_stat(&fscache_n_cookie_index);
62003+ fscache_stat_unchecked(&fscache_n_cookie_index);
62004 break;
62005 case FSCACHE_COOKIE_TYPE_DATAFILE:
62006- fscache_stat(&fscache_n_cookie_data);
62007+ fscache_stat_unchecked(&fscache_n_cookie_data);
62008 break;
62009 default:
62010- fscache_stat(&fscache_n_cookie_special);
62011+ fscache_stat_unchecked(&fscache_n_cookie_special);
62012 break;
62013 }
62014
62015@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62016 } else {
62017 atomic_dec(&parent->n_children);
62018 __fscache_cookie_put(cookie);
62019- fscache_stat(&fscache_n_acquires_nobufs);
62020+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62021 _leave(" = NULL");
62022 return NULL;
62023 }
62024@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62025 }
62026 }
62027
62028- fscache_stat(&fscache_n_acquires_ok);
62029+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62030 _leave(" = %p", cookie);
62031 return cookie;
62032 }
62033@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62034 cache = fscache_select_cache_for_object(cookie->parent);
62035 if (!cache) {
62036 up_read(&fscache_addremove_sem);
62037- fscache_stat(&fscache_n_acquires_no_cache);
62038+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62039 _leave(" = -ENOMEDIUM [no cache]");
62040 return -ENOMEDIUM;
62041 }
62042@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62043 object = cache->ops->alloc_object(cache, cookie);
62044 fscache_stat_d(&fscache_n_cop_alloc_object);
62045 if (IS_ERR(object)) {
62046- fscache_stat(&fscache_n_object_no_alloc);
62047+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62048 ret = PTR_ERR(object);
62049 goto error;
62050 }
62051
62052- fscache_stat(&fscache_n_object_alloc);
62053+ fscache_stat_unchecked(&fscache_n_object_alloc);
62054
62055- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62056+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62057
62058 _debug("ALLOC OBJ%x: %s {%lx}",
62059 object->debug_id, cookie->def->name, object->events);
62060@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62061
62062 _enter("{%s}", cookie->def->name);
62063
62064- fscache_stat(&fscache_n_invalidates);
62065+ fscache_stat_unchecked(&fscache_n_invalidates);
62066
62067 /* Only permit invalidation of data files. Invalidating an index will
62068 * require the caller to release all its attachments to the tree rooted
62069@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62070 {
62071 struct fscache_object *object;
62072
62073- fscache_stat(&fscache_n_updates);
62074+ fscache_stat_unchecked(&fscache_n_updates);
62075
62076 if (!cookie) {
62077- fscache_stat(&fscache_n_updates_null);
62078+ fscache_stat_unchecked(&fscache_n_updates_null);
62079 _leave(" [no cookie]");
62080 return;
62081 }
62082@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62083 */
62084 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62085 {
62086- fscache_stat(&fscache_n_relinquishes);
62087+ fscache_stat_unchecked(&fscache_n_relinquishes);
62088 if (retire)
62089- fscache_stat(&fscache_n_relinquishes_retire);
62090+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62091
62092 if (!cookie) {
62093- fscache_stat(&fscache_n_relinquishes_null);
62094+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62095 _leave(" [no cookie]");
62096 return;
62097 }
62098@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62099 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62100 goto inconsistent;
62101
62102- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62103+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62104
62105 __fscache_use_cookie(cookie);
62106 if (fscache_submit_op(object, op) < 0)
62107diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62108index 7872a62..d91b19f 100644
62109--- a/fs/fscache/internal.h
62110+++ b/fs/fscache/internal.h
62111@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62112 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62113 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62114 struct fscache_operation *,
62115- atomic_t *,
62116- atomic_t *,
62117+ atomic_unchecked_t *,
62118+ atomic_unchecked_t *,
62119 void (*)(struct fscache_operation *));
62120 extern void fscache_invalidate_writes(struct fscache_cookie *);
62121
62122@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62123 * stats.c
62124 */
62125 #ifdef CONFIG_FSCACHE_STATS
62126-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62127-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62128+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62129+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62130
62131-extern atomic_t fscache_n_op_pend;
62132-extern atomic_t fscache_n_op_run;
62133-extern atomic_t fscache_n_op_enqueue;
62134-extern atomic_t fscache_n_op_deferred_release;
62135-extern atomic_t fscache_n_op_release;
62136-extern atomic_t fscache_n_op_gc;
62137-extern atomic_t fscache_n_op_cancelled;
62138-extern atomic_t fscache_n_op_rejected;
62139+extern atomic_unchecked_t fscache_n_op_pend;
62140+extern atomic_unchecked_t fscache_n_op_run;
62141+extern atomic_unchecked_t fscache_n_op_enqueue;
62142+extern atomic_unchecked_t fscache_n_op_deferred_release;
62143+extern atomic_unchecked_t fscache_n_op_release;
62144+extern atomic_unchecked_t fscache_n_op_gc;
62145+extern atomic_unchecked_t fscache_n_op_cancelled;
62146+extern atomic_unchecked_t fscache_n_op_rejected;
62147
62148-extern atomic_t fscache_n_attr_changed;
62149-extern atomic_t fscache_n_attr_changed_ok;
62150-extern atomic_t fscache_n_attr_changed_nobufs;
62151-extern atomic_t fscache_n_attr_changed_nomem;
62152-extern atomic_t fscache_n_attr_changed_calls;
62153+extern atomic_unchecked_t fscache_n_attr_changed;
62154+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62155+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62156+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62157+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62158
62159-extern atomic_t fscache_n_allocs;
62160-extern atomic_t fscache_n_allocs_ok;
62161-extern atomic_t fscache_n_allocs_wait;
62162-extern atomic_t fscache_n_allocs_nobufs;
62163-extern atomic_t fscache_n_allocs_intr;
62164-extern atomic_t fscache_n_allocs_object_dead;
62165-extern atomic_t fscache_n_alloc_ops;
62166-extern atomic_t fscache_n_alloc_op_waits;
62167+extern atomic_unchecked_t fscache_n_allocs;
62168+extern atomic_unchecked_t fscache_n_allocs_ok;
62169+extern atomic_unchecked_t fscache_n_allocs_wait;
62170+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62171+extern atomic_unchecked_t fscache_n_allocs_intr;
62172+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62173+extern atomic_unchecked_t fscache_n_alloc_ops;
62174+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62175
62176-extern atomic_t fscache_n_retrievals;
62177-extern atomic_t fscache_n_retrievals_ok;
62178-extern atomic_t fscache_n_retrievals_wait;
62179-extern atomic_t fscache_n_retrievals_nodata;
62180-extern atomic_t fscache_n_retrievals_nobufs;
62181-extern atomic_t fscache_n_retrievals_intr;
62182-extern atomic_t fscache_n_retrievals_nomem;
62183-extern atomic_t fscache_n_retrievals_object_dead;
62184-extern atomic_t fscache_n_retrieval_ops;
62185-extern atomic_t fscache_n_retrieval_op_waits;
62186+extern atomic_unchecked_t fscache_n_retrievals;
62187+extern atomic_unchecked_t fscache_n_retrievals_ok;
62188+extern atomic_unchecked_t fscache_n_retrievals_wait;
62189+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62190+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62191+extern atomic_unchecked_t fscache_n_retrievals_intr;
62192+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62193+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62194+extern atomic_unchecked_t fscache_n_retrieval_ops;
62195+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62196
62197-extern atomic_t fscache_n_stores;
62198-extern atomic_t fscache_n_stores_ok;
62199-extern atomic_t fscache_n_stores_again;
62200-extern atomic_t fscache_n_stores_nobufs;
62201-extern atomic_t fscache_n_stores_oom;
62202-extern atomic_t fscache_n_store_ops;
62203-extern atomic_t fscache_n_store_calls;
62204-extern atomic_t fscache_n_store_pages;
62205-extern atomic_t fscache_n_store_radix_deletes;
62206-extern atomic_t fscache_n_store_pages_over_limit;
62207+extern atomic_unchecked_t fscache_n_stores;
62208+extern atomic_unchecked_t fscache_n_stores_ok;
62209+extern atomic_unchecked_t fscache_n_stores_again;
62210+extern atomic_unchecked_t fscache_n_stores_nobufs;
62211+extern atomic_unchecked_t fscache_n_stores_oom;
62212+extern atomic_unchecked_t fscache_n_store_ops;
62213+extern atomic_unchecked_t fscache_n_store_calls;
62214+extern atomic_unchecked_t fscache_n_store_pages;
62215+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62216+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62217
62218-extern atomic_t fscache_n_store_vmscan_not_storing;
62219-extern atomic_t fscache_n_store_vmscan_gone;
62220-extern atomic_t fscache_n_store_vmscan_busy;
62221-extern atomic_t fscache_n_store_vmscan_cancelled;
62222-extern atomic_t fscache_n_store_vmscan_wait;
62223+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62224+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62225+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62226+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62227+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62228
62229-extern atomic_t fscache_n_marks;
62230-extern atomic_t fscache_n_uncaches;
62231+extern atomic_unchecked_t fscache_n_marks;
62232+extern atomic_unchecked_t fscache_n_uncaches;
62233
62234-extern atomic_t fscache_n_acquires;
62235-extern atomic_t fscache_n_acquires_null;
62236-extern atomic_t fscache_n_acquires_no_cache;
62237-extern atomic_t fscache_n_acquires_ok;
62238-extern atomic_t fscache_n_acquires_nobufs;
62239-extern atomic_t fscache_n_acquires_oom;
62240+extern atomic_unchecked_t fscache_n_acquires;
62241+extern atomic_unchecked_t fscache_n_acquires_null;
62242+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62243+extern atomic_unchecked_t fscache_n_acquires_ok;
62244+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62245+extern atomic_unchecked_t fscache_n_acquires_oom;
62246
62247-extern atomic_t fscache_n_invalidates;
62248-extern atomic_t fscache_n_invalidates_run;
62249+extern atomic_unchecked_t fscache_n_invalidates;
62250+extern atomic_unchecked_t fscache_n_invalidates_run;
62251
62252-extern atomic_t fscache_n_updates;
62253-extern atomic_t fscache_n_updates_null;
62254-extern atomic_t fscache_n_updates_run;
62255+extern atomic_unchecked_t fscache_n_updates;
62256+extern atomic_unchecked_t fscache_n_updates_null;
62257+extern atomic_unchecked_t fscache_n_updates_run;
62258
62259-extern atomic_t fscache_n_relinquishes;
62260-extern atomic_t fscache_n_relinquishes_null;
62261-extern atomic_t fscache_n_relinquishes_waitcrt;
62262-extern atomic_t fscache_n_relinquishes_retire;
62263+extern atomic_unchecked_t fscache_n_relinquishes;
62264+extern atomic_unchecked_t fscache_n_relinquishes_null;
62265+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62266+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62267
62268-extern atomic_t fscache_n_cookie_index;
62269-extern atomic_t fscache_n_cookie_data;
62270-extern atomic_t fscache_n_cookie_special;
62271+extern atomic_unchecked_t fscache_n_cookie_index;
62272+extern atomic_unchecked_t fscache_n_cookie_data;
62273+extern atomic_unchecked_t fscache_n_cookie_special;
62274
62275-extern atomic_t fscache_n_object_alloc;
62276-extern atomic_t fscache_n_object_no_alloc;
62277-extern atomic_t fscache_n_object_lookups;
62278-extern atomic_t fscache_n_object_lookups_negative;
62279-extern atomic_t fscache_n_object_lookups_positive;
62280-extern atomic_t fscache_n_object_lookups_timed_out;
62281-extern atomic_t fscache_n_object_created;
62282-extern atomic_t fscache_n_object_avail;
62283-extern atomic_t fscache_n_object_dead;
62284+extern atomic_unchecked_t fscache_n_object_alloc;
62285+extern atomic_unchecked_t fscache_n_object_no_alloc;
62286+extern atomic_unchecked_t fscache_n_object_lookups;
62287+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62288+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62289+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62290+extern atomic_unchecked_t fscache_n_object_created;
62291+extern atomic_unchecked_t fscache_n_object_avail;
62292+extern atomic_unchecked_t fscache_n_object_dead;
62293
62294-extern atomic_t fscache_n_checkaux_none;
62295-extern atomic_t fscache_n_checkaux_okay;
62296-extern atomic_t fscache_n_checkaux_update;
62297-extern atomic_t fscache_n_checkaux_obsolete;
62298+extern atomic_unchecked_t fscache_n_checkaux_none;
62299+extern atomic_unchecked_t fscache_n_checkaux_okay;
62300+extern atomic_unchecked_t fscache_n_checkaux_update;
62301+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62302
62303 extern atomic_t fscache_n_cop_alloc_object;
62304 extern atomic_t fscache_n_cop_lookup_object;
62305@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62306 atomic_inc(stat);
62307 }
62308
62309+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62310+{
62311+ atomic_inc_unchecked(stat);
62312+}
62313+
62314 static inline void fscache_stat_d(atomic_t *stat)
62315 {
62316 atomic_dec(stat);
62317@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62318
62319 #define __fscache_stat(stat) (NULL)
62320 #define fscache_stat(stat) do {} while (0)
62321+#define fscache_stat_unchecked(stat) do {} while (0)
62322 #define fscache_stat_d(stat) do {} while (0)
62323 #endif
62324
62325diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62326index da032da..0076ce7 100644
62327--- a/fs/fscache/object.c
62328+++ b/fs/fscache/object.c
62329@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62330 _debug("LOOKUP \"%s\" in \"%s\"",
62331 cookie->def->name, object->cache->tag->name);
62332
62333- fscache_stat(&fscache_n_object_lookups);
62334+ fscache_stat_unchecked(&fscache_n_object_lookups);
62335 fscache_stat(&fscache_n_cop_lookup_object);
62336 ret = object->cache->ops->lookup_object(object);
62337 fscache_stat_d(&fscache_n_cop_lookup_object);
62338@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62339 if (ret == -ETIMEDOUT) {
62340 /* probably stuck behind another object, so move this one to
62341 * the back of the queue */
62342- fscache_stat(&fscache_n_object_lookups_timed_out);
62343+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62344 _leave(" [timeout]");
62345 return NO_TRANSIT;
62346 }
62347@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62348 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62349
62350 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62351- fscache_stat(&fscache_n_object_lookups_negative);
62352+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62353
62354 /* Allow write requests to begin stacking up and read requests to begin
62355 * returning ENODATA.
62356@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62357 /* if we were still looking up, then we must have a positive lookup
62358 * result, in which case there may be data available */
62359 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62360- fscache_stat(&fscache_n_object_lookups_positive);
62361+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62362
62363 /* We do (presumably) have data */
62364 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62365@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62366 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62367 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62368 } else {
62369- fscache_stat(&fscache_n_object_created);
62370+ fscache_stat_unchecked(&fscache_n_object_created);
62371 }
62372
62373 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62374@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62375 fscache_stat_d(&fscache_n_cop_lookup_complete);
62376
62377 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62378- fscache_stat(&fscache_n_object_avail);
62379+ fscache_stat_unchecked(&fscache_n_object_avail);
62380
62381 _leave("");
62382 return transit_to(JUMPSTART_DEPS);
62383@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62384
62385 /* this just shifts the object release to the work processor */
62386 fscache_put_object(object);
62387- fscache_stat(&fscache_n_object_dead);
62388+ fscache_stat_unchecked(&fscache_n_object_dead);
62389
62390 _leave("");
62391 return transit_to(OBJECT_DEAD);
62392@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62393 enum fscache_checkaux result;
62394
62395 if (!object->cookie->def->check_aux) {
62396- fscache_stat(&fscache_n_checkaux_none);
62397+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62398 return FSCACHE_CHECKAUX_OKAY;
62399 }
62400
62401@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62402 switch (result) {
62403 /* entry okay as is */
62404 case FSCACHE_CHECKAUX_OKAY:
62405- fscache_stat(&fscache_n_checkaux_okay);
62406+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62407 break;
62408
62409 /* entry requires update */
62410 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62411- fscache_stat(&fscache_n_checkaux_update);
62412+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62413 break;
62414
62415 /* entry requires deletion */
62416 case FSCACHE_CHECKAUX_OBSOLETE:
62417- fscache_stat(&fscache_n_checkaux_obsolete);
62418+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62419 break;
62420
62421 default:
62422@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62423 {
62424 const struct fscache_state *s;
62425
62426- fscache_stat(&fscache_n_invalidates_run);
62427+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62428 fscache_stat(&fscache_n_cop_invalidate_object);
62429 s = _fscache_invalidate_object(object, event);
62430 fscache_stat_d(&fscache_n_cop_invalidate_object);
62431@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62432 {
62433 _enter("{OBJ%x},%d", object->debug_id, event);
62434
62435- fscache_stat(&fscache_n_updates_run);
62436+ fscache_stat_unchecked(&fscache_n_updates_run);
62437 fscache_stat(&fscache_n_cop_update_object);
62438 object->cache->ops->update_object(object);
62439 fscache_stat_d(&fscache_n_cop_update_object);
62440diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62441index e7b87a0..a85d47a 100644
62442--- a/fs/fscache/operation.c
62443+++ b/fs/fscache/operation.c
62444@@ -17,7 +17,7 @@
62445 #include <linux/slab.h>
62446 #include "internal.h"
62447
62448-atomic_t fscache_op_debug_id;
62449+atomic_unchecked_t fscache_op_debug_id;
62450 EXPORT_SYMBOL(fscache_op_debug_id);
62451
62452 /**
62453@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62454 ASSERTCMP(atomic_read(&op->usage), >, 0);
62455 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62456
62457- fscache_stat(&fscache_n_op_enqueue);
62458+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62459 switch (op->flags & FSCACHE_OP_TYPE) {
62460 case FSCACHE_OP_ASYNC:
62461 _debug("queue async");
62462@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62463 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62464 if (op->processor)
62465 fscache_enqueue_operation(op);
62466- fscache_stat(&fscache_n_op_run);
62467+ fscache_stat_unchecked(&fscache_n_op_run);
62468 }
62469
62470 /*
62471@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62472 if (object->n_in_progress > 0) {
62473 atomic_inc(&op->usage);
62474 list_add_tail(&op->pend_link, &object->pending_ops);
62475- fscache_stat(&fscache_n_op_pend);
62476+ fscache_stat_unchecked(&fscache_n_op_pend);
62477 } else if (!list_empty(&object->pending_ops)) {
62478 atomic_inc(&op->usage);
62479 list_add_tail(&op->pend_link, &object->pending_ops);
62480- fscache_stat(&fscache_n_op_pend);
62481+ fscache_stat_unchecked(&fscache_n_op_pend);
62482 fscache_start_operations(object);
62483 } else {
62484 ASSERTCMP(object->n_in_progress, ==, 0);
62485@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62486 object->n_exclusive++; /* reads and writes must wait */
62487 atomic_inc(&op->usage);
62488 list_add_tail(&op->pend_link, &object->pending_ops);
62489- fscache_stat(&fscache_n_op_pend);
62490+ fscache_stat_unchecked(&fscache_n_op_pend);
62491 ret = 0;
62492 } else {
62493 /* If we're in any other state, there must have been an I/O
62494@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62495 if (object->n_exclusive > 0) {
62496 atomic_inc(&op->usage);
62497 list_add_tail(&op->pend_link, &object->pending_ops);
62498- fscache_stat(&fscache_n_op_pend);
62499+ fscache_stat_unchecked(&fscache_n_op_pend);
62500 } else if (!list_empty(&object->pending_ops)) {
62501 atomic_inc(&op->usage);
62502 list_add_tail(&op->pend_link, &object->pending_ops);
62503- fscache_stat(&fscache_n_op_pend);
62504+ fscache_stat_unchecked(&fscache_n_op_pend);
62505 fscache_start_operations(object);
62506 } else {
62507 ASSERTCMP(object->n_exclusive, ==, 0);
62508@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62509 object->n_ops++;
62510 atomic_inc(&op->usage);
62511 list_add_tail(&op->pend_link, &object->pending_ops);
62512- fscache_stat(&fscache_n_op_pend);
62513+ fscache_stat_unchecked(&fscache_n_op_pend);
62514 ret = 0;
62515 } else if (fscache_object_is_dying(object)) {
62516- fscache_stat(&fscache_n_op_rejected);
62517+ fscache_stat_unchecked(&fscache_n_op_rejected);
62518 op->state = FSCACHE_OP_ST_CANCELLED;
62519 ret = -ENOBUFS;
62520 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62521@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62522 ret = -EBUSY;
62523 if (op->state == FSCACHE_OP_ST_PENDING) {
62524 ASSERT(!list_empty(&op->pend_link));
62525- fscache_stat(&fscache_n_op_cancelled);
62526+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62527 list_del_init(&op->pend_link);
62528 if (do_cancel)
62529 do_cancel(op);
62530@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62531 while (!list_empty(&object->pending_ops)) {
62532 op = list_entry(object->pending_ops.next,
62533 struct fscache_operation, pend_link);
62534- fscache_stat(&fscache_n_op_cancelled);
62535+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62536 list_del_init(&op->pend_link);
62537
62538 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62539@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62540 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62541 op->state = FSCACHE_OP_ST_DEAD;
62542
62543- fscache_stat(&fscache_n_op_release);
62544+ fscache_stat_unchecked(&fscache_n_op_release);
62545
62546 if (op->release) {
62547 op->release(op);
62548@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62549 * lock, and defer it otherwise */
62550 if (!spin_trylock(&object->lock)) {
62551 _debug("defer put");
62552- fscache_stat(&fscache_n_op_deferred_release);
62553+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62554
62555 cache = object->cache;
62556 spin_lock(&cache->op_gc_list_lock);
62557@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62558
62559 _debug("GC DEFERRED REL OBJ%x OP%x",
62560 object->debug_id, op->debug_id);
62561- fscache_stat(&fscache_n_op_gc);
62562+ fscache_stat_unchecked(&fscache_n_op_gc);
62563
62564 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62565 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62566diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62567index de33b3f..8be4d29 100644
62568--- a/fs/fscache/page.c
62569+++ b/fs/fscache/page.c
62570@@ -74,7 +74,7 @@ try_again:
62571 val = radix_tree_lookup(&cookie->stores, page->index);
62572 if (!val) {
62573 rcu_read_unlock();
62574- fscache_stat(&fscache_n_store_vmscan_not_storing);
62575+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62576 __fscache_uncache_page(cookie, page);
62577 return true;
62578 }
62579@@ -104,11 +104,11 @@ try_again:
62580 spin_unlock(&cookie->stores_lock);
62581
62582 if (xpage) {
62583- fscache_stat(&fscache_n_store_vmscan_cancelled);
62584- fscache_stat(&fscache_n_store_radix_deletes);
62585+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62586+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62587 ASSERTCMP(xpage, ==, page);
62588 } else {
62589- fscache_stat(&fscache_n_store_vmscan_gone);
62590+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62591 }
62592
62593 wake_up_bit(&cookie->flags, 0);
62594@@ -123,11 +123,11 @@ page_busy:
62595 * sleeping on memory allocation, so we may need to impose a timeout
62596 * too. */
62597 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
62598- fscache_stat(&fscache_n_store_vmscan_busy);
62599+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
62600 return false;
62601 }
62602
62603- fscache_stat(&fscache_n_store_vmscan_wait);
62604+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
62605 if (!release_page_wait_timeout(cookie, page))
62606 _debug("fscache writeout timeout page: %p{%lx}",
62607 page, page->index);
62608@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
62609 FSCACHE_COOKIE_STORING_TAG);
62610 if (!radix_tree_tag_get(&cookie->stores, page->index,
62611 FSCACHE_COOKIE_PENDING_TAG)) {
62612- fscache_stat(&fscache_n_store_radix_deletes);
62613+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62614 xpage = radix_tree_delete(&cookie->stores, page->index);
62615 }
62616 spin_unlock(&cookie->stores_lock);
62617@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
62618
62619 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
62620
62621- fscache_stat(&fscache_n_attr_changed_calls);
62622+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
62623
62624 if (fscache_object_is_active(object)) {
62625 fscache_stat(&fscache_n_cop_attr_changed);
62626@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62627
62628 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62629
62630- fscache_stat(&fscache_n_attr_changed);
62631+ fscache_stat_unchecked(&fscache_n_attr_changed);
62632
62633 op = kzalloc(sizeof(*op), GFP_KERNEL);
62634 if (!op) {
62635- fscache_stat(&fscache_n_attr_changed_nomem);
62636+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
62637 _leave(" = -ENOMEM");
62638 return -ENOMEM;
62639 }
62640@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62641 if (fscache_submit_exclusive_op(object, op) < 0)
62642 goto nobufs_dec;
62643 spin_unlock(&cookie->lock);
62644- fscache_stat(&fscache_n_attr_changed_ok);
62645+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
62646 fscache_put_operation(op);
62647 _leave(" = 0");
62648 return 0;
62649@@ -242,7 +242,7 @@ nobufs:
62650 kfree(op);
62651 if (wake_cookie)
62652 __fscache_wake_unused_cookie(cookie);
62653- fscache_stat(&fscache_n_attr_changed_nobufs);
62654+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
62655 _leave(" = %d", -ENOBUFS);
62656 return -ENOBUFS;
62657 }
62658@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
62659 /* allocate a retrieval operation and attempt to submit it */
62660 op = kzalloc(sizeof(*op), GFP_NOIO);
62661 if (!op) {
62662- fscache_stat(&fscache_n_retrievals_nomem);
62663+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62664 return NULL;
62665 }
62666
62667@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
62668 return 0;
62669 }
62670
62671- fscache_stat(&fscache_n_retrievals_wait);
62672+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
62673
62674 jif = jiffies;
62675 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
62676 TASK_INTERRUPTIBLE) != 0) {
62677- fscache_stat(&fscache_n_retrievals_intr);
62678+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62679 _leave(" = -ERESTARTSYS");
62680 return -ERESTARTSYS;
62681 }
62682@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
62683 */
62684 int fscache_wait_for_operation_activation(struct fscache_object *object,
62685 struct fscache_operation *op,
62686- atomic_t *stat_op_waits,
62687- atomic_t *stat_object_dead,
62688+ atomic_unchecked_t *stat_op_waits,
62689+ atomic_unchecked_t *stat_object_dead,
62690 void (*do_cancel)(struct fscache_operation *))
62691 {
62692 int ret;
62693@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62694
62695 _debug(">>> WT");
62696 if (stat_op_waits)
62697- fscache_stat(stat_op_waits);
62698+ fscache_stat_unchecked(stat_op_waits);
62699 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
62700 TASK_INTERRUPTIBLE) != 0) {
62701 ret = fscache_cancel_op(op, do_cancel);
62702@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62703 check_if_dead:
62704 if (op->state == FSCACHE_OP_ST_CANCELLED) {
62705 if (stat_object_dead)
62706- fscache_stat(stat_object_dead);
62707+ fscache_stat_unchecked(stat_object_dead);
62708 _leave(" = -ENOBUFS [cancelled]");
62709 return -ENOBUFS;
62710 }
62711@@ -381,7 +381,7 @@ check_if_dead:
62712 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
62713 fscache_cancel_op(op, do_cancel);
62714 if (stat_object_dead)
62715- fscache_stat(stat_object_dead);
62716+ fscache_stat_unchecked(stat_object_dead);
62717 return -ENOBUFS;
62718 }
62719 return 0;
62720@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62721
62722 _enter("%p,%p,,,", cookie, page);
62723
62724- fscache_stat(&fscache_n_retrievals);
62725+ fscache_stat_unchecked(&fscache_n_retrievals);
62726
62727 if (hlist_empty(&cookie->backing_objects))
62728 goto nobufs;
62729@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62730 goto nobufs_unlock_dec;
62731 spin_unlock(&cookie->lock);
62732
62733- fscache_stat(&fscache_n_retrieval_ops);
62734+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62735
62736 /* pin the netfs read context in case we need to do the actual netfs
62737 * read because we've encountered a cache read failure */
62738@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62739
62740 error:
62741 if (ret == -ENOMEM)
62742- fscache_stat(&fscache_n_retrievals_nomem);
62743+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62744 else if (ret == -ERESTARTSYS)
62745- fscache_stat(&fscache_n_retrievals_intr);
62746+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62747 else if (ret == -ENODATA)
62748- fscache_stat(&fscache_n_retrievals_nodata);
62749+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62750 else if (ret < 0)
62751- fscache_stat(&fscache_n_retrievals_nobufs);
62752+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62753 else
62754- fscache_stat(&fscache_n_retrievals_ok);
62755+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62756
62757 fscache_put_retrieval(op);
62758 _leave(" = %d", ret);
62759@@ -505,7 +505,7 @@ nobufs_unlock:
62760 __fscache_wake_unused_cookie(cookie);
62761 kfree(op);
62762 nobufs:
62763- fscache_stat(&fscache_n_retrievals_nobufs);
62764+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62765 _leave(" = -ENOBUFS");
62766 return -ENOBUFS;
62767 }
62768@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62769
62770 _enter("%p,,%d,,,", cookie, *nr_pages);
62771
62772- fscache_stat(&fscache_n_retrievals);
62773+ fscache_stat_unchecked(&fscache_n_retrievals);
62774
62775 if (hlist_empty(&cookie->backing_objects))
62776 goto nobufs;
62777@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62778 goto nobufs_unlock_dec;
62779 spin_unlock(&cookie->lock);
62780
62781- fscache_stat(&fscache_n_retrieval_ops);
62782+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62783
62784 /* pin the netfs read context in case we need to do the actual netfs
62785 * read because we've encountered a cache read failure */
62786@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62787
62788 error:
62789 if (ret == -ENOMEM)
62790- fscache_stat(&fscache_n_retrievals_nomem);
62791+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62792 else if (ret == -ERESTARTSYS)
62793- fscache_stat(&fscache_n_retrievals_intr);
62794+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62795 else if (ret == -ENODATA)
62796- fscache_stat(&fscache_n_retrievals_nodata);
62797+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62798 else if (ret < 0)
62799- fscache_stat(&fscache_n_retrievals_nobufs);
62800+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62801 else
62802- fscache_stat(&fscache_n_retrievals_ok);
62803+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62804
62805 fscache_put_retrieval(op);
62806 _leave(" = %d", ret);
62807@@ -636,7 +636,7 @@ nobufs_unlock:
62808 if (wake_cookie)
62809 __fscache_wake_unused_cookie(cookie);
62810 nobufs:
62811- fscache_stat(&fscache_n_retrievals_nobufs);
62812+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62813 _leave(" = -ENOBUFS");
62814 return -ENOBUFS;
62815 }
62816@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62817
62818 _enter("%p,%p,,,", cookie, page);
62819
62820- fscache_stat(&fscache_n_allocs);
62821+ fscache_stat_unchecked(&fscache_n_allocs);
62822
62823 if (hlist_empty(&cookie->backing_objects))
62824 goto nobufs;
62825@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62826 goto nobufs_unlock_dec;
62827 spin_unlock(&cookie->lock);
62828
62829- fscache_stat(&fscache_n_alloc_ops);
62830+ fscache_stat_unchecked(&fscache_n_alloc_ops);
62831
62832 ret = fscache_wait_for_operation_activation(
62833 object, &op->op,
62834@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62835
62836 error:
62837 if (ret == -ERESTARTSYS)
62838- fscache_stat(&fscache_n_allocs_intr);
62839+ fscache_stat_unchecked(&fscache_n_allocs_intr);
62840 else if (ret < 0)
62841- fscache_stat(&fscache_n_allocs_nobufs);
62842+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62843 else
62844- fscache_stat(&fscache_n_allocs_ok);
62845+ fscache_stat_unchecked(&fscache_n_allocs_ok);
62846
62847 fscache_put_retrieval(op);
62848 _leave(" = %d", ret);
62849@@ -730,7 +730,7 @@ nobufs_unlock:
62850 if (wake_cookie)
62851 __fscache_wake_unused_cookie(cookie);
62852 nobufs:
62853- fscache_stat(&fscache_n_allocs_nobufs);
62854+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62855 _leave(" = -ENOBUFS");
62856 return -ENOBUFS;
62857 }
62858@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62859
62860 spin_lock(&cookie->stores_lock);
62861
62862- fscache_stat(&fscache_n_store_calls);
62863+ fscache_stat_unchecked(&fscache_n_store_calls);
62864
62865 /* find a page to store */
62866 page = NULL;
62867@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62868 page = results[0];
62869 _debug("gang %d [%lx]", n, page->index);
62870 if (page->index > op->store_limit) {
62871- fscache_stat(&fscache_n_store_pages_over_limit);
62872+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
62873 goto superseded;
62874 }
62875
62876@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62877 spin_unlock(&cookie->stores_lock);
62878 spin_unlock(&object->lock);
62879
62880- fscache_stat(&fscache_n_store_pages);
62881+ fscache_stat_unchecked(&fscache_n_store_pages);
62882 fscache_stat(&fscache_n_cop_write_page);
62883 ret = object->cache->ops->write_page(op, page);
62884 fscache_stat_d(&fscache_n_cop_write_page);
62885@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62886 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62887 ASSERT(PageFsCache(page));
62888
62889- fscache_stat(&fscache_n_stores);
62890+ fscache_stat_unchecked(&fscache_n_stores);
62891
62892 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
62893 _leave(" = -ENOBUFS [invalidating]");
62894@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62895 spin_unlock(&cookie->stores_lock);
62896 spin_unlock(&object->lock);
62897
62898- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
62899+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62900 op->store_limit = object->store_limit;
62901
62902 __fscache_use_cookie(cookie);
62903@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62904
62905 spin_unlock(&cookie->lock);
62906 radix_tree_preload_end();
62907- fscache_stat(&fscache_n_store_ops);
62908- fscache_stat(&fscache_n_stores_ok);
62909+ fscache_stat_unchecked(&fscache_n_store_ops);
62910+ fscache_stat_unchecked(&fscache_n_stores_ok);
62911
62912 /* the work queue now carries its own ref on the object */
62913 fscache_put_operation(&op->op);
62914@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62915 return 0;
62916
62917 already_queued:
62918- fscache_stat(&fscache_n_stores_again);
62919+ fscache_stat_unchecked(&fscache_n_stores_again);
62920 already_pending:
62921 spin_unlock(&cookie->stores_lock);
62922 spin_unlock(&object->lock);
62923 spin_unlock(&cookie->lock);
62924 radix_tree_preload_end();
62925 kfree(op);
62926- fscache_stat(&fscache_n_stores_ok);
62927+ fscache_stat_unchecked(&fscache_n_stores_ok);
62928 _leave(" = 0");
62929 return 0;
62930
62931@@ -1039,14 +1039,14 @@ nobufs:
62932 kfree(op);
62933 if (wake_cookie)
62934 __fscache_wake_unused_cookie(cookie);
62935- fscache_stat(&fscache_n_stores_nobufs);
62936+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
62937 _leave(" = -ENOBUFS");
62938 return -ENOBUFS;
62939
62940 nomem_free:
62941 kfree(op);
62942 nomem:
62943- fscache_stat(&fscache_n_stores_oom);
62944+ fscache_stat_unchecked(&fscache_n_stores_oom);
62945 _leave(" = -ENOMEM");
62946 return -ENOMEM;
62947 }
62948@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
62949 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62950 ASSERTCMP(page, !=, NULL);
62951
62952- fscache_stat(&fscache_n_uncaches);
62953+ fscache_stat_unchecked(&fscache_n_uncaches);
62954
62955 /* cache withdrawal may beat us to it */
62956 if (!PageFsCache(page))
62957@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
62958 struct fscache_cookie *cookie = op->op.object->cookie;
62959
62960 #ifdef CONFIG_FSCACHE_STATS
62961- atomic_inc(&fscache_n_marks);
62962+ atomic_inc_unchecked(&fscache_n_marks);
62963 #endif
62964
62965 _debug("- mark %p{%lx}", page, page->index);
62966diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
62967index 40d13c7..ddf52b9 100644
62968--- a/fs/fscache/stats.c
62969+++ b/fs/fscache/stats.c
62970@@ -18,99 +18,99 @@
62971 /*
62972 * operation counters
62973 */
62974-atomic_t fscache_n_op_pend;
62975-atomic_t fscache_n_op_run;
62976-atomic_t fscache_n_op_enqueue;
62977-atomic_t fscache_n_op_requeue;
62978-atomic_t fscache_n_op_deferred_release;
62979-atomic_t fscache_n_op_release;
62980-atomic_t fscache_n_op_gc;
62981-atomic_t fscache_n_op_cancelled;
62982-atomic_t fscache_n_op_rejected;
62983+atomic_unchecked_t fscache_n_op_pend;
62984+atomic_unchecked_t fscache_n_op_run;
62985+atomic_unchecked_t fscache_n_op_enqueue;
62986+atomic_unchecked_t fscache_n_op_requeue;
62987+atomic_unchecked_t fscache_n_op_deferred_release;
62988+atomic_unchecked_t fscache_n_op_release;
62989+atomic_unchecked_t fscache_n_op_gc;
62990+atomic_unchecked_t fscache_n_op_cancelled;
62991+atomic_unchecked_t fscache_n_op_rejected;
62992
62993-atomic_t fscache_n_attr_changed;
62994-atomic_t fscache_n_attr_changed_ok;
62995-atomic_t fscache_n_attr_changed_nobufs;
62996-atomic_t fscache_n_attr_changed_nomem;
62997-atomic_t fscache_n_attr_changed_calls;
62998+atomic_unchecked_t fscache_n_attr_changed;
62999+atomic_unchecked_t fscache_n_attr_changed_ok;
63000+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63001+atomic_unchecked_t fscache_n_attr_changed_nomem;
63002+atomic_unchecked_t fscache_n_attr_changed_calls;
63003
63004-atomic_t fscache_n_allocs;
63005-atomic_t fscache_n_allocs_ok;
63006-atomic_t fscache_n_allocs_wait;
63007-atomic_t fscache_n_allocs_nobufs;
63008-atomic_t fscache_n_allocs_intr;
63009-atomic_t fscache_n_allocs_object_dead;
63010-atomic_t fscache_n_alloc_ops;
63011-atomic_t fscache_n_alloc_op_waits;
63012+atomic_unchecked_t fscache_n_allocs;
63013+atomic_unchecked_t fscache_n_allocs_ok;
63014+atomic_unchecked_t fscache_n_allocs_wait;
63015+atomic_unchecked_t fscache_n_allocs_nobufs;
63016+atomic_unchecked_t fscache_n_allocs_intr;
63017+atomic_unchecked_t fscache_n_allocs_object_dead;
63018+atomic_unchecked_t fscache_n_alloc_ops;
63019+atomic_unchecked_t fscache_n_alloc_op_waits;
63020
63021-atomic_t fscache_n_retrievals;
63022-atomic_t fscache_n_retrievals_ok;
63023-atomic_t fscache_n_retrievals_wait;
63024-atomic_t fscache_n_retrievals_nodata;
63025-atomic_t fscache_n_retrievals_nobufs;
63026-atomic_t fscache_n_retrievals_intr;
63027-atomic_t fscache_n_retrievals_nomem;
63028-atomic_t fscache_n_retrievals_object_dead;
63029-atomic_t fscache_n_retrieval_ops;
63030-atomic_t fscache_n_retrieval_op_waits;
63031+atomic_unchecked_t fscache_n_retrievals;
63032+atomic_unchecked_t fscache_n_retrievals_ok;
63033+atomic_unchecked_t fscache_n_retrievals_wait;
63034+atomic_unchecked_t fscache_n_retrievals_nodata;
63035+atomic_unchecked_t fscache_n_retrievals_nobufs;
63036+atomic_unchecked_t fscache_n_retrievals_intr;
63037+atomic_unchecked_t fscache_n_retrievals_nomem;
63038+atomic_unchecked_t fscache_n_retrievals_object_dead;
63039+atomic_unchecked_t fscache_n_retrieval_ops;
63040+atomic_unchecked_t fscache_n_retrieval_op_waits;
63041
63042-atomic_t fscache_n_stores;
63043-atomic_t fscache_n_stores_ok;
63044-atomic_t fscache_n_stores_again;
63045-atomic_t fscache_n_stores_nobufs;
63046-atomic_t fscache_n_stores_oom;
63047-atomic_t fscache_n_store_ops;
63048-atomic_t fscache_n_store_calls;
63049-atomic_t fscache_n_store_pages;
63050-atomic_t fscache_n_store_radix_deletes;
63051-atomic_t fscache_n_store_pages_over_limit;
63052+atomic_unchecked_t fscache_n_stores;
63053+atomic_unchecked_t fscache_n_stores_ok;
63054+atomic_unchecked_t fscache_n_stores_again;
63055+atomic_unchecked_t fscache_n_stores_nobufs;
63056+atomic_unchecked_t fscache_n_stores_oom;
63057+atomic_unchecked_t fscache_n_store_ops;
63058+atomic_unchecked_t fscache_n_store_calls;
63059+atomic_unchecked_t fscache_n_store_pages;
63060+atomic_unchecked_t fscache_n_store_radix_deletes;
63061+atomic_unchecked_t fscache_n_store_pages_over_limit;
63062
63063-atomic_t fscache_n_store_vmscan_not_storing;
63064-atomic_t fscache_n_store_vmscan_gone;
63065-atomic_t fscache_n_store_vmscan_busy;
63066-atomic_t fscache_n_store_vmscan_cancelled;
63067-atomic_t fscache_n_store_vmscan_wait;
63068+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63069+atomic_unchecked_t fscache_n_store_vmscan_gone;
63070+atomic_unchecked_t fscache_n_store_vmscan_busy;
63071+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63072+atomic_unchecked_t fscache_n_store_vmscan_wait;
63073
63074-atomic_t fscache_n_marks;
63075-atomic_t fscache_n_uncaches;
63076+atomic_unchecked_t fscache_n_marks;
63077+atomic_unchecked_t fscache_n_uncaches;
63078
63079-atomic_t fscache_n_acquires;
63080-atomic_t fscache_n_acquires_null;
63081-atomic_t fscache_n_acquires_no_cache;
63082-atomic_t fscache_n_acquires_ok;
63083-atomic_t fscache_n_acquires_nobufs;
63084-atomic_t fscache_n_acquires_oom;
63085+atomic_unchecked_t fscache_n_acquires;
63086+atomic_unchecked_t fscache_n_acquires_null;
63087+atomic_unchecked_t fscache_n_acquires_no_cache;
63088+atomic_unchecked_t fscache_n_acquires_ok;
63089+atomic_unchecked_t fscache_n_acquires_nobufs;
63090+atomic_unchecked_t fscache_n_acquires_oom;
63091
63092-atomic_t fscache_n_invalidates;
63093-atomic_t fscache_n_invalidates_run;
63094+atomic_unchecked_t fscache_n_invalidates;
63095+atomic_unchecked_t fscache_n_invalidates_run;
63096
63097-atomic_t fscache_n_updates;
63098-atomic_t fscache_n_updates_null;
63099-atomic_t fscache_n_updates_run;
63100+atomic_unchecked_t fscache_n_updates;
63101+atomic_unchecked_t fscache_n_updates_null;
63102+atomic_unchecked_t fscache_n_updates_run;
63103
63104-atomic_t fscache_n_relinquishes;
63105-atomic_t fscache_n_relinquishes_null;
63106-atomic_t fscache_n_relinquishes_waitcrt;
63107-atomic_t fscache_n_relinquishes_retire;
63108+atomic_unchecked_t fscache_n_relinquishes;
63109+atomic_unchecked_t fscache_n_relinquishes_null;
63110+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63111+atomic_unchecked_t fscache_n_relinquishes_retire;
63112
63113-atomic_t fscache_n_cookie_index;
63114-atomic_t fscache_n_cookie_data;
63115-atomic_t fscache_n_cookie_special;
63116+atomic_unchecked_t fscache_n_cookie_index;
63117+atomic_unchecked_t fscache_n_cookie_data;
63118+atomic_unchecked_t fscache_n_cookie_special;
63119
63120-atomic_t fscache_n_object_alloc;
63121-atomic_t fscache_n_object_no_alloc;
63122-atomic_t fscache_n_object_lookups;
63123-atomic_t fscache_n_object_lookups_negative;
63124-atomic_t fscache_n_object_lookups_positive;
63125-atomic_t fscache_n_object_lookups_timed_out;
63126-atomic_t fscache_n_object_created;
63127-atomic_t fscache_n_object_avail;
63128-atomic_t fscache_n_object_dead;
63129+atomic_unchecked_t fscache_n_object_alloc;
63130+atomic_unchecked_t fscache_n_object_no_alloc;
63131+atomic_unchecked_t fscache_n_object_lookups;
63132+atomic_unchecked_t fscache_n_object_lookups_negative;
63133+atomic_unchecked_t fscache_n_object_lookups_positive;
63134+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63135+atomic_unchecked_t fscache_n_object_created;
63136+atomic_unchecked_t fscache_n_object_avail;
63137+atomic_unchecked_t fscache_n_object_dead;
63138
63139-atomic_t fscache_n_checkaux_none;
63140-atomic_t fscache_n_checkaux_okay;
63141-atomic_t fscache_n_checkaux_update;
63142-atomic_t fscache_n_checkaux_obsolete;
63143+atomic_unchecked_t fscache_n_checkaux_none;
63144+atomic_unchecked_t fscache_n_checkaux_okay;
63145+atomic_unchecked_t fscache_n_checkaux_update;
63146+atomic_unchecked_t fscache_n_checkaux_obsolete;
63147
63148 atomic_t fscache_n_cop_alloc_object;
63149 atomic_t fscache_n_cop_lookup_object;
63150@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63151 seq_puts(m, "FS-Cache statistics\n");
63152
63153 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63154- atomic_read(&fscache_n_cookie_index),
63155- atomic_read(&fscache_n_cookie_data),
63156- atomic_read(&fscache_n_cookie_special));
63157+ atomic_read_unchecked(&fscache_n_cookie_index),
63158+ atomic_read_unchecked(&fscache_n_cookie_data),
63159+ atomic_read_unchecked(&fscache_n_cookie_special));
63160
63161 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63162- atomic_read(&fscache_n_object_alloc),
63163- atomic_read(&fscache_n_object_no_alloc),
63164- atomic_read(&fscache_n_object_avail),
63165- atomic_read(&fscache_n_object_dead));
63166+ atomic_read_unchecked(&fscache_n_object_alloc),
63167+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63168+ atomic_read_unchecked(&fscache_n_object_avail),
63169+ atomic_read_unchecked(&fscache_n_object_dead));
63170 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63171- atomic_read(&fscache_n_checkaux_none),
63172- atomic_read(&fscache_n_checkaux_okay),
63173- atomic_read(&fscache_n_checkaux_update),
63174- atomic_read(&fscache_n_checkaux_obsolete));
63175+ atomic_read_unchecked(&fscache_n_checkaux_none),
63176+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63177+ atomic_read_unchecked(&fscache_n_checkaux_update),
63178+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63179
63180 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63181- atomic_read(&fscache_n_marks),
63182- atomic_read(&fscache_n_uncaches));
63183+ atomic_read_unchecked(&fscache_n_marks),
63184+ atomic_read_unchecked(&fscache_n_uncaches));
63185
63186 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63187 " oom=%u\n",
63188- atomic_read(&fscache_n_acquires),
63189- atomic_read(&fscache_n_acquires_null),
63190- atomic_read(&fscache_n_acquires_no_cache),
63191- atomic_read(&fscache_n_acquires_ok),
63192- atomic_read(&fscache_n_acquires_nobufs),
63193- atomic_read(&fscache_n_acquires_oom));
63194+ atomic_read_unchecked(&fscache_n_acquires),
63195+ atomic_read_unchecked(&fscache_n_acquires_null),
63196+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63197+ atomic_read_unchecked(&fscache_n_acquires_ok),
63198+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63199+ atomic_read_unchecked(&fscache_n_acquires_oom));
63200
63201 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63202- atomic_read(&fscache_n_object_lookups),
63203- atomic_read(&fscache_n_object_lookups_negative),
63204- atomic_read(&fscache_n_object_lookups_positive),
63205- atomic_read(&fscache_n_object_created),
63206- atomic_read(&fscache_n_object_lookups_timed_out));
63207+ atomic_read_unchecked(&fscache_n_object_lookups),
63208+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63209+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63210+ atomic_read_unchecked(&fscache_n_object_created),
63211+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63212
63213 seq_printf(m, "Invals : n=%u run=%u\n",
63214- atomic_read(&fscache_n_invalidates),
63215- atomic_read(&fscache_n_invalidates_run));
63216+ atomic_read_unchecked(&fscache_n_invalidates),
63217+ atomic_read_unchecked(&fscache_n_invalidates_run));
63218
63219 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63220- atomic_read(&fscache_n_updates),
63221- atomic_read(&fscache_n_updates_null),
63222- atomic_read(&fscache_n_updates_run));
63223+ atomic_read_unchecked(&fscache_n_updates),
63224+ atomic_read_unchecked(&fscache_n_updates_null),
63225+ atomic_read_unchecked(&fscache_n_updates_run));
63226
63227 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63228- atomic_read(&fscache_n_relinquishes),
63229- atomic_read(&fscache_n_relinquishes_null),
63230- atomic_read(&fscache_n_relinquishes_waitcrt),
63231- atomic_read(&fscache_n_relinquishes_retire));
63232+ atomic_read_unchecked(&fscache_n_relinquishes),
63233+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63234+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63235+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63236
63237 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63238- atomic_read(&fscache_n_attr_changed),
63239- atomic_read(&fscache_n_attr_changed_ok),
63240- atomic_read(&fscache_n_attr_changed_nobufs),
63241- atomic_read(&fscache_n_attr_changed_nomem),
63242- atomic_read(&fscache_n_attr_changed_calls));
63243+ atomic_read_unchecked(&fscache_n_attr_changed),
63244+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63245+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63246+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63247+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63248
63249 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63250- atomic_read(&fscache_n_allocs),
63251- atomic_read(&fscache_n_allocs_ok),
63252- atomic_read(&fscache_n_allocs_wait),
63253- atomic_read(&fscache_n_allocs_nobufs),
63254- atomic_read(&fscache_n_allocs_intr));
63255+ atomic_read_unchecked(&fscache_n_allocs),
63256+ atomic_read_unchecked(&fscache_n_allocs_ok),
63257+ atomic_read_unchecked(&fscache_n_allocs_wait),
63258+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63259+ atomic_read_unchecked(&fscache_n_allocs_intr));
63260 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63261- atomic_read(&fscache_n_alloc_ops),
63262- atomic_read(&fscache_n_alloc_op_waits),
63263- atomic_read(&fscache_n_allocs_object_dead));
63264+ atomic_read_unchecked(&fscache_n_alloc_ops),
63265+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63266+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63267
63268 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63269 " int=%u oom=%u\n",
63270- atomic_read(&fscache_n_retrievals),
63271- atomic_read(&fscache_n_retrievals_ok),
63272- atomic_read(&fscache_n_retrievals_wait),
63273- atomic_read(&fscache_n_retrievals_nodata),
63274- atomic_read(&fscache_n_retrievals_nobufs),
63275- atomic_read(&fscache_n_retrievals_intr),
63276- atomic_read(&fscache_n_retrievals_nomem));
63277+ atomic_read_unchecked(&fscache_n_retrievals),
63278+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63279+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63280+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63281+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63282+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63283+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63284 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63285- atomic_read(&fscache_n_retrieval_ops),
63286- atomic_read(&fscache_n_retrieval_op_waits),
63287- atomic_read(&fscache_n_retrievals_object_dead));
63288+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63289+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63290+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63291
63292 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63293- atomic_read(&fscache_n_stores),
63294- atomic_read(&fscache_n_stores_ok),
63295- atomic_read(&fscache_n_stores_again),
63296- atomic_read(&fscache_n_stores_nobufs),
63297- atomic_read(&fscache_n_stores_oom));
63298+ atomic_read_unchecked(&fscache_n_stores),
63299+ atomic_read_unchecked(&fscache_n_stores_ok),
63300+ atomic_read_unchecked(&fscache_n_stores_again),
63301+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63302+ atomic_read_unchecked(&fscache_n_stores_oom));
63303 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63304- atomic_read(&fscache_n_store_ops),
63305- atomic_read(&fscache_n_store_calls),
63306- atomic_read(&fscache_n_store_pages),
63307- atomic_read(&fscache_n_store_radix_deletes),
63308- atomic_read(&fscache_n_store_pages_over_limit));
63309+ atomic_read_unchecked(&fscache_n_store_ops),
63310+ atomic_read_unchecked(&fscache_n_store_calls),
63311+ atomic_read_unchecked(&fscache_n_store_pages),
63312+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63313+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63314
63315 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63316- atomic_read(&fscache_n_store_vmscan_not_storing),
63317- atomic_read(&fscache_n_store_vmscan_gone),
63318- atomic_read(&fscache_n_store_vmscan_busy),
63319- atomic_read(&fscache_n_store_vmscan_cancelled),
63320- atomic_read(&fscache_n_store_vmscan_wait));
63321+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63322+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63323+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63324+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63325+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63326
63327 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63328- atomic_read(&fscache_n_op_pend),
63329- atomic_read(&fscache_n_op_run),
63330- atomic_read(&fscache_n_op_enqueue),
63331- atomic_read(&fscache_n_op_cancelled),
63332- atomic_read(&fscache_n_op_rejected));
63333+ atomic_read_unchecked(&fscache_n_op_pend),
63334+ atomic_read_unchecked(&fscache_n_op_run),
63335+ atomic_read_unchecked(&fscache_n_op_enqueue),
63336+ atomic_read_unchecked(&fscache_n_op_cancelled),
63337+ atomic_read_unchecked(&fscache_n_op_rejected));
63338 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63339- atomic_read(&fscache_n_op_deferred_release),
63340- atomic_read(&fscache_n_op_release),
63341- atomic_read(&fscache_n_op_gc));
63342+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63343+ atomic_read_unchecked(&fscache_n_op_release),
63344+ atomic_read_unchecked(&fscache_n_op_gc));
63345
63346 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63347 atomic_read(&fscache_n_cop_alloc_object),
63348diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63349index 28d0c7a..04816b7 100644
63350--- a/fs/fuse/cuse.c
63351+++ b/fs/fuse/cuse.c
63352@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63353 INIT_LIST_HEAD(&cuse_conntbl[i]);
63354
63355 /* inherit and extend fuse_dev_operations */
63356- cuse_channel_fops = fuse_dev_operations;
63357- cuse_channel_fops.owner = THIS_MODULE;
63358- cuse_channel_fops.open = cuse_channel_open;
63359- cuse_channel_fops.release = cuse_channel_release;
63360+ pax_open_kernel();
63361+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63362+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63363+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63364+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63365+ pax_close_kernel();
63366
63367 cuse_class = class_create(THIS_MODULE, "cuse");
63368 if (IS_ERR(cuse_class))
63369diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63370index ed19a7d..91e9a4c 100644
63371--- a/fs/fuse/dev.c
63372+++ b/fs/fuse/dev.c
63373@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63374 ret = 0;
63375 pipe_lock(pipe);
63376
63377- if (!pipe->readers) {
63378+ if (!atomic_read(&pipe->readers)) {
63379 send_sig(SIGPIPE, current, 0);
63380 if (!ret)
63381 ret = -EPIPE;
63382@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63383 page_nr++;
63384 ret += buf->len;
63385
63386- if (pipe->files)
63387+ if (atomic_read(&pipe->files))
63388 do_wakeup = 1;
63389 }
63390
63391diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63392index 08e7b1a..d91c6ee 100644
63393--- a/fs/fuse/dir.c
63394+++ b/fs/fuse/dir.c
63395@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63396 return link;
63397 }
63398
63399-static void free_link(char *link)
63400+static void free_link(const char *link)
63401 {
63402 if (!IS_ERR(link))
63403 free_page((unsigned long) link);
63404diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63405index fd62cae..3494dfa 100644
63406--- a/fs/hostfs/hostfs_kern.c
63407+++ b/fs/hostfs/hostfs_kern.c
63408@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63409
63410 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63411 {
63412- char *s = nd_get_link(nd);
63413+ const char *s = nd_get_link(nd);
63414 if (!IS_ERR(s))
63415 __putname(s);
63416 }
63417diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63418index 5eba47f..d353c22 100644
63419--- a/fs/hugetlbfs/inode.c
63420+++ b/fs/hugetlbfs/inode.c
63421@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63422 struct mm_struct *mm = current->mm;
63423 struct vm_area_struct *vma;
63424 struct hstate *h = hstate_file(file);
63425+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63426 struct vm_unmapped_area_info info;
63427
63428 if (len & ~huge_page_mask(h))
63429@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63430 return addr;
63431 }
63432
63433+#ifdef CONFIG_PAX_RANDMMAP
63434+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63435+#endif
63436+
63437 if (addr) {
63438 addr = ALIGN(addr, huge_page_size(h));
63439 vma = find_vma(mm, addr);
63440- if (TASK_SIZE - len >= addr &&
63441- (!vma || addr + len <= vma->vm_start))
63442+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63443 return addr;
63444 }
63445
63446 info.flags = 0;
63447 info.length = len;
63448 info.low_limit = TASK_UNMAPPED_BASE;
63449+
63450+#ifdef CONFIG_PAX_RANDMMAP
63451+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63452+ info.low_limit += mm->delta_mmap;
63453+#endif
63454+
63455 info.high_limit = TASK_SIZE;
63456 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63457 info.align_offset = 0;
63458@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63459 };
63460 MODULE_ALIAS_FS("hugetlbfs");
63461
63462-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63463+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63464
63465 static int can_do_hugetlb_shm(void)
63466 {
63467diff --git a/fs/inode.c b/fs/inode.c
63468index aa149e7..46f1f65 100644
63469--- a/fs/inode.c
63470+++ b/fs/inode.c
63471@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
63472 unsigned int *p = &get_cpu_var(last_ino);
63473 unsigned int res = *p;
63474
63475+start:
63476+
63477 #ifdef CONFIG_SMP
63478 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63479- static atomic_t shared_last_ino;
63480- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63481+ static atomic_unchecked_t shared_last_ino;
63482+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63483
63484 res = next - LAST_INO_BATCH;
63485 }
63486 #endif
63487
63488- *p = ++res;
63489+ if (unlikely(!++res))
63490+ goto start; /* never zero */
63491+ *p = res;
63492 put_cpu_var(last_ino);
63493 return res;
63494 }
63495diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63496index 4a6cf28..d3a29d3 100644
63497--- a/fs/jffs2/erase.c
63498+++ b/fs/jffs2/erase.c
63499@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63500 struct jffs2_unknown_node marker = {
63501 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63502 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63503- .totlen = cpu_to_je32(c->cleanmarker_size)
63504+ .totlen = cpu_to_je32(c->cleanmarker_size),
63505+ .hdr_crc = cpu_to_je32(0)
63506 };
63507
63508 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63509diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63510index 09ed551..45684f8 100644
63511--- a/fs/jffs2/wbuf.c
63512+++ b/fs/jffs2/wbuf.c
63513@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63514 {
63515 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63516 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63517- .totlen = constant_cpu_to_je32(8)
63518+ .totlen = constant_cpu_to_je32(8),
63519+ .hdr_crc = constant_cpu_to_je32(0)
63520 };
63521
63522 /*
63523diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63524index 16c3a95..e9cb75d 100644
63525--- a/fs/jfs/super.c
63526+++ b/fs/jfs/super.c
63527@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
63528
63529 jfs_inode_cachep =
63530 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63531- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63532+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63533 init_once);
63534 if (jfs_inode_cachep == NULL)
63535 return -ENOMEM;
63536diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63537index 2d881b3..fe1ac77 100644
63538--- a/fs/kernfs/dir.c
63539+++ b/fs/kernfs/dir.c
63540@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63541 *
63542 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63543 */
63544-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63545+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63546 {
63547 unsigned long hash = init_name_hash();
63548 unsigned int len = strlen(name);
63549@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
63550 ret = scops->mkdir(parent, dentry->d_name.name, mode);
63551
63552 kernfs_put_active(parent);
63553+
63554+ if (!ret) {
63555+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
63556+ ret = PTR_ERR_OR_ZERO(dentry_ret);
63557+ }
63558+
63559 return ret;
63560 }
63561
63562diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63563index ddc9f96..4e450ad 100644
63564--- a/fs/kernfs/file.c
63565+++ b/fs/kernfs/file.c
63566@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63567
63568 struct kernfs_open_node {
63569 atomic_t refcnt;
63570- atomic_t event;
63571+ atomic_unchecked_t event;
63572 wait_queue_head_t poll;
63573 struct list_head files; /* goes through kernfs_open_file.list */
63574 };
63575@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63576 {
63577 struct kernfs_open_file *of = sf->private;
63578
63579- of->event = atomic_read(&of->kn->attr.open->event);
63580+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63581
63582 return of->kn->attr.ops->seq_show(sf, v);
63583 }
63584@@ -271,7 +271,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
63585 {
63586 struct kernfs_open_file *of = kernfs_of(file);
63587 const struct kernfs_ops *ops;
63588- size_t len;
63589+ ssize_t len;
63590 char *buf;
63591
63592 if (of->atomic_write_len) {
63593@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63594 return ret;
63595 }
63596
63597-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63598- void *buf, int len, int write)
63599+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63600+ void *buf, size_t len, int write)
63601 {
63602 struct file *file = vma->vm_file;
63603 struct kernfs_open_file *of = kernfs_of(file);
63604- int ret;
63605+ ssize_t ret;
63606
63607 if (!of->vm_ops)
63608 return -EINVAL;
63609@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63610 return -ENOMEM;
63611
63612 atomic_set(&new_on->refcnt, 0);
63613- atomic_set(&new_on->event, 1);
63614+ atomic_set_unchecked(&new_on->event, 1);
63615 init_waitqueue_head(&new_on->poll);
63616 INIT_LIST_HEAD(&new_on->files);
63617 goto retry;
63618@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
63619
63620 kernfs_put_active(kn);
63621
63622- if (of->event != atomic_read(&on->event))
63623+ if (of->event != atomic_read_unchecked(&on->event))
63624 goto trigger;
63625
63626 return DEFAULT_POLLMASK;
63627@@ -823,7 +823,7 @@ repeat:
63628
63629 on = kn->attr.open;
63630 if (on) {
63631- atomic_inc(&on->event);
63632+ atomic_inc_unchecked(&on->event);
63633 wake_up_interruptible(&on->poll);
63634 }
63635
63636diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
63637index 8a19889..4c3069a 100644
63638--- a/fs/kernfs/symlink.c
63639+++ b/fs/kernfs/symlink.c
63640@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
63641 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
63642 void *cookie)
63643 {
63644- char *page = nd_get_link(nd);
63645+ const char *page = nd_get_link(nd);
63646 if (!IS_ERR(page))
63647 free_page((unsigned long)page);
63648 }
63649diff --git a/fs/libfs.c b/fs/libfs.c
63650index 005843c..06c4191 100644
63651--- a/fs/libfs.c
63652+++ b/fs/libfs.c
63653@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63654
63655 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
63656 struct dentry *next = list_entry(p, struct dentry, d_child);
63657+ char d_name[sizeof(next->d_iname)];
63658+ const unsigned char *name;
63659+
63660 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
63661 if (!simple_positive(next)) {
63662 spin_unlock(&next->d_lock);
63663@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63664
63665 spin_unlock(&next->d_lock);
63666 spin_unlock(&dentry->d_lock);
63667- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
63668+ name = next->d_name.name;
63669+ if (name == next->d_iname) {
63670+ memcpy(d_name, name, next->d_name.len);
63671+ name = d_name;
63672+ }
63673+ if (!dir_emit(ctx, name, next->d_name.len,
63674 next->d_inode->i_ino, dt_type(next->d_inode)))
63675 return 0;
63676 spin_lock(&dentry->d_lock);
63677@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
63678 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
63679 void *cookie)
63680 {
63681- char *s = nd_get_link(nd);
63682+ const char *s = nd_get_link(nd);
63683 if (!IS_ERR(s))
63684 kfree(s);
63685 }
63686diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
63687index acd3947..1f896e2 100644
63688--- a/fs/lockd/clntproc.c
63689+++ b/fs/lockd/clntproc.c
63690@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
63691 /*
63692 * Cookie counter for NLM requests
63693 */
63694-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
63695+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
63696
63697 void nlmclnt_next_cookie(struct nlm_cookie *c)
63698 {
63699- u32 cookie = atomic_inc_return(&nlm_cookie);
63700+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
63701
63702 memcpy(c->data, &cookie, 4);
63703 c->len=4;
63704diff --git a/fs/locks.c b/fs/locks.c
63705index 59e2f90..bd69071 100644
63706--- a/fs/locks.c
63707+++ b/fs/locks.c
63708@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
63709 locks_remove_posix(filp, filp);
63710
63711 if (filp->f_op->flock) {
63712- struct file_lock fl = {
63713+ struct file_lock flock = {
63714 .fl_owner = filp,
63715 .fl_pid = current->tgid,
63716 .fl_file = filp,
63717@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
63718 .fl_type = F_UNLCK,
63719 .fl_end = OFFSET_MAX,
63720 };
63721- filp->f_op->flock(filp, F_SETLKW, &fl);
63722- if (fl.fl_ops && fl.fl_ops->fl_release_private)
63723- fl.fl_ops->fl_release_private(&fl);
63724+ filp->f_op->flock(filp, F_SETLKW, &flock);
63725+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
63726+ flock.fl_ops->fl_release_private(&flock);
63727 }
63728
63729 spin_lock(&inode->i_lock);
63730diff --git a/fs/mount.h b/fs/mount.h
63731index 0ad6f76..a04c146 100644
63732--- a/fs/mount.h
63733+++ b/fs/mount.h
63734@@ -12,7 +12,7 @@ struct mnt_namespace {
63735 u64 seq; /* Sequence number to prevent loops */
63736 wait_queue_head_t poll;
63737 u64 event;
63738-};
63739+} __randomize_layout;
63740
63741 struct mnt_pcp {
63742 int mnt_count;
63743@@ -63,7 +63,7 @@ struct mount {
63744 int mnt_expiry_mark; /* true if marked for expiry */
63745 struct hlist_head mnt_pins;
63746 struct path mnt_ex_mountpoint;
63747-};
63748+} __randomize_layout;
63749
63750 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
63751
63752diff --git a/fs/namei.c b/fs/namei.c
63753index bc35b02..7ed1f1d 100644
63754--- a/fs/namei.c
63755+++ b/fs/namei.c
63756@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
63757 if (ret != -EACCES)
63758 return ret;
63759
63760+#ifdef CONFIG_GRKERNSEC
63761+ /* we'll block if we have to log due to a denied capability use */
63762+ if (mask & MAY_NOT_BLOCK)
63763+ return -ECHILD;
63764+#endif
63765+
63766 if (S_ISDIR(inode->i_mode)) {
63767 /* DACs are overridable for directories */
63768- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63769- return 0;
63770 if (!(mask & MAY_WRITE))
63771- if (capable_wrt_inode_uidgid(inode,
63772- CAP_DAC_READ_SEARCH))
63773+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63774+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63775 return 0;
63776+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63777+ return 0;
63778 return -EACCES;
63779 }
63780 /*
63781+ * Searching includes executable on directories, else just read.
63782+ */
63783+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63784+ if (mask == MAY_READ)
63785+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63786+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63787+ return 0;
63788+
63789+ /*
63790 * Read/write DACs are always overridable.
63791 * Executable DACs are overridable when there is
63792 * at least one exec bit set.
63793@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
63794 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63795 return 0;
63796
63797- /*
63798- * Searching includes executable on directories, else just read.
63799- */
63800- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63801- if (mask == MAY_READ)
63802- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63803- return 0;
63804-
63805 return -EACCES;
63806 }
63807 EXPORT_SYMBOL(generic_permission);
63808@@ -497,7 +504,7 @@ struct nameidata {
63809 int last_type;
63810 unsigned depth;
63811 struct file *base;
63812- char *saved_names[MAX_NESTED_LINKS + 1];
63813+ const char *saved_names[MAX_NESTED_LINKS + 1];
63814 };
63815
63816 /*
63817@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
63818 nd->flags |= LOOKUP_JUMPED;
63819 }
63820
63821-void nd_set_link(struct nameidata *nd, char *path)
63822+void nd_set_link(struct nameidata *nd, const char *path)
63823 {
63824 nd->saved_names[nd->depth] = path;
63825 }
63826 EXPORT_SYMBOL(nd_set_link);
63827
63828-char *nd_get_link(struct nameidata *nd)
63829+const char *nd_get_link(const struct nameidata *nd)
63830 {
63831 return nd->saved_names[nd->depth];
63832 }
63833@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63834 {
63835 struct dentry *dentry = link->dentry;
63836 int error;
63837- char *s;
63838+ const char *s;
63839
63840 BUG_ON(nd->flags & LOOKUP_RCU);
63841
63842@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63843 if (error)
63844 goto out_put_nd_path;
63845
63846+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
63847+ dentry->d_inode, dentry, nd->path.mnt)) {
63848+ error = -EACCES;
63849+ goto out_put_nd_path;
63850+ }
63851+
63852 nd->last_type = LAST_BIND;
63853 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
63854 error = PTR_ERR(*p);
63855@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
63856 if (res)
63857 break;
63858 res = walk_component(nd, path, LOOKUP_FOLLOW);
63859+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
63860+ res = -EACCES;
63861 put_link(nd, &link, cookie);
63862 } while (res > 0);
63863
63864@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
63865 static inline u64 hash_name(const char *name)
63866 {
63867 unsigned long a, b, adata, bdata, mask, hash, len;
63868- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63869+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63870
63871 hash = a = 0;
63872 len = -sizeof(unsigned long);
63873@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
63874 if (err)
63875 break;
63876 err = lookup_last(nd, &path);
63877+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
63878+ err = -EACCES;
63879 put_link(nd, &link, cookie);
63880 }
63881 }
63882@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
63883 if (!err)
63884 err = complete_walk(nd);
63885
63886+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
63887+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
63888+ path_put(&nd->path);
63889+ err = -ENOENT;
63890+ }
63891+ }
63892+
63893 if (!err && nd->flags & LOOKUP_DIRECTORY) {
63894 if (!d_can_lookup(nd->path.dentry)) {
63895 path_put(&nd->path);
63896@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
63897 retval = path_lookupat(dfd, name->name,
63898 flags | LOOKUP_REVAL, nd);
63899
63900- if (likely(!retval))
63901+ if (likely(!retval)) {
63902 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
63903+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
63904+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
63905+ path_put(&nd->path);
63906+ return -ENOENT;
63907+ }
63908+ }
63909+ }
63910 return retval;
63911 }
63912
63913@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
63914 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
63915 return -EPERM;
63916
63917+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
63918+ return -EPERM;
63919+ if (gr_handle_rawio(inode))
63920+ return -EPERM;
63921+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
63922+ return -EACCES;
63923+
63924 return 0;
63925 }
63926
63927@@ -2826,7 +2864,7 @@ looked_up:
63928 * cleared otherwise prior to returning.
63929 */
63930 static int lookup_open(struct nameidata *nd, struct path *path,
63931- struct file *file,
63932+ struct path *link, struct file *file,
63933 const struct open_flags *op,
63934 bool got_write, int *opened)
63935 {
63936@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
63937 /* Negative dentry, just create the file */
63938 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
63939 umode_t mode = op->mode;
63940+
63941+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
63942+ error = -EACCES;
63943+ goto out_dput;
63944+ }
63945+
63946+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
63947+ error = -EACCES;
63948+ goto out_dput;
63949+ }
63950+
63951 if (!IS_POSIXACL(dir->d_inode))
63952 mode &= ~current_umask();
63953 /*
63954@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
63955 nd->flags & LOOKUP_EXCL);
63956 if (error)
63957 goto out_dput;
63958+ else
63959+ gr_handle_create(dentry, nd->path.mnt);
63960 }
63961 out_no_open:
63962 path->dentry = dentry;
63963@@ -2896,7 +2947,7 @@ out_dput:
63964 /*
63965 * Handle the last step of open()
63966 */
63967-static int do_last(struct nameidata *nd, struct path *path,
63968+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
63969 struct file *file, const struct open_flags *op,
63970 int *opened, struct filename *name)
63971 {
63972@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
63973 if (error)
63974 return error;
63975
63976+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
63977+ error = -ENOENT;
63978+ goto out;
63979+ }
63980+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
63981+ error = -EACCES;
63982+ goto out;
63983+ }
63984+
63985 audit_inode(name, dir, LOOKUP_PARENT);
63986 error = -EISDIR;
63987 /* trailing slashes? */
63988@@ -2965,7 +3025,7 @@ retry_lookup:
63989 */
63990 }
63991 mutex_lock(&dir->d_inode->i_mutex);
63992- error = lookup_open(nd, path, file, op, got_write, opened);
63993+ error = lookup_open(nd, path, link, file, op, got_write, opened);
63994 mutex_unlock(&dir->d_inode->i_mutex);
63995
63996 if (error <= 0) {
63997@@ -2989,11 +3049,28 @@ retry_lookup:
63998 goto finish_open_created;
63999 }
64000
64001+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64002+ error = -ENOENT;
64003+ goto exit_dput;
64004+ }
64005+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64006+ error = -EACCES;
64007+ goto exit_dput;
64008+ }
64009+
64010 /*
64011 * create/update audit record if it already exists.
64012 */
64013- if (d_is_positive(path->dentry))
64014+ if (d_is_positive(path->dentry)) {
64015+ /* only check if O_CREAT is specified, all other checks need to go
64016+ into may_open */
64017+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64018+ error = -EACCES;
64019+ goto exit_dput;
64020+ }
64021+
64022 audit_inode(name, path->dentry, 0);
64023+ }
64024
64025 /*
64026 * If atomic_open() acquired write access it is dropped now due to
64027@@ -3034,6 +3111,11 @@ finish_lookup:
64028 }
64029 }
64030 BUG_ON(inode != path->dentry->d_inode);
64031+ /* if we're resolving a symlink to another symlink */
64032+ if (link && gr_handle_symlink_owner(link, inode)) {
64033+ error = -EACCES;
64034+ goto out;
64035+ }
64036 return 1;
64037 }
64038
64039@@ -3053,7 +3135,18 @@ finish_open:
64040 path_put(&save_parent);
64041 return error;
64042 }
64043+
64044+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64045+ error = -ENOENT;
64046+ goto out;
64047+ }
64048+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64049+ error = -EACCES;
64050+ goto out;
64051+ }
64052+
64053 audit_inode(name, nd->path.dentry, 0);
64054+
64055 error = -EISDIR;
64056 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64057 goto out;
64058@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64059 if (unlikely(error))
64060 goto out;
64061
64062- error = do_last(nd, &path, file, op, &opened, pathname);
64063+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64064 while (unlikely(error > 0)) { /* trailing symlink */
64065 struct path link = path;
64066 void *cookie;
64067@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64068 error = follow_link(&link, nd, &cookie);
64069 if (unlikely(error))
64070 break;
64071- error = do_last(nd, &path, file, op, &opened, pathname);
64072+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64073 put_link(nd, &link, cookie);
64074 }
64075 out:
64076@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64077 goto unlock;
64078
64079 error = -EEXIST;
64080- if (d_is_positive(dentry))
64081+ if (d_is_positive(dentry)) {
64082+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64083+ error = -ENOENT;
64084 goto fail;
64085-
64086+ }
64087 /*
64088 * Special case - lookup gave negative, but... we had foo/bar/
64089 * From the vfs_mknod() POV we just have a negative dentry -
64090@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64091 }
64092 EXPORT_SYMBOL(user_path_create);
64093
64094+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64095+{
64096+ struct filename *tmp = getname(pathname);
64097+ struct dentry *res;
64098+ if (IS_ERR(tmp))
64099+ return ERR_CAST(tmp);
64100+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64101+ if (IS_ERR(res))
64102+ putname(tmp);
64103+ else
64104+ *to = tmp;
64105+ return res;
64106+}
64107+
64108 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64109 {
64110 int error = may_create(dir, dentry);
64111@@ -3446,6 +3555,17 @@ retry:
64112
64113 if (!IS_POSIXACL(path.dentry->d_inode))
64114 mode &= ~current_umask();
64115+
64116+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64117+ error = -EPERM;
64118+ goto out;
64119+ }
64120+
64121+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64122+ error = -EACCES;
64123+ goto out;
64124+ }
64125+
64126 error = security_path_mknod(&path, dentry, mode, dev);
64127 if (error)
64128 goto out;
64129@@ -3461,6 +3581,8 @@ retry:
64130 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64131 break;
64132 }
64133+ if (!error)
64134+ gr_handle_create(dentry, path.mnt);
64135 out:
64136 done_path_create(&path, dentry);
64137 if (retry_estale(error, lookup_flags)) {
64138@@ -3515,9 +3637,16 @@ retry:
64139
64140 if (!IS_POSIXACL(path.dentry->d_inode))
64141 mode &= ~current_umask();
64142+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64143+ error = -EACCES;
64144+ goto out;
64145+ }
64146 error = security_path_mkdir(&path, dentry, mode);
64147 if (!error)
64148 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64149+ if (!error)
64150+ gr_handle_create(dentry, path.mnt);
64151+out:
64152 done_path_create(&path, dentry);
64153 if (retry_estale(error, lookup_flags)) {
64154 lookup_flags |= LOOKUP_REVAL;
64155@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64156 struct filename *name;
64157 struct dentry *dentry;
64158 struct nameidata nd;
64159+ u64 saved_ino = 0;
64160+ dev_t saved_dev = 0;
64161 unsigned int lookup_flags = 0;
64162 retry:
64163 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64164@@ -3633,10 +3764,21 @@ retry:
64165 error = -ENOENT;
64166 goto exit3;
64167 }
64168+
64169+ saved_ino = gr_get_ino_from_dentry(dentry);
64170+ saved_dev = gr_get_dev_from_dentry(dentry);
64171+
64172+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64173+ error = -EACCES;
64174+ goto exit3;
64175+ }
64176+
64177 error = security_path_rmdir(&nd.path, dentry);
64178 if (error)
64179 goto exit3;
64180 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64181+ if (!error && (saved_dev || saved_ino))
64182+ gr_handle_delete(saved_ino, saved_dev);
64183 exit3:
64184 dput(dentry);
64185 exit2:
64186@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64187 struct nameidata nd;
64188 struct inode *inode = NULL;
64189 struct inode *delegated_inode = NULL;
64190+ u64 saved_ino = 0;
64191+ dev_t saved_dev = 0;
64192 unsigned int lookup_flags = 0;
64193 retry:
64194 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64195@@ -3755,10 +3899,22 @@ retry_deleg:
64196 if (d_is_negative(dentry))
64197 goto slashes;
64198 ihold(inode);
64199+
64200+ if (inode->i_nlink <= 1) {
64201+ saved_ino = gr_get_ino_from_dentry(dentry);
64202+ saved_dev = gr_get_dev_from_dentry(dentry);
64203+ }
64204+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64205+ error = -EACCES;
64206+ goto exit2;
64207+ }
64208+
64209 error = security_path_unlink(&nd.path, dentry);
64210 if (error)
64211 goto exit2;
64212 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64213+ if (!error && (saved_ino || saved_dev))
64214+ gr_handle_delete(saved_ino, saved_dev);
64215 exit2:
64216 dput(dentry);
64217 }
64218@@ -3847,9 +4003,17 @@ retry:
64219 if (IS_ERR(dentry))
64220 goto out_putname;
64221
64222+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64223+ error = -EACCES;
64224+ goto out;
64225+ }
64226+
64227 error = security_path_symlink(&path, dentry, from->name);
64228 if (!error)
64229 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64230+ if (!error)
64231+ gr_handle_create(dentry, path.mnt);
64232+out:
64233 done_path_create(&path, dentry);
64234 if (retry_estale(error, lookup_flags)) {
64235 lookup_flags |= LOOKUP_REVAL;
64236@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64237 struct dentry *new_dentry;
64238 struct path old_path, new_path;
64239 struct inode *delegated_inode = NULL;
64240+ struct filename *to = NULL;
64241 int how = 0;
64242 int error;
64243
64244@@ -3976,7 +4141,7 @@ retry:
64245 if (error)
64246 return error;
64247
64248- new_dentry = user_path_create(newdfd, newname, &new_path,
64249+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64250 (how & LOOKUP_REVAL));
64251 error = PTR_ERR(new_dentry);
64252 if (IS_ERR(new_dentry))
64253@@ -3988,11 +4153,28 @@ retry:
64254 error = may_linkat(&old_path);
64255 if (unlikely(error))
64256 goto out_dput;
64257+
64258+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64259+ old_path.dentry->d_inode,
64260+ old_path.dentry->d_inode->i_mode, to)) {
64261+ error = -EACCES;
64262+ goto out_dput;
64263+ }
64264+
64265+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64266+ old_path.dentry, old_path.mnt, to)) {
64267+ error = -EACCES;
64268+ goto out_dput;
64269+ }
64270+
64271 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64272 if (error)
64273 goto out_dput;
64274 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64275+ if (!error)
64276+ gr_handle_create(new_dentry, new_path.mnt);
64277 out_dput:
64278+ putname(to);
64279 done_path_create(&new_path, new_dentry);
64280 if (delegated_inode) {
64281 error = break_deleg_wait(&delegated_inode);
64282@@ -4308,6 +4490,20 @@ retry_deleg:
64283 if (new_dentry == trap)
64284 goto exit5;
64285
64286+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64287+ /* use EXDEV error to cause 'mv' to switch to an alternative
64288+ * method for usability
64289+ */
64290+ error = -EXDEV;
64291+ goto exit5;
64292+ }
64293+
64294+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64295+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64296+ to, flags);
64297+ if (error)
64298+ goto exit5;
64299+
64300 error = security_path_rename(&oldnd.path, old_dentry,
64301 &newnd.path, new_dentry, flags);
64302 if (error)
64303@@ -4315,6 +4511,9 @@ retry_deleg:
64304 error = vfs_rename(old_dir->d_inode, old_dentry,
64305 new_dir->d_inode, new_dentry,
64306 &delegated_inode, flags);
64307+ if (!error)
64308+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64309+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64310 exit5:
64311 dput(new_dentry);
64312 exit4:
64313@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64314
64315 int readlink_copy(char __user *buffer, int buflen, const char *link)
64316 {
64317+ char tmpbuf[64];
64318+ const char *newlink;
64319 int len = PTR_ERR(link);
64320+
64321 if (IS_ERR(link))
64322 goto out;
64323
64324 len = strlen(link);
64325 if (len > (unsigned) buflen)
64326 len = buflen;
64327- if (copy_to_user(buffer, link, len))
64328+
64329+ if (len < sizeof(tmpbuf)) {
64330+ memcpy(tmpbuf, link, len);
64331+ newlink = tmpbuf;
64332+ } else
64333+ newlink = link;
64334+
64335+ if (copy_to_user(buffer, newlink, len))
64336 len = -EFAULT;
64337 out:
64338 return len;
64339diff --git a/fs/namespace.c b/fs/namespace.c
64340index cd1e968..e64ff16 100644
64341--- a/fs/namespace.c
64342+++ b/fs/namespace.c
64343@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
64344 if (!(sb->s_flags & MS_RDONLY))
64345 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64346 up_write(&sb->s_umount);
64347+
64348+ gr_log_remount(mnt->mnt_devname, retval);
64349+
64350 return retval;
64351 }
64352
64353@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
64354 }
64355 unlock_mount_hash();
64356 namespace_unlock();
64357+
64358+ gr_log_unmount(mnt->mnt_devname, retval);
64359+
64360 return retval;
64361 }
64362
64363@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
64364 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64365 */
64366
64367-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64368+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64369 {
64370 struct path path;
64371 struct mount *mnt;
64372@@ -1565,7 +1571,7 @@ out:
64373 /*
64374 * The 2.0 compatible umount. No flags.
64375 */
64376-SYSCALL_DEFINE1(oldumount, char __user *, name)
64377+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64378 {
64379 return sys_umount(name, 0);
64380 }
64381@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64382 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64383 MS_STRICTATIME);
64384
64385+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64386+ retval = -EPERM;
64387+ goto dput_out;
64388+ }
64389+
64390+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64391+ retval = -EPERM;
64392+ goto dput_out;
64393+ }
64394+
64395 if (flags & MS_REMOUNT)
64396 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64397 data_page);
64398@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64399 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64400 dev_name, data_page);
64401 dput_out:
64402+ gr_log_mount(dev_name, &path, retval);
64403+
64404 path_put(&path);
64405+
64406 return retval;
64407 }
64408
64409@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64410 * number incrementing at 10Ghz will take 12,427 years to wrap which
64411 * is effectively never, so we can ignore the possibility.
64412 */
64413-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64414+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64415
64416 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64417 {
64418@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64419 return ERR_PTR(ret);
64420 }
64421 new_ns->ns.ops = &mntns_operations;
64422- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64423+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
64424 atomic_set(&new_ns->count, 1);
64425 new_ns->root = NULL;
64426 INIT_LIST_HEAD(&new_ns->list);
64427@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64428 return new_ns;
64429 }
64430
64431-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64432+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64433 struct user_namespace *user_ns, struct fs_struct *new_fs)
64434 {
64435 struct mnt_namespace *new_ns;
64436@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64437 }
64438 EXPORT_SYMBOL(mount_subtree);
64439
64440-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64441- char __user *, type, unsigned long, flags, void __user *, data)
64442+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64443+ const char __user *, type, unsigned long, flags, void __user *, data)
64444 {
64445 int ret;
64446 char *kernel_type;
64447@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64448 if (error)
64449 goto out2;
64450
64451+ if (gr_handle_chroot_pivot()) {
64452+ error = -EPERM;
64453+ goto out2;
64454+ }
64455+
64456 get_fs_root(current->fs, &root);
64457 old_mp = lock_mount(&old);
64458 error = PTR_ERR(old_mp);
64459@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
64460 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64461 return -EPERM;
64462
64463- if (fs->users != 1)
64464+ if (atomic_read(&fs->users) != 1)
64465 return -EINVAL;
64466
64467 get_mnt_ns(mnt_ns);
64468diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64469index 02f8d09..a5c25d1 100644
64470--- a/fs/nfs/callback_xdr.c
64471+++ b/fs/nfs/callback_xdr.c
64472@@ -51,7 +51,7 @@ struct callback_op {
64473 callback_decode_arg_t decode_args;
64474 callback_encode_res_t encode_res;
64475 long res_maxsize;
64476-};
64477+} __do_const;
64478
64479 static struct callback_op callback_ops[];
64480
64481diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64482index 2211f6b..30d0950 100644
64483--- a/fs/nfs/inode.c
64484+++ b/fs/nfs/inode.c
64485@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64486 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64487 }
64488
64489-static atomic_long_t nfs_attr_generation_counter;
64490+static atomic_long_unchecked_t nfs_attr_generation_counter;
64491
64492 static unsigned long nfs_read_attr_generation_counter(void)
64493 {
64494- return atomic_long_read(&nfs_attr_generation_counter);
64495+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64496 }
64497
64498 unsigned long nfs_inc_attr_generation_counter(void)
64499 {
64500- return atomic_long_inc_return(&nfs_attr_generation_counter);
64501+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64502 }
64503
64504 void nfs_fattr_init(struct nfs_fattr *fattr)
64505diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64506index ac71d13..a2e590a 100644
64507--- a/fs/nfsd/nfs4proc.c
64508+++ b/fs/nfsd/nfs4proc.c
64509@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
64510 nfsd4op_rsize op_rsize_bop;
64511 stateid_getter op_get_currentstateid;
64512 stateid_setter op_set_currentstateid;
64513-};
64514+} __do_const;
64515
64516 static struct nfsd4_operation nfsd4_ops[];
64517
64518diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64519index 15f7b73..00e230b 100644
64520--- a/fs/nfsd/nfs4xdr.c
64521+++ b/fs/nfsd/nfs4xdr.c
64522@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64523
64524 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64525
64526-static nfsd4_dec nfsd4_dec_ops[] = {
64527+static const nfsd4_dec nfsd4_dec_ops[] = {
64528 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64529 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64530 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64531diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64532index 83a9694..6b7f928 100644
64533--- a/fs/nfsd/nfscache.c
64534+++ b/fs/nfsd/nfscache.c
64535@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64536 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64537 u32 hash;
64538 struct nfsd_drc_bucket *b;
64539- int len;
64540+ long len;
64541 size_t bufsize = 0;
64542
64543 if (!rp)
64544@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64545 hash = nfsd_cache_hash(rp->c_xid);
64546 b = &drc_hashtbl[hash];
64547
64548- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64549- len >>= 2;
64550+ if (statp) {
64551+ len = (char*)statp - (char*)resv->iov_base;
64552+ len = resv->iov_len - len;
64553+ len >>= 2;
64554+ }
64555
64556 /* Don't cache excessive amounts of data and XDR failures */
64557- if (!statp || len > (256 >> 2)) {
64558+ if (!statp || len > (256 >> 2) || len < 0) {
64559 nfsd_reply_cache_free(b, rp);
64560 return;
64561 }
64562@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64563 switch (cachetype) {
64564 case RC_REPLSTAT:
64565 if (len != 1)
64566- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64567+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64568 rp->c_replstat = *statp;
64569 break;
64570 case RC_REPLBUFF:
64571diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64572index 5685c67..73029ef 100644
64573--- a/fs/nfsd/vfs.c
64574+++ b/fs/nfsd/vfs.c
64575@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64576
64577 oldfs = get_fs();
64578 set_fs(KERNEL_DS);
64579- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64580+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64581 set_fs(oldfs);
64582 return nfsd_finish_read(file, count, host_err);
64583 }
64584@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64585
64586 /* Write the data. */
64587 oldfs = get_fs(); set_fs(KERNEL_DS);
64588- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64589+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64590 set_fs(oldfs);
64591 if (host_err < 0)
64592 goto out_nfserr;
64593@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64594 */
64595
64596 oldfs = get_fs(); set_fs(KERNEL_DS);
64597- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64598+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64599 set_fs(oldfs);
64600
64601 if (host_err < 0)
64602diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64603index 52ccd34..7a6b202 100644
64604--- a/fs/nls/nls_base.c
64605+++ b/fs/nls/nls_base.c
64606@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64607
64608 int __register_nls(struct nls_table *nls, struct module *owner)
64609 {
64610- struct nls_table ** tmp = &tables;
64611+ struct nls_table *tmp = tables;
64612
64613 if (nls->next)
64614 return -EBUSY;
64615
64616- nls->owner = owner;
64617+ pax_open_kernel();
64618+ *(void **)&nls->owner = owner;
64619+ pax_close_kernel();
64620 spin_lock(&nls_lock);
64621- while (*tmp) {
64622- if (nls == *tmp) {
64623+ while (tmp) {
64624+ if (nls == tmp) {
64625 spin_unlock(&nls_lock);
64626 return -EBUSY;
64627 }
64628- tmp = &(*tmp)->next;
64629+ tmp = tmp->next;
64630 }
64631- nls->next = tables;
64632+ pax_open_kernel();
64633+ *(struct nls_table **)&nls->next = tables;
64634+ pax_close_kernel();
64635 tables = nls;
64636 spin_unlock(&nls_lock);
64637 return 0;
64638@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64639
64640 int unregister_nls(struct nls_table * nls)
64641 {
64642- struct nls_table ** tmp = &tables;
64643+ struct nls_table * const * tmp = &tables;
64644
64645 spin_lock(&nls_lock);
64646 while (*tmp) {
64647 if (nls == *tmp) {
64648- *tmp = nls->next;
64649+ pax_open_kernel();
64650+ *(struct nls_table **)tmp = nls->next;
64651+ pax_close_kernel();
64652 spin_unlock(&nls_lock);
64653 return 0;
64654 }
64655@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
64656 return -EINVAL;
64657 }
64658
64659-static struct nls_table *find_nls(char *charset)
64660+static struct nls_table *find_nls(const char *charset)
64661 {
64662 struct nls_table *nls;
64663 spin_lock(&nls_lock);
64664@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
64665 return nls;
64666 }
64667
64668-struct nls_table *load_nls(char *charset)
64669+struct nls_table *load_nls(const char *charset)
64670 {
64671 return try_then_request_module(find_nls(charset), "nls_%s", charset);
64672 }
64673diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
64674index 162b3f1..6076a7c 100644
64675--- a/fs/nls/nls_euc-jp.c
64676+++ b/fs/nls/nls_euc-jp.c
64677@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
64678 p_nls = load_nls("cp932");
64679
64680 if (p_nls) {
64681- table.charset2upper = p_nls->charset2upper;
64682- table.charset2lower = p_nls->charset2lower;
64683+ pax_open_kernel();
64684+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64685+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64686+ pax_close_kernel();
64687 return register_nls(&table);
64688 }
64689
64690diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
64691index a80a741..7b96e1b 100644
64692--- a/fs/nls/nls_koi8-ru.c
64693+++ b/fs/nls/nls_koi8-ru.c
64694@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
64695 p_nls = load_nls("koi8-u");
64696
64697 if (p_nls) {
64698- table.charset2upper = p_nls->charset2upper;
64699- table.charset2lower = p_nls->charset2lower;
64700+ pax_open_kernel();
64701+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64702+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64703+ pax_close_kernel();
64704 return register_nls(&table);
64705 }
64706
64707diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
64708index bff8567..83281c6 100644
64709--- a/fs/notify/fanotify/fanotify_user.c
64710+++ b/fs/notify/fanotify/fanotify_user.c
64711@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
64712
64713 fd = fanotify_event_metadata.fd;
64714 ret = -EFAULT;
64715- if (copy_to_user(buf, &fanotify_event_metadata,
64716- fanotify_event_metadata.event_len))
64717+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
64718+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
64719 goto out_close_fd;
64720
64721 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
64722diff --git a/fs/notify/notification.c b/fs/notify/notification.c
64723index a95d8e0..a91a5fd 100644
64724--- a/fs/notify/notification.c
64725+++ b/fs/notify/notification.c
64726@@ -48,7 +48,7 @@
64727 #include <linux/fsnotify_backend.h>
64728 #include "fsnotify.h"
64729
64730-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64731+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64732
64733 /**
64734 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
64735@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64736 */
64737 u32 fsnotify_get_cookie(void)
64738 {
64739- return atomic_inc_return(&fsnotify_sync_cookie);
64740+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
64741 }
64742 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
64743
64744diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
64745index 9e38daf..5727cae 100644
64746--- a/fs/ntfs/dir.c
64747+++ b/fs/ntfs/dir.c
64748@@ -1310,7 +1310,7 @@ find_next_index_buffer:
64749 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
64750 ~(s64)(ndir->itype.index.block_size - 1)));
64751 /* Bounds checks. */
64752- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64753+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64754 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
64755 "inode 0x%lx or driver bug.", vdir->i_ino);
64756 goto err_out;
64757diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
64758index 643faa4..ef9027e 100644
64759--- a/fs/ntfs/file.c
64760+++ b/fs/ntfs/file.c
64761@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
64762 char *addr;
64763 size_t total = 0;
64764 unsigned len;
64765- int left;
64766+ unsigned left;
64767
64768 do {
64769 len = PAGE_CACHE_SIZE - ofs;
64770diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
64771index 9e1e112..241a52a 100644
64772--- a/fs/ntfs/super.c
64773+++ b/fs/ntfs/super.c
64774@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64775 if (!silent)
64776 ntfs_error(sb, "Primary boot sector is invalid.");
64777 } else if (!silent)
64778- ntfs_error(sb, read_err_str, "primary");
64779+ ntfs_error(sb, read_err_str, "%s", "primary");
64780 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
64781 if (bh_primary)
64782 brelse(bh_primary);
64783@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64784 goto hotfix_primary_boot_sector;
64785 brelse(bh_backup);
64786 } else if (!silent)
64787- ntfs_error(sb, read_err_str, "backup");
64788+ ntfs_error(sb, read_err_str, "%s", "backup");
64789 /* Try to read NT3.51- backup boot sector. */
64790 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
64791 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
64792@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64793 "sector.");
64794 brelse(bh_backup);
64795 } else if (!silent)
64796- ntfs_error(sb, read_err_str, "backup");
64797+ ntfs_error(sb, read_err_str, "%s", "backup");
64798 /* We failed. Cleanup and return. */
64799 if (bh_primary)
64800 brelse(bh_primary);
64801diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
64802index 0440134..d52c93a 100644
64803--- a/fs/ocfs2/localalloc.c
64804+++ b/fs/ocfs2/localalloc.c
64805@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
64806 goto bail;
64807 }
64808
64809- atomic_inc(&osb->alloc_stats.moves);
64810+ atomic_inc_unchecked(&osb->alloc_stats.moves);
64811
64812 bail:
64813 if (handle)
64814diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
64815index 7d6b7d0..5fb529a 100644
64816--- a/fs/ocfs2/ocfs2.h
64817+++ b/fs/ocfs2/ocfs2.h
64818@@ -242,11 +242,11 @@ enum ocfs2_vol_state
64819
64820 struct ocfs2_alloc_stats
64821 {
64822- atomic_t moves;
64823- atomic_t local_data;
64824- atomic_t bitmap_data;
64825- atomic_t bg_allocs;
64826- atomic_t bg_extends;
64827+ atomic_unchecked_t moves;
64828+ atomic_unchecked_t local_data;
64829+ atomic_unchecked_t bitmap_data;
64830+ atomic_unchecked_t bg_allocs;
64831+ atomic_unchecked_t bg_extends;
64832 };
64833
64834 enum ocfs2_local_alloc_state
64835diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
64836index 0cb889a..6a26b24 100644
64837--- a/fs/ocfs2/suballoc.c
64838+++ b/fs/ocfs2/suballoc.c
64839@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
64840 mlog_errno(status);
64841 goto bail;
64842 }
64843- atomic_inc(&osb->alloc_stats.bg_extends);
64844+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
64845
64846 /* You should never ask for this much metadata */
64847 BUG_ON(bits_wanted >
64848@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
64849 mlog_errno(status);
64850 goto bail;
64851 }
64852- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64853+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64854
64855 *suballoc_loc = res.sr_bg_blkno;
64856 *suballoc_bit_start = res.sr_bit_offset;
64857@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
64858 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
64859 res->sr_bits);
64860
64861- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64862+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64863
64864 BUG_ON(res->sr_bits != 1);
64865
64866@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
64867 mlog_errno(status);
64868 goto bail;
64869 }
64870- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64871+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64872
64873 BUG_ON(res.sr_bits != 1);
64874
64875@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64876 cluster_start,
64877 num_clusters);
64878 if (!status)
64879- atomic_inc(&osb->alloc_stats.local_data);
64880+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
64881 } else {
64882 if (min_clusters > (osb->bitmap_cpg - 1)) {
64883 /* The only paths asking for contiguousness
64884@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64885 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
64886 res.sr_bg_blkno,
64887 res.sr_bit_offset);
64888- atomic_inc(&osb->alloc_stats.bitmap_data);
64889+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
64890 *num_clusters = res.sr_bits;
64891 }
64892 }
64893diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
64894index 8372317..ec86e79 100644
64895--- a/fs/ocfs2/super.c
64896+++ b/fs/ocfs2/super.c
64897@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
64898 "%10s => GlobalAllocs: %d LocalAllocs: %d "
64899 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
64900 "Stats",
64901- atomic_read(&osb->alloc_stats.bitmap_data),
64902- atomic_read(&osb->alloc_stats.local_data),
64903- atomic_read(&osb->alloc_stats.bg_allocs),
64904- atomic_read(&osb->alloc_stats.moves),
64905- atomic_read(&osb->alloc_stats.bg_extends));
64906+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
64907+ atomic_read_unchecked(&osb->alloc_stats.local_data),
64908+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
64909+ atomic_read_unchecked(&osb->alloc_stats.moves),
64910+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
64911
64912 out += snprintf(buf + out, len - out,
64913 "%10s => State: %u Descriptor: %llu Size: %u bits "
64914@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
64915
64916 mutex_init(&osb->system_file_mutex);
64917
64918- atomic_set(&osb->alloc_stats.moves, 0);
64919- atomic_set(&osb->alloc_stats.local_data, 0);
64920- atomic_set(&osb->alloc_stats.bitmap_data, 0);
64921- atomic_set(&osb->alloc_stats.bg_allocs, 0);
64922- atomic_set(&osb->alloc_stats.bg_extends, 0);
64923+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
64924+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
64925+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
64926+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
64927+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
64928
64929 /* Copy the blockcheck stats from the superblock probe */
64930 osb->osb_ecc_stats = *stats;
64931diff --git a/fs/open.c b/fs/open.c
64932index 813be03..781941d 100644
64933--- a/fs/open.c
64934+++ b/fs/open.c
64935@@ -32,6 +32,8 @@
64936 #include <linux/dnotify.h>
64937 #include <linux/compat.h>
64938
64939+#define CREATE_TRACE_POINTS
64940+#include <trace/events/fs.h>
64941 #include "internal.h"
64942
64943 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
64944@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
64945 error = locks_verify_truncate(inode, NULL, length);
64946 if (!error)
64947 error = security_path_truncate(path);
64948+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
64949+ error = -EACCES;
64950 if (!error)
64951 error = do_truncate(path->dentry, length, 0, NULL);
64952
64953@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
64954 error = locks_verify_truncate(inode, f.file, length);
64955 if (!error)
64956 error = security_path_truncate(&f.file->f_path);
64957+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
64958+ error = -EACCES;
64959 if (!error)
64960 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
64961 sb_end_write(inode->i_sb);
64962@@ -392,6 +398,9 @@ retry:
64963 if (__mnt_is_readonly(path.mnt))
64964 res = -EROFS;
64965
64966+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
64967+ res = -EACCES;
64968+
64969 out_path_release:
64970 path_put(&path);
64971 if (retry_estale(res, lookup_flags)) {
64972@@ -423,6 +432,8 @@ retry:
64973 if (error)
64974 goto dput_and_out;
64975
64976+ gr_log_chdir(path.dentry, path.mnt);
64977+
64978 set_fs_pwd(current->fs, &path);
64979
64980 dput_and_out:
64981@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
64982 goto out_putf;
64983
64984 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
64985+
64986+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
64987+ error = -EPERM;
64988+
64989+ if (!error)
64990+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
64991+
64992 if (!error)
64993 set_fs_pwd(current->fs, &f.file->f_path);
64994 out_putf:
64995@@ -481,7 +499,13 @@ retry:
64996 if (error)
64997 goto dput_and_out;
64998
64999+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65000+ goto dput_and_out;
65001+
65002 set_fs_root(current->fs, &path);
65003+
65004+ gr_handle_chroot_chdir(&path);
65005+
65006 error = 0;
65007 dput_and_out:
65008 path_put(&path);
65009@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65010 return error;
65011 retry_deleg:
65012 mutex_lock(&inode->i_mutex);
65013+
65014+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65015+ error = -EACCES;
65016+ goto out_unlock;
65017+ }
65018+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65019+ error = -EACCES;
65020+ goto out_unlock;
65021+ }
65022+
65023 error = security_path_chmod(path, mode);
65024 if (error)
65025 goto out_unlock;
65026@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65027 uid = make_kuid(current_user_ns(), user);
65028 gid = make_kgid(current_user_ns(), group);
65029
65030+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65031+ return -EACCES;
65032+
65033 newattrs.ia_valid = ATTR_CTIME;
65034 if (user != (uid_t) -1) {
65035 if (!uid_valid(uid))
65036@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65037 } else {
65038 fsnotify_open(f);
65039 fd_install(fd, f);
65040+ trace_do_sys_open(tmp->name, flags, mode);
65041 }
65042 }
65043 putname(tmp);
65044diff --git a/fs/pipe.c b/fs/pipe.c
65045index 21981e5..3d5f55c 100644
65046--- a/fs/pipe.c
65047+++ b/fs/pipe.c
65048@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65049
65050 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65051 {
65052- if (pipe->files)
65053+ if (atomic_read(&pipe->files))
65054 mutex_lock_nested(&pipe->mutex, subclass);
65055 }
65056
65057@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65058
65059 void pipe_unlock(struct pipe_inode_info *pipe)
65060 {
65061- if (pipe->files)
65062+ if (atomic_read(&pipe->files))
65063 mutex_unlock(&pipe->mutex);
65064 }
65065 EXPORT_SYMBOL(pipe_unlock);
65066@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65067 }
65068 if (bufs) /* More to do? */
65069 continue;
65070- if (!pipe->writers)
65071+ if (!atomic_read(&pipe->writers))
65072 break;
65073- if (!pipe->waiting_writers) {
65074+ if (!atomic_read(&pipe->waiting_writers)) {
65075 /* syscall merging: Usually we must not sleep
65076 * if O_NONBLOCK is set, or if we got some data.
65077 * But if a writer sleeps in kernel space, then
65078@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65079
65080 __pipe_lock(pipe);
65081
65082- if (!pipe->readers) {
65083+ if (!atomic_read(&pipe->readers)) {
65084 send_sig(SIGPIPE, current, 0);
65085 ret = -EPIPE;
65086 goto out;
65087@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65088 for (;;) {
65089 int bufs;
65090
65091- if (!pipe->readers) {
65092+ if (!atomic_read(&pipe->readers)) {
65093 send_sig(SIGPIPE, current, 0);
65094 if (!ret)
65095 ret = -EPIPE;
65096@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65097 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65098 do_wakeup = 0;
65099 }
65100- pipe->waiting_writers++;
65101+ atomic_inc(&pipe->waiting_writers);
65102 pipe_wait(pipe);
65103- pipe->waiting_writers--;
65104+ atomic_dec(&pipe->waiting_writers);
65105 }
65106 out:
65107 __pipe_unlock(pipe);
65108@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65109 mask = 0;
65110 if (filp->f_mode & FMODE_READ) {
65111 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65112- if (!pipe->writers && filp->f_version != pipe->w_counter)
65113+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65114 mask |= POLLHUP;
65115 }
65116
65117@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65118 * Most Unices do not set POLLERR for FIFOs but on Linux they
65119 * behave exactly like pipes for poll().
65120 */
65121- if (!pipe->readers)
65122+ if (!atomic_read(&pipe->readers))
65123 mask |= POLLERR;
65124 }
65125
65126@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65127 int kill = 0;
65128
65129 spin_lock(&inode->i_lock);
65130- if (!--pipe->files) {
65131+ if (atomic_dec_and_test(&pipe->files)) {
65132 inode->i_pipe = NULL;
65133 kill = 1;
65134 }
65135@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65136
65137 __pipe_lock(pipe);
65138 if (file->f_mode & FMODE_READ)
65139- pipe->readers--;
65140+ atomic_dec(&pipe->readers);
65141 if (file->f_mode & FMODE_WRITE)
65142- pipe->writers--;
65143+ atomic_dec(&pipe->writers);
65144
65145- if (pipe->readers || pipe->writers) {
65146+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65147 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65148 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65149 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65150@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65151 kfree(pipe);
65152 }
65153
65154-static struct vfsmount *pipe_mnt __read_mostly;
65155+struct vfsmount *pipe_mnt __read_mostly;
65156
65157 /*
65158 * pipefs_dname() is called from d_path().
65159@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65160 goto fail_iput;
65161
65162 inode->i_pipe = pipe;
65163- pipe->files = 2;
65164- pipe->readers = pipe->writers = 1;
65165+ atomic_set(&pipe->files, 2);
65166+ atomic_set(&pipe->readers, 1);
65167+ atomic_set(&pipe->writers, 1);
65168 inode->i_fop = &pipefifo_fops;
65169
65170 /*
65171@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65172 spin_lock(&inode->i_lock);
65173 if (inode->i_pipe) {
65174 pipe = inode->i_pipe;
65175- pipe->files++;
65176+ atomic_inc(&pipe->files);
65177 spin_unlock(&inode->i_lock);
65178 } else {
65179 spin_unlock(&inode->i_lock);
65180 pipe = alloc_pipe_info();
65181 if (!pipe)
65182 return -ENOMEM;
65183- pipe->files = 1;
65184+ atomic_set(&pipe->files, 1);
65185 spin_lock(&inode->i_lock);
65186 if (unlikely(inode->i_pipe)) {
65187- inode->i_pipe->files++;
65188+ atomic_inc(&inode->i_pipe->files);
65189 spin_unlock(&inode->i_lock);
65190 free_pipe_info(pipe);
65191 pipe = inode->i_pipe;
65192@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65193 * opened, even when there is no process writing the FIFO.
65194 */
65195 pipe->r_counter++;
65196- if (pipe->readers++ == 0)
65197+ if (atomic_inc_return(&pipe->readers) == 1)
65198 wake_up_partner(pipe);
65199
65200- if (!is_pipe && !pipe->writers) {
65201+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65202 if ((filp->f_flags & O_NONBLOCK)) {
65203 /* suppress POLLHUP until we have
65204 * seen a writer */
65205@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65206 * errno=ENXIO when there is no process reading the FIFO.
65207 */
65208 ret = -ENXIO;
65209- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65210+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65211 goto err;
65212
65213 pipe->w_counter++;
65214- if (!pipe->writers++)
65215+ if (atomic_inc_return(&pipe->writers) == 1)
65216 wake_up_partner(pipe);
65217
65218- if (!is_pipe && !pipe->readers) {
65219+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65220 if (wait_for_partner(pipe, &pipe->r_counter))
65221 goto err_wr;
65222 }
65223@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65224 * the process can at least talk to itself.
65225 */
65226
65227- pipe->readers++;
65228- pipe->writers++;
65229+ atomic_inc(&pipe->readers);
65230+ atomic_inc(&pipe->writers);
65231 pipe->r_counter++;
65232 pipe->w_counter++;
65233- if (pipe->readers == 1 || pipe->writers == 1)
65234+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65235 wake_up_partner(pipe);
65236 break;
65237
65238@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65239 return 0;
65240
65241 err_rd:
65242- if (!--pipe->readers)
65243+ if (atomic_dec_and_test(&pipe->readers))
65244 wake_up_interruptible(&pipe->wait);
65245 ret = -ERESTARTSYS;
65246 goto err;
65247
65248 err_wr:
65249- if (!--pipe->writers)
65250+ if (atomic_dec_and_test(&pipe->writers))
65251 wake_up_interruptible(&pipe->wait);
65252 ret = -ERESTARTSYS;
65253 goto err;
65254diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65255index 0855f77..6787d50 100644
65256--- a/fs/posix_acl.c
65257+++ b/fs/posix_acl.c
65258@@ -20,6 +20,7 @@
65259 #include <linux/xattr.h>
65260 #include <linux/export.h>
65261 #include <linux/user_namespace.h>
65262+#include <linux/grsecurity.h>
65263
65264 struct posix_acl **acl_by_type(struct inode *inode, int type)
65265 {
65266@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65267 }
65268 }
65269 if (mode_p)
65270- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65271+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65272 return not_equiv;
65273 }
65274 EXPORT_SYMBOL(posix_acl_equiv_mode);
65275@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65276 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65277 }
65278
65279- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65280+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65281 return not_equiv;
65282 }
65283
65284@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65285 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65286 int err = -ENOMEM;
65287 if (clone) {
65288+ *mode_p &= ~gr_acl_umask();
65289+
65290 err = posix_acl_create_masq(clone, mode_p);
65291 if (err < 0) {
65292 posix_acl_release(clone);
65293@@ -659,11 +662,12 @@ struct posix_acl *
65294 posix_acl_from_xattr(struct user_namespace *user_ns,
65295 const void *value, size_t size)
65296 {
65297- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65298- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65299+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65300+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65301 int count;
65302 struct posix_acl *acl;
65303 struct posix_acl_entry *acl_e;
65304+ umode_t umask = gr_acl_umask();
65305
65306 if (!value)
65307 return NULL;
65308@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65309
65310 switch(acl_e->e_tag) {
65311 case ACL_USER_OBJ:
65312+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65313+ break;
65314 case ACL_GROUP_OBJ:
65315 case ACL_MASK:
65316+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65317+ break;
65318 case ACL_OTHER:
65319+ acl_e->e_perm &= ~(umask & S_IRWXO);
65320 break;
65321
65322 case ACL_USER:
65323+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65324 acl_e->e_uid =
65325 make_kuid(user_ns,
65326 le32_to_cpu(entry->e_id));
65327@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65328 goto fail;
65329 break;
65330 case ACL_GROUP:
65331+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65332 acl_e->e_gid =
65333 make_kgid(user_ns,
65334 le32_to_cpu(entry->e_id));
65335diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65336index 2183fcf..3c32a98 100644
65337--- a/fs/proc/Kconfig
65338+++ b/fs/proc/Kconfig
65339@@ -30,7 +30,7 @@ config PROC_FS
65340
65341 config PROC_KCORE
65342 bool "/proc/kcore support" if !ARM
65343- depends on PROC_FS && MMU
65344+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65345 help
65346 Provides a virtual ELF core file of the live kernel. This can
65347 be read with gdb and other ELF tools. No modifications can be
65348@@ -38,8 +38,8 @@ config PROC_KCORE
65349
65350 config PROC_VMCORE
65351 bool "/proc/vmcore support"
65352- depends on PROC_FS && CRASH_DUMP
65353- default y
65354+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65355+ default n
65356 help
65357 Exports the dump image of crashed kernel in ELF format.
65358
65359@@ -63,8 +63,8 @@ config PROC_SYSCTL
65360 limited in memory.
65361
65362 config PROC_PAGE_MONITOR
65363- default y
65364- depends on PROC_FS && MMU
65365+ default n
65366+ depends on PROC_FS && MMU && !GRKERNSEC
65367 bool "Enable /proc page monitoring" if EXPERT
65368 help
65369 Various /proc files exist to monitor process memory utilization:
65370diff --git a/fs/proc/array.c b/fs/proc/array.c
65371index bd117d0..e6872d7 100644
65372--- a/fs/proc/array.c
65373+++ b/fs/proc/array.c
65374@@ -60,6 +60,7 @@
65375 #include <linux/tty.h>
65376 #include <linux/string.h>
65377 #include <linux/mman.h>
65378+#include <linux/grsecurity.h>
65379 #include <linux/proc_fs.h>
65380 #include <linux/ioport.h>
65381 #include <linux/uaccess.h>
65382@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65383 seq_putc(m, '\n');
65384 }
65385
65386+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65387+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65388+{
65389+ if (p->mm)
65390+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65391+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65392+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65393+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65394+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65395+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65396+ else
65397+ seq_printf(m, "PaX:\t-----\n");
65398+}
65399+#endif
65400+
65401 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65402 struct pid *pid, struct task_struct *task)
65403 {
65404@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65405 task_cpus_allowed(m, task);
65406 cpuset_task_status_allowed(m, task);
65407 task_context_switch_counts(m, task);
65408+
65409+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65410+ task_pax(m, task);
65411+#endif
65412+
65413+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65414+ task_grsec_rbac(m, task);
65415+#endif
65416+
65417 return 0;
65418 }
65419
65420+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65421+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65422+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65423+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65424+#endif
65425+
65426 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65427 struct pid *pid, struct task_struct *task, int whole)
65428 {
65429@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65430 char tcomm[sizeof(task->comm)];
65431 unsigned long flags;
65432
65433+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65434+ if (current->exec_id != m->exec_id) {
65435+ gr_log_badprocpid("stat");
65436+ return 0;
65437+ }
65438+#endif
65439+
65440 state = *get_task_state(task);
65441 vsize = eip = esp = 0;
65442 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65443@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65444 gtime = task_gtime(task);
65445 }
65446
65447+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65448+ if (PAX_RAND_FLAGS(mm)) {
65449+ eip = 0;
65450+ esp = 0;
65451+ wchan = 0;
65452+ }
65453+#endif
65454+#ifdef CONFIG_GRKERNSEC_HIDESYM
65455+ wchan = 0;
65456+ eip =0;
65457+ esp =0;
65458+#endif
65459+
65460 /* scale priority and nice values from timeslices to -20..20 */
65461 /* to make it look like a "normal" Unix priority/nice value */
65462 priority = task_prio(task);
65463@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65464 seq_put_decimal_ull(m, ' ', vsize);
65465 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65466 seq_put_decimal_ull(m, ' ', rsslim);
65467+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65468+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65469+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65470+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65471+#else
65472 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65473 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65474 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65475+#endif
65476 seq_put_decimal_ull(m, ' ', esp);
65477 seq_put_decimal_ull(m, ' ', eip);
65478 /* The signal information here is obsolete.
65479@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65480 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65481 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65482
65483- if (mm && permitted) {
65484+ if (mm && permitted
65485+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65486+ && !PAX_RAND_FLAGS(mm)
65487+#endif
65488+ ) {
65489 seq_put_decimal_ull(m, ' ', mm->start_data);
65490 seq_put_decimal_ull(m, ' ', mm->end_data);
65491 seq_put_decimal_ull(m, ' ', mm->start_brk);
65492@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65493 struct pid *pid, struct task_struct *task)
65494 {
65495 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65496- struct mm_struct *mm = get_task_mm(task);
65497+ struct mm_struct *mm;
65498
65499+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65500+ if (current->exec_id != m->exec_id) {
65501+ gr_log_badprocpid("statm");
65502+ return 0;
65503+ }
65504+#endif
65505+ mm = get_task_mm(task);
65506 if (mm) {
65507 size = task_statm(mm, &shared, &text, &data, &resident);
65508 mmput(mm);
65509@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65510 return 0;
65511 }
65512
65513+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65514+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65515+{
65516+ unsigned long flags;
65517+ u32 curr_ip = 0;
65518+
65519+ if (lock_task_sighand(task, &flags)) {
65520+ curr_ip = task->signal->curr_ip;
65521+ unlock_task_sighand(task, &flags);
65522+ }
65523+ return seq_printf(m, "%pI4\n", &curr_ip);
65524+}
65525+#endif
65526+
65527 #ifdef CONFIG_CHECKPOINT_RESTORE
65528 static struct pid *
65529 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
65530diff --git a/fs/proc/base.c b/fs/proc/base.c
65531index 3f3d7ae..68de109 100644
65532--- a/fs/proc/base.c
65533+++ b/fs/proc/base.c
65534@@ -113,6 +113,14 @@ struct pid_entry {
65535 union proc_op op;
65536 };
65537
65538+struct getdents_callback {
65539+ struct linux_dirent __user * current_dir;
65540+ struct linux_dirent __user * previous;
65541+ struct file * file;
65542+ int count;
65543+ int error;
65544+};
65545+
65546 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65547 .name = (NAME), \
65548 .len = sizeof(NAME) - 1, \
65549@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65550 return 0;
65551 }
65552
65553+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65554+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65555+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65556+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65557+#endif
65558+
65559 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65560 struct pid *pid, struct task_struct *task)
65561 {
65562 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65563 if (mm && !IS_ERR(mm)) {
65564 unsigned int nwords = 0;
65565+
65566+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65567+ /* allow if we're currently ptracing this task */
65568+ if (PAX_RAND_FLAGS(mm) &&
65569+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65570+ mmput(mm);
65571+ return 0;
65572+ }
65573+#endif
65574+
65575 do {
65576 nwords += 2;
65577 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65578@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65579 }
65580
65581
65582-#ifdef CONFIG_KALLSYMS
65583+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65584 /*
65585 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65586 * Returns the resolved symbol. If that fails, simply return the address.
65587@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65588 mutex_unlock(&task->signal->cred_guard_mutex);
65589 }
65590
65591-#ifdef CONFIG_STACKTRACE
65592+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65593
65594 #define MAX_STACK_TRACE_DEPTH 64
65595
65596@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65597 return 0;
65598 }
65599
65600-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65601+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65602 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65603 struct pid *pid, struct task_struct *task)
65604 {
65605@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65606 /************************************************************************/
65607
65608 /* permission checks */
65609-static int proc_fd_access_allowed(struct inode *inode)
65610+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65611 {
65612 struct task_struct *task;
65613 int allowed = 0;
65614@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65615 */
65616 task = get_proc_task(inode);
65617 if (task) {
65618- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65619+ if (log)
65620+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65621+ else
65622+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65623 put_task_struct(task);
65624 }
65625 return allowed;
65626@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
65627 struct task_struct *task,
65628 int hide_pid_min)
65629 {
65630+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65631+ return false;
65632+
65633+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65634+ rcu_read_lock();
65635+ {
65636+ const struct cred *tmpcred = current_cred();
65637+ const struct cred *cred = __task_cred(task);
65638+
65639+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
65640+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65641+ || in_group_p(grsec_proc_gid)
65642+#endif
65643+ ) {
65644+ rcu_read_unlock();
65645+ return true;
65646+ }
65647+ }
65648+ rcu_read_unlock();
65649+
65650+ if (!pid->hide_pid)
65651+ return false;
65652+#endif
65653+
65654 if (pid->hide_pid < hide_pid_min)
65655 return true;
65656 if (in_group_p(pid->pid_gid))
65657 return true;
65658+
65659 return ptrace_may_access(task, PTRACE_MODE_READ);
65660 }
65661
65662@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
65663 put_task_struct(task);
65664
65665 if (!has_perms) {
65666+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65667+ {
65668+#else
65669 if (pid->hide_pid == 2) {
65670+#endif
65671 /*
65672 * Let's make getdents(), stat(), and open()
65673 * consistent with each other. If a process
65674@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
65675
65676 if (task) {
65677 mm = mm_access(task, mode);
65678+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
65679+ mmput(mm);
65680+ mm = ERR_PTR(-EPERM);
65681+ }
65682 put_task_struct(task);
65683
65684 if (!IS_ERR_OR_NULL(mm)) {
65685@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
65686 return PTR_ERR(mm);
65687
65688 file->private_data = mm;
65689+
65690+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65691+ file->f_version = current->exec_id;
65692+#endif
65693+
65694 return 0;
65695 }
65696
65697@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65698 ssize_t copied;
65699 char *page;
65700
65701+#ifdef CONFIG_GRKERNSEC
65702+ if (write)
65703+ return -EPERM;
65704+#endif
65705+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65706+ if (file->f_version != current->exec_id) {
65707+ gr_log_badprocpid("mem");
65708+ return 0;
65709+ }
65710+#endif
65711+
65712 if (!mm)
65713 return 0;
65714
65715@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65716 goto free;
65717
65718 while (count > 0) {
65719- int this_len = min_t(int, count, PAGE_SIZE);
65720+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
65721
65722 if (write && copy_from_user(page, buf, this_len)) {
65723 copied = -EFAULT;
65724@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65725 if (!mm)
65726 return 0;
65727
65728+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65729+ if (file->f_version != current->exec_id) {
65730+ gr_log_badprocpid("environ");
65731+ return 0;
65732+ }
65733+#endif
65734+
65735 page = (char *)__get_free_page(GFP_TEMPORARY);
65736 if (!page)
65737 return -ENOMEM;
65738@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65739 goto free;
65740 while (count > 0) {
65741 size_t this_len, max_len;
65742- int retval;
65743+ ssize_t retval;
65744
65745 if (src >= (mm->env_end - mm->env_start))
65746 break;
65747@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
65748 int error = -EACCES;
65749
65750 /* Are we allowed to snoop on the tasks file descriptors? */
65751- if (!proc_fd_access_allowed(inode))
65752+ if (!proc_fd_access_allowed(inode, 0))
65753 goto out;
65754
65755 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65756@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
65757 struct path path;
65758
65759 /* Are we allowed to snoop on the tasks file descriptors? */
65760- if (!proc_fd_access_allowed(inode))
65761- goto out;
65762+ /* logging this is needed for learning on chromium to work properly,
65763+ but we don't want to flood the logs from 'ps' which does a readlink
65764+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
65765+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
65766+ */
65767+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
65768+ if (!proc_fd_access_allowed(inode,0))
65769+ goto out;
65770+ } else {
65771+ if (!proc_fd_access_allowed(inode,1))
65772+ goto out;
65773+ }
65774
65775 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65776 if (error)
65777@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
65778 rcu_read_lock();
65779 cred = __task_cred(task);
65780 inode->i_uid = cred->euid;
65781+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65782+ inode->i_gid = grsec_proc_gid;
65783+#else
65784 inode->i_gid = cred->egid;
65785+#endif
65786 rcu_read_unlock();
65787 }
65788 security_task_to_inode(task, inode);
65789@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
65790 return -ENOENT;
65791 }
65792 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65793+#ifdef CONFIG_GRKERNSEC_PROC_USER
65794+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65795+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65796+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65797+#endif
65798 task_dumpable(task)) {
65799 cred = __task_cred(task);
65800 stat->uid = cred->euid;
65801+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65802+ stat->gid = grsec_proc_gid;
65803+#else
65804 stat->gid = cred->egid;
65805+#endif
65806 }
65807 }
65808 rcu_read_unlock();
65809@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
65810
65811 if (task) {
65812 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65813+#ifdef CONFIG_GRKERNSEC_PROC_USER
65814+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65815+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65816+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65817+#endif
65818 task_dumpable(task)) {
65819 rcu_read_lock();
65820 cred = __task_cred(task);
65821 inode->i_uid = cred->euid;
65822+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65823+ inode->i_gid = grsec_proc_gid;
65824+#else
65825 inode->i_gid = cred->egid;
65826+#endif
65827 rcu_read_unlock();
65828 } else {
65829 inode->i_uid = GLOBAL_ROOT_UID;
65830@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
65831 if (!task)
65832 goto out_no_task;
65833
65834+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65835+ goto out;
65836+
65837 /*
65838 * Yes, it does not scale. And it should not. Don't add
65839 * new entries into /proc/<tgid>/ without very good reasons.
65840@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
65841 if (!task)
65842 return -ENOENT;
65843
65844+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65845+ goto out;
65846+
65847 if (!dir_emit_dots(file, ctx))
65848 goto out;
65849
65850@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
65851 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
65852 #endif
65853 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65854-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65855+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65856 ONE("syscall", S_IRUSR, proc_pid_syscall),
65857 #endif
65858 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65859@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
65860 #ifdef CONFIG_SECURITY
65861 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65862 #endif
65863-#ifdef CONFIG_KALLSYMS
65864+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65865 ONE("wchan", S_IRUGO, proc_pid_wchan),
65866 #endif
65867-#ifdef CONFIG_STACKTRACE
65868+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65869 ONE("stack", S_IRUSR, proc_pid_stack),
65870 #endif
65871 #ifdef CONFIG_SCHEDSTATS
65872@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
65873 #ifdef CONFIG_HARDWALL
65874 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
65875 #endif
65876+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65877+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
65878+#endif
65879 #ifdef CONFIG_USER_NS
65880 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
65881 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
65882@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
65883 if (!inode)
65884 goto out;
65885
65886+#ifdef CONFIG_GRKERNSEC_PROC_USER
65887+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
65888+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65889+ inode->i_gid = grsec_proc_gid;
65890+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
65891+#else
65892 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
65893+#endif
65894 inode->i_op = &proc_tgid_base_inode_operations;
65895 inode->i_fop = &proc_tgid_base_operations;
65896 inode->i_flags|=S_IMMUTABLE;
65897@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
65898 if (!task)
65899 goto out;
65900
65901+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65902+ goto out_put_task;
65903+
65904 result = proc_pid_instantiate(dir, dentry, task, NULL);
65905+out_put_task:
65906 put_task_struct(task);
65907 out:
65908 return ERR_PTR(result);
65909@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
65910 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
65911 #endif
65912 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65913-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65914+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65915 ONE("syscall", S_IRUSR, proc_pid_syscall),
65916 #endif
65917 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65918@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
65919 #ifdef CONFIG_SECURITY
65920 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65921 #endif
65922-#ifdef CONFIG_KALLSYMS
65923+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65924 ONE("wchan", S_IRUGO, proc_pid_wchan),
65925 #endif
65926-#ifdef CONFIG_STACKTRACE
65927+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65928 ONE("stack", S_IRUSR, proc_pid_stack),
65929 #endif
65930 #ifdef CONFIG_SCHEDSTATS
65931diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
65932index cbd82df..c0407d2 100644
65933--- a/fs/proc/cmdline.c
65934+++ b/fs/proc/cmdline.c
65935@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
65936
65937 static int __init proc_cmdline_init(void)
65938 {
65939+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65940+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
65941+#else
65942 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
65943+#endif
65944 return 0;
65945 }
65946 fs_initcall(proc_cmdline_init);
65947diff --git a/fs/proc/devices.c b/fs/proc/devices.c
65948index 50493ed..248166b 100644
65949--- a/fs/proc/devices.c
65950+++ b/fs/proc/devices.c
65951@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
65952
65953 static int __init proc_devices_init(void)
65954 {
65955+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65956+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
65957+#else
65958 proc_create("devices", 0, NULL, &proc_devinfo_operations);
65959+#endif
65960 return 0;
65961 }
65962 fs_initcall(proc_devices_init);
65963diff --git a/fs/proc/fd.c b/fs/proc/fd.c
65964index 8e5ad83..1f07a8c 100644
65965--- a/fs/proc/fd.c
65966+++ b/fs/proc/fd.c
65967@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
65968 if (!task)
65969 return -ENOENT;
65970
65971- files = get_files_struct(task);
65972+ if (!gr_acl_handle_procpidmem(task))
65973+ files = get_files_struct(task);
65974 put_task_struct(task);
65975
65976 if (files) {
65977@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
65978 */
65979 int proc_fd_permission(struct inode *inode, int mask)
65980 {
65981+ struct task_struct *task;
65982 int rv = generic_permission(inode, mask);
65983- if (rv == 0)
65984- return 0;
65985+
65986 if (task_tgid(current) == proc_pid(inode))
65987 rv = 0;
65988+
65989+ task = get_proc_task(inode);
65990+ if (task == NULL)
65991+ return rv;
65992+
65993+ if (gr_acl_handle_procpidmem(task))
65994+ rv = -EACCES;
65995+
65996+ put_task_struct(task);
65997+
65998 return rv;
65999 }
66000
66001diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66002index b502bba..849e216 100644
66003--- a/fs/proc/generic.c
66004+++ b/fs/proc/generic.c
66005@@ -22,6 +22,7 @@
66006 #include <linux/bitops.h>
66007 #include <linux/spinlock.h>
66008 #include <linux/completion.h>
66009+#include <linux/grsecurity.h>
66010 #include <asm/uaccess.h>
66011
66012 #include "internal.h"
66013@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66014 return proc_lookup_de(PDE(dir), dir, dentry);
66015 }
66016
66017+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66018+ unsigned int flags)
66019+{
66020+ if (gr_proc_is_restricted())
66021+ return ERR_PTR(-EACCES);
66022+
66023+ return proc_lookup_de(PDE(dir), dir, dentry);
66024+}
66025+
66026 /*
66027 * This returns non-zero if at EOF, so that the /proc
66028 * root directory can use this and check if it should
66029@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66030 return proc_readdir_de(PDE(inode), file, ctx);
66031 }
66032
66033+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66034+{
66035+ struct inode *inode = file_inode(file);
66036+
66037+ if (gr_proc_is_restricted())
66038+ return -EACCES;
66039+
66040+ return proc_readdir_de(PDE(inode), file, ctx);
66041+}
66042+
66043 /*
66044 * These are the generic /proc directory operations. They
66045 * use the in-memory "struct proc_dir_entry" tree to parse
66046@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
66047 .iterate = proc_readdir,
66048 };
66049
66050+static const struct file_operations proc_dir_restricted_operations = {
66051+ .llseek = generic_file_llseek,
66052+ .read = generic_read_dir,
66053+ .iterate = proc_readdir_restrict,
66054+};
66055+
66056 /*
66057 * proc directories can do almost nothing..
66058 */
66059@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66060 .setattr = proc_notify_change,
66061 };
66062
66063+static const struct inode_operations proc_dir_restricted_inode_operations = {
66064+ .lookup = proc_lookup_restrict,
66065+ .getattr = proc_getattr,
66066+ .setattr = proc_notify_change,
66067+};
66068+
66069 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66070 {
66071 int ret;
66072@@ -339,8 +371,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66073 return ret;
66074
66075 if (S_ISDIR(dp->mode)) {
66076- dp->proc_fops = &proc_dir_operations;
66077- dp->proc_iops = &proc_dir_inode_operations;
66078+ if (dp->restricted) {
66079+ dp->proc_fops = &proc_dir_restricted_operations;
66080+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66081+ } else {
66082+ dp->proc_fops = &proc_dir_operations;
66083+ dp->proc_iops = &proc_dir_inode_operations;
66084+ }
66085 dir->nlink++;
66086 } else if (S_ISLNK(dp->mode)) {
66087 dp->proc_iops = &proc_link_inode_operations;
66088@@ -453,6 +490,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66089 }
66090 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66091
66092+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66093+ struct proc_dir_entry *parent, void *data)
66094+{
66095+ struct proc_dir_entry *ent;
66096+
66097+ if (mode == 0)
66098+ mode = S_IRUGO | S_IXUGO;
66099+
66100+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66101+ if (ent) {
66102+ ent->data = data;
66103+ ent->restricted = 1;
66104+ if (proc_register(parent, ent) < 0) {
66105+ kfree(ent);
66106+ ent = NULL;
66107+ }
66108+ }
66109+ return ent;
66110+}
66111+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66112+
66113 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66114 struct proc_dir_entry *parent)
66115 {
66116@@ -467,6 +525,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66117 }
66118 EXPORT_SYMBOL(proc_mkdir);
66119
66120+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66121+ struct proc_dir_entry *parent)
66122+{
66123+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66124+}
66125+EXPORT_SYMBOL(proc_mkdir_restrict);
66126+
66127 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66128 struct proc_dir_entry *parent,
66129 const struct file_operations *proc_fops,
66130diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66131index 3b0f838..a0e0f63e 100644
66132--- a/fs/proc/inode.c
66133+++ b/fs/proc/inode.c
66134@@ -24,11 +24,17 @@
66135 #include <linux/mount.h>
66136 #include <linux/magic.h>
66137 #include <linux/namei.h>
66138+#include <linux/grsecurity.h>
66139
66140 #include <asm/uaccess.h>
66141
66142 #include "internal.h"
66143
66144+#ifdef CONFIG_PROC_SYSCTL
66145+extern const struct inode_operations proc_sys_inode_operations;
66146+extern const struct inode_operations proc_sys_dir_operations;
66147+#endif
66148+
66149 static void proc_evict_inode(struct inode *inode)
66150 {
66151 struct proc_dir_entry *de;
66152@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
66153 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66154 sysctl_head_put(head);
66155 }
66156+
66157+#ifdef CONFIG_PROC_SYSCTL
66158+ if (inode->i_op == &proc_sys_inode_operations ||
66159+ inode->i_op == &proc_sys_dir_operations)
66160+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66161+#endif
66162+
66163 }
66164
66165 static struct kmem_cache * proc_inode_cachep;
66166@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66167 if (de->mode) {
66168 inode->i_mode = de->mode;
66169 inode->i_uid = de->uid;
66170+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66171+ inode->i_gid = grsec_proc_gid;
66172+#else
66173 inode->i_gid = de->gid;
66174+#endif
66175 }
66176 if (de->size)
66177 inode->i_size = de->size;
66178diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66179index c835b94..c9e01a3 100644
66180--- a/fs/proc/internal.h
66181+++ b/fs/proc/internal.h
66182@@ -47,9 +47,10 @@ struct proc_dir_entry {
66183 struct completion *pde_unload_completion;
66184 struct list_head pde_openers; /* who did ->open, but not ->release */
66185 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66186+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66187 u8 namelen;
66188 char name[];
66189-};
66190+} __randomize_layout;
66191
66192 union proc_op {
66193 int (*proc_get_link)(struct dentry *, struct path *);
66194@@ -67,7 +68,7 @@ struct proc_inode {
66195 struct ctl_table *sysctl_entry;
66196 const struct proc_ns_operations *ns_ops;
66197 struct inode vfs_inode;
66198-};
66199+} __randomize_layout;
66200
66201 /*
66202 * General functions
66203@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66204 struct pid *, struct task_struct *);
66205 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66206 struct pid *, struct task_struct *);
66207+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66208+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66209+ struct pid *, struct task_struct *);
66210+#endif
66211
66212 /*
66213 * base.c
66214@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66215 * generic.c
66216 */
66217 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66218+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66219 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66220 struct dentry *);
66221 extern int proc_readdir(struct file *, struct dir_context *);
66222+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66223 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66224
66225 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66226diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66227index a352d57..cb94a5c 100644
66228--- a/fs/proc/interrupts.c
66229+++ b/fs/proc/interrupts.c
66230@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66231
66232 static int __init proc_interrupts_init(void)
66233 {
66234+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66235+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66236+#else
66237 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66238+#endif
66239 return 0;
66240 }
66241 fs_initcall(proc_interrupts_init);
66242diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66243index 91a4e64..cb007c0 100644
66244--- a/fs/proc/kcore.c
66245+++ b/fs/proc/kcore.c
66246@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66247 * the addresses in the elf_phdr on our list.
66248 */
66249 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66250- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66251+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66252+ if (tsz > buflen)
66253 tsz = buflen;
66254-
66255+
66256 while (buflen) {
66257 struct kcore_list *m;
66258
66259@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66260 kfree(elf_buf);
66261 } else {
66262 if (kern_addr_valid(start)) {
66263- unsigned long n;
66264+ char *elf_buf;
66265+ mm_segment_t oldfs;
66266
66267- n = copy_to_user(buffer, (char *)start, tsz);
66268- /*
66269- * We cannot distinguish between fault on source
66270- * and fault on destination. When this happens
66271- * we clear too and hope it will trigger the
66272- * EFAULT again.
66273- */
66274- if (n) {
66275- if (clear_user(buffer + tsz - n,
66276- n))
66277+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66278+ if (!elf_buf)
66279+ return -ENOMEM;
66280+ oldfs = get_fs();
66281+ set_fs(KERNEL_DS);
66282+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66283+ set_fs(oldfs);
66284+ if (copy_to_user(buffer, elf_buf, tsz)) {
66285+ kfree(elf_buf);
66286 return -EFAULT;
66287+ }
66288 }
66289+ set_fs(oldfs);
66290+ kfree(elf_buf);
66291 } else {
66292 if (clear_user(buffer, tsz))
66293 return -EFAULT;
66294@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66295
66296 static int open_kcore(struct inode *inode, struct file *filp)
66297 {
66298+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66299+ return -EPERM;
66300+#endif
66301 if (!capable(CAP_SYS_RAWIO))
66302 return -EPERM;
66303 if (kcore_need_update)
66304diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66305index d3ebf2e..6ad42d1 100644
66306--- a/fs/proc/meminfo.c
66307+++ b/fs/proc/meminfo.c
66308@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66309 vmi.used >> 10,
66310 vmi.largest_chunk >> 10
66311 #ifdef CONFIG_MEMORY_FAILURE
66312- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66313+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66314 #endif
66315 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66316 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66317diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66318index d4a3574..b421ce9 100644
66319--- a/fs/proc/nommu.c
66320+++ b/fs/proc/nommu.c
66321@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66322
66323 if (file) {
66324 seq_pad(m, ' ');
66325- seq_path(m, &file->f_path, "");
66326+ seq_path(m, &file->f_path, "\n\\");
66327 }
66328
66329 seq_putc(m, '\n');
66330diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66331index 1bde894..22ac7eb 100644
66332--- a/fs/proc/proc_net.c
66333+++ b/fs/proc/proc_net.c
66334@@ -23,9 +23,27 @@
66335 #include <linux/nsproxy.h>
66336 #include <net/net_namespace.h>
66337 #include <linux/seq_file.h>
66338+#include <linux/grsecurity.h>
66339
66340 #include "internal.h"
66341
66342+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66343+static struct seq_operations *ipv6_seq_ops_addr;
66344+
66345+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66346+{
66347+ ipv6_seq_ops_addr = addr;
66348+}
66349+
66350+void unregister_ipv6_seq_ops_addr(void)
66351+{
66352+ ipv6_seq_ops_addr = NULL;
66353+}
66354+
66355+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66356+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66357+#endif
66358+
66359 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66360 {
66361 return pde->parent->data;
66362@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66363 return maybe_get_net(PDE_NET(PDE(inode)));
66364 }
66365
66366+extern const struct seq_operations dev_seq_ops;
66367+
66368 int seq_open_net(struct inode *ino, struct file *f,
66369 const struct seq_operations *ops, int size)
66370 {
66371@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66372
66373 BUG_ON(size < sizeof(*p));
66374
66375+ /* only permit access to /proc/net/dev */
66376+ if (
66377+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66378+ ops != ipv6_seq_ops_addr &&
66379+#endif
66380+ ops != &dev_seq_ops && gr_proc_is_restricted())
66381+ return -EACCES;
66382+
66383 net = get_proc_net(ino);
66384 if (net == NULL)
66385 return -ENXIO;
66386@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66387 int err;
66388 struct net *net;
66389
66390+ if (gr_proc_is_restricted())
66391+ return -EACCES;
66392+
66393 err = -ENXIO;
66394 net = get_proc_net(inode);
66395 if (net == NULL)
66396diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66397index f92d5dd..26398ac 100644
66398--- a/fs/proc/proc_sysctl.c
66399+++ b/fs/proc/proc_sysctl.c
66400@@ -11,13 +11,21 @@
66401 #include <linux/namei.h>
66402 #include <linux/mm.h>
66403 #include <linux/module.h>
66404+#include <linux/nsproxy.h>
66405+#ifdef CONFIG_GRKERNSEC
66406+#include <net/net_namespace.h>
66407+#endif
66408 #include "internal.h"
66409
66410+extern int gr_handle_chroot_sysctl(const int op);
66411+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66412+ const int op);
66413+
66414 static const struct dentry_operations proc_sys_dentry_operations;
66415 static const struct file_operations proc_sys_file_operations;
66416-static const struct inode_operations proc_sys_inode_operations;
66417+const struct inode_operations proc_sys_inode_operations;
66418 static const struct file_operations proc_sys_dir_file_operations;
66419-static const struct inode_operations proc_sys_dir_operations;
66420+const struct inode_operations proc_sys_dir_operations;
66421
66422 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66423 {
66424@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66425
66426 err = NULL;
66427 d_set_d_op(dentry, &proc_sys_dentry_operations);
66428+
66429+ gr_handle_proc_create(dentry, inode);
66430+
66431 d_add(dentry, inode);
66432
66433 out:
66434@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66435 struct inode *inode = file_inode(filp);
66436 struct ctl_table_header *head = grab_header(inode);
66437 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66438+ int op = write ? MAY_WRITE : MAY_READ;
66439 ssize_t error;
66440 size_t res;
66441
66442@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66443 * and won't be until we finish.
66444 */
66445 error = -EPERM;
66446- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66447+ if (sysctl_perm(head, table, op))
66448 goto out;
66449
66450 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66451@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66452 if (!table->proc_handler)
66453 goto out;
66454
66455+#ifdef CONFIG_GRKERNSEC
66456+ error = -EPERM;
66457+ if (gr_handle_chroot_sysctl(op))
66458+ goto out;
66459+ dget(filp->f_path.dentry);
66460+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66461+ dput(filp->f_path.dentry);
66462+ goto out;
66463+ }
66464+ dput(filp->f_path.dentry);
66465+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66466+ goto out;
66467+ if (write) {
66468+ if (current->nsproxy->net_ns != table->extra2) {
66469+ if (!capable(CAP_SYS_ADMIN))
66470+ goto out;
66471+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66472+ goto out;
66473+ }
66474+#endif
66475+
66476 /* careful: calling conventions are nasty here */
66477 res = count;
66478 error = table->proc_handler(table, write, buf, &res, ppos);
66479@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66480 return false;
66481 } else {
66482 d_set_d_op(child, &proc_sys_dentry_operations);
66483+
66484+ gr_handle_proc_create(child, inode);
66485+
66486 d_add(child, inode);
66487 }
66488 } else {
66489@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66490 if ((*pos)++ < ctx->pos)
66491 return true;
66492
66493+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66494+ return 0;
66495+
66496 if (unlikely(S_ISLNK(table->mode)))
66497 res = proc_sys_link_fill_cache(file, ctx, head, table);
66498 else
66499@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66500 if (IS_ERR(head))
66501 return PTR_ERR(head);
66502
66503+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66504+ return -ENOENT;
66505+
66506 generic_fillattr(inode, stat);
66507 if (table)
66508 stat->mode = (stat->mode & S_IFMT) | table->mode;
66509@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66510 .llseek = generic_file_llseek,
66511 };
66512
66513-static const struct inode_operations proc_sys_inode_operations = {
66514+const struct inode_operations proc_sys_inode_operations = {
66515 .permission = proc_sys_permission,
66516 .setattr = proc_sys_setattr,
66517 .getattr = proc_sys_getattr,
66518 };
66519
66520-static const struct inode_operations proc_sys_dir_operations = {
66521+const struct inode_operations proc_sys_dir_operations = {
66522 .lookup = proc_sys_lookup,
66523 .permission = proc_sys_permission,
66524 .setattr = proc_sys_setattr,
66525@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66526 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66527 const char *name, int namelen)
66528 {
66529- struct ctl_table *table;
66530+ ctl_table_no_const *table;
66531 struct ctl_dir *new;
66532 struct ctl_node *node;
66533 char *new_name;
66534@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66535 return NULL;
66536
66537 node = (struct ctl_node *)(new + 1);
66538- table = (struct ctl_table *)(node + 1);
66539+ table = (ctl_table_no_const *)(node + 1);
66540 new_name = (char *)(table + 2);
66541 memcpy(new_name, name, namelen);
66542 new_name[namelen] = '\0';
66543@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66544 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66545 struct ctl_table_root *link_root)
66546 {
66547- struct ctl_table *link_table, *entry, *link;
66548+ ctl_table_no_const *link_table, *link;
66549+ struct ctl_table *entry;
66550 struct ctl_table_header *links;
66551 struct ctl_node *node;
66552 char *link_name;
66553@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66554 return NULL;
66555
66556 node = (struct ctl_node *)(links + 1);
66557- link_table = (struct ctl_table *)(node + nr_entries);
66558+ link_table = (ctl_table_no_const *)(node + nr_entries);
66559 link_name = (char *)&link_table[nr_entries + 1];
66560
66561 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66562@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66563 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66564 struct ctl_table *table)
66565 {
66566- struct ctl_table *ctl_table_arg = NULL;
66567- struct ctl_table *entry, *files;
66568+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66569+ struct ctl_table *entry;
66570 int nr_files = 0;
66571 int nr_dirs = 0;
66572 int err = -ENOMEM;
66573@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66574 nr_files++;
66575 }
66576
66577- files = table;
66578 /* If there are mixed files and directories we need a new table */
66579 if (nr_dirs && nr_files) {
66580- struct ctl_table *new;
66581+ ctl_table_no_const *new;
66582 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66583 GFP_KERNEL);
66584 if (!files)
66585@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66586 /* Register everything except a directory full of subdirectories */
66587 if (nr_files || !nr_dirs) {
66588 struct ctl_table_header *header;
66589- header = __register_sysctl_table(set, path, files);
66590+ header = __register_sysctl_table(set, path, files ? files : table);
66591 if (!header) {
66592 kfree(ctl_table_arg);
66593 goto out;
66594diff --git a/fs/proc/root.c b/fs/proc/root.c
66595index e74ac9f..35e89f4 100644
66596--- a/fs/proc/root.c
66597+++ b/fs/proc/root.c
66598@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66599 proc_mkdir("openprom", NULL);
66600 #endif
66601 proc_tty_init();
66602+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66603+#ifdef CONFIG_GRKERNSEC_PROC_USER
66604+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66605+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66606+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66607+#endif
66608+#else
66609 proc_mkdir("bus", NULL);
66610+#endif
66611 proc_sys_init();
66612 }
66613
66614diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66615index 510413eb..34d9a8c 100644
66616--- a/fs/proc/stat.c
66617+++ b/fs/proc/stat.c
66618@@ -11,6 +11,7 @@
66619 #include <linux/irqnr.h>
66620 #include <linux/cputime.h>
66621 #include <linux/tick.h>
66622+#include <linux/grsecurity.h>
66623
66624 #ifndef arch_irq_stat_cpu
66625 #define arch_irq_stat_cpu(cpu) 0
66626@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
66627 u64 sum_softirq = 0;
66628 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
66629 struct timespec boottime;
66630+ int unrestricted = 1;
66631+
66632+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66633+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66634+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
66635+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66636+ && !in_group_p(grsec_proc_gid)
66637+#endif
66638+ )
66639+ unrestricted = 0;
66640+#endif
66641+#endif
66642
66643 user = nice = system = idle = iowait =
66644 irq = softirq = steal = 0;
66645@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
66646 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66647 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66648 idle += get_idle_time(i);
66649- iowait += get_iowait_time(i);
66650- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66651- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66652- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66653- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66654- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66655- sum += kstat_cpu_irqs_sum(i);
66656- sum += arch_irq_stat_cpu(i);
66657+ if (unrestricted) {
66658+ iowait += get_iowait_time(i);
66659+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66660+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66661+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66662+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66663+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66664+ sum += kstat_cpu_irqs_sum(i);
66665+ sum += arch_irq_stat_cpu(i);
66666+ for (j = 0; j < NR_SOFTIRQS; j++) {
66667+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66668
66669- for (j = 0; j < NR_SOFTIRQS; j++) {
66670- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66671-
66672- per_softirq_sums[j] += softirq_stat;
66673- sum_softirq += softirq_stat;
66674+ per_softirq_sums[j] += softirq_stat;
66675+ sum_softirq += softirq_stat;
66676+ }
66677 }
66678 }
66679- sum += arch_irq_stat();
66680+ if (unrestricted)
66681+ sum += arch_irq_stat();
66682
66683 seq_puts(p, "cpu ");
66684 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66685@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
66686 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66687 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66688 idle = get_idle_time(i);
66689- iowait = get_iowait_time(i);
66690- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66691- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66692- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66693- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66694- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66695+ if (unrestricted) {
66696+ iowait = get_iowait_time(i);
66697+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66698+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66699+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66700+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66701+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66702+ }
66703 seq_printf(p, "cpu%d", i);
66704 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66705 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
66706@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
66707
66708 /* sum again ? it could be updated? */
66709 for_each_irq_nr(j)
66710- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
66711+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
66712
66713 seq_printf(p,
66714 "\nctxt %llu\n"
66715@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
66716 "processes %lu\n"
66717 "procs_running %lu\n"
66718 "procs_blocked %lu\n",
66719- nr_context_switches(),
66720+ unrestricted ? nr_context_switches() : 0ULL,
66721 (unsigned long)jif,
66722- total_forks,
66723- nr_running(),
66724- nr_iowait());
66725+ unrestricted ? total_forks : 0UL,
66726+ unrestricted ? nr_running() : 0UL,
66727+ unrestricted ? nr_iowait() : 0UL);
66728
66729 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
66730
66731diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
66732index 88f9b83..314064c 100644
66733--- a/fs/proc/task_mmu.c
66734+++ b/fs/proc/task_mmu.c
66735@@ -13,12 +13,19 @@
66736 #include <linux/swap.h>
66737 #include <linux/swapops.h>
66738 #include <linux/mmu_notifier.h>
66739+#include <linux/grsecurity.h>
66740
66741 #include <asm/elf.h>
66742 #include <asm/uaccess.h>
66743 #include <asm/tlbflush.h>
66744 #include "internal.h"
66745
66746+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66747+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66748+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66749+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66750+#endif
66751+
66752 void task_mem(struct seq_file *m, struct mm_struct *mm)
66753 {
66754 unsigned long data, text, lib, swap;
66755@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66756 "VmExe:\t%8lu kB\n"
66757 "VmLib:\t%8lu kB\n"
66758 "VmPTE:\t%8lu kB\n"
66759- "VmSwap:\t%8lu kB\n",
66760- hiwater_vm << (PAGE_SHIFT-10),
66761+ "VmSwap:\t%8lu kB\n"
66762+
66763+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66764+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
66765+#endif
66766+
66767+ ,hiwater_vm << (PAGE_SHIFT-10),
66768 total_vm << (PAGE_SHIFT-10),
66769 mm->locked_vm << (PAGE_SHIFT-10),
66770 mm->pinned_vm << (PAGE_SHIFT-10),
66771@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66772 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
66773 (PTRS_PER_PTE * sizeof(pte_t) *
66774 atomic_long_read(&mm->nr_ptes)) >> 10,
66775- swap << (PAGE_SHIFT-10));
66776+ swap << (PAGE_SHIFT-10)
66777+
66778+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66779+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66780+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
66781+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
66782+#else
66783+ , mm->context.user_cs_base
66784+ , mm->context.user_cs_limit
66785+#endif
66786+#endif
66787+
66788+ );
66789 }
66790
66791 unsigned long task_vsize(struct mm_struct *mm)
66792@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66793 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
66794 }
66795
66796- /* We don't show the stack guard page in /proc/maps */
66797+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66798+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
66799+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
66800+#else
66801 start = vma->vm_start;
66802- if (stack_guard_page_start(vma, start))
66803- start += PAGE_SIZE;
66804 end = vma->vm_end;
66805- if (stack_guard_page_end(vma, end))
66806- end -= PAGE_SIZE;
66807+#endif
66808
66809 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
66810 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
66811@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66812 flags & VM_WRITE ? 'w' : '-',
66813 flags & VM_EXEC ? 'x' : '-',
66814 flags & VM_MAYSHARE ? 's' : 'p',
66815+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66816+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
66817+#else
66818 pgoff,
66819+#endif
66820 MAJOR(dev), MINOR(dev), ino);
66821
66822 /*
66823@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66824 */
66825 if (file) {
66826 seq_pad(m, ' ');
66827- seq_path(m, &file->f_path, "\n");
66828+ seq_path(m, &file->f_path, "\n\\");
66829 goto done;
66830 }
66831
66832@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66833 * Thread stack in /proc/PID/task/TID/maps or
66834 * the main process stack.
66835 */
66836- if (!is_pid || (vma->vm_start <= mm->start_stack &&
66837- vma->vm_end >= mm->start_stack)) {
66838+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
66839+ (vma->vm_start <= mm->start_stack &&
66840+ vma->vm_end >= mm->start_stack)) {
66841 name = "[stack]";
66842 } else {
66843 /* Thread stack in /proc/PID/maps */
66844@@ -359,6 +388,12 @@ done:
66845
66846 static int show_map(struct seq_file *m, void *v, int is_pid)
66847 {
66848+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66849+ if (current->exec_id != m->exec_id) {
66850+ gr_log_badprocpid("maps");
66851+ return 0;
66852+ }
66853+#endif
66854 show_map_vma(m, v, is_pid);
66855 m_cache_vma(m, v);
66856 return 0;
66857@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66858 .private = &mss,
66859 };
66860
66861+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66862+ if (current->exec_id != m->exec_id) {
66863+ gr_log_badprocpid("smaps");
66864+ return 0;
66865+ }
66866+#endif
66867 memset(&mss, 0, sizeof mss);
66868- mss.vma = vma;
66869- /* mmap_sem is held in m_start */
66870- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66871- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66872-
66873+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66874+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
66875+#endif
66876+ mss.vma = vma;
66877+ /* mmap_sem is held in m_start */
66878+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66879+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66880+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66881+ }
66882+#endif
66883 show_map_vma(m, vma, is_pid);
66884
66885 seq_printf(m,
66886@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66887 "KernelPageSize: %8lu kB\n"
66888 "MMUPageSize: %8lu kB\n"
66889 "Locked: %8lu kB\n",
66890+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66891+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
66892+#else
66893 (vma->vm_end - vma->vm_start) >> 10,
66894+#endif
66895 mss.resident >> 10,
66896 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
66897 mss.shared_clean >> 10,
66898@@ -1486,6 +1536,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66899 char buffer[64];
66900 int nid;
66901
66902+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66903+ if (current->exec_id != m->exec_id) {
66904+ gr_log_badprocpid("numa_maps");
66905+ return 0;
66906+ }
66907+#endif
66908+
66909 if (!mm)
66910 return 0;
66911
66912@@ -1507,11 +1564,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66913 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
66914 }
66915
66916+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66917+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
66918+#else
66919 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
66920+#endif
66921
66922 if (file) {
66923 seq_puts(m, " file=");
66924- seq_path(m, &file->f_path, "\n\t= ");
66925+ seq_path(m, &file->f_path, "\n\t\\= ");
66926 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
66927 seq_puts(m, " heap");
66928 } else {
66929diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
66930index 599ec2e..f1413ae 100644
66931--- a/fs/proc/task_nommu.c
66932+++ b/fs/proc/task_nommu.c
66933@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66934 else
66935 bytes += kobjsize(mm);
66936
66937- if (current->fs && current->fs->users > 1)
66938+ if (current->fs && atomic_read(&current->fs->users) > 1)
66939 sbytes += kobjsize(current->fs);
66940 else
66941 bytes += kobjsize(current->fs);
66942@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
66943
66944 if (file) {
66945 seq_pad(m, ' ');
66946- seq_path(m, &file->f_path, "");
66947+ seq_path(m, &file->f_path, "\n\\");
66948 } else if (mm) {
66949 pid_t tid = pid_of_stack(priv, vma, is_pid);
66950
66951diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
66952index a90d6d35..d08047c 100644
66953--- a/fs/proc/vmcore.c
66954+++ b/fs/proc/vmcore.c
66955@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
66956 nr_bytes = count;
66957
66958 /* If pfn is not ram, return zeros for sparse dump files */
66959- if (pfn_is_ram(pfn) == 0)
66960- memset(buf, 0, nr_bytes);
66961- else {
66962+ if (pfn_is_ram(pfn) == 0) {
66963+ if (userbuf) {
66964+ if (clear_user((char __force_user *)buf, nr_bytes))
66965+ return -EFAULT;
66966+ } else
66967+ memset(buf, 0, nr_bytes);
66968+ } else {
66969 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
66970 offset, userbuf);
66971 if (tmp < 0)
66972@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
66973 static int copy_to(void *target, void *src, size_t size, int userbuf)
66974 {
66975 if (userbuf) {
66976- if (copy_to_user((char __user *) target, src, size))
66977+ if (copy_to_user((char __force_user *) target, src, size))
66978 return -EFAULT;
66979 } else {
66980 memcpy(target, src, size);
66981@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
66982 if (*fpos < m->offset + m->size) {
66983 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
66984 start = m->paddr + *fpos - m->offset;
66985- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
66986+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
66987 if (tmp < 0)
66988 return tmp;
66989 buflen -= tsz;
66990@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
66991 static ssize_t read_vmcore(struct file *file, char __user *buffer,
66992 size_t buflen, loff_t *fpos)
66993 {
66994- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
66995+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
66996 }
66997
66998 /*
66999diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67000index d3fb2b6..43a8140 100644
67001--- a/fs/qnx6/qnx6.h
67002+++ b/fs/qnx6/qnx6.h
67003@@ -74,7 +74,7 @@ enum {
67004 BYTESEX_BE,
67005 };
67006
67007-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67008+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67009 {
67010 if (sbi->s_bytesex == BYTESEX_LE)
67011 return le64_to_cpu((__force __le64)n);
67012@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67013 return (__force __fs64)cpu_to_be64(n);
67014 }
67015
67016-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67017+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67018 {
67019 if (sbi->s_bytesex == BYTESEX_LE)
67020 return le32_to_cpu((__force __le32)n);
67021diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67022index bb2869f..d34ada8 100644
67023--- a/fs/quota/netlink.c
67024+++ b/fs/quota/netlink.c
67025@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67026 void quota_send_warning(struct kqid qid, dev_t dev,
67027 const char warntype)
67028 {
67029- static atomic_t seq;
67030+ static atomic_unchecked_t seq;
67031 struct sk_buff *skb;
67032 void *msg_head;
67033 int ret;
67034@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67035 "VFS: Not enough memory to send quota warning.\n");
67036 return;
67037 }
67038- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67039+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67040 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67041 if (!msg_head) {
67042 printk(KERN_ERR
67043diff --git a/fs/read_write.c b/fs/read_write.c
67044index c0805c93..d39f2eb 100644
67045--- a/fs/read_write.c
67046+++ b/fs/read_write.c
67047@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67048
67049 old_fs = get_fs();
67050 set_fs(get_ds());
67051- p = (__force const char __user *)buf;
67052+ p = (const char __force_user *)buf;
67053 if (count > MAX_RW_COUNT)
67054 count = MAX_RW_COUNT;
67055 if (file->f_op->write)
67056diff --git a/fs/readdir.c b/fs/readdir.c
67057index ced6791..936687b 100644
67058--- a/fs/readdir.c
67059+++ b/fs/readdir.c
67060@@ -18,6 +18,7 @@
67061 #include <linux/security.h>
67062 #include <linux/syscalls.h>
67063 #include <linux/unistd.h>
67064+#include <linux/namei.h>
67065
67066 #include <asm/uaccess.h>
67067
67068@@ -71,6 +72,7 @@ struct old_linux_dirent {
67069 struct readdir_callback {
67070 struct dir_context ctx;
67071 struct old_linux_dirent __user * dirent;
67072+ struct file * file;
67073 int result;
67074 };
67075
67076@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67077 buf->result = -EOVERFLOW;
67078 return -EOVERFLOW;
67079 }
67080+
67081+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67082+ return 0;
67083+
67084 buf->result++;
67085 dirent = buf->dirent;
67086 if (!access_ok(VERIFY_WRITE, dirent,
67087@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67088 if (!f.file)
67089 return -EBADF;
67090
67091+ buf.file = f.file;
67092 error = iterate_dir(f.file, &buf.ctx);
67093 if (buf.result)
67094 error = buf.result;
67095@@ -145,6 +152,7 @@ struct getdents_callback {
67096 struct dir_context ctx;
67097 struct linux_dirent __user * current_dir;
67098 struct linux_dirent __user * previous;
67099+ struct file * file;
67100 int count;
67101 int error;
67102 };
67103@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67104 buf->error = -EOVERFLOW;
67105 return -EOVERFLOW;
67106 }
67107+
67108+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67109+ return 0;
67110+
67111 dirent = buf->previous;
67112 if (dirent) {
67113 if (__put_user(offset, &dirent->d_off))
67114@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67115 if (!f.file)
67116 return -EBADF;
67117
67118+ buf.file = f.file;
67119 error = iterate_dir(f.file, &buf.ctx);
67120 if (error >= 0)
67121 error = buf.error;
67122@@ -230,6 +243,7 @@ struct getdents_callback64 {
67123 struct dir_context ctx;
67124 struct linux_dirent64 __user * current_dir;
67125 struct linux_dirent64 __user * previous;
67126+ struct file *file;
67127 int count;
67128 int error;
67129 };
67130@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67131 buf->error = -EINVAL; /* only used if we fail.. */
67132 if (reclen > buf->count)
67133 return -EINVAL;
67134+
67135+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67136+ return 0;
67137+
67138 dirent = buf->previous;
67139 if (dirent) {
67140 if (__put_user(offset, &dirent->d_off))
67141@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67142 if (!f.file)
67143 return -EBADF;
67144
67145+ buf.file = f.file;
67146 error = iterate_dir(f.file, &buf.ctx);
67147 if (error >= 0)
67148 error = buf.error;
67149diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67150index 9c02d96..6562c10 100644
67151--- a/fs/reiserfs/do_balan.c
67152+++ b/fs/reiserfs/do_balan.c
67153@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67154 return;
67155 }
67156
67157- atomic_inc(&fs_generation(tb->tb_sb));
67158+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67159 do_balance_starts(tb);
67160
67161 /*
67162diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67163index aca73dd..e3c558d 100644
67164--- a/fs/reiserfs/item_ops.c
67165+++ b/fs/reiserfs/item_ops.c
67166@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67167 }
67168
67169 static struct item_operations errcatch_ops = {
67170- errcatch_bytes_number,
67171- errcatch_decrement_key,
67172- errcatch_is_left_mergeable,
67173- errcatch_print_item,
67174- errcatch_check_item,
67175+ .bytes_number = errcatch_bytes_number,
67176+ .decrement_key = errcatch_decrement_key,
67177+ .is_left_mergeable = errcatch_is_left_mergeable,
67178+ .print_item = errcatch_print_item,
67179+ .check_item = errcatch_check_item,
67180
67181- errcatch_create_vi,
67182- errcatch_check_left,
67183- errcatch_check_right,
67184- errcatch_part_size,
67185- errcatch_unit_num,
67186- errcatch_print_vi
67187+ .create_vi = errcatch_create_vi,
67188+ .check_left = errcatch_check_left,
67189+ .check_right = errcatch_check_right,
67190+ .part_size = errcatch_part_size,
67191+ .unit_num = errcatch_unit_num,
67192+ .print_vi = errcatch_print_vi
67193 };
67194
67195 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
67196diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67197index 621b9f3..af527fd 100644
67198--- a/fs/reiserfs/procfs.c
67199+++ b/fs/reiserfs/procfs.c
67200@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67201 "SMALL_TAILS " : "NO_TAILS ",
67202 replay_only(sb) ? "REPLAY_ONLY " : "",
67203 convert_reiserfs(sb) ? "CONV " : "",
67204- atomic_read(&r->s_generation_counter),
67205+ atomic_read_unchecked(&r->s_generation_counter),
67206 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67207 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67208 SF(s_good_search_by_key_reada), SF(s_bmaps),
67209diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67210index bb79cdd..fcf49ef 100644
67211--- a/fs/reiserfs/reiserfs.h
67212+++ b/fs/reiserfs/reiserfs.h
67213@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67214 /* Comment? -Hans */
67215 wait_queue_head_t s_wait;
67216 /* increased by one every time the tree gets re-balanced */
67217- atomic_t s_generation_counter;
67218+ atomic_unchecked_t s_generation_counter;
67219
67220 /* File system properties. Currently holds on-disk FS format */
67221 unsigned long s_properties;
67222@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67223 #define REISERFS_USER_MEM 1 /* user memory mode */
67224
67225 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67226-#define get_generation(s) atomic_read (&fs_generation(s))
67227+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67228 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67229 #define __fs_changed(gen,s) (gen != get_generation (s))
67230 #define fs_changed(gen,s) \
67231diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67232index 71fbbe3..eff29ba 100644
67233--- a/fs/reiserfs/super.c
67234+++ b/fs/reiserfs/super.c
67235@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67236 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67237 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67238 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67239+#ifdef CONFIG_REISERFS_FS_XATTR
67240+ /* turn on user xattrs by default */
67241+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67242+#endif
67243 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67244 sbi->s_alloc_options.preallocmin = 0;
67245 /* Preallocate by 16 blocks (17-1) at once */
67246diff --git a/fs/select.c b/fs/select.c
67247index 467bb1c..cf9d65a 100644
67248--- a/fs/select.c
67249+++ b/fs/select.c
67250@@ -20,6 +20,7 @@
67251 #include <linux/export.h>
67252 #include <linux/slab.h>
67253 #include <linux/poll.h>
67254+#include <linux/security.h>
67255 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67256 #include <linux/file.h>
67257 #include <linux/fdtable.h>
67258@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67259 struct poll_list *walk = head;
67260 unsigned long todo = nfds;
67261
67262+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67263 if (nfds > rlimit(RLIMIT_NOFILE))
67264 return -EINVAL;
67265
67266diff --git a/fs/seq_file.c b/fs/seq_file.c
67267index dbf3a59..daf023f 100644
67268--- a/fs/seq_file.c
67269+++ b/fs/seq_file.c
67270@@ -12,6 +12,8 @@
67271 #include <linux/slab.h>
67272 #include <linux/cred.h>
67273 #include <linux/mm.h>
67274+#include <linux/sched.h>
67275+#include <linux/grsecurity.h>
67276
67277 #include <asm/uaccess.h>
67278 #include <asm/page.h>
67279@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67280
67281 static void *seq_buf_alloc(unsigned long size)
67282 {
67283- void *buf;
67284-
67285- /*
67286- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67287- * it's better to fall back to vmalloc() than to kill things.
67288- */
67289- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67290- if (!buf && size > PAGE_SIZE)
67291- buf = vmalloc(size);
67292- return buf;
67293+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67294 }
67295
67296 /**
67297@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67298 #ifdef CONFIG_USER_NS
67299 p->user_ns = file->f_cred->user_ns;
67300 #endif
67301+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67302+ p->exec_id = current->exec_id;
67303+#endif
67304
67305 /*
67306 * Wrappers around seq_open(e.g. swaps_open) need to be
67307@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67308 }
67309 EXPORT_SYMBOL(seq_open);
67310
67311+
67312+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67313+{
67314+ if (gr_proc_is_restricted())
67315+ return -EACCES;
67316+
67317+ return seq_open(file, op);
67318+}
67319+EXPORT_SYMBOL(seq_open_restrict);
67320+
67321 static int traverse(struct seq_file *m, loff_t offset)
67322 {
67323 loff_t pos = 0, index;
67324@@ -158,7 +164,7 @@ Eoverflow:
67325 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67326 {
67327 struct seq_file *m = file->private_data;
67328- size_t copied = 0;
67329+ ssize_t copied = 0;
67330 loff_t pos;
67331 size_t n;
67332 void *p;
67333@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
67334 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67335 void *data)
67336 {
67337- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67338+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67339 int res = -ENOMEM;
67340
67341 if (op) {
67342@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67343 }
67344 EXPORT_SYMBOL(single_open_size);
67345
67346+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67347+ void *data)
67348+{
67349+ if (gr_proc_is_restricted())
67350+ return -EACCES;
67351+
67352+ return single_open(file, show, data);
67353+}
67354+EXPORT_SYMBOL(single_open_restrict);
67355+
67356+
67357 int single_release(struct inode *inode, struct file *file)
67358 {
67359 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
67360diff --git a/fs/splice.c b/fs/splice.c
67361index 75c6058..770d40c 100644
67362--- a/fs/splice.c
67363+++ b/fs/splice.c
67364@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67365 pipe_lock(pipe);
67366
67367 for (;;) {
67368- if (!pipe->readers) {
67369+ if (!atomic_read(&pipe->readers)) {
67370 send_sig(SIGPIPE, current, 0);
67371 if (!ret)
67372 ret = -EPIPE;
67373@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67374 page_nr++;
67375 ret += buf->len;
67376
67377- if (pipe->files)
67378+ if (atomic_read(&pipe->files))
67379 do_wakeup = 1;
67380
67381 if (!--spd->nr_pages)
67382@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67383 do_wakeup = 0;
67384 }
67385
67386- pipe->waiting_writers++;
67387+ atomic_inc(&pipe->waiting_writers);
67388 pipe_wait(pipe);
67389- pipe->waiting_writers--;
67390+ atomic_dec(&pipe->waiting_writers);
67391 }
67392
67393 pipe_unlock(pipe);
67394@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67395 old_fs = get_fs();
67396 set_fs(get_ds());
67397 /* The cast to a user pointer is valid due to the set_fs() */
67398- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67399+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67400 set_fs(old_fs);
67401
67402 return res;
67403@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67404 old_fs = get_fs();
67405 set_fs(get_ds());
67406 /* The cast to a user pointer is valid due to the set_fs() */
67407- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67408+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67409 set_fs(old_fs);
67410
67411 return res;
67412@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67413 goto err;
67414
67415 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67416- vec[i].iov_base = (void __user *) page_address(page);
67417+ vec[i].iov_base = (void __force_user *) page_address(page);
67418 vec[i].iov_len = this_len;
67419 spd.pages[i] = page;
67420 spd.nr_pages++;
67421@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67422 ops->release(pipe, buf);
67423 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67424 pipe->nrbufs--;
67425- if (pipe->files)
67426+ if (atomic_read(&pipe->files))
67427 sd->need_wakeup = true;
67428 }
67429
67430@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67431 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67432 {
67433 while (!pipe->nrbufs) {
67434- if (!pipe->writers)
67435+ if (!atomic_read(&pipe->writers))
67436 return 0;
67437
67438- if (!pipe->waiting_writers && sd->num_spliced)
67439+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67440 return 0;
67441
67442 if (sd->flags & SPLICE_F_NONBLOCK)
67443@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67444 ops->release(pipe, buf);
67445 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67446 pipe->nrbufs--;
67447- if (pipe->files)
67448+ if (atomic_read(&pipe->files))
67449 sd.need_wakeup = true;
67450 } else {
67451 buf->offset += ret;
67452@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67453 * out of the pipe right after the splice_to_pipe(). So set
67454 * PIPE_READERS appropriately.
67455 */
67456- pipe->readers = 1;
67457+ atomic_set(&pipe->readers, 1);
67458
67459 current->splice_pipe = pipe;
67460 }
67461@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67462
67463 partial[buffers].offset = off;
67464 partial[buffers].len = plen;
67465+ partial[buffers].private = 0;
67466
67467 off = 0;
67468 len -= plen;
67469@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67470 ret = -ERESTARTSYS;
67471 break;
67472 }
67473- if (!pipe->writers)
67474+ if (!atomic_read(&pipe->writers))
67475 break;
67476- if (!pipe->waiting_writers) {
67477+ if (!atomic_read(&pipe->waiting_writers)) {
67478 if (flags & SPLICE_F_NONBLOCK) {
67479 ret = -EAGAIN;
67480 break;
67481@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67482 pipe_lock(pipe);
67483
67484 while (pipe->nrbufs >= pipe->buffers) {
67485- if (!pipe->readers) {
67486+ if (!atomic_read(&pipe->readers)) {
67487 send_sig(SIGPIPE, current, 0);
67488 ret = -EPIPE;
67489 break;
67490@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67491 ret = -ERESTARTSYS;
67492 break;
67493 }
67494- pipe->waiting_writers++;
67495+ atomic_inc(&pipe->waiting_writers);
67496 pipe_wait(pipe);
67497- pipe->waiting_writers--;
67498+ atomic_dec(&pipe->waiting_writers);
67499 }
67500
67501 pipe_unlock(pipe);
67502@@ -1818,14 +1819,14 @@ retry:
67503 pipe_double_lock(ipipe, opipe);
67504
67505 do {
67506- if (!opipe->readers) {
67507+ if (!atomic_read(&opipe->readers)) {
67508 send_sig(SIGPIPE, current, 0);
67509 if (!ret)
67510 ret = -EPIPE;
67511 break;
67512 }
67513
67514- if (!ipipe->nrbufs && !ipipe->writers)
67515+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67516 break;
67517
67518 /*
67519@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67520 pipe_double_lock(ipipe, opipe);
67521
67522 do {
67523- if (!opipe->readers) {
67524+ if (!atomic_read(&opipe->readers)) {
67525 send_sig(SIGPIPE, current, 0);
67526 if (!ret)
67527 ret = -EPIPE;
67528@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67529 * return EAGAIN if we have the potential of some data in the
67530 * future, otherwise just return 0
67531 */
67532- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67533+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67534 ret = -EAGAIN;
67535
67536 pipe_unlock(ipipe);
67537diff --git a/fs/stat.c b/fs/stat.c
67538index ae0c3ce..9ee641c 100644
67539--- a/fs/stat.c
67540+++ b/fs/stat.c
67541@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67542 stat->gid = inode->i_gid;
67543 stat->rdev = inode->i_rdev;
67544 stat->size = i_size_read(inode);
67545- stat->atime = inode->i_atime;
67546- stat->mtime = inode->i_mtime;
67547+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67548+ stat->atime = inode->i_ctime;
67549+ stat->mtime = inode->i_ctime;
67550+ } else {
67551+ stat->atime = inode->i_atime;
67552+ stat->mtime = inode->i_mtime;
67553+ }
67554 stat->ctime = inode->i_ctime;
67555 stat->blksize = (1 << inode->i_blkbits);
67556 stat->blocks = inode->i_blocks;
67557@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67558 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67559 {
67560 struct inode *inode = path->dentry->d_inode;
67561+ int retval;
67562
67563- if (inode->i_op->getattr)
67564- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67565+ if (inode->i_op->getattr) {
67566+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67567+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67568+ stat->atime = stat->ctime;
67569+ stat->mtime = stat->ctime;
67570+ }
67571+ return retval;
67572+ }
67573
67574 generic_fillattr(inode, stat);
67575 return 0;
67576diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67577index 0b45ff4..847de5b 100644
67578--- a/fs/sysfs/dir.c
67579+++ b/fs/sysfs/dir.c
67580@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67581 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67582 {
67583 struct kernfs_node *parent, *kn;
67584+ const char *name;
67585+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67586+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67587+ const char *parent_name;
67588+#endif
67589
67590 BUG_ON(!kobj);
67591
67592+ name = kobject_name(kobj);
67593+
67594 if (kobj->parent)
67595 parent = kobj->parent->sd;
67596 else
67597@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67598 if (!parent)
67599 return -ENOENT;
67600
67601- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67602- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67603+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67604+ parent_name = parent->name;
67605+ mode = S_IRWXU;
67606+
67607+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67608+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67609+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67610+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67611+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67612+#endif
67613+
67614+ kn = kernfs_create_dir_ns(parent, name,
67615+ mode, kobj, ns);
67616 if (IS_ERR(kn)) {
67617 if (PTR_ERR(kn) == -EEXIST)
67618- sysfs_warn_dup(parent, kobject_name(kobj));
67619+ sysfs_warn_dup(parent, name);
67620 return PTR_ERR(kn);
67621 }
67622
67623diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67624index 69d4889..a810bd4 100644
67625--- a/fs/sysv/sysv.h
67626+++ b/fs/sysv/sysv.h
67627@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67628 #endif
67629 }
67630
67631-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67632+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67633 {
67634 if (sbi->s_bytesex == BYTESEX_PDP)
67635 return PDP_swab((__force __u32)n);
67636diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
67637index fb08b0c..65fcc7e 100644
67638--- a/fs/ubifs/io.c
67639+++ b/fs/ubifs/io.c
67640@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
67641 return err;
67642 }
67643
67644-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67645+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67646 {
67647 int err;
67648
67649diff --git a/fs/udf/misc.c b/fs/udf/misc.c
67650index c175b4d..8f36a16 100644
67651--- a/fs/udf/misc.c
67652+++ b/fs/udf/misc.c
67653@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
67654
67655 u8 udf_tag_checksum(const struct tag *t)
67656 {
67657- u8 *data = (u8 *)t;
67658+ const u8 *data = (const u8 *)t;
67659 u8 checksum = 0;
67660 int i;
67661 for (i = 0; i < sizeof(struct tag); ++i)
67662diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
67663index 8d974c4..b82f6ec 100644
67664--- a/fs/ufs/swab.h
67665+++ b/fs/ufs/swab.h
67666@@ -22,7 +22,7 @@ enum {
67667 BYTESEX_BE
67668 };
67669
67670-static inline u64
67671+static inline u64 __intentional_overflow(-1)
67672 fs64_to_cpu(struct super_block *sbp, __fs64 n)
67673 {
67674 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67675@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
67676 return (__force __fs64)cpu_to_be64(n);
67677 }
67678
67679-static inline u32
67680+static inline u32 __intentional_overflow(-1)
67681 fs32_to_cpu(struct super_block *sbp, __fs32 n)
67682 {
67683 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67684diff --git a/fs/utimes.c b/fs/utimes.c
67685index aa138d6..5f3a811 100644
67686--- a/fs/utimes.c
67687+++ b/fs/utimes.c
67688@@ -1,6 +1,7 @@
67689 #include <linux/compiler.h>
67690 #include <linux/file.h>
67691 #include <linux/fs.h>
67692+#include <linux/security.h>
67693 #include <linux/linkage.h>
67694 #include <linux/mount.h>
67695 #include <linux/namei.h>
67696@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
67697 }
67698 }
67699 retry_deleg:
67700+
67701+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
67702+ error = -EACCES;
67703+ goto mnt_drop_write_and_out;
67704+ }
67705+
67706 mutex_lock(&inode->i_mutex);
67707 error = notify_change(path->dentry, &newattrs, &delegated_inode);
67708 mutex_unlock(&inode->i_mutex);
67709diff --git a/fs/xattr.c b/fs/xattr.c
67710index 4ef6985..a6cd6567 100644
67711--- a/fs/xattr.c
67712+++ b/fs/xattr.c
67713@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
67714 return rc;
67715 }
67716
67717+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
67718+ssize_t
67719+pax_getxattr(struct dentry *dentry, void *value, size_t size)
67720+{
67721+ struct inode *inode = dentry->d_inode;
67722+ ssize_t error;
67723+
67724+ error = inode_permission(inode, MAY_EXEC);
67725+ if (error)
67726+ return error;
67727+
67728+ if (inode->i_op->getxattr)
67729+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
67730+ else
67731+ error = -EOPNOTSUPP;
67732+
67733+ return error;
67734+}
67735+EXPORT_SYMBOL(pax_getxattr);
67736+#endif
67737+
67738 ssize_t
67739 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
67740 {
67741@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
67742 * Extended attribute SET operations
67743 */
67744 static long
67745-setxattr(struct dentry *d, const char __user *name, const void __user *value,
67746+setxattr(struct path *path, const char __user *name, const void __user *value,
67747 size_t size, int flags)
67748 {
67749 int error;
67750@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
67751 posix_acl_fix_xattr_from_user(kvalue, size);
67752 }
67753
67754- error = vfs_setxattr(d, kname, kvalue, size, flags);
67755+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
67756+ error = -EACCES;
67757+ goto out;
67758+ }
67759+
67760+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
67761 out:
67762 if (vvalue)
67763 vfree(vvalue);
67764@@ -376,7 +402,7 @@ retry:
67765 return error;
67766 error = mnt_want_write(path.mnt);
67767 if (!error) {
67768- error = setxattr(path.dentry, name, value, size, flags);
67769+ error = setxattr(&path, name, value, size, flags);
67770 mnt_drop_write(path.mnt);
67771 }
67772 path_put(&path);
67773@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
67774 audit_file(f.file);
67775 error = mnt_want_write_file(f.file);
67776 if (!error) {
67777- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
67778+ error = setxattr(&f.file->f_path, name, value, size, flags);
67779 mnt_drop_write_file(f.file);
67780 }
67781 fdput(f);
67782@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
67783 * Extended attribute REMOVE operations
67784 */
67785 static long
67786-removexattr(struct dentry *d, const char __user *name)
67787+removexattr(struct path *path, const char __user *name)
67788 {
67789 int error;
67790 char kname[XATTR_NAME_MAX + 1];
67791@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
67792 if (error < 0)
67793 return error;
67794
67795- return vfs_removexattr(d, kname);
67796+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
67797+ return -EACCES;
67798+
67799+ return vfs_removexattr(path->dentry, kname);
67800 }
67801
67802 static int path_removexattr(const char __user *pathname,
67803@@ -623,7 +652,7 @@ retry:
67804 return error;
67805 error = mnt_want_write(path.mnt);
67806 if (!error) {
67807- error = removexattr(path.dentry, name);
67808+ error = removexattr(&path, name);
67809 mnt_drop_write(path.mnt);
67810 }
67811 path_put(&path);
67812@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
67813 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
67814 {
67815 struct fd f = fdget(fd);
67816+ struct path *path;
67817 int error = -EBADF;
67818
67819 if (!f.file)
67820 return error;
67821+ path = &f.file->f_path;
67822 audit_file(f.file);
67823 error = mnt_want_write_file(f.file);
67824 if (!error) {
67825- error = removexattr(f.file->f_path.dentry, name);
67826+ error = removexattr(path, name);
67827 mnt_drop_write_file(f.file);
67828 }
67829 fdput(f);
67830diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
67831index 4e20fe7..6d1a55a 100644
67832--- a/fs/xfs/libxfs/xfs_bmap.c
67833+++ b/fs/xfs/libxfs/xfs_bmap.c
67834@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
67835
67836 #else
67837 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
67838-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
67839+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
67840 #endif /* DEBUG */
67841
67842 /*
67843diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
67844index 098cd78..724d3f8 100644
67845--- a/fs/xfs/xfs_dir2_readdir.c
67846+++ b/fs/xfs/xfs_dir2_readdir.c
67847@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
67848 ino = dp->d_ops->sf_get_ino(sfp, sfep);
67849 filetype = dp->d_ops->sf_get_ftype(sfep);
67850 ctx->pos = off & 0x7fffffff;
67851- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67852+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
67853+ char name[sfep->namelen];
67854+ memcpy(name, sfep->name, sfep->namelen);
67855+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
67856+ return 0;
67857+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67858 xfs_dir3_get_dtype(dp->i_mount, filetype)))
67859 return 0;
67860 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
67861diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
67862index a183198..6b52f52 100644
67863--- a/fs/xfs/xfs_ioctl.c
67864+++ b/fs/xfs/xfs_ioctl.c
67865@@ -119,7 +119,7 @@ xfs_find_handle(
67866 }
67867
67868 error = -EFAULT;
67869- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
67870+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
67871 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
67872 goto out_put;
67873
67874diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
67875index c31d2c2..6ec8f62 100644
67876--- a/fs/xfs/xfs_linux.h
67877+++ b/fs/xfs/xfs_linux.h
67878@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
67879 * of the compiler which do not like us using do_div in the middle
67880 * of large functions.
67881 */
67882-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67883+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67884 {
67885 __u32 mod;
67886
67887@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
67888 return 0;
67889 }
67890 #else
67891-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67892+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67893 {
67894 __u32 mod;
67895
67896diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
67897new file mode 100644
67898index 0000000..31f8fe4
67899--- /dev/null
67900+++ b/grsecurity/Kconfig
67901@@ -0,0 +1,1182 @@
67902+#
67903+# grecurity configuration
67904+#
67905+menu "Memory Protections"
67906+depends on GRKERNSEC
67907+
67908+config GRKERNSEC_KMEM
67909+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
67910+ default y if GRKERNSEC_CONFIG_AUTO
67911+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
67912+ help
67913+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
67914+ be written to or read from to modify or leak the contents of the running
67915+ kernel. /dev/port will also not be allowed to be opened, writing to
67916+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
67917+ If you have module support disabled, enabling this will close up several
67918+ ways that are currently used to insert malicious code into the running
67919+ kernel.
67920+
67921+ Even with this feature enabled, we still highly recommend that
67922+ you use the RBAC system, as it is still possible for an attacker to
67923+ modify the running kernel through other more obscure methods.
67924+
67925+ It is highly recommended that you say Y here if you meet all the
67926+ conditions above.
67927+
67928+config GRKERNSEC_VM86
67929+ bool "Restrict VM86 mode"
67930+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
67931+ depends on X86_32
67932+
67933+ help
67934+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
67935+ make use of a special execution mode on 32bit x86 processors called
67936+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
67937+ video cards and will still work with this option enabled. The purpose
67938+ of the option is to prevent exploitation of emulation errors in
67939+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
67940+ Nearly all users should be able to enable this option.
67941+
67942+config GRKERNSEC_IO
67943+ bool "Disable privileged I/O"
67944+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
67945+ depends on X86
67946+ select RTC_CLASS
67947+ select RTC_INTF_DEV
67948+ select RTC_DRV_CMOS
67949+
67950+ help
67951+ If you say Y here, all ioperm and iopl calls will return an error.
67952+ Ioperm and iopl can be used to modify the running kernel.
67953+ Unfortunately, some programs need this access to operate properly,
67954+ the most notable of which are XFree86 and hwclock. hwclock can be
67955+ remedied by having RTC support in the kernel, so real-time
67956+ clock support is enabled if this option is enabled, to ensure
67957+ that hwclock operates correctly. If hwclock still does not work,
67958+ either update udev or symlink /dev/rtc to /dev/rtc0.
67959+
67960+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
67961+ you may not be able to boot into a graphical environment with this
67962+ option enabled. In this case, you should use the RBAC system instead.
67963+
67964+config GRKERNSEC_BPF_HARDEN
67965+ bool "Harden BPF interpreter"
67966+ default y if GRKERNSEC_CONFIG_AUTO
67967+ help
67968+ Unlike previous versions of grsecurity that hardened both the BPF
67969+ interpreted code against corruption at rest as well as the JIT code
67970+ against JIT-spray attacks and attacker-controlled immediate values
67971+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
67972+ and will ensure the interpreted code is read-only at rest. This feature
67973+ may be removed at a later time when eBPF stabilizes to entirely revert
67974+ back to the more secure pre-3.16 BPF interpreter/JIT.
67975+
67976+ If you're using KERNEXEC, it's recommended that you enable this option
67977+ to supplement the hardening of the kernel.
67978+
67979+config GRKERNSEC_PERF_HARDEN
67980+ bool "Disable unprivileged PERF_EVENTS usage by default"
67981+ default y if GRKERNSEC_CONFIG_AUTO
67982+ depends on PERF_EVENTS
67983+ help
67984+ If you say Y here, the range of acceptable values for the
67985+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
67986+ default to a new value: 3. When the sysctl is set to this value, no
67987+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
67988+
67989+ Though PERF_EVENTS can be used legitimately for performance monitoring
67990+ and low-level application profiling, it is forced on regardless of
67991+ configuration, has been at fault for several vulnerabilities, and
67992+ creates new opportunities for side channels and other information leaks.
67993+
67994+ This feature puts PERF_EVENTS into a secure default state and permits
67995+ the administrator to change out of it temporarily if unprivileged
67996+ application profiling is needed.
67997+
67998+config GRKERNSEC_RAND_THREADSTACK
67999+ bool "Insert random gaps between thread stacks"
68000+ default y if GRKERNSEC_CONFIG_AUTO
68001+ depends on PAX_RANDMMAP && !PPC
68002+ help
68003+ If you say Y here, a random-sized gap will be enforced between allocated
68004+ thread stacks. Glibc's NPTL and other threading libraries that
68005+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68006+ The implementation currently provides 8 bits of entropy for the gap.
68007+
68008+ Many distributions do not compile threaded remote services with the
68009+ -fstack-check argument to GCC, causing the variable-sized stack-based
68010+ allocator, alloca(), to not probe the stack on allocation. This
68011+ permits an unbounded alloca() to skip over any guard page and potentially
68012+ modify another thread's stack reliably. An enforced random gap
68013+ reduces the reliability of such an attack and increases the chance
68014+ that such a read/write to another thread's stack instead lands in
68015+ an unmapped area, causing a crash and triggering grsecurity's
68016+ anti-bruteforcing logic.
68017+
68018+config GRKERNSEC_PROC_MEMMAP
68019+ bool "Harden ASLR against information leaks and entropy reduction"
68020+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68021+ depends on PAX_NOEXEC || PAX_ASLR
68022+ help
68023+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68024+ give no information about the addresses of its mappings if
68025+ PaX features that rely on random addresses are enabled on the task.
68026+ In addition to sanitizing this information and disabling other
68027+ dangerous sources of information, this option causes reads of sensitive
68028+ /proc/<pid> entries to be rejected where the file descriptor was opened
68029+ in a different task than the one performing the read. Such attempts are logged.
68030+ This option also limits argv/env strings for suid/sgid binaries
68031+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68032+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68033+ binaries to prevent alternative mmap layouts from being abused.
68034+
68035+ If you use PaX it is essential that you say Y here as it closes up
68036+ several holes that make full ASLR useless locally.
68037+
68038+
68039+config GRKERNSEC_KSTACKOVERFLOW
68040+ bool "Prevent kernel stack overflows"
68041+ default y if GRKERNSEC_CONFIG_AUTO
68042+ depends on !IA64 && 64BIT
68043+ help
68044+ If you say Y here, the kernel's process stacks will be allocated
68045+ with vmalloc instead of the kernel's default allocator. This
68046+ introduces guard pages that in combination with the alloca checking
68047+ of the STACKLEAK feature prevents all forms of kernel process stack
68048+ overflow abuse. Note that this is different from kernel stack
68049+ buffer overflows.
68050+
68051+config GRKERNSEC_BRUTE
68052+ bool "Deter exploit bruteforcing"
68053+ default y if GRKERNSEC_CONFIG_AUTO
68054+ help
68055+ If you say Y here, attempts to bruteforce exploits against forking
68056+ daemons such as apache or sshd, as well as against suid/sgid binaries
68057+ will be deterred. When a child of a forking daemon is killed by PaX
68058+ or crashes due to an illegal instruction or other suspicious signal,
68059+ the parent process will be delayed 30 seconds upon every subsequent
68060+ fork until the administrator is able to assess the situation and
68061+ restart the daemon.
68062+ In the suid/sgid case, the attempt is logged, the user has all their
68063+ existing instances of the suid/sgid binary terminated and will
68064+ be unable to execute any suid/sgid binaries for 15 minutes.
68065+
68066+ It is recommended that you also enable signal logging in the auditing
68067+ section so that logs are generated when a process triggers a suspicious
68068+ signal.
68069+ If the sysctl option is enabled, a sysctl option with name
68070+ "deter_bruteforce" is created.
68071+
68072+config GRKERNSEC_MODHARDEN
68073+ bool "Harden module auto-loading"
68074+ default y if GRKERNSEC_CONFIG_AUTO
68075+ depends on MODULES
68076+ help
68077+ If you say Y here, module auto-loading in response to use of some
68078+ feature implemented by an unloaded module will be restricted to
68079+ root users. Enabling this option helps defend against attacks
68080+ by unprivileged users who abuse the auto-loading behavior to
68081+ cause a vulnerable module to load that is then exploited.
68082+
68083+ If this option prevents a legitimate use of auto-loading for a
68084+ non-root user, the administrator can execute modprobe manually
68085+ with the exact name of the module mentioned in the alert log.
68086+ Alternatively, the administrator can add the module to the list
68087+ of modules loaded at boot by modifying init scripts.
68088+
68089+ Modification of init scripts will most likely be needed on
68090+ Ubuntu servers with encrypted home directory support enabled,
68091+ as the first non-root user logging in will cause the ecb(aes),
68092+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68093+
68094+config GRKERNSEC_HIDESYM
68095+ bool "Hide kernel symbols"
68096+ default y if GRKERNSEC_CONFIG_AUTO
68097+ select PAX_USERCOPY_SLABS
68098+ help
68099+ If you say Y here, getting information on loaded modules, and
68100+ displaying all kernel symbols through a syscall will be restricted
68101+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68102+ /proc/kallsyms will be restricted to the root user. The RBAC
68103+ system can hide that entry even from root.
68104+
68105+ This option also prevents leaking of kernel addresses through
68106+ several /proc entries.
68107+
68108+ Note that this option is only effective provided the following
68109+ conditions are met:
68110+ 1) The kernel using grsecurity is not precompiled by some distribution
68111+ 2) You have also enabled GRKERNSEC_DMESG
68112+ 3) You are using the RBAC system and hiding other files such as your
68113+ kernel image and System.map. Alternatively, enabling this option
68114+ causes the permissions on /boot, /lib/modules, and the kernel
68115+ source directory to change at compile time to prevent
68116+ reading by non-root users.
68117+ If the above conditions are met, this option will aid in providing a
68118+ useful protection against local kernel exploitation of overflows
68119+ and arbitrary read/write vulnerabilities.
68120+
68121+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68122+ in addition to this feature.
68123+
68124+config GRKERNSEC_RANDSTRUCT
68125+ bool "Randomize layout of sensitive kernel structures"
68126+ default y if GRKERNSEC_CONFIG_AUTO
68127+ select GRKERNSEC_HIDESYM
68128+ select MODVERSIONS if MODULES
68129+ help
68130+ If you say Y here, the layouts of a number of sensitive kernel
68131+ structures (task, fs, cred, etc) and all structures composed entirely
68132+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68133+ This can introduce the requirement of an additional infoleak
68134+ vulnerability for exploits targeting these structure types.
68135+
68136+ Enabling this feature will introduce some performance impact, slightly
68137+ increase memory usage, and prevent the use of forensic tools like
68138+ Volatility against the system (unless the kernel source tree isn't
68139+ cleaned after kernel installation).
68140+
68141+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68142+ It remains after a make clean to allow for external modules to be compiled
68143+ with the existing seed and will be removed by a make mrproper or
68144+ make distclean.
68145+
68146+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68147+ to install the supporting headers explicitly in addition to the normal
68148+ gcc package.
68149+
68150+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68151+ bool "Use cacheline-aware structure randomization"
68152+ depends on GRKERNSEC_RANDSTRUCT
68153+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68154+ help
68155+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68156+ at restricting randomization to cacheline-sized groups of elements. It
68157+ will further not randomize bitfields in structures. This reduces the
68158+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68159+
68160+config GRKERNSEC_KERN_LOCKOUT
68161+ bool "Active kernel exploit response"
68162+ default y if GRKERNSEC_CONFIG_AUTO
68163+ depends on X86 || ARM || PPC || SPARC
68164+ help
68165+ If you say Y here, when a PaX alert is triggered due to suspicious
68166+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68167+ or an OOPS occurs due to bad memory accesses, instead of just
68168+ terminating the offending process (and potentially allowing
68169+ a subsequent exploit from the same user), we will take one of two
68170+ actions:
68171+ If the user was root, we will panic the system
68172+ If the user was non-root, we will log the attempt, terminate
68173+ all processes owned by the user, then prevent them from creating
68174+ any new processes until the system is restarted
68175+ This deters repeated kernel exploitation/bruteforcing attempts
68176+ and is useful for later forensics.
68177+
68178+config GRKERNSEC_OLD_ARM_USERLAND
68179+ bool "Old ARM userland compatibility"
68180+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68181+ help
68182+ If you say Y here, stubs of executable code to perform such operations
68183+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68184+ table. This is unfortunately needed for old ARM userland meant to run
68185+ across a wide range of processors. Without this option enabled,
68186+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68187+ which is enough for Linaro userlands or other userlands designed for v6
68188+ and newer ARM CPUs. It's recommended that you try without this option enabled
68189+ first, and only enable it if your userland does not boot (it will likely fail
68190+ at init time).
68191+
68192+endmenu
68193+menu "Role Based Access Control Options"
68194+depends on GRKERNSEC
68195+
68196+config GRKERNSEC_RBAC_DEBUG
68197+ bool
68198+
68199+config GRKERNSEC_NO_RBAC
68200+ bool "Disable RBAC system"
68201+ help
68202+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68203+ preventing the RBAC system from being enabled. You should only say Y
68204+ here if you have no intention of using the RBAC system, so as to prevent
68205+ an attacker with root access from misusing the RBAC system to hide files
68206+ and processes when loadable module support and /dev/[k]mem have been
68207+ locked down.
68208+
68209+config GRKERNSEC_ACL_HIDEKERN
68210+ bool "Hide kernel processes"
68211+ help
68212+ If you say Y here, all kernel threads will be hidden to all
68213+ processes but those whose subject has the "view hidden processes"
68214+ flag.
68215+
68216+config GRKERNSEC_ACL_MAXTRIES
68217+ int "Maximum tries before password lockout"
68218+ default 3
68219+ help
68220+ This option enforces the maximum number of times a user can attempt
68221+ to authorize themselves with the grsecurity RBAC system before being
68222+ denied the ability to attempt authorization again for a specified time.
68223+ The lower the number, the harder it will be to brute-force a password.
68224+
68225+config GRKERNSEC_ACL_TIMEOUT
68226+ int "Time to wait after max password tries, in seconds"
68227+ default 30
68228+ help
68229+ This option specifies the time the user must wait after attempting to
68230+ authorize to the RBAC system with the maximum number of invalid
68231+ passwords. The higher the number, the harder it will be to brute-force
68232+ a password.
68233+
68234+endmenu
68235+menu "Filesystem Protections"
68236+depends on GRKERNSEC
68237+
68238+config GRKERNSEC_PROC
68239+ bool "Proc restrictions"
68240+ default y if GRKERNSEC_CONFIG_AUTO
68241+ help
68242+ If you say Y here, the permissions of the /proc filesystem
68243+ will be altered to enhance system security and privacy. You MUST
68244+ choose either a user only restriction or a user and group restriction.
68245+ Depending upon the option you choose, you can either restrict users to
68246+ see only the processes they themselves run, or choose a group that can
68247+ view all processes and files normally restricted to root if you choose
68248+ the "restrict to user only" option. NOTE: If you're running identd or
68249+ ntpd as a non-root user, you will have to run it as the group you
68250+ specify here.
68251+
68252+config GRKERNSEC_PROC_USER
68253+ bool "Restrict /proc to user only"
68254+ depends on GRKERNSEC_PROC
68255+ help
68256+ If you say Y here, non-root users will only be able to view their own
68257+ processes, and restricts them from viewing network-related information,
68258+ and viewing kernel symbol and module information.
68259+
68260+config GRKERNSEC_PROC_USERGROUP
68261+ bool "Allow special group"
68262+ default y if GRKERNSEC_CONFIG_AUTO
68263+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68264+ help
68265+ If you say Y here, you will be able to select a group that will be
68266+ able to view all processes and network-related information. If you've
68267+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68268+ remain hidden. This option is useful if you want to run identd as
68269+ a non-root user. The group you select may also be chosen at boot time
68270+ via "grsec_proc_gid=" on the kernel commandline.
68271+
68272+config GRKERNSEC_PROC_GID
68273+ int "GID for special group"
68274+ depends on GRKERNSEC_PROC_USERGROUP
68275+ default 1001
68276+
68277+config GRKERNSEC_PROC_ADD
68278+ bool "Additional restrictions"
68279+ default y if GRKERNSEC_CONFIG_AUTO
68280+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68281+ help
68282+ If you say Y here, additional restrictions will be placed on
68283+ /proc that keep normal users from viewing device information and
68284+ slabinfo information that could be useful for exploits.
68285+
68286+config GRKERNSEC_LINK
68287+ bool "Linking restrictions"
68288+ default y if GRKERNSEC_CONFIG_AUTO
68289+ help
68290+ If you say Y here, /tmp race exploits will be prevented, since users
68291+ will no longer be able to follow symlinks owned by other users in
68292+ world-writable +t directories (e.g. /tmp), unless the owner of the
68293+ symlink is the owner of the directory. Users will also not be
68294+ able to hardlink to files they do not own. If the sysctl option is
68295+ enabled, a sysctl option with name "linking_restrictions" is created.
68296+
68297+config GRKERNSEC_SYMLINKOWN
68298+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68299+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68300+ help
68301+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68302+ that prevents it from being used as a security feature. As Apache
68303+ verifies the symlink by performing a stat() against the target of
68304+ the symlink before it is followed, an attacker can setup a symlink
68305+ to point to a same-owned file, then replace the symlink with one
68306+ that targets another user's file just after Apache "validates" the
68307+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68308+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68309+ will be in place for the group you specify. If the sysctl option
68310+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68311+ created.
68312+
68313+config GRKERNSEC_SYMLINKOWN_GID
68314+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68315+ depends on GRKERNSEC_SYMLINKOWN
68316+ default 1006
68317+ help
68318+ Setting this GID determines what group kernel-enforced
68319+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68320+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68321+
68322+config GRKERNSEC_FIFO
68323+ bool "FIFO restrictions"
68324+ default y if GRKERNSEC_CONFIG_AUTO
68325+ help
68326+ If you say Y here, users will not be able to write to FIFOs they don't
68327+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68328+ the FIFO is the same owner of the directory it's held in. If the sysctl
68329+ option is enabled, a sysctl option with name "fifo_restrictions" is
68330+ created.
68331+
68332+config GRKERNSEC_SYSFS_RESTRICT
68333+ bool "Sysfs/debugfs restriction"
68334+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68335+ depends on SYSFS
68336+ help
68337+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68338+ any filesystem normally mounted under it (e.g. debugfs) will be
68339+ mostly accessible only by root. These filesystems generally provide access
68340+ to hardware and debug information that isn't appropriate for unprivileged
68341+ users of the system. Sysfs and debugfs have also become a large source
68342+ of new vulnerabilities, ranging from infoleaks to local compromise.
68343+ There has been very little oversight with an eye toward security involved
68344+ in adding new exporters of information to these filesystems, so their
68345+ use is discouraged.
68346+ For reasons of compatibility, a few directories have been whitelisted
68347+ for access by non-root users:
68348+ /sys/fs/selinux
68349+ /sys/fs/fuse
68350+ /sys/devices/system/cpu
68351+
68352+config GRKERNSEC_ROFS
68353+ bool "Runtime read-only mount protection"
68354+ depends on SYSCTL
68355+ help
68356+ If you say Y here, a sysctl option with name "romount_protect" will
68357+ be created. By setting this option to 1 at runtime, filesystems
68358+ will be protected in the following ways:
68359+ * No new writable mounts will be allowed
68360+ * Existing read-only mounts won't be able to be remounted read/write
68361+ * Write operations will be denied on all block devices
68362+ This option acts independently of grsec_lock: once it is set to 1,
68363+ it cannot be turned off. Therefore, please be mindful of the resulting
68364+ behavior if this option is enabled in an init script on a read-only
68365+ filesystem.
68366+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68367+ and GRKERNSEC_IO should be enabled and module loading disabled via
68368+ config or at runtime.
68369+ This feature is mainly intended for secure embedded systems.
68370+
68371+
68372+config GRKERNSEC_DEVICE_SIDECHANNEL
68373+ bool "Eliminate stat/notify-based device sidechannels"
68374+ default y if GRKERNSEC_CONFIG_AUTO
68375+ help
68376+ If you say Y here, timing analyses on block or character
68377+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68378+ will be thwarted for unprivileged users. If a process without
68379+ CAP_MKNOD stats such a device, the last access and last modify times
68380+ will match the device's create time. No access or modify events
68381+ will be triggered through inotify/dnotify/fanotify for such devices.
68382+ This feature will prevent attacks that may at a minimum
68383+ allow an attacker to determine the administrator's password length.
68384+
68385+config GRKERNSEC_CHROOT
68386+ bool "Chroot jail restrictions"
68387+ default y if GRKERNSEC_CONFIG_AUTO
68388+ help
68389+ If you say Y here, you will be able to choose several options that will
68390+ make breaking out of a chrooted jail much more difficult. If you
68391+ encounter no software incompatibilities with the following options, it
68392+ is recommended that you enable each one.
68393+
68394+ Note that the chroot restrictions are not intended to apply to "chroots"
68395+ to directories that are simple bind mounts of the global root filesystem.
68396+ For several other reasons, a user shouldn't expect any significant
68397+ security by performing such a chroot.
68398+
68399+config GRKERNSEC_CHROOT_MOUNT
68400+ bool "Deny mounts"
68401+ default y if GRKERNSEC_CONFIG_AUTO
68402+ depends on GRKERNSEC_CHROOT
68403+ help
68404+ If you say Y here, processes inside a chroot will not be able to
68405+ mount or remount filesystems. If the sysctl option is enabled, a
68406+ sysctl option with name "chroot_deny_mount" is created.
68407+
68408+config GRKERNSEC_CHROOT_DOUBLE
68409+ bool "Deny double-chroots"
68410+ default y if GRKERNSEC_CONFIG_AUTO
68411+ depends on GRKERNSEC_CHROOT
68412+ help
68413+ If you say Y here, processes inside a chroot will not be able to chroot
68414+ again outside the chroot. This is a widely used method of breaking
68415+ out of a chroot jail and should not be allowed. If the sysctl
68416+ option is enabled, a sysctl option with name
68417+ "chroot_deny_chroot" is created.
68418+
68419+config GRKERNSEC_CHROOT_PIVOT
68420+ bool "Deny pivot_root in chroot"
68421+ default y if GRKERNSEC_CONFIG_AUTO
68422+ depends on GRKERNSEC_CHROOT
68423+ help
68424+ If you say Y here, processes inside a chroot will not be able to use
68425+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68426+ works similar to chroot in that it changes the root filesystem. This
68427+ function could be misused in a chrooted process to attempt to break out
68428+ of the chroot, and therefore should not be allowed. If the sysctl
68429+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68430+ created.
68431+
68432+config GRKERNSEC_CHROOT_CHDIR
68433+ bool "Enforce chdir(\"/\") on all chroots"
68434+ default y if GRKERNSEC_CONFIG_AUTO
68435+ depends on GRKERNSEC_CHROOT
68436+ help
68437+ If you say Y here, the current working directory of all newly-chrooted
68438+ applications will be set to the root directory of the chroot.
68439+ The man page on chroot(2) states:
68440+ Note that this call does not change the current working
68441+ directory, so that `.' can be outside the tree rooted at
68442+ `/'. In particular, the super-user can escape from a
68443+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68444+
68445+ It is recommended that you say Y here, since it's not known to break
68446+ any software. If the sysctl option is enabled, a sysctl option with
68447+ name "chroot_enforce_chdir" is created.
68448+
68449+config GRKERNSEC_CHROOT_CHMOD
68450+ bool "Deny (f)chmod +s"
68451+ default y if GRKERNSEC_CONFIG_AUTO
68452+ depends on GRKERNSEC_CHROOT
68453+ help
68454+ If you say Y here, processes inside a chroot will not be able to chmod
68455+ or fchmod files to make them have suid or sgid bits. This protects
68456+ against another published method of breaking a chroot. If the sysctl
68457+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68458+ created.
68459+
68460+config GRKERNSEC_CHROOT_FCHDIR
68461+ bool "Deny fchdir and fhandle out of chroot"
68462+ default y if GRKERNSEC_CONFIG_AUTO
68463+ depends on GRKERNSEC_CHROOT
68464+ help
68465+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68466+ to a file descriptor of the chrooting process that points to a directory
68467+ outside the filesystem will be stopped. Additionally, this option prevents
68468+ use of the recently-created syscall for opening files by a guessable "file
68469+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68470+ with name "chroot_deny_fchdir" is created.
68471+
68472+config GRKERNSEC_CHROOT_MKNOD
68473+ bool "Deny mknod"
68474+ default y if GRKERNSEC_CONFIG_AUTO
68475+ depends on GRKERNSEC_CHROOT
68476+ help
68477+ If you say Y here, processes inside a chroot will not be allowed to
68478+ mknod. The problem with using mknod inside a chroot is that it
68479+ would allow an attacker to create a device entry that is the same
68480+ as one on the physical root of your system, which could range from
68481+ anything from the console device to a device for your harddrive (which
68482+ they could then use to wipe the drive or steal data). It is recommended
68483+ that you say Y here, unless you run into software incompatibilities.
68484+ If the sysctl option is enabled, a sysctl option with name
68485+ "chroot_deny_mknod" is created.
68486+
68487+config GRKERNSEC_CHROOT_SHMAT
68488+ bool "Deny shmat() out of chroot"
68489+ default y if GRKERNSEC_CONFIG_AUTO
68490+ depends on GRKERNSEC_CHROOT
68491+ help
68492+ If you say Y here, processes inside a chroot will not be able to attach
68493+ to shared memory segments that were created outside of the chroot jail.
68494+ It is recommended that you say Y here. If the sysctl option is enabled,
68495+ a sysctl option with name "chroot_deny_shmat" is created.
68496+
68497+config GRKERNSEC_CHROOT_UNIX
68498+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68499+ default y if GRKERNSEC_CONFIG_AUTO
68500+ depends on GRKERNSEC_CHROOT
68501+ help
68502+ If you say Y here, processes inside a chroot will not be able to
68503+ connect to abstract (meaning not belonging to a filesystem) Unix
68504+ domain sockets that were bound outside of a chroot. It is recommended
68505+ that you say Y here. If the sysctl option is enabled, a sysctl option
68506+ with name "chroot_deny_unix" is created.
68507+
68508+config GRKERNSEC_CHROOT_FINDTASK
68509+ bool "Protect outside processes"
68510+ default y if GRKERNSEC_CONFIG_AUTO
68511+ depends on GRKERNSEC_CHROOT
68512+ help
68513+ If you say Y here, processes inside a chroot will not be able to
68514+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68515+ getsid, or view any process outside of the chroot. If the sysctl
68516+ option is enabled, a sysctl option with name "chroot_findtask" is
68517+ created.
68518+
68519+config GRKERNSEC_CHROOT_NICE
68520+ bool "Restrict priority changes"
68521+ default y if GRKERNSEC_CONFIG_AUTO
68522+ depends on GRKERNSEC_CHROOT
68523+ help
68524+ If you say Y here, processes inside a chroot will not be able to raise
68525+ the priority of processes in the chroot, or alter the priority of
68526+ processes outside the chroot. This provides more security than simply
68527+ removing CAP_SYS_NICE from the process' capability set. If the
68528+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68529+ is created.
68530+
68531+config GRKERNSEC_CHROOT_SYSCTL
68532+ bool "Deny sysctl writes"
68533+ default y if GRKERNSEC_CONFIG_AUTO
68534+ depends on GRKERNSEC_CHROOT
68535+ help
68536+ If you say Y here, an attacker in a chroot will not be able to
68537+ write to sysctl entries, either by sysctl(2) or through a /proc
68538+ interface. It is strongly recommended that you say Y here. If the
68539+ sysctl option is enabled, a sysctl option with name
68540+ "chroot_deny_sysctl" is created.
68541+
68542+config GRKERNSEC_CHROOT_RENAME
68543+ bool "Deny bad renames"
68544+ default y if GRKERNSEC_CONFIG_AUTO
68545+ depends on GRKERNSEC_CHROOT
68546+ help
68547+ If you say Y here, an attacker in a chroot will not be able to
68548+ abuse the ability to create double chroots to break out of the
68549+ chroot by exploiting a race condition between a rename of a directory
68550+ within a chroot against an open of a symlink with relative path
68551+ components. This feature will likewise prevent an accomplice outside
68552+ a chroot from enabling a user inside the chroot to break out and make
68553+ use of their credentials on the global filesystem. Enabling this
68554+ feature is essential to prevent root users from breaking out of a
68555+ chroot. If the sysctl option is enabled, a sysctl option with name
68556+ "chroot_deny_bad_rename" is created.
68557+
68558+config GRKERNSEC_CHROOT_CAPS
68559+ bool "Capability restrictions"
68560+ default y if GRKERNSEC_CONFIG_AUTO
68561+ depends on GRKERNSEC_CHROOT
68562+ help
68563+ If you say Y here, the capabilities on all processes within a
68564+ chroot jail will be lowered to stop module insertion, raw i/o,
68565+ system and net admin tasks, rebooting the system, modifying immutable
68566+ files, modifying IPC owned by another, and changing the system time.
68567+ This is left an option because it can break some apps. Disable this
68568+ if your chrooted apps are having problems performing those kinds of
68569+ tasks. If the sysctl option is enabled, a sysctl option with
68570+ name "chroot_caps" is created.
68571+
68572+config GRKERNSEC_CHROOT_INITRD
68573+ bool "Exempt initrd tasks from restrictions"
68574+ default y if GRKERNSEC_CONFIG_AUTO
68575+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68576+ help
68577+ If you say Y here, tasks started prior to init will be exempted from
68578+ grsecurity's chroot restrictions. This option is mainly meant to
68579+ resolve Plymouth's performing privileged operations unnecessarily
68580+ in a chroot.
68581+
68582+endmenu
68583+menu "Kernel Auditing"
68584+depends on GRKERNSEC
68585+
68586+config GRKERNSEC_AUDIT_GROUP
68587+ bool "Single group for auditing"
68588+ help
68589+ If you say Y here, the exec and chdir logging features will only operate
68590+ on a group you specify. This option is recommended if you only want to
68591+ watch certain users instead of having a large amount of logs from the
68592+ entire system. If the sysctl option is enabled, a sysctl option with
68593+ name "audit_group" is created.
68594+
68595+config GRKERNSEC_AUDIT_GID
68596+ int "GID for auditing"
68597+ depends on GRKERNSEC_AUDIT_GROUP
68598+ default 1007
68599+
68600+config GRKERNSEC_EXECLOG
68601+ bool "Exec logging"
68602+ help
68603+ If you say Y here, all execve() calls will be logged (since the
68604+ other exec*() calls are frontends to execve(), all execution
68605+ will be logged). Useful for shell-servers that like to keep track
68606+ of their users. If the sysctl option is enabled, a sysctl option with
68607+ name "exec_logging" is created.
68608+ WARNING: This option when enabled will produce a LOT of logs, especially
68609+ on an active system.
68610+
68611+config GRKERNSEC_RESLOG
68612+ bool "Resource logging"
68613+ default y if GRKERNSEC_CONFIG_AUTO
68614+ help
68615+ If you say Y here, all attempts to overstep resource limits will
68616+ be logged with the resource name, the requested size, and the current
68617+ limit. It is highly recommended that you say Y here. If the sysctl
68618+ option is enabled, a sysctl option with name "resource_logging" is
68619+ created. If the RBAC system is enabled, the sysctl value is ignored.
68620+
68621+config GRKERNSEC_CHROOT_EXECLOG
68622+ bool "Log execs within chroot"
68623+ help
68624+ If you say Y here, all executions inside a chroot jail will be logged
68625+ to syslog. This can cause a large amount of logs if certain
68626+ applications (eg. djb's daemontools) are installed on the system, and
68627+ is therefore left as an option. If the sysctl option is enabled, a
68628+ sysctl option with name "chroot_execlog" is created.
68629+
68630+config GRKERNSEC_AUDIT_PTRACE
68631+ bool "Ptrace logging"
68632+ help
68633+ If you say Y here, all attempts to attach to a process via ptrace
68634+ will be logged. If the sysctl option is enabled, a sysctl option
68635+ with name "audit_ptrace" is created.
68636+
68637+config GRKERNSEC_AUDIT_CHDIR
68638+ bool "Chdir logging"
68639+ help
68640+ If you say Y here, all chdir() calls will be logged. If the sysctl
68641+ option is enabled, a sysctl option with name "audit_chdir" is created.
68642+
68643+config GRKERNSEC_AUDIT_MOUNT
68644+ bool "(Un)Mount logging"
68645+ help
68646+ If you say Y here, all mounts and unmounts will be logged. If the
68647+ sysctl option is enabled, a sysctl option with name "audit_mount" is
68648+ created.
68649+
68650+config GRKERNSEC_SIGNAL
68651+ bool "Signal logging"
68652+ default y if GRKERNSEC_CONFIG_AUTO
68653+ help
68654+ If you say Y here, certain important signals will be logged, such as
68655+ SIGSEGV, which will as a result inform you of when a error in a program
68656+ occurred, which in some cases could mean a possible exploit attempt.
68657+ If the sysctl option is enabled, a sysctl option with name
68658+ "signal_logging" is created.
68659+
68660+config GRKERNSEC_FORKFAIL
68661+ bool "Fork failure logging"
68662+ help
68663+ If you say Y here, all failed fork() attempts will be logged.
68664+ This could suggest a fork bomb, or someone attempting to overstep
68665+ their process limit. If the sysctl option is enabled, a sysctl option
68666+ with name "forkfail_logging" is created.
68667+
68668+config GRKERNSEC_TIME
68669+ bool "Time change logging"
68670+ default y if GRKERNSEC_CONFIG_AUTO
68671+ help
68672+ If you say Y here, any changes of the system clock will be logged.
68673+ If the sysctl option is enabled, a sysctl option with name
68674+ "timechange_logging" is created.
68675+
68676+config GRKERNSEC_PROC_IPADDR
68677+ bool "/proc/<pid>/ipaddr support"
68678+ default y if GRKERNSEC_CONFIG_AUTO
68679+ help
68680+ If you say Y here, a new entry will be added to each /proc/<pid>
68681+ directory that contains the IP address of the person using the task.
68682+ The IP is carried across local TCP and AF_UNIX stream sockets.
68683+ This information can be useful for IDS/IPSes to perform remote response
68684+ to a local attack. The entry is readable by only the owner of the
68685+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
68686+ the RBAC system), and thus does not create privacy concerns.
68687+
68688+config GRKERNSEC_RWXMAP_LOG
68689+ bool 'Denied RWX mmap/mprotect logging'
68690+ default y if GRKERNSEC_CONFIG_AUTO
68691+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
68692+ help
68693+ If you say Y here, calls to mmap() and mprotect() with explicit
68694+ usage of PROT_WRITE and PROT_EXEC together will be logged when
68695+ denied by the PAX_MPROTECT feature. This feature will also
68696+ log other problematic scenarios that can occur when PAX_MPROTECT
68697+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
68698+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
68699+ is created.
68700+
68701+endmenu
68702+
68703+menu "Executable Protections"
68704+depends on GRKERNSEC
68705+
68706+config GRKERNSEC_DMESG
68707+ bool "Dmesg(8) restriction"
68708+ default y if GRKERNSEC_CONFIG_AUTO
68709+ help
68710+ If you say Y here, non-root users will not be able to use dmesg(8)
68711+ to view the contents of the kernel's circular log buffer.
68712+ The kernel's log buffer often contains kernel addresses and other
68713+ identifying information useful to an attacker in fingerprinting a
68714+ system for a targeted exploit.
68715+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
68716+ created.
68717+
68718+config GRKERNSEC_HARDEN_PTRACE
68719+ bool "Deter ptrace-based process snooping"
68720+ default y if GRKERNSEC_CONFIG_AUTO
68721+ help
68722+ If you say Y here, TTY sniffers and other malicious monitoring
68723+ programs implemented through ptrace will be defeated. If you
68724+ have been using the RBAC system, this option has already been
68725+ enabled for several years for all users, with the ability to make
68726+ fine-grained exceptions.
68727+
68728+ This option only affects the ability of non-root users to ptrace
68729+ processes that are not a descendent of the ptracing process.
68730+ This means that strace ./binary and gdb ./binary will still work,
68731+ but attaching to arbitrary processes will not. If the sysctl
68732+ option is enabled, a sysctl option with name "harden_ptrace" is
68733+ created.
68734+
68735+config GRKERNSEC_PTRACE_READEXEC
68736+ bool "Require read access to ptrace sensitive binaries"
68737+ default y if GRKERNSEC_CONFIG_AUTO
68738+ help
68739+ If you say Y here, unprivileged users will not be able to ptrace unreadable
68740+ binaries. This option is useful in environments that
68741+ remove the read bits (e.g. file mode 4711) from suid binaries to
68742+ prevent infoleaking of their contents. This option adds
68743+ consistency to the use of that file mode, as the binary could normally
68744+ be read out when run without privileges while ptracing.
68745+
68746+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
68747+ is created.
68748+
68749+config GRKERNSEC_SETXID
68750+ bool "Enforce consistent multithreaded privileges"
68751+ default y if GRKERNSEC_CONFIG_AUTO
68752+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
68753+ help
68754+ If you say Y here, a change from a root uid to a non-root uid
68755+ in a multithreaded application will cause the resulting uids,
68756+ gids, supplementary groups, and capabilities in that thread
68757+ to be propagated to the other threads of the process. In most
68758+ cases this is unnecessary, as glibc will emulate this behavior
68759+ on behalf of the application. Other libcs do not act in the
68760+ same way, allowing the other threads of the process to continue
68761+ running with root privileges. If the sysctl option is enabled,
68762+ a sysctl option with name "consistent_setxid" is created.
68763+
68764+config GRKERNSEC_HARDEN_IPC
68765+ bool "Disallow access to overly-permissive IPC objects"
68766+ default y if GRKERNSEC_CONFIG_AUTO
68767+ depends on SYSVIPC
68768+ help
68769+ If you say Y here, access to overly-permissive IPC objects (shared
68770+ memory, message queues, and semaphores) will be denied for processes
68771+ given the following criteria beyond normal permission checks:
68772+ 1) If the IPC object is world-accessible and the euid doesn't match
68773+ that of the creator or current uid for the IPC object
68774+ 2) If the IPC object is group-accessible and the egid doesn't
68775+ match that of the creator or current gid for the IPC object
68776+ It's a common error to grant too much permission to these objects,
68777+ with impact ranging from denial of service and information leaking to
68778+ privilege escalation. This feature was developed in response to
68779+ research by Tim Brown:
68780+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
68781+ who found hundreds of such insecure usages. Processes with
68782+ CAP_IPC_OWNER are still permitted to access these IPC objects.
68783+ If the sysctl option is enabled, a sysctl option with name
68784+ "harden_ipc" is created.
68785+
68786+config GRKERNSEC_TPE
68787+ bool "Trusted Path Execution (TPE)"
68788+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68789+ help
68790+ If you say Y here, you will be able to choose a gid to add to the
68791+ supplementary groups of users you want to mark as "untrusted."
68792+ These users will not be able to execute any files that are not in
68793+ root-owned directories writable only by root. If the sysctl option
68794+ is enabled, a sysctl option with name "tpe" is created.
68795+
68796+config GRKERNSEC_TPE_ALL
68797+ bool "Partially restrict all non-root users"
68798+ depends on GRKERNSEC_TPE
68799+ help
68800+ If you say Y here, all non-root users will be covered under
68801+ a weaker TPE restriction. This is separate from, and in addition to,
68802+ the main TPE options that you have selected elsewhere. Thus, if a
68803+ "trusted" GID is chosen, this restriction applies to even that GID.
68804+ Under this restriction, all non-root users will only be allowed to
68805+ execute files in directories they own that are not group or
68806+ world-writable, or in directories owned by root and writable only by
68807+ root. If the sysctl option is enabled, a sysctl option with name
68808+ "tpe_restrict_all" is created.
68809+
68810+config GRKERNSEC_TPE_INVERT
68811+ bool "Invert GID option"
68812+ depends on GRKERNSEC_TPE
68813+ help
68814+ If you say Y here, the group you specify in the TPE configuration will
68815+ decide what group TPE restrictions will be *disabled* for. This
68816+ option is useful if you want TPE restrictions to be applied to most
68817+ users on the system. If the sysctl option is enabled, a sysctl option
68818+ with name "tpe_invert" is created. Unlike other sysctl options, this
68819+ entry will default to on for backward-compatibility.
68820+
68821+config GRKERNSEC_TPE_GID
68822+ int
68823+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
68824+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
68825+
68826+config GRKERNSEC_TPE_UNTRUSTED_GID
68827+ int "GID for TPE-untrusted users"
68828+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
68829+ default 1005
68830+ help
68831+ Setting this GID determines what group TPE restrictions will be
68832+ *enabled* for. If the sysctl option is enabled, a sysctl option
68833+ with name "tpe_gid" is created.
68834+
68835+config GRKERNSEC_TPE_TRUSTED_GID
68836+ int "GID for TPE-trusted users"
68837+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
68838+ default 1005
68839+ help
68840+ Setting this GID determines what group TPE restrictions will be
68841+ *disabled* for. If the sysctl option is enabled, a sysctl option
68842+ with name "tpe_gid" is created.
68843+
68844+endmenu
68845+menu "Network Protections"
68846+depends on GRKERNSEC
68847+
68848+config GRKERNSEC_BLACKHOLE
68849+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
68850+ default y if GRKERNSEC_CONFIG_AUTO
68851+ depends on NET
68852+ help
68853+ If you say Y here, neither TCP resets nor ICMP
68854+ destination-unreachable packets will be sent in response to packets
68855+ sent to ports for which no associated listening process exists.
68856+ It will also prevent the sending of ICMP protocol unreachable packets
68857+ in response to packets with unknown protocols.
68858+ This feature supports both IPV4 and IPV6 and exempts the
68859+ loopback interface from blackholing. Enabling this feature
68860+ makes a host more resilient to DoS attacks and reduces network
68861+ visibility against scanners.
68862+
68863+ The blackhole feature as-implemented is equivalent to the FreeBSD
68864+ blackhole feature, as it prevents RST responses to all packets, not
68865+ just SYNs. Under most application behavior this causes no
68866+ problems, but applications (like haproxy) may not close certain
68867+ connections in a way that cleanly terminates them on the remote
68868+ end, leaving the remote host in LAST_ACK state. Because of this
68869+ side-effect and to prevent intentional LAST_ACK DoSes, this
68870+ feature also adds automatic mitigation against such attacks.
68871+ The mitigation drastically reduces the amount of time a socket
68872+ can spend in LAST_ACK state. If you're using haproxy and not
68873+ all servers it connects to have this option enabled, consider
68874+ disabling this feature on the haproxy host.
68875+
68876+ If the sysctl option is enabled, two sysctl options with names
68877+ "ip_blackhole" and "lastack_retries" will be created.
68878+ While "ip_blackhole" takes the standard zero/non-zero on/off
68879+ toggle, "lastack_retries" uses the same kinds of values as
68880+ "tcp_retries1" and "tcp_retries2". The default value of 4
68881+ prevents a socket from lasting more than 45 seconds in LAST_ACK
68882+ state.
68883+
68884+config GRKERNSEC_NO_SIMULT_CONNECT
68885+ bool "Disable TCP Simultaneous Connect"
68886+ default y if GRKERNSEC_CONFIG_AUTO
68887+ depends on NET
68888+ help
68889+ If you say Y here, a feature by Willy Tarreau will be enabled that
68890+ removes a weakness in Linux's strict implementation of TCP that
68891+ allows two clients to connect to each other without either entering
68892+ a listening state. The weakness allows an attacker to easily prevent
68893+ a client from connecting to a known server provided the source port
68894+ for the connection is guessed correctly.
68895+
68896+ As the weakness could be used to prevent an antivirus or IPS from
68897+ fetching updates, or prevent an SSL gateway from fetching a CRL,
68898+ it should be eliminated by enabling this option. Though Linux is
68899+ one of few operating systems supporting simultaneous connect, it
68900+ has no legitimate use in practice and is rarely supported by firewalls.
68901+
68902+config GRKERNSEC_SOCKET
68903+ bool "Socket restrictions"
68904+ depends on NET
68905+ help
68906+ If you say Y here, you will be able to choose from several options.
68907+ If you assign a GID on your system and add it to the supplementary
68908+ groups of users you want to restrict socket access to, this patch
68909+ will perform up to three things, based on the option(s) you choose.
68910+
68911+config GRKERNSEC_SOCKET_ALL
68912+ bool "Deny any sockets to group"
68913+ depends on GRKERNSEC_SOCKET
68914+ help
68915+ If you say Y here, you will be able to choose a GID of whose users will
68916+ be unable to connect to other hosts from your machine or run server
68917+ applications from your machine. If the sysctl option is enabled, a
68918+ sysctl option with name "socket_all" is created.
68919+
68920+config GRKERNSEC_SOCKET_ALL_GID
68921+ int "GID to deny all sockets for"
68922+ depends on GRKERNSEC_SOCKET_ALL
68923+ default 1004
68924+ help
68925+ Here you can choose the GID to disable socket access for. Remember to
68926+ add the users you want socket access disabled for to the GID
68927+ specified here. If the sysctl option is enabled, a sysctl option
68928+ with name "socket_all_gid" is created.
68929+
68930+config GRKERNSEC_SOCKET_CLIENT
68931+ bool "Deny client sockets to group"
68932+ depends on GRKERNSEC_SOCKET
68933+ help
68934+ If you say Y here, you will be able to choose a GID of whose users will
68935+ be unable to connect to other hosts from your machine, but will be
68936+ able to run servers. If this option is enabled, all users in the group
68937+ you specify will have to use passive mode when initiating ftp transfers
68938+ from the shell on your machine. If the sysctl option is enabled, a
68939+ sysctl option with name "socket_client" is created.
68940+
68941+config GRKERNSEC_SOCKET_CLIENT_GID
68942+ int "GID to deny client sockets for"
68943+ depends on GRKERNSEC_SOCKET_CLIENT
68944+ default 1003
68945+ help
68946+ Here you can choose the GID to disable client socket access for.
68947+ Remember to add the users you want client socket access disabled for to
68948+ the GID specified here. If the sysctl option is enabled, a sysctl
68949+ option with name "socket_client_gid" is created.
68950+
68951+config GRKERNSEC_SOCKET_SERVER
68952+ bool "Deny server sockets to group"
68953+ depends on GRKERNSEC_SOCKET
68954+ help
68955+ If you say Y here, you will be able to choose a GID of whose users will
68956+ be unable to run server applications from your machine. If the sysctl
68957+ option is enabled, a sysctl option with name "socket_server" is created.
68958+
68959+config GRKERNSEC_SOCKET_SERVER_GID
68960+ int "GID to deny server sockets for"
68961+ depends on GRKERNSEC_SOCKET_SERVER
68962+ default 1002
68963+ help
68964+ Here you can choose the GID to disable server socket access for.
68965+ Remember to add the users you want server socket access disabled for to
68966+ the GID specified here. If the sysctl option is enabled, a sysctl
68967+ option with name "socket_server_gid" is created.
68968+
68969+endmenu
68970+
68971+menu "Physical Protections"
68972+depends on GRKERNSEC
68973+
68974+config GRKERNSEC_DENYUSB
68975+ bool "Deny new USB connections after toggle"
68976+ default y if GRKERNSEC_CONFIG_AUTO
68977+ depends on SYSCTL && USB_SUPPORT
68978+ help
68979+ If you say Y here, a new sysctl option with name "deny_new_usb"
68980+ will be created. Setting its value to 1 will prevent any new
68981+ USB devices from being recognized by the OS. Any attempted USB
68982+ device insertion will be logged. This option is intended to be
68983+ used against custom USB devices designed to exploit vulnerabilities
68984+ in various USB device drivers.
68985+
68986+ For greatest effectiveness, this sysctl should be set after any
68987+ relevant init scripts. This option is safe to enable in distros
68988+ as each user can choose whether or not to toggle the sysctl.
68989+
68990+config GRKERNSEC_DENYUSB_FORCE
68991+ bool "Reject all USB devices not connected at boot"
68992+ select USB
68993+ depends on GRKERNSEC_DENYUSB
68994+ help
68995+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
68996+ that doesn't involve a sysctl entry. This option should only be
68997+ enabled if you're sure you want to deny all new USB connections
68998+ at runtime and don't want to modify init scripts. This should not
68999+ be enabled by distros. It forces the core USB code to be built
69000+ into the kernel image so that all devices connected at boot time
69001+ can be recognized and new USB device connections can be prevented
69002+ prior to init running.
69003+
69004+endmenu
69005+
69006+menu "Sysctl Support"
69007+depends on GRKERNSEC && SYSCTL
69008+
69009+config GRKERNSEC_SYSCTL
69010+ bool "Sysctl support"
69011+ default y if GRKERNSEC_CONFIG_AUTO
69012+ help
69013+ If you say Y here, you will be able to change the options that
69014+ grsecurity runs with at bootup, without having to recompile your
69015+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69016+ to enable (1) or disable (0) various features. All the sysctl entries
69017+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69018+ All features enabled in the kernel configuration are disabled at boot
69019+ if you do not say Y to the "Turn on features by default" option.
69020+ All options should be set at startup, and the grsec_lock entry should
69021+ be set to a non-zero value after all the options are set.
69022+ *THIS IS EXTREMELY IMPORTANT*
69023+
69024+config GRKERNSEC_SYSCTL_DISTRO
69025+ bool "Extra sysctl support for distro makers (READ HELP)"
69026+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69027+ help
69028+ If you say Y here, additional sysctl options will be created
69029+ for features that affect processes running as root. Therefore,
69030+ it is critical when using this option that the grsec_lock entry be
69031+ enabled after boot. Only distros with prebuilt kernel packages
69032+ with this option enabled that can ensure grsec_lock is enabled
69033+ after boot should use this option.
69034+ *Failure to set grsec_lock after boot makes all grsec features
69035+ this option covers useless*
69036+
69037+ Currently this option creates the following sysctl entries:
69038+ "Disable Privileged I/O": "disable_priv_io"
69039+
69040+config GRKERNSEC_SYSCTL_ON
69041+ bool "Turn on features by default"
69042+ default y if GRKERNSEC_CONFIG_AUTO
69043+ depends on GRKERNSEC_SYSCTL
69044+ help
69045+ If you say Y here, instead of having all features enabled in the
69046+ kernel configuration disabled at boot time, the features will be
69047+ enabled at boot time. It is recommended you say Y here unless
69048+ there is some reason you would want all sysctl-tunable features to
69049+ be disabled by default. As mentioned elsewhere, it is important
69050+ to enable the grsec_lock entry once you have finished modifying
69051+ the sysctl entries.
69052+
69053+endmenu
69054+menu "Logging Options"
69055+depends on GRKERNSEC
69056+
69057+config GRKERNSEC_FLOODTIME
69058+ int "Seconds in between log messages (minimum)"
69059+ default 10
69060+ help
69061+ This option allows you to enforce the number of seconds between
69062+ grsecurity log messages. The default should be suitable for most
69063+ people, however, if you choose to change it, choose a value small enough
69064+ to allow informative logs to be produced, but large enough to
69065+ prevent flooding.
69066+
69067+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69068+ any rate limiting on grsecurity log messages.
69069+
69070+config GRKERNSEC_FLOODBURST
69071+ int "Number of messages in a burst (maximum)"
69072+ default 6
69073+ help
69074+ This option allows you to choose the maximum number of messages allowed
69075+ within the flood time interval you chose in a separate option. The
69076+ default should be suitable for most people, however if you find that
69077+ many of your logs are being interpreted as flooding, you may want to
69078+ raise this value.
69079+
69080+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69081+ any rate limiting on grsecurity log messages.
69082+
69083+endmenu
69084diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69085new file mode 100644
69086index 0000000..30ababb
69087--- /dev/null
69088+++ b/grsecurity/Makefile
69089@@ -0,0 +1,54 @@
69090+# grsecurity – access control and security hardening for Linux
69091+# All code in this directory and various hooks located throughout the Linux kernel are
69092+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69093+# http://www.grsecurity.net spender@grsecurity.net
69094+#
69095+# This program is free software; you can redistribute it and/or
69096+# modify it under the terms of the GNU General Public License version 2
69097+# as published by the Free Software Foundation.
69098+#
69099+# This program is distributed in the hope that it will be useful,
69100+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69101+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69102+# GNU General Public License for more details.
69103+#
69104+# You should have received a copy of the GNU General Public License
69105+# along with this program; if not, write to the Free Software
69106+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69107+
69108+KBUILD_CFLAGS += -Werror
69109+
69110+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69111+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69112+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69113+ grsec_usb.o grsec_ipc.o grsec_proc.o
69114+
69115+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69116+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69117+ gracl_learn.o grsec_log.o gracl_policy.o
69118+ifdef CONFIG_COMPAT
69119+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69120+endif
69121+
69122+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69123+
69124+ifdef CONFIG_NET
69125+obj-y += grsec_sock.o
69126+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69127+endif
69128+
69129+ifndef CONFIG_GRKERNSEC
69130+obj-y += grsec_disabled.o
69131+endif
69132+
69133+ifdef CONFIG_GRKERNSEC_HIDESYM
69134+extra-y := grsec_hidesym.o
69135+$(obj)/grsec_hidesym.o:
69136+ @-chmod -f 500 /boot
69137+ @-chmod -f 500 /lib/modules
69138+ @-chmod -f 500 /lib64/modules
69139+ @-chmod -f 500 /lib32/modules
69140+ @-chmod -f 700 .
69141+ @-chmod -f 700 $(objtree)
69142+ @echo ' grsec: protected kernel image paths'
69143+endif
69144diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69145new file mode 100644
69146index 0000000..6c1e154
69147--- /dev/null
69148+++ b/grsecurity/gracl.c
69149@@ -0,0 +1,2749 @@
69150+#include <linux/kernel.h>
69151+#include <linux/module.h>
69152+#include <linux/sched.h>
69153+#include <linux/mm.h>
69154+#include <linux/file.h>
69155+#include <linux/fs.h>
69156+#include <linux/namei.h>
69157+#include <linux/mount.h>
69158+#include <linux/tty.h>
69159+#include <linux/proc_fs.h>
69160+#include <linux/lglock.h>
69161+#include <linux/slab.h>
69162+#include <linux/vmalloc.h>
69163+#include <linux/types.h>
69164+#include <linux/sysctl.h>
69165+#include <linux/netdevice.h>
69166+#include <linux/ptrace.h>
69167+#include <linux/gracl.h>
69168+#include <linux/gralloc.h>
69169+#include <linux/security.h>
69170+#include <linux/grinternal.h>
69171+#include <linux/pid_namespace.h>
69172+#include <linux/stop_machine.h>
69173+#include <linux/fdtable.h>
69174+#include <linux/percpu.h>
69175+#include <linux/lglock.h>
69176+#include <linux/hugetlb.h>
69177+#include <linux/posix-timers.h>
69178+#include <linux/prefetch.h>
69179+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69180+#include <linux/magic.h>
69181+#include <linux/pagemap.h>
69182+#include "../fs/btrfs/async-thread.h"
69183+#include "../fs/btrfs/ctree.h"
69184+#include "../fs/btrfs/btrfs_inode.h"
69185+#endif
69186+#include "../fs/mount.h"
69187+
69188+#include <asm/uaccess.h>
69189+#include <asm/errno.h>
69190+#include <asm/mman.h>
69191+
69192+#define FOR_EACH_ROLE_START(role) \
69193+ role = running_polstate.role_list; \
69194+ while (role) {
69195+
69196+#define FOR_EACH_ROLE_END(role) \
69197+ role = role->prev; \
69198+ }
69199+
69200+extern struct path gr_real_root;
69201+
69202+static struct gr_policy_state running_polstate;
69203+struct gr_policy_state *polstate = &running_polstate;
69204+extern struct gr_alloc_state *current_alloc_state;
69205+
69206+extern char *gr_shared_page[4];
69207+DEFINE_RWLOCK(gr_inode_lock);
69208+
69209+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69210+
69211+#ifdef CONFIG_NET
69212+extern struct vfsmount *sock_mnt;
69213+#endif
69214+
69215+extern struct vfsmount *pipe_mnt;
69216+extern struct vfsmount *shm_mnt;
69217+
69218+#ifdef CONFIG_HUGETLBFS
69219+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69220+#endif
69221+
69222+extern u16 acl_sp_role_value;
69223+extern struct acl_object_label *fakefs_obj_rw;
69224+extern struct acl_object_label *fakefs_obj_rwx;
69225+
69226+int gr_acl_is_enabled(void)
69227+{
69228+ return (gr_status & GR_READY);
69229+}
69230+
69231+void gr_enable_rbac_system(void)
69232+{
69233+ pax_open_kernel();
69234+ gr_status |= GR_READY;
69235+ pax_close_kernel();
69236+}
69237+
69238+int gr_rbac_disable(void *unused)
69239+{
69240+ pax_open_kernel();
69241+ gr_status &= ~GR_READY;
69242+ pax_close_kernel();
69243+
69244+ return 0;
69245+}
69246+
69247+static inline dev_t __get_dev(const struct dentry *dentry)
69248+{
69249+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69250+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69251+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69252+ else
69253+#endif
69254+ return dentry->d_sb->s_dev;
69255+}
69256+
69257+static inline u64 __get_ino(const struct dentry *dentry)
69258+{
69259+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69260+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69261+ return btrfs_ino(dentry->d_inode);
69262+ else
69263+#endif
69264+ return dentry->d_inode->i_ino;
69265+}
69266+
69267+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69268+{
69269+ return __get_dev(dentry);
69270+}
69271+
69272+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69273+{
69274+ return __get_ino(dentry);
69275+}
69276+
69277+static char gr_task_roletype_to_char(struct task_struct *task)
69278+{
69279+ switch (task->role->roletype &
69280+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69281+ GR_ROLE_SPECIAL)) {
69282+ case GR_ROLE_DEFAULT:
69283+ return 'D';
69284+ case GR_ROLE_USER:
69285+ return 'U';
69286+ case GR_ROLE_GROUP:
69287+ return 'G';
69288+ case GR_ROLE_SPECIAL:
69289+ return 'S';
69290+ }
69291+
69292+ return 'X';
69293+}
69294+
69295+char gr_roletype_to_char(void)
69296+{
69297+ return gr_task_roletype_to_char(current);
69298+}
69299+
69300+__inline__ int
69301+gr_acl_tpe_check(void)
69302+{
69303+ if (unlikely(!(gr_status & GR_READY)))
69304+ return 0;
69305+ if (current->role->roletype & GR_ROLE_TPE)
69306+ return 1;
69307+ else
69308+ return 0;
69309+}
69310+
69311+int
69312+gr_handle_rawio(const struct inode *inode)
69313+{
69314+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69315+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69316+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69317+ !capable(CAP_SYS_RAWIO))
69318+ return 1;
69319+#endif
69320+ return 0;
69321+}
69322+
69323+int
69324+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69325+{
69326+ if (likely(lena != lenb))
69327+ return 0;
69328+
69329+ return !memcmp(a, b, lena);
69330+}
69331+
69332+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69333+{
69334+ *buflen -= namelen;
69335+ if (*buflen < 0)
69336+ return -ENAMETOOLONG;
69337+ *buffer -= namelen;
69338+ memcpy(*buffer, str, namelen);
69339+ return 0;
69340+}
69341+
69342+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69343+{
69344+ return prepend(buffer, buflen, name->name, name->len);
69345+}
69346+
69347+static int prepend_path(const struct path *path, struct path *root,
69348+ char **buffer, int *buflen)
69349+{
69350+ struct dentry *dentry = path->dentry;
69351+ struct vfsmount *vfsmnt = path->mnt;
69352+ struct mount *mnt = real_mount(vfsmnt);
69353+ bool slash = false;
69354+ int error = 0;
69355+
69356+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69357+ struct dentry * parent;
69358+
69359+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69360+ /* Global root? */
69361+ if (!mnt_has_parent(mnt)) {
69362+ goto out;
69363+ }
69364+ dentry = mnt->mnt_mountpoint;
69365+ mnt = mnt->mnt_parent;
69366+ vfsmnt = &mnt->mnt;
69367+ continue;
69368+ }
69369+ parent = dentry->d_parent;
69370+ prefetch(parent);
69371+ spin_lock(&dentry->d_lock);
69372+ error = prepend_name(buffer, buflen, &dentry->d_name);
69373+ spin_unlock(&dentry->d_lock);
69374+ if (!error)
69375+ error = prepend(buffer, buflen, "/", 1);
69376+ if (error)
69377+ break;
69378+
69379+ slash = true;
69380+ dentry = parent;
69381+ }
69382+
69383+out:
69384+ if (!error && !slash)
69385+ error = prepend(buffer, buflen, "/", 1);
69386+
69387+ return error;
69388+}
69389+
69390+/* this must be called with mount_lock and rename_lock held */
69391+
69392+static char *__our_d_path(const struct path *path, struct path *root,
69393+ char *buf, int buflen)
69394+{
69395+ char *res = buf + buflen;
69396+ int error;
69397+
69398+ prepend(&res, &buflen, "\0", 1);
69399+ error = prepend_path(path, root, &res, &buflen);
69400+ if (error)
69401+ return ERR_PTR(error);
69402+
69403+ return res;
69404+}
69405+
69406+static char *
69407+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69408+{
69409+ char *retval;
69410+
69411+ retval = __our_d_path(path, root, buf, buflen);
69412+ if (unlikely(IS_ERR(retval)))
69413+ retval = strcpy(buf, "<path too long>");
69414+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69415+ retval[1] = '\0';
69416+
69417+ return retval;
69418+}
69419+
69420+static char *
69421+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69422+ char *buf, int buflen)
69423+{
69424+ struct path path;
69425+ char *res;
69426+
69427+ path.dentry = (struct dentry *)dentry;
69428+ path.mnt = (struct vfsmount *)vfsmnt;
69429+
69430+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69431+ by the RBAC system */
69432+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69433+
69434+ return res;
69435+}
69436+
69437+static char *
69438+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69439+ char *buf, int buflen)
69440+{
69441+ char *res;
69442+ struct path path;
69443+ struct path root;
69444+ struct task_struct *reaper = init_pid_ns.child_reaper;
69445+
69446+ path.dentry = (struct dentry *)dentry;
69447+ path.mnt = (struct vfsmount *)vfsmnt;
69448+
69449+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69450+ get_fs_root(reaper->fs, &root);
69451+
69452+ read_seqlock_excl(&mount_lock);
69453+ write_seqlock(&rename_lock);
69454+ res = gen_full_path(&path, &root, buf, buflen);
69455+ write_sequnlock(&rename_lock);
69456+ read_sequnlock_excl(&mount_lock);
69457+
69458+ path_put(&root);
69459+ return res;
69460+}
69461+
69462+char *
69463+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69464+{
69465+ char *ret;
69466+ read_seqlock_excl(&mount_lock);
69467+ write_seqlock(&rename_lock);
69468+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69469+ PAGE_SIZE);
69470+ write_sequnlock(&rename_lock);
69471+ read_sequnlock_excl(&mount_lock);
69472+ return ret;
69473+}
69474+
69475+static char *
69476+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69477+{
69478+ char *ret;
69479+ char *buf;
69480+ int buflen;
69481+
69482+ read_seqlock_excl(&mount_lock);
69483+ write_seqlock(&rename_lock);
69484+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69485+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69486+ buflen = (int)(ret - buf);
69487+ if (buflen >= 5)
69488+ prepend(&ret, &buflen, "/proc", 5);
69489+ else
69490+ ret = strcpy(buf, "<path too long>");
69491+ write_sequnlock(&rename_lock);
69492+ read_sequnlock_excl(&mount_lock);
69493+ return ret;
69494+}
69495+
69496+char *
69497+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69498+{
69499+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69500+ PAGE_SIZE);
69501+}
69502+
69503+char *
69504+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69505+{
69506+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69507+ PAGE_SIZE);
69508+}
69509+
69510+char *
69511+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69512+{
69513+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69514+ PAGE_SIZE);
69515+}
69516+
69517+char *
69518+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69519+{
69520+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69521+ PAGE_SIZE);
69522+}
69523+
69524+char *
69525+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69526+{
69527+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69528+ PAGE_SIZE);
69529+}
69530+
69531+__inline__ __u32
69532+to_gr_audit(const __u32 reqmode)
69533+{
69534+ /* masks off auditable permission flags, then shifts them to create
69535+ auditing flags, and adds the special case of append auditing if
69536+ we're requesting write */
69537+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69538+}
69539+
69540+struct acl_role_label *
69541+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69542+ const gid_t gid)
69543+{
69544+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69545+ struct acl_role_label *match;
69546+ struct role_allowed_ip *ipp;
69547+ unsigned int x;
69548+ u32 curr_ip = task->signal->saved_ip;
69549+
69550+ match = state->acl_role_set.r_hash[index];
69551+
69552+ while (match) {
69553+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69554+ for (x = 0; x < match->domain_child_num; x++) {
69555+ if (match->domain_children[x] == uid)
69556+ goto found;
69557+ }
69558+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69559+ break;
69560+ match = match->next;
69561+ }
69562+found:
69563+ if (match == NULL) {
69564+ try_group:
69565+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69566+ match = state->acl_role_set.r_hash[index];
69567+
69568+ while (match) {
69569+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69570+ for (x = 0; x < match->domain_child_num; x++) {
69571+ if (match->domain_children[x] == gid)
69572+ goto found2;
69573+ }
69574+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69575+ break;
69576+ match = match->next;
69577+ }
69578+found2:
69579+ if (match == NULL)
69580+ match = state->default_role;
69581+ if (match->allowed_ips == NULL)
69582+ return match;
69583+ else {
69584+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69585+ if (likely
69586+ ((ntohl(curr_ip) & ipp->netmask) ==
69587+ (ntohl(ipp->addr) & ipp->netmask)))
69588+ return match;
69589+ }
69590+ match = state->default_role;
69591+ }
69592+ } else if (match->allowed_ips == NULL) {
69593+ return match;
69594+ } else {
69595+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69596+ if (likely
69597+ ((ntohl(curr_ip) & ipp->netmask) ==
69598+ (ntohl(ipp->addr) & ipp->netmask)))
69599+ return match;
69600+ }
69601+ goto try_group;
69602+ }
69603+
69604+ return match;
69605+}
69606+
69607+static struct acl_role_label *
69608+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69609+ const gid_t gid)
69610+{
69611+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69612+}
69613+
69614+struct acl_subject_label *
69615+lookup_acl_subj_label(const u64 ino, const dev_t dev,
69616+ const struct acl_role_label *role)
69617+{
69618+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69619+ struct acl_subject_label *match;
69620+
69621+ match = role->subj_hash[index];
69622+
69623+ while (match && (match->inode != ino || match->device != dev ||
69624+ (match->mode & GR_DELETED))) {
69625+ match = match->next;
69626+ }
69627+
69628+ if (match && !(match->mode & GR_DELETED))
69629+ return match;
69630+ else
69631+ return NULL;
69632+}
69633+
69634+struct acl_subject_label *
69635+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
69636+ const struct acl_role_label *role)
69637+{
69638+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69639+ struct acl_subject_label *match;
69640+
69641+ match = role->subj_hash[index];
69642+
69643+ while (match && (match->inode != ino || match->device != dev ||
69644+ !(match->mode & GR_DELETED))) {
69645+ match = match->next;
69646+ }
69647+
69648+ if (match && (match->mode & GR_DELETED))
69649+ return match;
69650+ else
69651+ return NULL;
69652+}
69653+
69654+static struct acl_object_label *
69655+lookup_acl_obj_label(const u64 ino, const dev_t dev,
69656+ const struct acl_subject_label *subj)
69657+{
69658+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69659+ struct acl_object_label *match;
69660+
69661+ match = subj->obj_hash[index];
69662+
69663+ while (match && (match->inode != ino || match->device != dev ||
69664+ (match->mode & GR_DELETED))) {
69665+ match = match->next;
69666+ }
69667+
69668+ if (match && !(match->mode & GR_DELETED))
69669+ return match;
69670+ else
69671+ return NULL;
69672+}
69673+
69674+static struct acl_object_label *
69675+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
69676+ const struct acl_subject_label *subj)
69677+{
69678+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69679+ struct acl_object_label *match;
69680+
69681+ match = subj->obj_hash[index];
69682+
69683+ while (match && (match->inode != ino || match->device != dev ||
69684+ !(match->mode & GR_DELETED))) {
69685+ match = match->next;
69686+ }
69687+
69688+ if (match && (match->mode & GR_DELETED))
69689+ return match;
69690+
69691+ match = subj->obj_hash[index];
69692+
69693+ while (match && (match->inode != ino || match->device != dev ||
69694+ (match->mode & GR_DELETED))) {
69695+ match = match->next;
69696+ }
69697+
69698+ if (match && !(match->mode & GR_DELETED))
69699+ return match;
69700+ else
69701+ return NULL;
69702+}
69703+
69704+struct name_entry *
69705+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
69706+{
69707+ unsigned int len = strlen(name);
69708+ unsigned int key = full_name_hash(name, len);
69709+ unsigned int index = key % state->name_set.n_size;
69710+ struct name_entry *match;
69711+
69712+ match = state->name_set.n_hash[index];
69713+
69714+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
69715+ match = match->next;
69716+
69717+ return match;
69718+}
69719+
69720+static struct name_entry *
69721+lookup_name_entry(const char *name)
69722+{
69723+ return __lookup_name_entry(&running_polstate, name);
69724+}
69725+
69726+static struct name_entry *
69727+lookup_name_entry_create(const char *name)
69728+{
69729+ unsigned int len = strlen(name);
69730+ unsigned int key = full_name_hash(name, len);
69731+ unsigned int index = key % running_polstate.name_set.n_size;
69732+ struct name_entry *match;
69733+
69734+ match = running_polstate.name_set.n_hash[index];
69735+
69736+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69737+ !match->deleted))
69738+ match = match->next;
69739+
69740+ if (match && match->deleted)
69741+ return match;
69742+
69743+ match = running_polstate.name_set.n_hash[index];
69744+
69745+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69746+ match->deleted))
69747+ match = match->next;
69748+
69749+ if (match && !match->deleted)
69750+ return match;
69751+ else
69752+ return NULL;
69753+}
69754+
69755+static struct inodev_entry *
69756+lookup_inodev_entry(const u64 ino, const dev_t dev)
69757+{
69758+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
69759+ struct inodev_entry *match;
69760+
69761+ match = running_polstate.inodev_set.i_hash[index];
69762+
69763+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
69764+ match = match->next;
69765+
69766+ return match;
69767+}
69768+
69769+void
69770+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
69771+{
69772+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
69773+ state->inodev_set.i_size);
69774+ struct inodev_entry **curr;
69775+
69776+ entry->prev = NULL;
69777+
69778+ curr = &state->inodev_set.i_hash[index];
69779+ if (*curr != NULL)
69780+ (*curr)->prev = entry;
69781+
69782+ entry->next = *curr;
69783+ *curr = entry;
69784+
69785+ return;
69786+}
69787+
69788+static void
69789+insert_inodev_entry(struct inodev_entry *entry)
69790+{
69791+ __insert_inodev_entry(&running_polstate, entry);
69792+}
69793+
69794+void
69795+insert_acl_obj_label(struct acl_object_label *obj,
69796+ struct acl_subject_label *subj)
69797+{
69798+ unsigned int index =
69799+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
69800+ struct acl_object_label **curr;
69801+
69802+ obj->prev = NULL;
69803+
69804+ curr = &subj->obj_hash[index];
69805+ if (*curr != NULL)
69806+ (*curr)->prev = obj;
69807+
69808+ obj->next = *curr;
69809+ *curr = obj;
69810+
69811+ return;
69812+}
69813+
69814+void
69815+insert_acl_subj_label(struct acl_subject_label *obj,
69816+ struct acl_role_label *role)
69817+{
69818+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
69819+ struct acl_subject_label **curr;
69820+
69821+ obj->prev = NULL;
69822+
69823+ curr = &role->subj_hash[index];
69824+ if (*curr != NULL)
69825+ (*curr)->prev = obj;
69826+
69827+ obj->next = *curr;
69828+ *curr = obj;
69829+
69830+ return;
69831+}
69832+
69833+/* derived from glibc fnmatch() 0: match, 1: no match*/
69834+
69835+static int
69836+glob_match(const char *p, const char *n)
69837+{
69838+ char c;
69839+
69840+ while ((c = *p++) != '\0') {
69841+ switch (c) {
69842+ case '?':
69843+ if (*n == '\0')
69844+ return 1;
69845+ else if (*n == '/')
69846+ return 1;
69847+ break;
69848+ case '\\':
69849+ if (*n != c)
69850+ return 1;
69851+ break;
69852+ case '*':
69853+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
69854+ if (*n == '/')
69855+ return 1;
69856+ else if (c == '?') {
69857+ if (*n == '\0')
69858+ return 1;
69859+ else
69860+ ++n;
69861+ }
69862+ }
69863+ if (c == '\0') {
69864+ return 0;
69865+ } else {
69866+ const char *endp;
69867+
69868+ if ((endp = strchr(n, '/')) == NULL)
69869+ endp = n + strlen(n);
69870+
69871+ if (c == '[') {
69872+ for (--p; n < endp; ++n)
69873+ if (!glob_match(p, n))
69874+ return 0;
69875+ } else if (c == '/') {
69876+ while (*n != '\0' && *n != '/')
69877+ ++n;
69878+ if (*n == '/' && !glob_match(p, n + 1))
69879+ return 0;
69880+ } else {
69881+ for (--p; n < endp; ++n)
69882+ if (*n == c && !glob_match(p, n))
69883+ return 0;
69884+ }
69885+
69886+ return 1;
69887+ }
69888+ case '[':
69889+ {
69890+ int not;
69891+ char cold;
69892+
69893+ if (*n == '\0' || *n == '/')
69894+ return 1;
69895+
69896+ not = (*p == '!' || *p == '^');
69897+ if (not)
69898+ ++p;
69899+
69900+ c = *p++;
69901+ for (;;) {
69902+ unsigned char fn = (unsigned char)*n;
69903+
69904+ if (c == '\0')
69905+ return 1;
69906+ else {
69907+ if (c == fn)
69908+ goto matched;
69909+ cold = c;
69910+ c = *p++;
69911+
69912+ if (c == '-' && *p != ']') {
69913+ unsigned char cend = *p++;
69914+
69915+ if (cend == '\0')
69916+ return 1;
69917+
69918+ if (cold <= fn && fn <= cend)
69919+ goto matched;
69920+
69921+ c = *p++;
69922+ }
69923+ }
69924+
69925+ if (c == ']')
69926+ break;
69927+ }
69928+ if (!not)
69929+ return 1;
69930+ break;
69931+ matched:
69932+ while (c != ']') {
69933+ if (c == '\0')
69934+ return 1;
69935+
69936+ c = *p++;
69937+ }
69938+ if (not)
69939+ return 1;
69940+ }
69941+ break;
69942+ default:
69943+ if (c != *n)
69944+ return 1;
69945+ }
69946+
69947+ ++n;
69948+ }
69949+
69950+ if (*n == '\0')
69951+ return 0;
69952+
69953+ if (*n == '/')
69954+ return 0;
69955+
69956+ return 1;
69957+}
69958+
69959+static struct acl_object_label *
69960+chk_glob_label(struct acl_object_label *globbed,
69961+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
69962+{
69963+ struct acl_object_label *tmp;
69964+
69965+ if (*path == NULL)
69966+ *path = gr_to_filename_nolock(dentry, mnt);
69967+
69968+ tmp = globbed;
69969+
69970+ while (tmp) {
69971+ if (!glob_match(tmp->filename, *path))
69972+ return tmp;
69973+ tmp = tmp->next;
69974+ }
69975+
69976+ return NULL;
69977+}
69978+
69979+static struct acl_object_label *
69980+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
69981+ const u64 curr_ino, const dev_t curr_dev,
69982+ const struct acl_subject_label *subj, char **path, const int checkglob)
69983+{
69984+ struct acl_subject_label *tmpsubj;
69985+ struct acl_object_label *retval;
69986+ struct acl_object_label *retval2;
69987+
69988+ tmpsubj = (struct acl_subject_label *) subj;
69989+ read_lock(&gr_inode_lock);
69990+ do {
69991+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
69992+ if (retval) {
69993+ if (checkglob && retval->globbed) {
69994+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
69995+ if (retval2)
69996+ retval = retval2;
69997+ }
69998+ break;
69999+ }
70000+ } while ((tmpsubj = tmpsubj->parent_subject));
70001+ read_unlock(&gr_inode_lock);
70002+
70003+ return retval;
70004+}
70005+
70006+static __inline__ struct acl_object_label *
70007+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70008+ struct dentry *curr_dentry,
70009+ const struct acl_subject_label *subj, char **path, const int checkglob)
70010+{
70011+ int newglob = checkglob;
70012+ u64 inode;
70013+ dev_t device;
70014+
70015+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70016+ as we don't want a / * rule to match instead of the / object
70017+ don't do this for create lookups that call this function though, since they're looking up
70018+ on the parent and thus need globbing checks on all paths
70019+ */
70020+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70021+ newglob = GR_NO_GLOB;
70022+
70023+ spin_lock(&curr_dentry->d_lock);
70024+ inode = __get_ino(curr_dentry);
70025+ device = __get_dev(curr_dentry);
70026+ spin_unlock(&curr_dentry->d_lock);
70027+
70028+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70029+}
70030+
70031+#ifdef CONFIG_HUGETLBFS
70032+static inline bool
70033+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70034+{
70035+ int i;
70036+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70037+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70038+ return true;
70039+ }
70040+
70041+ return false;
70042+}
70043+#endif
70044+
70045+static struct acl_object_label *
70046+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70047+ const struct acl_subject_label *subj, char *path, const int checkglob)
70048+{
70049+ struct dentry *dentry = (struct dentry *) l_dentry;
70050+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70051+ struct mount *real_mnt = real_mount(mnt);
70052+ struct acl_object_label *retval;
70053+ struct dentry *parent;
70054+
70055+ read_seqlock_excl(&mount_lock);
70056+ write_seqlock(&rename_lock);
70057+
70058+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70059+#ifdef CONFIG_NET
70060+ mnt == sock_mnt ||
70061+#endif
70062+#ifdef CONFIG_HUGETLBFS
70063+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70064+#endif
70065+ /* ignore Eric Biederman */
70066+ IS_PRIVATE(l_dentry->d_inode))) {
70067+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70068+ goto out;
70069+ }
70070+
70071+ for (;;) {
70072+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70073+ break;
70074+
70075+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70076+ if (!mnt_has_parent(real_mnt))
70077+ break;
70078+
70079+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70080+ if (retval != NULL)
70081+ goto out;
70082+
70083+ dentry = real_mnt->mnt_mountpoint;
70084+ real_mnt = real_mnt->mnt_parent;
70085+ mnt = &real_mnt->mnt;
70086+ continue;
70087+ }
70088+
70089+ parent = dentry->d_parent;
70090+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70091+ if (retval != NULL)
70092+ goto out;
70093+
70094+ dentry = parent;
70095+ }
70096+
70097+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70098+
70099+ /* gr_real_root is pinned so we don't have to hold a reference */
70100+ if (retval == NULL)
70101+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70102+out:
70103+ write_sequnlock(&rename_lock);
70104+ read_sequnlock_excl(&mount_lock);
70105+
70106+ BUG_ON(retval == NULL);
70107+
70108+ return retval;
70109+}
70110+
70111+static __inline__ struct acl_object_label *
70112+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70113+ const struct acl_subject_label *subj)
70114+{
70115+ char *path = NULL;
70116+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70117+}
70118+
70119+static __inline__ struct acl_object_label *
70120+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70121+ const struct acl_subject_label *subj)
70122+{
70123+ char *path = NULL;
70124+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70125+}
70126+
70127+static __inline__ struct acl_object_label *
70128+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70129+ const struct acl_subject_label *subj, char *path)
70130+{
70131+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70132+}
70133+
70134+struct acl_subject_label *
70135+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70136+ const struct acl_role_label *role)
70137+{
70138+ struct dentry *dentry = (struct dentry *) l_dentry;
70139+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70140+ struct mount *real_mnt = real_mount(mnt);
70141+ struct acl_subject_label *retval;
70142+ struct dentry *parent;
70143+
70144+ read_seqlock_excl(&mount_lock);
70145+ write_seqlock(&rename_lock);
70146+
70147+ for (;;) {
70148+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70149+ break;
70150+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70151+ if (!mnt_has_parent(real_mnt))
70152+ break;
70153+
70154+ spin_lock(&dentry->d_lock);
70155+ read_lock(&gr_inode_lock);
70156+ retval =
70157+ lookup_acl_subj_label(__get_ino(dentry),
70158+ __get_dev(dentry), role);
70159+ read_unlock(&gr_inode_lock);
70160+ spin_unlock(&dentry->d_lock);
70161+ if (retval != NULL)
70162+ goto out;
70163+
70164+ dentry = real_mnt->mnt_mountpoint;
70165+ real_mnt = real_mnt->mnt_parent;
70166+ mnt = &real_mnt->mnt;
70167+ continue;
70168+ }
70169+
70170+ spin_lock(&dentry->d_lock);
70171+ read_lock(&gr_inode_lock);
70172+ retval = lookup_acl_subj_label(__get_ino(dentry),
70173+ __get_dev(dentry), role);
70174+ read_unlock(&gr_inode_lock);
70175+ parent = dentry->d_parent;
70176+ spin_unlock(&dentry->d_lock);
70177+
70178+ if (retval != NULL)
70179+ goto out;
70180+
70181+ dentry = parent;
70182+ }
70183+
70184+ spin_lock(&dentry->d_lock);
70185+ read_lock(&gr_inode_lock);
70186+ retval = lookup_acl_subj_label(__get_ino(dentry),
70187+ __get_dev(dentry), role);
70188+ read_unlock(&gr_inode_lock);
70189+ spin_unlock(&dentry->d_lock);
70190+
70191+ if (unlikely(retval == NULL)) {
70192+ /* gr_real_root is pinned, we don't need to hold a reference */
70193+ read_lock(&gr_inode_lock);
70194+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70195+ __get_dev(gr_real_root.dentry), role);
70196+ read_unlock(&gr_inode_lock);
70197+ }
70198+out:
70199+ write_sequnlock(&rename_lock);
70200+ read_sequnlock_excl(&mount_lock);
70201+
70202+ BUG_ON(retval == NULL);
70203+
70204+ return retval;
70205+}
70206+
70207+void
70208+assign_special_role(const char *rolename)
70209+{
70210+ struct acl_object_label *obj;
70211+ struct acl_role_label *r;
70212+ struct acl_role_label *assigned = NULL;
70213+ struct task_struct *tsk;
70214+ struct file *filp;
70215+
70216+ FOR_EACH_ROLE_START(r)
70217+ if (!strcmp(rolename, r->rolename) &&
70218+ (r->roletype & GR_ROLE_SPECIAL)) {
70219+ assigned = r;
70220+ break;
70221+ }
70222+ FOR_EACH_ROLE_END(r)
70223+
70224+ if (!assigned)
70225+ return;
70226+
70227+ read_lock(&tasklist_lock);
70228+ read_lock(&grsec_exec_file_lock);
70229+
70230+ tsk = current->real_parent;
70231+ if (tsk == NULL)
70232+ goto out_unlock;
70233+
70234+ filp = tsk->exec_file;
70235+ if (filp == NULL)
70236+ goto out_unlock;
70237+
70238+ tsk->is_writable = 0;
70239+ tsk->inherited = 0;
70240+
70241+ tsk->acl_sp_role = 1;
70242+ tsk->acl_role_id = ++acl_sp_role_value;
70243+ tsk->role = assigned;
70244+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70245+
70246+ /* ignore additional mmap checks for processes that are writable
70247+ by the default ACL */
70248+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70249+ if (unlikely(obj->mode & GR_WRITE))
70250+ tsk->is_writable = 1;
70251+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70252+ if (unlikely(obj->mode & GR_WRITE))
70253+ tsk->is_writable = 1;
70254+
70255+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70256+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70257+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70258+#endif
70259+
70260+out_unlock:
70261+ read_unlock(&grsec_exec_file_lock);
70262+ read_unlock(&tasklist_lock);
70263+ return;
70264+}
70265+
70266+
70267+static void
70268+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70269+{
70270+ struct task_struct *task = current;
70271+ const struct cred *cred = current_cred();
70272+
70273+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70274+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70275+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70276+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70277+
70278+ return;
70279+}
70280+
70281+static void
70282+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70283+{
70284+ struct task_struct *task = current;
70285+ const struct cred *cred = current_cred();
70286+
70287+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70288+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70289+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70290+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70291+
70292+ return;
70293+}
70294+
70295+static void
70296+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70297+{
70298+ struct task_struct *task = current;
70299+ const struct cred *cred = current_cred();
70300+
70301+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70302+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70303+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70304+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70305+
70306+ return;
70307+}
70308+
70309+static void
70310+gr_set_proc_res(struct task_struct *task)
70311+{
70312+ struct acl_subject_label *proc;
70313+ unsigned short i;
70314+
70315+ proc = task->acl;
70316+
70317+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70318+ return;
70319+
70320+ for (i = 0; i < RLIM_NLIMITS; i++) {
70321+ unsigned long rlim_cur, rlim_max;
70322+
70323+ if (!(proc->resmask & (1U << i)))
70324+ continue;
70325+
70326+ rlim_cur = proc->res[i].rlim_cur;
70327+ rlim_max = proc->res[i].rlim_max;
70328+
70329+ if (i == RLIMIT_NOFILE) {
70330+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70331+ if (rlim_cur > saved_sysctl_nr_open)
70332+ rlim_cur = saved_sysctl_nr_open;
70333+ if (rlim_max > saved_sysctl_nr_open)
70334+ rlim_max = saved_sysctl_nr_open;
70335+ }
70336+
70337+ task->signal->rlim[i].rlim_cur = rlim_cur;
70338+ task->signal->rlim[i].rlim_max = rlim_max;
70339+
70340+ if (i == RLIMIT_CPU)
70341+ update_rlimit_cpu(task, rlim_cur);
70342+ }
70343+
70344+ return;
70345+}
70346+
70347+/* both of the below must be called with
70348+ rcu_read_lock();
70349+ read_lock(&tasklist_lock);
70350+ read_lock(&grsec_exec_file_lock);
70351+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
70352+*/
70353+
70354+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70355+{
70356+ char *tmpname;
70357+ struct acl_subject_label *tmpsubj;
70358+ struct file *filp;
70359+ struct name_entry *nmatch;
70360+
70361+ filp = task->exec_file;
70362+ if (filp == NULL)
70363+ return NULL;
70364+
70365+ /* the following is to apply the correct subject
70366+ on binaries running when the RBAC system
70367+ is enabled, when the binaries have been
70368+ replaced or deleted since their execution
70369+ -----
70370+ when the RBAC system starts, the inode/dev
70371+ from exec_file will be one the RBAC system
70372+ is unaware of. It only knows the inode/dev
70373+ of the present file on disk, or the absence
70374+ of it.
70375+ */
70376+
70377+ if (filename)
70378+ nmatch = __lookup_name_entry(state, filename);
70379+ else {
70380+ preempt_disable();
70381+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70382+
70383+ nmatch = __lookup_name_entry(state, tmpname);
70384+ preempt_enable();
70385+ }
70386+ tmpsubj = NULL;
70387+ if (nmatch) {
70388+ if (nmatch->deleted)
70389+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70390+ else
70391+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70392+ }
70393+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70394+ then we fall back to a normal lookup based on the binary's ino/dev
70395+ */
70396+ if (tmpsubj == NULL && fallback)
70397+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70398+
70399+ return tmpsubj;
70400+}
70401+
70402+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
70403+{
70404+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
70405+}
70406+
70407+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70408+{
70409+ struct acl_object_label *obj;
70410+ struct file *filp;
70411+
70412+ filp = task->exec_file;
70413+
70414+ task->acl = subj;
70415+ task->is_writable = 0;
70416+ /* ignore additional mmap checks for processes that are writable
70417+ by the default ACL */
70418+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70419+ if (unlikely(obj->mode & GR_WRITE))
70420+ task->is_writable = 1;
70421+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70422+ if (unlikely(obj->mode & GR_WRITE))
70423+ task->is_writable = 1;
70424+
70425+ gr_set_proc_res(task);
70426+
70427+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70428+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70429+#endif
70430+}
70431+
70432+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70433+{
70434+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70435+}
70436+
70437+__u32
70438+gr_search_file(const struct dentry * dentry, const __u32 mode,
70439+ const struct vfsmount * mnt)
70440+{
70441+ __u32 retval = mode;
70442+ struct acl_subject_label *curracl;
70443+ struct acl_object_label *currobj;
70444+
70445+ if (unlikely(!(gr_status & GR_READY)))
70446+ return (mode & ~GR_AUDITS);
70447+
70448+ curracl = current->acl;
70449+
70450+ currobj = chk_obj_label(dentry, mnt, curracl);
70451+ retval = currobj->mode & mode;
70452+
70453+ /* if we're opening a specified transfer file for writing
70454+ (e.g. /dev/initctl), then transfer our role to init
70455+ */
70456+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70457+ current->role->roletype & GR_ROLE_PERSIST)) {
70458+ struct task_struct *task = init_pid_ns.child_reaper;
70459+
70460+ if (task->role != current->role) {
70461+ struct acl_subject_label *subj;
70462+
70463+ task->acl_sp_role = 0;
70464+ task->acl_role_id = current->acl_role_id;
70465+ task->role = current->role;
70466+ rcu_read_lock();
70467+ read_lock(&grsec_exec_file_lock);
70468+ subj = gr_get_subject_for_task(task, NULL, 1);
70469+ gr_apply_subject_to_task(task, subj);
70470+ read_unlock(&grsec_exec_file_lock);
70471+ rcu_read_unlock();
70472+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70473+ }
70474+ }
70475+
70476+ if (unlikely
70477+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70478+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70479+ __u32 new_mode = mode;
70480+
70481+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70482+
70483+ retval = new_mode;
70484+
70485+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70486+ new_mode |= GR_INHERIT;
70487+
70488+ if (!(mode & GR_NOLEARN))
70489+ gr_log_learn(dentry, mnt, new_mode);
70490+ }
70491+
70492+ return retval;
70493+}
70494+
70495+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70496+ const struct dentry *parent,
70497+ const struct vfsmount *mnt)
70498+{
70499+ struct name_entry *match;
70500+ struct acl_object_label *matchpo;
70501+ struct acl_subject_label *curracl;
70502+ char *path;
70503+
70504+ if (unlikely(!(gr_status & GR_READY)))
70505+ return NULL;
70506+
70507+ preempt_disable();
70508+ path = gr_to_filename_rbac(new_dentry, mnt);
70509+ match = lookup_name_entry_create(path);
70510+
70511+ curracl = current->acl;
70512+
70513+ if (match) {
70514+ read_lock(&gr_inode_lock);
70515+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70516+ read_unlock(&gr_inode_lock);
70517+
70518+ if (matchpo) {
70519+ preempt_enable();
70520+ return matchpo;
70521+ }
70522+ }
70523+
70524+ // lookup parent
70525+
70526+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70527+
70528+ preempt_enable();
70529+ return matchpo;
70530+}
70531+
70532+__u32
70533+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70534+ const struct vfsmount * mnt, const __u32 mode)
70535+{
70536+ struct acl_object_label *matchpo;
70537+ __u32 retval;
70538+
70539+ if (unlikely(!(gr_status & GR_READY)))
70540+ return (mode & ~GR_AUDITS);
70541+
70542+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70543+
70544+ retval = matchpo->mode & mode;
70545+
70546+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70547+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70548+ __u32 new_mode = mode;
70549+
70550+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70551+
70552+ gr_log_learn(new_dentry, mnt, new_mode);
70553+ return new_mode;
70554+ }
70555+
70556+ return retval;
70557+}
70558+
70559+__u32
70560+gr_check_link(const struct dentry * new_dentry,
70561+ const struct dentry * parent_dentry,
70562+ const struct vfsmount * parent_mnt,
70563+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70564+{
70565+ struct acl_object_label *obj;
70566+ __u32 oldmode, newmode;
70567+ __u32 needmode;
70568+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70569+ GR_DELETE | GR_INHERIT;
70570+
70571+ if (unlikely(!(gr_status & GR_READY)))
70572+ return (GR_CREATE | GR_LINK);
70573+
70574+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70575+ oldmode = obj->mode;
70576+
70577+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70578+ newmode = obj->mode;
70579+
70580+ needmode = newmode & checkmodes;
70581+
70582+ // old name for hardlink must have at least the permissions of the new name
70583+ if ((oldmode & needmode) != needmode)
70584+ goto bad;
70585+
70586+ // if old name had restrictions/auditing, make sure the new name does as well
70587+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70588+
70589+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70590+ if (is_privileged_binary(old_dentry))
70591+ needmode |= GR_SETID;
70592+
70593+ if ((newmode & needmode) != needmode)
70594+ goto bad;
70595+
70596+ // enforce minimum permissions
70597+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70598+ return newmode;
70599+bad:
70600+ needmode = oldmode;
70601+ if (is_privileged_binary(old_dentry))
70602+ needmode |= GR_SETID;
70603+
70604+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70605+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70606+ return (GR_CREATE | GR_LINK);
70607+ } else if (newmode & GR_SUPPRESS)
70608+ return GR_SUPPRESS;
70609+ else
70610+ return 0;
70611+}
70612+
70613+int
70614+gr_check_hidden_task(const struct task_struct *task)
70615+{
70616+ if (unlikely(!(gr_status & GR_READY)))
70617+ return 0;
70618+
70619+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70620+ return 1;
70621+
70622+ return 0;
70623+}
70624+
70625+int
70626+gr_check_protected_task(const struct task_struct *task)
70627+{
70628+ if (unlikely(!(gr_status & GR_READY) || !task))
70629+ return 0;
70630+
70631+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70632+ task->acl != current->acl)
70633+ return 1;
70634+
70635+ return 0;
70636+}
70637+
70638+int
70639+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70640+{
70641+ struct task_struct *p;
70642+ int ret = 0;
70643+
70644+ if (unlikely(!(gr_status & GR_READY) || !pid))
70645+ return ret;
70646+
70647+ read_lock(&tasklist_lock);
70648+ do_each_pid_task(pid, type, p) {
70649+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70650+ p->acl != current->acl) {
70651+ ret = 1;
70652+ goto out;
70653+ }
70654+ } while_each_pid_task(pid, type, p);
70655+out:
70656+ read_unlock(&tasklist_lock);
70657+
70658+ return ret;
70659+}
70660+
70661+void
70662+gr_copy_label(struct task_struct *tsk)
70663+{
70664+ struct task_struct *p = current;
70665+
70666+ tsk->inherited = p->inherited;
70667+ tsk->acl_sp_role = 0;
70668+ tsk->acl_role_id = p->acl_role_id;
70669+ tsk->acl = p->acl;
70670+ tsk->role = p->role;
70671+ tsk->signal->used_accept = 0;
70672+ tsk->signal->curr_ip = p->signal->curr_ip;
70673+ tsk->signal->saved_ip = p->signal->saved_ip;
70674+ if (p->exec_file)
70675+ get_file(p->exec_file);
70676+ tsk->exec_file = p->exec_file;
70677+ tsk->is_writable = p->is_writable;
70678+ if (unlikely(p->signal->used_accept)) {
70679+ p->signal->curr_ip = 0;
70680+ p->signal->saved_ip = 0;
70681+ }
70682+
70683+ return;
70684+}
70685+
70686+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
70687+
70688+int
70689+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70690+{
70691+ unsigned int i;
70692+ __u16 num;
70693+ uid_t *uidlist;
70694+ uid_t curuid;
70695+ int realok = 0;
70696+ int effectiveok = 0;
70697+ int fsok = 0;
70698+ uid_t globalreal, globaleffective, globalfs;
70699+
70700+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
70701+ struct user_struct *user;
70702+
70703+ if (!uid_valid(real))
70704+ goto skipit;
70705+
70706+ /* find user based on global namespace */
70707+
70708+ globalreal = GR_GLOBAL_UID(real);
70709+
70710+ user = find_user(make_kuid(&init_user_ns, globalreal));
70711+ if (user == NULL)
70712+ goto skipit;
70713+
70714+ if (gr_process_kernel_setuid_ban(user)) {
70715+ /* for find_user */
70716+ free_uid(user);
70717+ return 1;
70718+ }
70719+
70720+ /* for find_user */
70721+ free_uid(user);
70722+
70723+skipit:
70724+#endif
70725+
70726+ if (unlikely(!(gr_status & GR_READY)))
70727+ return 0;
70728+
70729+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70730+ gr_log_learn_uid_change(real, effective, fs);
70731+
70732+ num = current->acl->user_trans_num;
70733+ uidlist = current->acl->user_transitions;
70734+
70735+ if (uidlist == NULL)
70736+ return 0;
70737+
70738+ if (!uid_valid(real)) {
70739+ realok = 1;
70740+ globalreal = (uid_t)-1;
70741+ } else {
70742+ globalreal = GR_GLOBAL_UID(real);
70743+ }
70744+ if (!uid_valid(effective)) {
70745+ effectiveok = 1;
70746+ globaleffective = (uid_t)-1;
70747+ } else {
70748+ globaleffective = GR_GLOBAL_UID(effective);
70749+ }
70750+ if (!uid_valid(fs)) {
70751+ fsok = 1;
70752+ globalfs = (uid_t)-1;
70753+ } else {
70754+ globalfs = GR_GLOBAL_UID(fs);
70755+ }
70756+
70757+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
70758+ for (i = 0; i < num; i++) {
70759+ curuid = uidlist[i];
70760+ if (globalreal == curuid)
70761+ realok = 1;
70762+ if (globaleffective == curuid)
70763+ effectiveok = 1;
70764+ if (globalfs == curuid)
70765+ fsok = 1;
70766+ }
70767+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
70768+ for (i = 0; i < num; i++) {
70769+ curuid = uidlist[i];
70770+ if (globalreal == curuid)
70771+ break;
70772+ if (globaleffective == curuid)
70773+ break;
70774+ if (globalfs == curuid)
70775+ break;
70776+ }
70777+ /* not in deny list */
70778+ if (i == num) {
70779+ realok = 1;
70780+ effectiveok = 1;
70781+ fsok = 1;
70782+ }
70783+ }
70784+
70785+ if (realok && effectiveok && fsok)
70786+ return 0;
70787+ else {
70788+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70789+ return 1;
70790+ }
70791+}
70792+
70793+int
70794+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70795+{
70796+ unsigned int i;
70797+ __u16 num;
70798+ gid_t *gidlist;
70799+ gid_t curgid;
70800+ int realok = 0;
70801+ int effectiveok = 0;
70802+ int fsok = 0;
70803+ gid_t globalreal, globaleffective, globalfs;
70804+
70805+ if (unlikely(!(gr_status & GR_READY)))
70806+ return 0;
70807+
70808+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70809+ gr_log_learn_gid_change(real, effective, fs);
70810+
70811+ num = current->acl->group_trans_num;
70812+ gidlist = current->acl->group_transitions;
70813+
70814+ if (gidlist == NULL)
70815+ return 0;
70816+
70817+ if (!gid_valid(real)) {
70818+ realok = 1;
70819+ globalreal = (gid_t)-1;
70820+ } else {
70821+ globalreal = GR_GLOBAL_GID(real);
70822+ }
70823+ if (!gid_valid(effective)) {
70824+ effectiveok = 1;
70825+ globaleffective = (gid_t)-1;
70826+ } else {
70827+ globaleffective = GR_GLOBAL_GID(effective);
70828+ }
70829+ if (!gid_valid(fs)) {
70830+ fsok = 1;
70831+ globalfs = (gid_t)-1;
70832+ } else {
70833+ globalfs = GR_GLOBAL_GID(fs);
70834+ }
70835+
70836+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
70837+ for (i = 0; i < num; i++) {
70838+ curgid = gidlist[i];
70839+ if (globalreal == curgid)
70840+ realok = 1;
70841+ if (globaleffective == curgid)
70842+ effectiveok = 1;
70843+ if (globalfs == curgid)
70844+ fsok = 1;
70845+ }
70846+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
70847+ for (i = 0; i < num; i++) {
70848+ curgid = gidlist[i];
70849+ if (globalreal == curgid)
70850+ break;
70851+ if (globaleffective == curgid)
70852+ break;
70853+ if (globalfs == curgid)
70854+ break;
70855+ }
70856+ /* not in deny list */
70857+ if (i == num) {
70858+ realok = 1;
70859+ effectiveok = 1;
70860+ fsok = 1;
70861+ }
70862+ }
70863+
70864+ if (realok && effectiveok && fsok)
70865+ return 0;
70866+ else {
70867+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70868+ return 1;
70869+ }
70870+}
70871+
70872+extern int gr_acl_is_capable(const int cap);
70873+
70874+void
70875+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
70876+{
70877+ struct acl_role_label *role = task->role;
70878+ struct acl_role_label *origrole = role;
70879+ struct acl_subject_label *subj = NULL;
70880+ struct acl_object_label *obj;
70881+ struct file *filp;
70882+ uid_t uid;
70883+ gid_t gid;
70884+
70885+ if (unlikely(!(gr_status & GR_READY)))
70886+ return;
70887+
70888+ uid = GR_GLOBAL_UID(kuid);
70889+ gid = GR_GLOBAL_GID(kgid);
70890+
70891+ filp = task->exec_file;
70892+
70893+ /* kernel process, we'll give them the kernel role */
70894+ if (unlikely(!filp)) {
70895+ task->role = running_polstate.kernel_role;
70896+ task->acl = running_polstate.kernel_role->root_label;
70897+ return;
70898+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
70899+ /* save the current ip at time of role lookup so that the proper
70900+ IP will be learned for role_allowed_ip */
70901+ task->signal->saved_ip = task->signal->curr_ip;
70902+ role = lookup_acl_role_label(task, uid, gid);
70903+ }
70904+
70905+ /* don't change the role if we're not a privileged process */
70906+ if (role && task->role != role &&
70907+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
70908+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
70909+ return;
70910+
70911+ task->role = role;
70912+
70913+ if (task->inherited) {
70914+ /* if we reached our subject through inheritance, then first see
70915+ if there's a subject of the same name in the new role that has
70916+ an object that would result in the same inherited subject
70917+ */
70918+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
70919+ if (subj) {
70920+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
70921+ if (!(obj->mode & GR_INHERIT))
70922+ subj = NULL;
70923+ }
70924+
70925+ }
70926+ if (subj == NULL) {
70927+ /* otherwise:
70928+ perform subject lookup in possibly new role
70929+ we can use this result below in the case where role == task->role
70930+ */
70931+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
70932+ }
70933+
70934+ /* if we changed uid/gid, but result in the same role
70935+ and are using inheritance, don't lose the inherited subject
70936+ if current subject is other than what normal lookup
70937+ would result in, we arrived via inheritance, don't
70938+ lose subject
70939+ */
70940+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
70941+ (subj == task->acl)))
70942+ task->acl = subj;
70943+
70944+ /* leave task->inherited unaffected */
70945+
70946+ task->is_writable = 0;
70947+
70948+ /* ignore additional mmap checks for processes that are writable
70949+ by the default ACL */
70950+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70951+ if (unlikely(obj->mode & GR_WRITE))
70952+ task->is_writable = 1;
70953+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70954+ if (unlikely(obj->mode & GR_WRITE))
70955+ task->is_writable = 1;
70956+
70957+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70958+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70959+#endif
70960+
70961+ gr_set_proc_res(task);
70962+
70963+ return;
70964+}
70965+
70966+int
70967+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
70968+ const int unsafe_flags)
70969+{
70970+ struct task_struct *task = current;
70971+ struct acl_subject_label *newacl;
70972+ struct acl_object_label *obj;
70973+ __u32 retmode;
70974+
70975+ if (unlikely(!(gr_status & GR_READY)))
70976+ return 0;
70977+
70978+ newacl = chk_subj_label(dentry, mnt, task->role);
70979+
70980+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
70981+ did an exec
70982+ */
70983+ rcu_read_lock();
70984+ read_lock(&tasklist_lock);
70985+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
70986+ (task->parent->acl->mode & GR_POVERRIDE))) {
70987+ read_unlock(&tasklist_lock);
70988+ rcu_read_unlock();
70989+ goto skip_check;
70990+ }
70991+ read_unlock(&tasklist_lock);
70992+ rcu_read_unlock();
70993+
70994+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
70995+ !(task->role->roletype & GR_ROLE_GOD) &&
70996+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
70997+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70998+ if (unsafe_flags & LSM_UNSAFE_SHARE)
70999+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71000+ else
71001+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71002+ return -EACCES;
71003+ }
71004+
71005+skip_check:
71006+
71007+ obj = chk_obj_label(dentry, mnt, task->acl);
71008+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71009+
71010+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71011+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71012+ if (obj->nested)
71013+ task->acl = obj->nested;
71014+ else
71015+ task->acl = newacl;
71016+ task->inherited = 0;
71017+ } else {
71018+ task->inherited = 1;
71019+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71020+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71021+ }
71022+
71023+ task->is_writable = 0;
71024+
71025+ /* ignore additional mmap checks for processes that are writable
71026+ by the default ACL */
71027+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71028+ if (unlikely(obj->mode & GR_WRITE))
71029+ task->is_writable = 1;
71030+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71031+ if (unlikely(obj->mode & GR_WRITE))
71032+ task->is_writable = 1;
71033+
71034+ gr_set_proc_res(task);
71035+
71036+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71037+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71038+#endif
71039+ return 0;
71040+}
71041+
71042+/* always called with valid inodev ptr */
71043+static void
71044+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71045+{
71046+ struct acl_object_label *matchpo;
71047+ struct acl_subject_label *matchps;
71048+ struct acl_subject_label *subj;
71049+ struct acl_role_label *role;
71050+ unsigned int x;
71051+
71052+ FOR_EACH_ROLE_START(role)
71053+ FOR_EACH_SUBJECT_START(role, subj, x)
71054+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71055+ matchpo->mode |= GR_DELETED;
71056+ FOR_EACH_SUBJECT_END(subj,x)
71057+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71058+ /* nested subjects aren't in the role's subj_hash table */
71059+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71060+ matchpo->mode |= GR_DELETED;
71061+ FOR_EACH_NESTED_SUBJECT_END(subj)
71062+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71063+ matchps->mode |= GR_DELETED;
71064+ FOR_EACH_ROLE_END(role)
71065+
71066+ inodev->nentry->deleted = 1;
71067+
71068+ return;
71069+}
71070+
71071+void
71072+gr_handle_delete(const u64 ino, const dev_t dev)
71073+{
71074+ struct inodev_entry *inodev;
71075+
71076+ if (unlikely(!(gr_status & GR_READY)))
71077+ return;
71078+
71079+ write_lock(&gr_inode_lock);
71080+ inodev = lookup_inodev_entry(ino, dev);
71081+ if (inodev != NULL)
71082+ do_handle_delete(inodev, ino, dev);
71083+ write_unlock(&gr_inode_lock);
71084+
71085+ return;
71086+}
71087+
71088+static void
71089+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71090+ const u64 newinode, const dev_t newdevice,
71091+ struct acl_subject_label *subj)
71092+{
71093+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71094+ struct acl_object_label *match;
71095+
71096+ match = subj->obj_hash[index];
71097+
71098+ while (match && (match->inode != oldinode ||
71099+ match->device != olddevice ||
71100+ !(match->mode & GR_DELETED)))
71101+ match = match->next;
71102+
71103+ if (match && (match->inode == oldinode)
71104+ && (match->device == olddevice)
71105+ && (match->mode & GR_DELETED)) {
71106+ if (match->prev == NULL) {
71107+ subj->obj_hash[index] = match->next;
71108+ if (match->next != NULL)
71109+ match->next->prev = NULL;
71110+ } else {
71111+ match->prev->next = match->next;
71112+ if (match->next != NULL)
71113+ match->next->prev = match->prev;
71114+ }
71115+ match->prev = NULL;
71116+ match->next = NULL;
71117+ match->inode = newinode;
71118+ match->device = newdevice;
71119+ match->mode &= ~GR_DELETED;
71120+
71121+ insert_acl_obj_label(match, subj);
71122+ }
71123+
71124+ return;
71125+}
71126+
71127+static void
71128+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71129+ const u64 newinode, const dev_t newdevice,
71130+ struct acl_role_label *role)
71131+{
71132+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71133+ struct acl_subject_label *match;
71134+
71135+ match = role->subj_hash[index];
71136+
71137+ while (match && (match->inode != oldinode ||
71138+ match->device != olddevice ||
71139+ !(match->mode & GR_DELETED)))
71140+ match = match->next;
71141+
71142+ if (match && (match->inode == oldinode)
71143+ && (match->device == olddevice)
71144+ && (match->mode & GR_DELETED)) {
71145+ if (match->prev == NULL) {
71146+ role->subj_hash[index] = match->next;
71147+ if (match->next != NULL)
71148+ match->next->prev = NULL;
71149+ } else {
71150+ match->prev->next = match->next;
71151+ if (match->next != NULL)
71152+ match->next->prev = match->prev;
71153+ }
71154+ match->prev = NULL;
71155+ match->next = NULL;
71156+ match->inode = newinode;
71157+ match->device = newdevice;
71158+ match->mode &= ~GR_DELETED;
71159+
71160+ insert_acl_subj_label(match, role);
71161+ }
71162+
71163+ return;
71164+}
71165+
71166+static void
71167+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71168+ const u64 newinode, const dev_t newdevice)
71169+{
71170+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71171+ struct inodev_entry *match;
71172+
71173+ match = running_polstate.inodev_set.i_hash[index];
71174+
71175+ while (match && (match->nentry->inode != oldinode ||
71176+ match->nentry->device != olddevice || !match->nentry->deleted))
71177+ match = match->next;
71178+
71179+ if (match && (match->nentry->inode == oldinode)
71180+ && (match->nentry->device == olddevice) &&
71181+ match->nentry->deleted) {
71182+ if (match->prev == NULL) {
71183+ running_polstate.inodev_set.i_hash[index] = match->next;
71184+ if (match->next != NULL)
71185+ match->next->prev = NULL;
71186+ } else {
71187+ match->prev->next = match->next;
71188+ if (match->next != NULL)
71189+ match->next->prev = match->prev;
71190+ }
71191+ match->prev = NULL;
71192+ match->next = NULL;
71193+ match->nentry->inode = newinode;
71194+ match->nentry->device = newdevice;
71195+ match->nentry->deleted = 0;
71196+
71197+ insert_inodev_entry(match);
71198+ }
71199+
71200+ return;
71201+}
71202+
71203+static void
71204+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71205+{
71206+ struct acl_subject_label *subj;
71207+ struct acl_role_label *role;
71208+ unsigned int x;
71209+
71210+ FOR_EACH_ROLE_START(role)
71211+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71212+
71213+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71214+ if ((subj->inode == ino) && (subj->device == dev)) {
71215+ subj->inode = ino;
71216+ subj->device = dev;
71217+ }
71218+ /* nested subjects aren't in the role's subj_hash table */
71219+ update_acl_obj_label(matchn->inode, matchn->device,
71220+ ino, dev, subj);
71221+ FOR_EACH_NESTED_SUBJECT_END(subj)
71222+ FOR_EACH_SUBJECT_START(role, subj, x)
71223+ update_acl_obj_label(matchn->inode, matchn->device,
71224+ ino, dev, subj);
71225+ FOR_EACH_SUBJECT_END(subj,x)
71226+ FOR_EACH_ROLE_END(role)
71227+
71228+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71229+
71230+ return;
71231+}
71232+
71233+static void
71234+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71235+ const struct vfsmount *mnt)
71236+{
71237+ u64 ino = __get_ino(dentry);
71238+ dev_t dev = __get_dev(dentry);
71239+
71240+ __do_handle_create(matchn, ino, dev);
71241+
71242+ return;
71243+}
71244+
71245+void
71246+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71247+{
71248+ struct name_entry *matchn;
71249+
71250+ if (unlikely(!(gr_status & GR_READY)))
71251+ return;
71252+
71253+ preempt_disable();
71254+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71255+
71256+ if (unlikely((unsigned long)matchn)) {
71257+ write_lock(&gr_inode_lock);
71258+ do_handle_create(matchn, dentry, mnt);
71259+ write_unlock(&gr_inode_lock);
71260+ }
71261+ preempt_enable();
71262+
71263+ return;
71264+}
71265+
71266+void
71267+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71268+{
71269+ struct name_entry *matchn;
71270+
71271+ if (unlikely(!(gr_status & GR_READY)))
71272+ return;
71273+
71274+ preempt_disable();
71275+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71276+
71277+ if (unlikely((unsigned long)matchn)) {
71278+ write_lock(&gr_inode_lock);
71279+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71280+ write_unlock(&gr_inode_lock);
71281+ }
71282+ preempt_enable();
71283+
71284+ return;
71285+}
71286+
71287+void
71288+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71289+ struct dentry *old_dentry,
71290+ struct dentry *new_dentry,
71291+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71292+{
71293+ struct name_entry *matchn;
71294+ struct name_entry *matchn2 = NULL;
71295+ struct inodev_entry *inodev;
71296+ struct inode *inode = new_dentry->d_inode;
71297+ u64 old_ino = __get_ino(old_dentry);
71298+ dev_t old_dev = __get_dev(old_dentry);
71299+ unsigned int exchange = flags & RENAME_EXCHANGE;
71300+
71301+ /* vfs_rename swaps the name and parent link for old_dentry and
71302+ new_dentry
71303+ at this point, old_dentry has the new name, parent link, and inode
71304+ for the renamed file
71305+ if a file is being replaced by a rename, new_dentry has the inode
71306+ and name for the replaced file
71307+ */
71308+
71309+ if (unlikely(!(gr_status & GR_READY)))
71310+ return;
71311+
71312+ preempt_disable();
71313+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71314+
71315+ /* exchange cases:
71316+ a filename exists for the source, but not dest
71317+ do a recreate on source
71318+ a filename exists for the dest, but not source
71319+ do a recreate on dest
71320+ a filename exists for both source and dest
71321+ delete source and dest, then create source and dest
71322+ a filename exists for neither source nor dest
71323+ no updates needed
71324+
71325+ the name entry lookups get us the old inode/dev associated with
71326+ each name, so do the deletes first (if possible) so that when
71327+ we do the create, we pick up on the right entries
71328+ */
71329+
71330+ if (exchange)
71331+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71332+
71333+ /* we wouldn't have to check d_inode if it weren't for
71334+ NFS silly-renaming
71335+ */
71336+
71337+ write_lock(&gr_inode_lock);
71338+ if (unlikely((replace || exchange) && inode)) {
71339+ u64 new_ino = __get_ino(new_dentry);
71340+ dev_t new_dev = __get_dev(new_dentry);
71341+
71342+ inodev = lookup_inodev_entry(new_ino, new_dev);
71343+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71344+ do_handle_delete(inodev, new_ino, new_dev);
71345+ }
71346+
71347+ inodev = lookup_inodev_entry(old_ino, old_dev);
71348+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71349+ do_handle_delete(inodev, old_ino, old_dev);
71350+
71351+ if (unlikely(matchn != NULL))
71352+ do_handle_create(matchn, old_dentry, mnt);
71353+
71354+ if (unlikely(matchn2 != NULL))
71355+ do_handle_create(matchn2, new_dentry, mnt);
71356+
71357+ write_unlock(&gr_inode_lock);
71358+ preempt_enable();
71359+
71360+ return;
71361+}
71362+
71363+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71364+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71365+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71366+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71367+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71368+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71369+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71370+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71371+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71372+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71373+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71374+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71375+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71376+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71377+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71378+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71379+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71380+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71381+};
71382+
71383+void
71384+gr_learn_resource(const struct task_struct *task,
71385+ const int res, const unsigned long wanted, const int gt)
71386+{
71387+ struct acl_subject_label *acl;
71388+ const struct cred *cred;
71389+
71390+ if (unlikely((gr_status & GR_READY) &&
71391+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71392+ goto skip_reslog;
71393+
71394+ gr_log_resource(task, res, wanted, gt);
71395+skip_reslog:
71396+
71397+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71398+ return;
71399+
71400+ acl = task->acl;
71401+
71402+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71403+ !(acl->resmask & (1U << (unsigned short) res))))
71404+ return;
71405+
71406+ if (wanted >= acl->res[res].rlim_cur) {
71407+ unsigned long res_add;
71408+
71409+ res_add = wanted + res_learn_bumps[res];
71410+
71411+ acl->res[res].rlim_cur = res_add;
71412+
71413+ if (wanted > acl->res[res].rlim_max)
71414+ acl->res[res].rlim_max = res_add;
71415+
71416+ /* only log the subject filename, since resource logging is supported for
71417+ single-subject learning only */
71418+ rcu_read_lock();
71419+ cred = __task_cred(task);
71420+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71421+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71422+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71423+ "", (unsigned long) res, &task->signal->saved_ip);
71424+ rcu_read_unlock();
71425+ }
71426+
71427+ return;
71428+}
71429+EXPORT_SYMBOL_GPL(gr_learn_resource);
71430+#endif
71431+
71432+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71433+void
71434+pax_set_initial_flags(struct linux_binprm *bprm)
71435+{
71436+ struct task_struct *task = current;
71437+ struct acl_subject_label *proc;
71438+ unsigned long flags;
71439+
71440+ if (unlikely(!(gr_status & GR_READY)))
71441+ return;
71442+
71443+ flags = pax_get_flags(task);
71444+
71445+ proc = task->acl;
71446+
71447+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71448+ flags &= ~MF_PAX_PAGEEXEC;
71449+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71450+ flags &= ~MF_PAX_SEGMEXEC;
71451+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71452+ flags &= ~MF_PAX_RANDMMAP;
71453+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71454+ flags &= ~MF_PAX_EMUTRAMP;
71455+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71456+ flags &= ~MF_PAX_MPROTECT;
71457+
71458+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71459+ flags |= MF_PAX_PAGEEXEC;
71460+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71461+ flags |= MF_PAX_SEGMEXEC;
71462+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71463+ flags |= MF_PAX_RANDMMAP;
71464+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71465+ flags |= MF_PAX_EMUTRAMP;
71466+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71467+ flags |= MF_PAX_MPROTECT;
71468+
71469+ pax_set_flags(task, flags);
71470+
71471+ return;
71472+}
71473+#endif
71474+
71475+int
71476+gr_handle_proc_ptrace(struct task_struct *task)
71477+{
71478+ struct file *filp;
71479+ struct task_struct *tmp = task;
71480+ struct task_struct *curtemp = current;
71481+ __u32 retmode;
71482+
71483+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71484+ if (unlikely(!(gr_status & GR_READY)))
71485+ return 0;
71486+#endif
71487+
71488+ read_lock(&tasklist_lock);
71489+ read_lock(&grsec_exec_file_lock);
71490+ filp = task->exec_file;
71491+
71492+ while (task_pid_nr(tmp) > 0) {
71493+ if (tmp == curtemp)
71494+ break;
71495+ tmp = tmp->real_parent;
71496+ }
71497+
71498+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71499+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71500+ read_unlock(&grsec_exec_file_lock);
71501+ read_unlock(&tasklist_lock);
71502+ return 1;
71503+ }
71504+
71505+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71506+ if (!(gr_status & GR_READY)) {
71507+ read_unlock(&grsec_exec_file_lock);
71508+ read_unlock(&tasklist_lock);
71509+ return 0;
71510+ }
71511+#endif
71512+
71513+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71514+ read_unlock(&grsec_exec_file_lock);
71515+ read_unlock(&tasklist_lock);
71516+
71517+ if (retmode & GR_NOPTRACE)
71518+ return 1;
71519+
71520+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71521+ && (current->acl != task->acl || (current->acl != current->role->root_label
71522+ && task_pid_nr(current) != task_pid_nr(task))))
71523+ return 1;
71524+
71525+ return 0;
71526+}
71527+
71528+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71529+{
71530+ if (unlikely(!(gr_status & GR_READY)))
71531+ return;
71532+
71533+ if (!(current->role->roletype & GR_ROLE_GOD))
71534+ return;
71535+
71536+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71537+ p->role->rolename, gr_task_roletype_to_char(p),
71538+ p->acl->filename);
71539+}
71540+
71541+int
71542+gr_handle_ptrace(struct task_struct *task, const long request)
71543+{
71544+ struct task_struct *tmp = task;
71545+ struct task_struct *curtemp = current;
71546+ __u32 retmode;
71547+
71548+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71549+ if (unlikely(!(gr_status & GR_READY)))
71550+ return 0;
71551+#endif
71552+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71553+ read_lock(&tasklist_lock);
71554+ while (task_pid_nr(tmp) > 0) {
71555+ if (tmp == curtemp)
71556+ break;
71557+ tmp = tmp->real_parent;
71558+ }
71559+
71560+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71561+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71562+ read_unlock(&tasklist_lock);
71563+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71564+ return 1;
71565+ }
71566+ read_unlock(&tasklist_lock);
71567+ }
71568+
71569+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71570+ if (!(gr_status & GR_READY))
71571+ return 0;
71572+#endif
71573+
71574+ read_lock(&grsec_exec_file_lock);
71575+ if (unlikely(!task->exec_file)) {
71576+ read_unlock(&grsec_exec_file_lock);
71577+ return 0;
71578+ }
71579+
71580+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71581+ read_unlock(&grsec_exec_file_lock);
71582+
71583+ if (retmode & GR_NOPTRACE) {
71584+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71585+ return 1;
71586+ }
71587+
71588+ if (retmode & GR_PTRACERD) {
71589+ switch (request) {
71590+ case PTRACE_SEIZE:
71591+ case PTRACE_POKETEXT:
71592+ case PTRACE_POKEDATA:
71593+ case PTRACE_POKEUSR:
71594+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71595+ case PTRACE_SETREGS:
71596+ case PTRACE_SETFPREGS:
71597+#endif
71598+#ifdef CONFIG_X86
71599+ case PTRACE_SETFPXREGS:
71600+#endif
71601+#ifdef CONFIG_ALTIVEC
71602+ case PTRACE_SETVRREGS:
71603+#endif
71604+ return 1;
71605+ default:
71606+ return 0;
71607+ }
71608+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71609+ !(current->role->roletype & GR_ROLE_GOD) &&
71610+ (current->acl != task->acl)) {
71611+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71612+ return 1;
71613+ }
71614+
71615+ return 0;
71616+}
71617+
71618+static int is_writable_mmap(const struct file *filp)
71619+{
71620+ struct task_struct *task = current;
71621+ struct acl_object_label *obj, *obj2;
71622+
71623+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71624+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71625+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71626+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71627+ task->role->root_label);
71628+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71629+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71630+ return 1;
71631+ }
71632+ }
71633+ return 0;
71634+}
71635+
71636+int
71637+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71638+{
71639+ __u32 mode;
71640+
71641+ if (unlikely(!file || !(prot & PROT_EXEC)))
71642+ return 1;
71643+
71644+ if (is_writable_mmap(file))
71645+ return 0;
71646+
71647+ mode =
71648+ gr_search_file(file->f_path.dentry,
71649+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71650+ file->f_path.mnt);
71651+
71652+ if (!gr_tpe_allow(file))
71653+ return 0;
71654+
71655+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71656+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71657+ return 0;
71658+ } else if (unlikely(!(mode & GR_EXEC))) {
71659+ return 0;
71660+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71661+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71662+ return 1;
71663+ }
71664+
71665+ return 1;
71666+}
71667+
71668+int
71669+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
71670+{
71671+ __u32 mode;
71672+
71673+ if (unlikely(!file || !(prot & PROT_EXEC)))
71674+ return 1;
71675+
71676+ if (is_writable_mmap(file))
71677+ return 0;
71678+
71679+ mode =
71680+ gr_search_file(file->f_path.dentry,
71681+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71682+ file->f_path.mnt);
71683+
71684+ if (!gr_tpe_allow(file))
71685+ return 0;
71686+
71687+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71688+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71689+ return 0;
71690+ } else if (unlikely(!(mode & GR_EXEC))) {
71691+ return 0;
71692+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71693+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71694+ return 1;
71695+ }
71696+
71697+ return 1;
71698+}
71699+
71700+void
71701+gr_acl_handle_psacct(struct task_struct *task, const long code)
71702+{
71703+ unsigned long runtime, cputime;
71704+ cputime_t utime, stime;
71705+ unsigned int wday, cday;
71706+ __u8 whr, chr;
71707+ __u8 wmin, cmin;
71708+ __u8 wsec, csec;
71709+ struct timespec curtime, starttime;
71710+
71711+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
71712+ !(task->acl->mode & GR_PROCACCT)))
71713+ return;
71714+
71715+ curtime = ns_to_timespec(ktime_get_ns());
71716+ starttime = ns_to_timespec(task->start_time);
71717+ runtime = curtime.tv_sec - starttime.tv_sec;
71718+ wday = runtime / (60 * 60 * 24);
71719+ runtime -= wday * (60 * 60 * 24);
71720+ whr = runtime / (60 * 60);
71721+ runtime -= whr * (60 * 60);
71722+ wmin = runtime / 60;
71723+ runtime -= wmin * 60;
71724+ wsec = runtime;
71725+
71726+ task_cputime(task, &utime, &stime);
71727+ cputime = cputime_to_secs(utime + stime);
71728+ cday = cputime / (60 * 60 * 24);
71729+ cputime -= cday * (60 * 60 * 24);
71730+ chr = cputime / (60 * 60);
71731+ cputime -= chr * (60 * 60);
71732+ cmin = cputime / 60;
71733+ cputime -= cmin * 60;
71734+ csec = cputime;
71735+
71736+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
71737+
71738+ return;
71739+}
71740+
71741+#ifdef CONFIG_TASKSTATS
71742+int gr_is_taskstats_denied(int pid)
71743+{
71744+ struct task_struct *task;
71745+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71746+ const struct cred *cred;
71747+#endif
71748+ int ret = 0;
71749+
71750+ /* restrict taskstats viewing to un-chrooted root users
71751+ who have the 'view' subject flag if the RBAC system is enabled
71752+ */
71753+
71754+ rcu_read_lock();
71755+ read_lock(&tasklist_lock);
71756+ task = find_task_by_vpid(pid);
71757+ if (task) {
71758+#ifdef CONFIG_GRKERNSEC_CHROOT
71759+ if (proc_is_chrooted(task))
71760+ ret = -EACCES;
71761+#endif
71762+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71763+ cred = __task_cred(task);
71764+#ifdef CONFIG_GRKERNSEC_PROC_USER
71765+ if (gr_is_global_nonroot(cred->uid))
71766+ ret = -EACCES;
71767+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71768+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
71769+ ret = -EACCES;
71770+#endif
71771+#endif
71772+ if (gr_status & GR_READY) {
71773+ if (!(task->acl->mode & GR_VIEW))
71774+ ret = -EACCES;
71775+ }
71776+ } else
71777+ ret = -ENOENT;
71778+
71779+ read_unlock(&tasklist_lock);
71780+ rcu_read_unlock();
71781+
71782+ return ret;
71783+}
71784+#endif
71785+
71786+/* AUXV entries are filled via a descendant of search_binary_handler
71787+ after we've already applied the subject for the target
71788+*/
71789+int gr_acl_enable_at_secure(void)
71790+{
71791+ if (unlikely(!(gr_status & GR_READY)))
71792+ return 0;
71793+
71794+ if (current->acl->mode & GR_ATSECURE)
71795+ return 1;
71796+
71797+ return 0;
71798+}
71799+
71800+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
71801+{
71802+ struct task_struct *task = current;
71803+ struct dentry *dentry = file->f_path.dentry;
71804+ struct vfsmount *mnt = file->f_path.mnt;
71805+ struct acl_object_label *obj, *tmp;
71806+ struct acl_subject_label *subj;
71807+ unsigned int bufsize;
71808+ int is_not_root;
71809+ char *path;
71810+ dev_t dev = __get_dev(dentry);
71811+
71812+ if (unlikely(!(gr_status & GR_READY)))
71813+ return 1;
71814+
71815+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71816+ return 1;
71817+
71818+ /* ignore Eric Biederman */
71819+ if (IS_PRIVATE(dentry->d_inode))
71820+ return 1;
71821+
71822+ subj = task->acl;
71823+ read_lock(&gr_inode_lock);
71824+ do {
71825+ obj = lookup_acl_obj_label(ino, dev, subj);
71826+ if (obj != NULL) {
71827+ read_unlock(&gr_inode_lock);
71828+ return (obj->mode & GR_FIND) ? 1 : 0;
71829+ }
71830+ } while ((subj = subj->parent_subject));
71831+ read_unlock(&gr_inode_lock);
71832+
71833+ /* this is purely an optimization since we're looking for an object
71834+ for the directory we're doing a readdir on
71835+ if it's possible for any globbed object to match the entry we're
71836+ filling into the directory, then the object we find here will be
71837+ an anchor point with attached globbed objects
71838+ */
71839+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
71840+ if (obj->globbed == NULL)
71841+ return (obj->mode & GR_FIND) ? 1 : 0;
71842+
71843+ is_not_root = ((obj->filename[0] == '/') &&
71844+ (obj->filename[1] == '\0')) ? 0 : 1;
71845+ bufsize = PAGE_SIZE - namelen - is_not_root;
71846+
71847+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
71848+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
71849+ return 1;
71850+
71851+ preempt_disable();
71852+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71853+ bufsize);
71854+
71855+ bufsize = strlen(path);
71856+
71857+ /* if base is "/", don't append an additional slash */
71858+ if (is_not_root)
71859+ *(path + bufsize) = '/';
71860+ memcpy(path + bufsize + is_not_root, name, namelen);
71861+ *(path + bufsize + namelen + is_not_root) = '\0';
71862+
71863+ tmp = obj->globbed;
71864+ while (tmp) {
71865+ if (!glob_match(tmp->filename, path)) {
71866+ preempt_enable();
71867+ return (tmp->mode & GR_FIND) ? 1 : 0;
71868+ }
71869+ tmp = tmp->next;
71870+ }
71871+ preempt_enable();
71872+ return (obj->mode & GR_FIND) ? 1 : 0;
71873+}
71874+
71875+void gr_put_exec_file(struct task_struct *task)
71876+{
71877+ struct file *filp;
71878+
71879+ write_lock(&grsec_exec_file_lock);
71880+ filp = task->exec_file;
71881+ task->exec_file = NULL;
71882+ write_unlock(&grsec_exec_file_lock);
71883+
71884+ if (filp)
71885+ fput(filp);
71886+
71887+ return;
71888+}
71889+
71890+
71891+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
71892+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
71893+#endif
71894+#ifdef CONFIG_SECURITY
71895+EXPORT_SYMBOL_GPL(gr_check_user_change);
71896+EXPORT_SYMBOL_GPL(gr_check_group_change);
71897+#endif
71898+
71899diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
71900new file mode 100644
71901index 0000000..18ffbbd
71902--- /dev/null
71903+++ b/grsecurity/gracl_alloc.c
71904@@ -0,0 +1,105 @@
71905+#include <linux/kernel.h>
71906+#include <linux/mm.h>
71907+#include <linux/slab.h>
71908+#include <linux/vmalloc.h>
71909+#include <linux/gracl.h>
71910+#include <linux/grsecurity.h>
71911+
71912+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
71913+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
71914+
71915+static __inline__ int
71916+alloc_pop(void)
71917+{
71918+ if (current_alloc_state->alloc_stack_next == 1)
71919+ return 0;
71920+
71921+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
71922+
71923+ current_alloc_state->alloc_stack_next--;
71924+
71925+ return 1;
71926+}
71927+
71928+static __inline__ int
71929+alloc_push(void *buf)
71930+{
71931+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
71932+ return 1;
71933+
71934+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
71935+
71936+ current_alloc_state->alloc_stack_next++;
71937+
71938+ return 0;
71939+}
71940+
71941+void *
71942+acl_alloc(unsigned long len)
71943+{
71944+ void *ret = NULL;
71945+
71946+ if (!len || len > PAGE_SIZE)
71947+ goto out;
71948+
71949+ ret = kmalloc(len, GFP_KERNEL);
71950+
71951+ if (ret) {
71952+ if (alloc_push(ret)) {
71953+ kfree(ret);
71954+ ret = NULL;
71955+ }
71956+ }
71957+
71958+out:
71959+ return ret;
71960+}
71961+
71962+void *
71963+acl_alloc_num(unsigned long num, unsigned long len)
71964+{
71965+ if (!len || (num > (PAGE_SIZE / len)))
71966+ return NULL;
71967+
71968+ return acl_alloc(num * len);
71969+}
71970+
71971+void
71972+acl_free_all(void)
71973+{
71974+ if (!current_alloc_state->alloc_stack)
71975+ return;
71976+
71977+ while (alloc_pop()) ;
71978+
71979+ if (current_alloc_state->alloc_stack) {
71980+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
71981+ kfree(current_alloc_state->alloc_stack);
71982+ else
71983+ vfree(current_alloc_state->alloc_stack);
71984+ }
71985+
71986+ current_alloc_state->alloc_stack = NULL;
71987+ current_alloc_state->alloc_stack_size = 1;
71988+ current_alloc_state->alloc_stack_next = 1;
71989+
71990+ return;
71991+}
71992+
71993+int
71994+acl_alloc_stack_init(unsigned long size)
71995+{
71996+ if ((size * sizeof (void *)) <= PAGE_SIZE)
71997+ current_alloc_state->alloc_stack =
71998+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
71999+ else
72000+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72001+
72002+ current_alloc_state->alloc_stack_size = size;
72003+ current_alloc_state->alloc_stack_next = 1;
72004+
72005+ if (!current_alloc_state->alloc_stack)
72006+ return 0;
72007+ else
72008+ return 1;
72009+}
72010diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72011new file mode 100644
72012index 0000000..1a94c11
72013--- /dev/null
72014+++ b/grsecurity/gracl_cap.c
72015@@ -0,0 +1,127 @@
72016+#include <linux/kernel.h>
72017+#include <linux/module.h>
72018+#include <linux/sched.h>
72019+#include <linux/gracl.h>
72020+#include <linux/grsecurity.h>
72021+#include <linux/grinternal.h>
72022+
72023+extern const char *captab_log[];
72024+extern int captab_log_entries;
72025+
72026+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72027+{
72028+ struct acl_subject_label *curracl;
72029+
72030+ if (!gr_acl_is_enabled())
72031+ return 1;
72032+
72033+ curracl = task->acl;
72034+
72035+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72036+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72037+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72038+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72039+ gr_to_filename(task->exec_file->f_path.dentry,
72040+ task->exec_file->f_path.mnt) : curracl->filename,
72041+ curracl->filename, 0UL,
72042+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72043+ return 1;
72044+ }
72045+
72046+ return 0;
72047+}
72048+
72049+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72050+{
72051+ struct acl_subject_label *curracl;
72052+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72053+ kernel_cap_t cap_audit = __cap_empty_set;
72054+
72055+ if (!gr_acl_is_enabled())
72056+ return 1;
72057+
72058+ curracl = task->acl;
72059+
72060+ cap_drop = curracl->cap_lower;
72061+ cap_mask = curracl->cap_mask;
72062+ cap_audit = curracl->cap_invert_audit;
72063+
72064+ while ((curracl = curracl->parent_subject)) {
72065+ /* if the cap isn't specified in the current computed mask but is specified in the
72066+ current level subject, and is lowered in the current level subject, then add
72067+ it to the set of dropped capabilities
72068+ otherwise, add the current level subject's mask to the current computed mask
72069+ */
72070+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72071+ cap_raise(cap_mask, cap);
72072+ if (cap_raised(curracl->cap_lower, cap))
72073+ cap_raise(cap_drop, cap);
72074+ if (cap_raised(curracl->cap_invert_audit, cap))
72075+ cap_raise(cap_audit, cap);
72076+ }
72077+ }
72078+
72079+ if (!cap_raised(cap_drop, cap)) {
72080+ if (cap_raised(cap_audit, cap))
72081+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72082+ return 1;
72083+ }
72084+
72085+ /* only learn the capability use if the process has the capability in the
72086+ general case, the two uses in sys.c of gr_learn_cap are an exception
72087+ to this rule to ensure any role transition involves what the full-learned
72088+ policy believes in a privileged process
72089+ */
72090+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72091+ return 1;
72092+
72093+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72094+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72095+
72096+ return 0;
72097+}
72098+
72099+int
72100+gr_acl_is_capable(const int cap)
72101+{
72102+ return gr_task_acl_is_capable(current, current_cred(), cap);
72103+}
72104+
72105+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72106+{
72107+ struct acl_subject_label *curracl;
72108+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72109+
72110+ if (!gr_acl_is_enabled())
72111+ return 1;
72112+
72113+ curracl = task->acl;
72114+
72115+ cap_drop = curracl->cap_lower;
72116+ cap_mask = curracl->cap_mask;
72117+
72118+ while ((curracl = curracl->parent_subject)) {
72119+ /* if the cap isn't specified in the current computed mask but is specified in the
72120+ current level subject, and is lowered in the current level subject, then add
72121+ it to the set of dropped capabilities
72122+ otherwise, add the current level subject's mask to the current computed mask
72123+ */
72124+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72125+ cap_raise(cap_mask, cap);
72126+ if (cap_raised(curracl->cap_lower, cap))
72127+ cap_raise(cap_drop, cap);
72128+ }
72129+ }
72130+
72131+ if (!cap_raised(cap_drop, cap))
72132+ return 1;
72133+
72134+ return 0;
72135+}
72136+
72137+int
72138+gr_acl_is_capable_nolog(const int cap)
72139+{
72140+ return gr_task_acl_is_capable_nolog(current, cap);
72141+}
72142+
72143diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72144new file mode 100644
72145index 0000000..a43dd06
72146--- /dev/null
72147+++ b/grsecurity/gracl_compat.c
72148@@ -0,0 +1,269 @@
72149+#include <linux/kernel.h>
72150+#include <linux/gracl.h>
72151+#include <linux/compat.h>
72152+#include <linux/gracl_compat.h>
72153+
72154+#include <asm/uaccess.h>
72155+
72156+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72157+{
72158+ struct gr_arg_wrapper_compat uwrapcompat;
72159+
72160+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72161+ return -EFAULT;
72162+
72163+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72164+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72165+ return -EINVAL;
72166+
72167+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72168+ uwrap->version = uwrapcompat.version;
72169+ uwrap->size = sizeof(struct gr_arg);
72170+
72171+ return 0;
72172+}
72173+
72174+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72175+{
72176+ struct gr_arg_compat argcompat;
72177+
72178+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72179+ return -EFAULT;
72180+
72181+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72182+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72183+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72184+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72185+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72186+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72187+
72188+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72189+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72190+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72191+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72192+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72193+ arg->segv_device = argcompat.segv_device;
72194+ arg->segv_inode = argcompat.segv_inode;
72195+ arg->segv_uid = argcompat.segv_uid;
72196+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72197+ arg->mode = argcompat.mode;
72198+
72199+ return 0;
72200+}
72201+
72202+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72203+{
72204+ struct acl_object_label_compat objcompat;
72205+
72206+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72207+ return -EFAULT;
72208+
72209+ obj->filename = compat_ptr(objcompat.filename);
72210+ obj->inode = objcompat.inode;
72211+ obj->device = objcompat.device;
72212+ obj->mode = objcompat.mode;
72213+
72214+ obj->nested = compat_ptr(objcompat.nested);
72215+ obj->globbed = compat_ptr(objcompat.globbed);
72216+
72217+ obj->prev = compat_ptr(objcompat.prev);
72218+ obj->next = compat_ptr(objcompat.next);
72219+
72220+ return 0;
72221+}
72222+
72223+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72224+{
72225+ unsigned int i;
72226+ struct acl_subject_label_compat subjcompat;
72227+
72228+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72229+ return -EFAULT;
72230+
72231+ subj->filename = compat_ptr(subjcompat.filename);
72232+ subj->inode = subjcompat.inode;
72233+ subj->device = subjcompat.device;
72234+ subj->mode = subjcompat.mode;
72235+ subj->cap_mask = subjcompat.cap_mask;
72236+ subj->cap_lower = subjcompat.cap_lower;
72237+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72238+
72239+ for (i = 0; i < GR_NLIMITS; i++) {
72240+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72241+ subj->res[i].rlim_cur = RLIM_INFINITY;
72242+ else
72243+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72244+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72245+ subj->res[i].rlim_max = RLIM_INFINITY;
72246+ else
72247+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72248+ }
72249+ subj->resmask = subjcompat.resmask;
72250+
72251+ subj->user_trans_type = subjcompat.user_trans_type;
72252+ subj->group_trans_type = subjcompat.group_trans_type;
72253+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72254+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72255+ subj->user_trans_num = subjcompat.user_trans_num;
72256+ subj->group_trans_num = subjcompat.group_trans_num;
72257+
72258+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72259+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72260+ subj->ip_type = subjcompat.ip_type;
72261+ subj->ips = compat_ptr(subjcompat.ips);
72262+ subj->ip_num = subjcompat.ip_num;
72263+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72264+
72265+ subj->crashes = subjcompat.crashes;
72266+ subj->expires = subjcompat.expires;
72267+
72268+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72269+ subj->hash = compat_ptr(subjcompat.hash);
72270+ subj->prev = compat_ptr(subjcompat.prev);
72271+ subj->next = compat_ptr(subjcompat.next);
72272+
72273+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72274+ subj->obj_hash_size = subjcompat.obj_hash_size;
72275+ subj->pax_flags = subjcompat.pax_flags;
72276+
72277+ return 0;
72278+}
72279+
72280+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72281+{
72282+ struct acl_role_label_compat rolecompat;
72283+
72284+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72285+ return -EFAULT;
72286+
72287+ role->rolename = compat_ptr(rolecompat.rolename);
72288+ role->uidgid = rolecompat.uidgid;
72289+ role->roletype = rolecompat.roletype;
72290+
72291+ role->auth_attempts = rolecompat.auth_attempts;
72292+ role->expires = rolecompat.expires;
72293+
72294+ role->root_label = compat_ptr(rolecompat.root_label);
72295+ role->hash = compat_ptr(rolecompat.hash);
72296+
72297+ role->prev = compat_ptr(rolecompat.prev);
72298+ role->next = compat_ptr(rolecompat.next);
72299+
72300+ role->transitions = compat_ptr(rolecompat.transitions);
72301+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72302+ role->domain_children = compat_ptr(rolecompat.domain_children);
72303+ role->domain_child_num = rolecompat.domain_child_num;
72304+
72305+ role->umask = rolecompat.umask;
72306+
72307+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72308+ role->subj_hash_size = rolecompat.subj_hash_size;
72309+
72310+ return 0;
72311+}
72312+
72313+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72314+{
72315+ struct role_allowed_ip_compat roleip_compat;
72316+
72317+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72318+ return -EFAULT;
72319+
72320+ roleip->addr = roleip_compat.addr;
72321+ roleip->netmask = roleip_compat.netmask;
72322+
72323+ roleip->prev = compat_ptr(roleip_compat.prev);
72324+ roleip->next = compat_ptr(roleip_compat.next);
72325+
72326+ return 0;
72327+}
72328+
72329+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72330+{
72331+ struct role_transition_compat trans_compat;
72332+
72333+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72334+ return -EFAULT;
72335+
72336+ trans->rolename = compat_ptr(trans_compat.rolename);
72337+
72338+ trans->prev = compat_ptr(trans_compat.prev);
72339+ trans->next = compat_ptr(trans_compat.next);
72340+
72341+ return 0;
72342+
72343+}
72344+
72345+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72346+{
72347+ struct gr_hash_struct_compat hash_compat;
72348+
72349+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72350+ return -EFAULT;
72351+
72352+ hash->table = compat_ptr(hash_compat.table);
72353+ hash->nametable = compat_ptr(hash_compat.nametable);
72354+ hash->first = compat_ptr(hash_compat.first);
72355+
72356+ hash->table_size = hash_compat.table_size;
72357+ hash->used_size = hash_compat.used_size;
72358+
72359+ hash->type = hash_compat.type;
72360+
72361+ return 0;
72362+}
72363+
72364+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72365+{
72366+ compat_uptr_t ptrcompat;
72367+
72368+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72369+ return -EFAULT;
72370+
72371+ *(void **)ptr = compat_ptr(ptrcompat);
72372+
72373+ return 0;
72374+}
72375+
72376+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72377+{
72378+ struct acl_ip_label_compat ip_compat;
72379+
72380+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72381+ return -EFAULT;
72382+
72383+ ip->iface = compat_ptr(ip_compat.iface);
72384+ ip->addr = ip_compat.addr;
72385+ ip->netmask = ip_compat.netmask;
72386+ ip->low = ip_compat.low;
72387+ ip->high = ip_compat.high;
72388+ ip->mode = ip_compat.mode;
72389+ ip->type = ip_compat.type;
72390+
72391+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72392+
72393+ ip->prev = compat_ptr(ip_compat.prev);
72394+ ip->next = compat_ptr(ip_compat.next);
72395+
72396+ return 0;
72397+}
72398+
72399+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72400+{
72401+ struct sprole_pw_compat pw_compat;
72402+
72403+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72404+ return -EFAULT;
72405+
72406+ pw->rolename = compat_ptr(pw_compat.rolename);
72407+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72408+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72409+
72410+ return 0;
72411+}
72412+
72413+size_t get_gr_arg_wrapper_size_compat(void)
72414+{
72415+ return sizeof(struct gr_arg_wrapper_compat);
72416+}
72417+
72418diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72419new file mode 100644
72420index 0000000..8ee8e4f
72421--- /dev/null
72422+++ b/grsecurity/gracl_fs.c
72423@@ -0,0 +1,447 @@
72424+#include <linux/kernel.h>
72425+#include <linux/sched.h>
72426+#include <linux/types.h>
72427+#include <linux/fs.h>
72428+#include <linux/file.h>
72429+#include <linux/stat.h>
72430+#include <linux/grsecurity.h>
72431+#include <linux/grinternal.h>
72432+#include <linux/gracl.h>
72433+
72434+umode_t
72435+gr_acl_umask(void)
72436+{
72437+ if (unlikely(!gr_acl_is_enabled()))
72438+ return 0;
72439+
72440+ return current->role->umask;
72441+}
72442+
72443+__u32
72444+gr_acl_handle_hidden_file(const struct dentry * dentry,
72445+ const struct vfsmount * mnt)
72446+{
72447+ __u32 mode;
72448+
72449+ if (unlikely(d_is_negative(dentry)))
72450+ return GR_FIND;
72451+
72452+ mode =
72453+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72454+
72455+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72456+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72457+ return mode;
72458+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72459+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72460+ return 0;
72461+ } else if (unlikely(!(mode & GR_FIND)))
72462+ return 0;
72463+
72464+ return GR_FIND;
72465+}
72466+
72467+__u32
72468+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72469+ int acc_mode)
72470+{
72471+ __u32 reqmode = GR_FIND;
72472+ __u32 mode;
72473+
72474+ if (unlikely(d_is_negative(dentry)))
72475+ return reqmode;
72476+
72477+ if (acc_mode & MAY_APPEND)
72478+ reqmode |= GR_APPEND;
72479+ else if (acc_mode & MAY_WRITE)
72480+ reqmode |= GR_WRITE;
72481+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72482+ reqmode |= GR_READ;
72483+
72484+ mode =
72485+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72486+ mnt);
72487+
72488+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72489+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72490+ reqmode & GR_READ ? " reading" : "",
72491+ reqmode & GR_WRITE ? " writing" : reqmode &
72492+ GR_APPEND ? " appending" : "");
72493+ return reqmode;
72494+ } else
72495+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72496+ {
72497+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72498+ reqmode & GR_READ ? " reading" : "",
72499+ reqmode & GR_WRITE ? " writing" : reqmode &
72500+ GR_APPEND ? " appending" : "");
72501+ return 0;
72502+ } else if (unlikely((mode & reqmode) != reqmode))
72503+ return 0;
72504+
72505+ return reqmode;
72506+}
72507+
72508+__u32
72509+gr_acl_handle_creat(const struct dentry * dentry,
72510+ const struct dentry * p_dentry,
72511+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72512+ const int imode)
72513+{
72514+ __u32 reqmode = GR_WRITE | GR_CREATE;
72515+ __u32 mode;
72516+
72517+ if (acc_mode & MAY_APPEND)
72518+ reqmode |= GR_APPEND;
72519+ // if a directory was required or the directory already exists, then
72520+ // don't count this open as a read
72521+ if ((acc_mode & MAY_READ) &&
72522+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72523+ reqmode |= GR_READ;
72524+ if ((open_flags & O_CREAT) &&
72525+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72526+ reqmode |= GR_SETID;
72527+
72528+ mode =
72529+ gr_check_create(dentry, p_dentry, p_mnt,
72530+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72531+
72532+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72533+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72534+ reqmode & GR_READ ? " reading" : "",
72535+ reqmode & GR_WRITE ? " writing" : reqmode &
72536+ GR_APPEND ? " appending" : "");
72537+ return reqmode;
72538+ } else
72539+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72540+ {
72541+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72542+ reqmode & GR_READ ? " reading" : "",
72543+ reqmode & GR_WRITE ? " writing" : reqmode &
72544+ GR_APPEND ? " appending" : "");
72545+ return 0;
72546+ } else if (unlikely((mode & reqmode) != reqmode))
72547+ return 0;
72548+
72549+ return reqmode;
72550+}
72551+
72552+__u32
72553+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72554+ const int fmode)
72555+{
72556+ __u32 mode, reqmode = GR_FIND;
72557+
72558+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72559+ reqmode |= GR_EXEC;
72560+ if (fmode & S_IWOTH)
72561+ reqmode |= GR_WRITE;
72562+ if (fmode & S_IROTH)
72563+ reqmode |= GR_READ;
72564+
72565+ mode =
72566+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72567+ mnt);
72568+
72569+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72570+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72571+ reqmode & GR_READ ? " reading" : "",
72572+ reqmode & GR_WRITE ? " writing" : "",
72573+ reqmode & GR_EXEC ? " executing" : "");
72574+ return reqmode;
72575+ } else
72576+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72577+ {
72578+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72579+ reqmode & GR_READ ? " reading" : "",
72580+ reqmode & GR_WRITE ? " writing" : "",
72581+ reqmode & GR_EXEC ? " executing" : "");
72582+ return 0;
72583+ } else if (unlikely((mode & reqmode) != reqmode))
72584+ return 0;
72585+
72586+ return reqmode;
72587+}
72588+
72589+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72590+{
72591+ __u32 mode;
72592+
72593+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72594+
72595+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72596+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72597+ return mode;
72598+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72599+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72600+ return 0;
72601+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72602+ return 0;
72603+
72604+ return (reqmode);
72605+}
72606+
72607+__u32
72608+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72609+{
72610+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72611+}
72612+
72613+__u32
72614+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72615+{
72616+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72617+}
72618+
72619+__u32
72620+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72621+{
72622+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72623+}
72624+
72625+__u32
72626+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72627+{
72628+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72629+}
72630+
72631+__u32
72632+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72633+ umode_t *modeptr)
72634+{
72635+ umode_t mode;
72636+
72637+ *modeptr &= ~gr_acl_umask();
72638+ mode = *modeptr;
72639+
72640+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72641+ return 1;
72642+
72643+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72644+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72645+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72646+ GR_CHMOD_ACL_MSG);
72647+ } else {
72648+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72649+ }
72650+}
72651+
72652+__u32
72653+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72654+{
72655+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72656+}
72657+
72658+__u32
72659+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72660+{
72661+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72662+}
72663+
72664+__u32
72665+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
72666+{
72667+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
72668+}
72669+
72670+__u32
72671+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
72672+{
72673+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
72674+}
72675+
72676+__u32
72677+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
72678+{
72679+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
72680+ GR_UNIXCONNECT_ACL_MSG);
72681+}
72682+
72683+/* hardlinks require at minimum create and link permission,
72684+ any additional privilege required is based on the
72685+ privilege of the file being linked to
72686+*/
72687+__u32
72688+gr_acl_handle_link(const struct dentry * new_dentry,
72689+ const struct dentry * parent_dentry,
72690+ const struct vfsmount * parent_mnt,
72691+ const struct dentry * old_dentry,
72692+ const struct vfsmount * old_mnt, const struct filename *to)
72693+{
72694+ __u32 mode;
72695+ __u32 needmode = GR_CREATE | GR_LINK;
72696+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
72697+
72698+ mode =
72699+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
72700+ old_mnt);
72701+
72702+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
72703+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72704+ return mode;
72705+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72706+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72707+ return 0;
72708+ } else if (unlikely((mode & needmode) != needmode))
72709+ return 0;
72710+
72711+ return 1;
72712+}
72713+
72714+__u32
72715+gr_acl_handle_symlink(const struct dentry * new_dentry,
72716+ const struct dentry * parent_dentry,
72717+ const struct vfsmount * parent_mnt, const struct filename *from)
72718+{
72719+ __u32 needmode = GR_WRITE | GR_CREATE;
72720+ __u32 mode;
72721+
72722+ mode =
72723+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
72724+ GR_CREATE | GR_AUDIT_CREATE |
72725+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
72726+
72727+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
72728+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72729+ return mode;
72730+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72731+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72732+ return 0;
72733+ } else if (unlikely((mode & needmode) != needmode))
72734+ return 0;
72735+
72736+ return (GR_WRITE | GR_CREATE);
72737+}
72738+
72739+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
72740+{
72741+ __u32 mode;
72742+
72743+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72744+
72745+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72746+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
72747+ return mode;
72748+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72749+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
72750+ return 0;
72751+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72752+ return 0;
72753+
72754+ return (reqmode);
72755+}
72756+
72757+__u32
72758+gr_acl_handle_mknod(const struct dentry * new_dentry,
72759+ const struct dentry * parent_dentry,
72760+ const struct vfsmount * parent_mnt,
72761+ const int mode)
72762+{
72763+ __u32 reqmode = GR_WRITE | GR_CREATE;
72764+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72765+ reqmode |= GR_SETID;
72766+
72767+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72768+ reqmode, GR_MKNOD_ACL_MSG);
72769+}
72770+
72771+__u32
72772+gr_acl_handle_mkdir(const struct dentry *new_dentry,
72773+ const struct dentry *parent_dentry,
72774+ const struct vfsmount *parent_mnt)
72775+{
72776+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72777+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
72778+}
72779+
72780+#define RENAME_CHECK_SUCCESS(old, new) \
72781+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
72782+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
72783+
72784+int
72785+gr_acl_handle_rename(struct dentry *new_dentry,
72786+ struct dentry *parent_dentry,
72787+ const struct vfsmount *parent_mnt,
72788+ struct dentry *old_dentry,
72789+ struct inode *old_parent_inode,
72790+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
72791+{
72792+ __u32 comp1, comp2;
72793+ int error = 0;
72794+
72795+ if (unlikely(!gr_acl_is_enabled()))
72796+ return 0;
72797+
72798+ if (flags & RENAME_EXCHANGE) {
72799+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72800+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72801+ GR_SUPPRESS, parent_mnt);
72802+ comp2 =
72803+ gr_search_file(old_dentry,
72804+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72805+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72806+ } else if (d_is_negative(new_dentry)) {
72807+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
72808+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
72809+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
72810+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
72811+ GR_DELETE | GR_AUDIT_DELETE |
72812+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72813+ GR_SUPPRESS, old_mnt);
72814+ } else {
72815+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72816+ GR_CREATE | GR_DELETE |
72817+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
72818+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72819+ GR_SUPPRESS, parent_mnt);
72820+ comp2 =
72821+ gr_search_file(old_dentry,
72822+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72823+ GR_DELETE | GR_AUDIT_DELETE |
72824+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72825+ }
72826+
72827+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
72828+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
72829+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72830+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
72831+ && !(comp2 & GR_SUPPRESS)) {
72832+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72833+ error = -EACCES;
72834+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
72835+ error = -EACCES;
72836+
72837+ return error;
72838+}
72839+
72840+void
72841+gr_acl_handle_exit(void)
72842+{
72843+ u16 id;
72844+ char *rolename;
72845+
72846+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
72847+ !(current->role->roletype & GR_ROLE_PERSIST))) {
72848+ id = current->acl_role_id;
72849+ rolename = current->role->rolename;
72850+ gr_set_acls(1);
72851+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
72852+ }
72853+
72854+ gr_put_exec_file(current);
72855+ return;
72856+}
72857+
72858+int
72859+gr_acl_handle_procpidmem(const struct task_struct *task)
72860+{
72861+ if (unlikely(!gr_acl_is_enabled()))
72862+ return 0;
72863+
72864+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
72865+ !(current->acl->mode & GR_POVERRIDE) &&
72866+ !(current->role->roletype & GR_ROLE_GOD))
72867+ return -EACCES;
72868+
72869+ return 0;
72870+}
72871diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
72872new file mode 100644
72873index 0000000..f056b81
72874--- /dev/null
72875+++ b/grsecurity/gracl_ip.c
72876@@ -0,0 +1,386 @@
72877+#include <linux/kernel.h>
72878+#include <asm/uaccess.h>
72879+#include <asm/errno.h>
72880+#include <net/sock.h>
72881+#include <linux/file.h>
72882+#include <linux/fs.h>
72883+#include <linux/net.h>
72884+#include <linux/in.h>
72885+#include <linux/skbuff.h>
72886+#include <linux/ip.h>
72887+#include <linux/udp.h>
72888+#include <linux/types.h>
72889+#include <linux/sched.h>
72890+#include <linux/netdevice.h>
72891+#include <linux/inetdevice.h>
72892+#include <linux/gracl.h>
72893+#include <linux/grsecurity.h>
72894+#include <linux/grinternal.h>
72895+
72896+#define GR_BIND 0x01
72897+#define GR_CONNECT 0x02
72898+#define GR_INVERT 0x04
72899+#define GR_BINDOVERRIDE 0x08
72900+#define GR_CONNECTOVERRIDE 0x10
72901+#define GR_SOCK_FAMILY 0x20
72902+
72903+static const char * gr_protocols[IPPROTO_MAX] = {
72904+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
72905+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
72906+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
72907+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
72908+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
72909+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
72910+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
72911+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
72912+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
72913+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
72914+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
72915+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
72916+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
72917+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
72918+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
72919+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
72920+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
72921+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
72922+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
72923+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
72924+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
72925+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
72926+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
72927+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
72928+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
72929+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
72930+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
72931+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
72932+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
72933+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
72934+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
72935+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
72936+ };
72937+
72938+static const char * gr_socktypes[SOCK_MAX] = {
72939+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
72940+ "unknown:7", "unknown:8", "unknown:9", "packet"
72941+ };
72942+
72943+static const char * gr_sockfamilies[AF_MAX+1] = {
72944+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
72945+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
72946+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
72947+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
72948+ };
72949+
72950+const char *
72951+gr_proto_to_name(unsigned char proto)
72952+{
72953+ return gr_protocols[proto];
72954+}
72955+
72956+const char *
72957+gr_socktype_to_name(unsigned char type)
72958+{
72959+ return gr_socktypes[type];
72960+}
72961+
72962+const char *
72963+gr_sockfamily_to_name(unsigned char family)
72964+{
72965+ return gr_sockfamilies[family];
72966+}
72967+
72968+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
72969+
72970+int
72971+gr_search_socket(const int domain, const int type, const int protocol)
72972+{
72973+ struct acl_subject_label *curr;
72974+ const struct cred *cred = current_cred();
72975+
72976+ if (unlikely(!gr_acl_is_enabled()))
72977+ goto exit;
72978+
72979+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
72980+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
72981+ goto exit; // let the kernel handle it
72982+
72983+ curr = current->acl;
72984+
72985+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
72986+ /* the family is allowed, if this is PF_INET allow it only if
72987+ the extra sock type/protocol checks pass */
72988+ if (domain == PF_INET)
72989+ goto inet_check;
72990+ goto exit;
72991+ } else {
72992+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
72993+ __u32 fakeip = 0;
72994+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
72995+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
72996+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
72997+ gr_to_filename(current->exec_file->f_path.dentry,
72998+ current->exec_file->f_path.mnt) :
72999+ curr->filename, curr->filename,
73000+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73001+ &current->signal->saved_ip);
73002+ goto exit;
73003+ }
73004+ goto exit_fail;
73005+ }
73006+
73007+inet_check:
73008+ /* the rest of this checking is for IPv4 only */
73009+ if (!curr->ips)
73010+ goto exit;
73011+
73012+ if ((curr->ip_type & (1U << type)) &&
73013+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73014+ goto exit;
73015+
73016+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73017+ /* we don't place acls on raw sockets , and sometimes
73018+ dgram/ip sockets are opened for ioctl and not
73019+ bind/connect, so we'll fake a bind learn log */
73020+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73021+ __u32 fakeip = 0;
73022+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73023+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73024+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73025+ gr_to_filename(current->exec_file->f_path.dentry,
73026+ current->exec_file->f_path.mnt) :
73027+ curr->filename, curr->filename,
73028+ &fakeip, 0, type,
73029+ protocol, GR_CONNECT, &current->signal->saved_ip);
73030+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73031+ __u32 fakeip = 0;
73032+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73033+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73034+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73035+ gr_to_filename(current->exec_file->f_path.dentry,
73036+ current->exec_file->f_path.mnt) :
73037+ curr->filename, curr->filename,
73038+ &fakeip, 0, type,
73039+ protocol, GR_BIND, &current->signal->saved_ip);
73040+ }
73041+ /* we'll log when they use connect or bind */
73042+ goto exit;
73043+ }
73044+
73045+exit_fail:
73046+ if (domain == PF_INET)
73047+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73048+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73049+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73050+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73051+ gr_socktype_to_name(type), protocol);
73052+
73053+ return 0;
73054+exit:
73055+ return 1;
73056+}
73057+
73058+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73059+{
73060+ if ((ip->mode & mode) &&
73061+ (ip_port >= ip->low) &&
73062+ (ip_port <= ip->high) &&
73063+ ((ntohl(ip_addr) & our_netmask) ==
73064+ (ntohl(our_addr) & our_netmask))
73065+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73066+ && (ip->type & (1U << type))) {
73067+ if (ip->mode & GR_INVERT)
73068+ return 2; // specifically denied
73069+ else
73070+ return 1; // allowed
73071+ }
73072+
73073+ return 0; // not specifically allowed, may continue parsing
73074+}
73075+
73076+static int
73077+gr_search_connectbind(const int full_mode, struct sock *sk,
73078+ struct sockaddr_in *addr, const int type)
73079+{
73080+ char iface[IFNAMSIZ] = {0};
73081+ struct acl_subject_label *curr;
73082+ struct acl_ip_label *ip;
73083+ struct inet_sock *isk;
73084+ struct net_device *dev;
73085+ struct in_device *idev;
73086+ unsigned long i;
73087+ int ret;
73088+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73089+ __u32 ip_addr = 0;
73090+ __u32 our_addr;
73091+ __u32 our_netmask;
73092+ char *p;
73093+ __u16 ip_port = 0;
73094+ const struct cred *cred = current_cred();
73095+
73096+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73097+ return 0;
73098+
73099+ curr = current->acl;
73100+ isk = inet_sk(sk);
73101+
73102+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
73103+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73104+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73105+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73106+ struct sockaddr_in saddr;
73107+ int err;
73108+
73109+ saddr.sin_family = AF_INET;
73110+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73111+ saddr.sin_port = isk->inet_sport;
73112+
73113+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73114+ if (err)
73115+ return err;
73116+
73117+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73118+ if (err)
73119+ return err;
73120+ }
73121+
73122+ if (!curr->ips)
73123+ return 0;
73124+
73125+ ip_addr = addr->sin_addr.s_addr;
73126+ ip_port = ntohs(addr->sin_port);
73127+
73128+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73129+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73130+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73131+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73132+ gr_to_filename(current->exec_file->f_path.dentry,
73133+ current->exec_file->f_path.mnt) :
73134+ curr->filename, curr->filename,
73135+ &ip_addr, ip_port, type,
73136+ sk->sk_protocol, mode, &current->signal->saved_ip);
73137+ return 0;
73138+ }
73139+
73140+ for (i = 0; i < curr->ip_num; i++) {
73141+ ip = *(curr->ips + i);
73142+ if (ip->iface != NULL) {
73143+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73144+ p = strchr(iface, ':');
73145+ if (p != NULL)
73146+ *p = '\0';
73147+ dev = dev_get_by_name(sock_net(sk), iface);
73148+ if (dev == NULL)
73149+ continue;
73150+ idev = in_dev_get(dev);
73151+ if (idev == NULL) {
73152+ dev_put(dev);
73153+ continue;
73154+ }
73155+ rcu_read_lock();
73156+ for_ifa(idev) {
73157+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73158+ our_addr = ifa->ifa_address;
73159+ our_netmask = 0xffffffff;
73160+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73161+ if (ret == 1) {
73162+ rcu_read_unlock();
73163+ in_dev_put(idev);
73164+ dev_put(dev);
73165+ return 0;
73166+ } else if (ret == 2) {
73167+ rcu_read_unlock();
73168+ in_dev_put(idev);
73169+ dev_put(dev);
73170+ goto denied;
73171+ }
73172+ }
73173+ } endfor_ifa(idev);
73174+ rcu_read_unlock();
73175+ in_dev_put(idev);
73176+ dev_put(dev);
73177+ } else {
73178+ our_addr = ip->addr;
73179+ our_netmask = ip->netmask;
73180+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73181+ if (ret == 1)
73182+ return 0;
73183+ else if (ret == 2)
73184+ goto denied;
73185+ }
73186+ }
73187+
73188+denied:
73189+ if (mode == GR_BIND)
73190+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73191+ else if (mode == GR_CONNECT)
73192+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73193+
73194+ return -EACCES;
73195+}
73196+
73197+int
73198+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73199+{
73200+ /* always allow disconnection of dgram sockets with connect */
73201+ if (addr->sin_family == AF_UNSPEC)
73202+ return 0;
73203+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73204+}
73205+
73206+int
73207+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73208+{
73209+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73210+}
73211+
73212+int gr_search_listen(struct socket *sock)
73213+{
73214+ struct sock *sk = sock->sk;
73215+ struct sockaddr_in addr;
73216+
73217+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73218+ addr.sin_port = inet_sk(sk)->inet_sport;
73219+
73220+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73221+}
73222+
73223+int gr_search_accept(struct socket *sock)
73224+{
73225+ struct sock *sk = sock->sk;
73226+ struct sockaddr_in addr;
73227+
73228+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73229+ addr.sin_port = inet_sk(sk)->inet_sport;
73230+
73231+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73232+}
73233+
73234+int
73235+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73236+{
73237+ if (addr)
73238+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73239+ else {
73240+ struct sockaddr_in sin;
73241+ const struct inet_sock *inet = inet_sk(sk);
73242+
73243+ sin.sin_addr.s_addr = inet->inet_daddr;
73244+ sin.sin_port = inet->inet_dport;
73245+
73246+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73247+ }
73248+}
73249+
73250+int
73251+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73252+{
73253+ struct sockaddr_in sin;
73254+
73255+ if (unlikely(skb->len < sizeof (struct udphdr)))
73256+ return 0; // skip this packet
73257+
73258+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73259+ sin.sin_port = udp_hdr(skb)->source;
73260+
73261+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73262+}
73263diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73264new file mode 100644
73265index 0000000..25f54ef
73266--- /dev/null
73267+++ b/grsecurity/gracl_learn.c
73268@@ -0,0 +1,207 @@
73269+#include <linux/kernel.h>
73270+#include <linux/mm.h>
73271+#include <linux/sched.h>
73272+#include <linux/poll.h>
73273+#include <linux/string.h>
73274+#include <linux/file.h>
73275+#include <linux/types.h>
73276+#include <linux/vmalloc.h>
73277+#include <linux/grinternal.h>
73278+
73279+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73280+ size_t count, loff_t *ppos);
73281+extern int gr_acl_is_enabled(void);
73282+
73283+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73284+static int gr_learn_attached;
73285+
73286+/* use a 512k buffer */
73287+#define LEARN_BUFFER_SIZE (512 * 1024)
73288+
73289+static DEFINE_SPINLOCK(gr_learn_lock);
73290+static DEFINE_MUTEX(gr_learn_user_mutex);
73291+
73292+/* we need to maintain two buffers, so that the kernel context of grlearn
73293+ uses a semaphore around the userspace copying, and the other kernel contexts
73294+ use a spinlock when copying into the buffer, since they cannot sleep
73295+*/
73296+static char *learn_buffer;
73297+static char *learn_buffer_user;
73298+static int learn_buffer_len;
73299+static int learn_buffer_user_len;
73300+
73301+static ssize_t
73302+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73303+{
73304+ DECLARE_WAITQUEUE(wait, current);
73305+ ssize_t retval = 0;
73306+
73307+ add_wait_queue(&learn_wait, &wait);
73308+ set_current_state(TASK_INTERRUPTIBLE);
73309+ do {
73310+ mutex_lock(&gr_learn_user_mutex);
73311+ spin_lock(&gr_learn_lock);
73312+ if (learn_buffer_len)
73313+ break;
73314+ spin_unlock(&gr_learn_lock);
73315+ mutex_unlock(&gr_learn_user_mutex);
73316+ if (file->f_flags & O_NONBLOCK) {
73317+ retval = -EAGAIN;
73318+ goto out;
73319+ }
73320+ if (signal_pending(current)) {
73321+ retval = -ERESTARTSYS;
73322+ goto out;
73323+ }
73324+
73325+ schedule();
73326+ } while (1);
73327+
73328+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73329+ learn_buffer_user_len = learn_buffer_len;
73330+ retval = learn_buffer_len;
73331+ learn_buffer_len = 0;
73332+
73333+ spin_unlock(&gr_learn_lock);
73334+
73335+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73336+ retval = -EFAULT;
73337+
73338+ mutex_unlock(&gr_learn_user_mutex);
73339+out:
73340+ set_current_state(TASK_RUNNING);
73341+ remove_wait_queue(&learn_wait, &wait);
73342+ return retval;
73343+}
73344+
73345+static unsigned int
73346+poll_learn(struct file * file, poll_table * wait)
73347+{
73348+ poll_wait(file, &learn_wait, wait);
73349+
73350+ if (learn_buffer_len)
73351+ return (POLLIN | POLLRDNORM);
73352+
73353+ return 0;
73354+}
73355+
73356+void
73357+gr_clear_learn_entries(void)
73358+{
73359+ char *tmp;
73360+
73361+ mutex_lock(&gr_learn_user_mutex);
73362+ spin_lock(&gr_learn_lock);
73363+ tmp = learn_buffer;
73364+ learn_buffer = NULL;
73365+ spin_unlock(&gr_learn_lock);
73366+ if (tmp)
73367+ vfree(tmp);
73368+ if (learn_buffer_user != NULL) {
73369+ vfree(learn_buffer_user);
73370+ learn_buffer_user = NULL;
73371+ }
73372+ learn_buffer_len = 0;
73373+ mutex_unlock(&gr_learn_user_mutex);
73374+
73375+ return;
73376+}
73377+
73378+void
73379+gr_add_learn_entry(const char *fmt, ...)
73380+{
73381+ va_list args;
73382+ unsigned int len;
73383+
73384+ if (!gr_learn_attached)
73385+ return;
73386+
73387+ spin_lock(&gr_learn_lock);
73388+
73389+ /* leave a gap at the end so we know when it's "full" but don't have to
73390+ compute the exact length of the string we're trying to append
73391+ */
73392+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73393+ spin_unlock(&gr_learn_lock);
73394+ wake_up_interruptible(&learn_wait);
73395+ return;
73396+ }
73397+ if (learn_buffer == NULL) {
73398+ spin_unlock(&gr_learn_lock);
73399+ return;
73400+ }
73401+
73402+ va_start(args, fmt);
73403+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73404+ va_end(args);
73405+
73406+ learn_buffer_len += len + 1;
73407+
73408+ spin_unlock(&gr_learn_lock);
73409+ wake_up_interruptible(&learn_wait);
73410+
73411+ return;
73412+}
73413+
73414+static int
73415+open_learn(struct inode *inode, struct file *file)
73416+{
73417+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73418+ return -EBUSY;
73419+ if (file->f_mode & FMODE_READ) {
73420+ int retval = 0;
73421+ mutex_lock(&gr_learn_user_mutex);
73422+ if (learn_buffer == NULL)
73423+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73424+ if (learn_buffer_user == NULL)
73425+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73426+ if (learn_buffer == NULL) {
73427+ retval = -ENOMEM;
73428+ goto out_error;
73429+ }
73430+ if (learn_buffer_user == NULL) {
73431+ retval = -ENOMEM;
73432+ goto out_error;
73433+ }
73434+ learn_buffer_len = 0;
73435+ learn_buffer_user_len = 0;
73436+ gr_learn_attached = 1;
73437+out_error:
73438+ mutex_unlock(&gr_learn_user_mutex);
73439+ return retval;
73440+ }
73441+ return 0;
73442+}
73443+
73444+static int
73445+close_learn(struct inode *inode, struct file *file)
73446+{
73447+ if (file->f_mode & FMODE_READ) {
73448+ char *tmp = NULL;
73449+ mutex_lock(&gr_learn_user_mutex);
73450+ spin_lock(&gr_learn_lock);
73451+ tmp = learn_buffer;
73452+ learn_buffer = NULL;
73453+ spin_unlock(&gr_learn_lock);
73454+ if (tmp)
73455+ vfree(tmp);
73456+ if (learn_buffer_user != NULL) {
73457+ vfree(learn_buffer_user);
73458+ learn_buffer_user = NULL;
73459+ }
73460+ learn_buffer_len = 0;
73461+ learn_buffer_user_len = 0;
73462+ gr_learn_attached = 0;
73463+ mutex_unlock(&gr_learn_user_mutex);
73464+ }
73465+
73466+ return 0;
73467+}
73468+
73469+const struct file_operations grsec_fops = {
73470+ .read = read_learn,
73471+ .write = write_grsec_handler,
73472+ .open = open_learn,
73473+ .release = close_learn,
73474+ .poll = poll_learn,
73475+};
73476diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73477new file mode 100644
73478index 0000000..fd26052
73479--- /dev/null
73480+++ b/grsecurity/gracl_policy.c
73481@@ -0,0 +1,1781 @@
73482+#include <linux/kernel.h>
73483+#include <linux/module.h>
73484+#include <linux/sched.h>
73485+#include <linux/mm.h>
73486+#include <linux/file.h>
73487+#include <linux/fs.h>
73488+#include <linux/namei.h>
73489+#include <linux/mount.h>
73490+#include <linux/tty.h>
73491+#include <linux/proc_fs.h>
73492+#include <linux/lglock.h>
73493+#include <linux/slab.h>
73494+#include <linux/vmalloc.h>
73495+#include <linux/types.h>
73496+#include <linux/sysctl.h>
73497+#include <linux/netdevice.h>
73498+#include <linux/ptrace.h>
73499+#include <linux/gracl.h>
73500+#include <linux/gralloc.h>
73501+#include <linux/security.h>
73502+#include <linux/grinternal.h>
73503+#include <linux/pid_namespace.h>
73504+#include <linux/stop_machine.h>
73505+#include <linux/fdtable.h>
73506+#include <linux/percpu.h>
73507+#include <linux/lglock.h>
73508+#include <linux/hugetlb.h>
73509+#include <linux/posix-timers.h>
73510+#include "../fs/mount.h"
73511+
73512+#include <asm/uaccess.h>
73513+#include <asm/errno.h>
73514+#include <asm/mman.h>
73515+
73516+extern struct gr_policy_state *polstate;
73517+
73518+#define FOR_EACH_ROLE_START(role) \
73519+ role = polstate->role_list; \
73520+ while (role) {
73521+
73522+#define FOR_EACH_ROLE_END(role) \
73523+ role = role->prev; \
73524+ }
73525+
73526+struct path gr_real_root;
73527+
73528+extern struct gr_alloc_state *current_alloc_state;
73529+
73530+u16 acl_sp_role_value;
73531+
73532+static DEFINE_MUTEX(gr_dev_mutex);
73533+
73534+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73535+extern void gr_clear_learn_entries(void);
73536+
73537+struct gr_arg *gr_usermode __read_only;
73538+unsigned char *gr_system_salt __read_only;
73539+unsigned char *gr_system_sum __read_only;
73540+
73541+static unsigned int gr_auth_attempts = 0;
73542+static unsigned long gr_auth_expires = 0UL;
73543+
73544+struct acl_object_label *fakefs_obj_rw;
73545+struct acl_object_label *fakefs_obj_rwx;
73546+
73547+extern int gr_init_uidset(void);
73548+extern void gr_free_uidset(void);
73549+extern void gr_remove_uid(uid_t uid);
73550+extern int gr_find_uid(uid_t uid);
73551+
73552+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
73553+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73554+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73555+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73556+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73557+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73558+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73559+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73560+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73561+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73562+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73563+extern void assign_special_role(const char *rolename);
73564+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73565+extern int gr_rbac_disable(void *unused);
73566+extern void gr_enable_rbac_system(void);
73567+
73568+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73569+{
73570+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73571+ return -EFAULT;
73572+
73573+ return 0;
73574+}
73575+
73576+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73577+{
73578+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73579+ return -EFAULT;
73580+
73581+ return 0;
73582+}
73583+
73584+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73585+{
73586+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73587+ return -EFAULT;
73588+
73589+ return 0;
73590+}
73591+
73592+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73593+{
73594+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73595+ return -EFAULT;
73596+
73597+ return 0;
73598+}
73599+
73600+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73601+{
73602+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73603+ return -EFAULT;
73604+
73605+ return 0;
73606+}
73607+
73608+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73609+{
73610+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73611+ return -EFAULT;
73612+
73613+ return 0;
73614+}
73615+
73616+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73617+{
73618+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73619+ return -EFAULT;
73620+
73621+ return 0;
73622+}
73623+
73624+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73625+{
73626+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73627+ return -EFAULT;
73628+
73629+ return 0;
73630+}
73631+
73632+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73633+{
73634+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73635+ return -EFAULT;
73636+
73637+ return 0;
73638+}
73639+
73640+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73641+{
73642+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73643+ return -EFAULT;
73644+
73645+ if ((uwrap->version != GRSECURITY_VERSION) ||
73646+ (uwrap->size != sizeof(struct gr_arg)))
73647+ return -EINVAL;
73648+
73649+ return 0;
73650+}
73651+
73652+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73653+{
73654+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73655+ return -EFAULT;
73656+
73657+ return 0;
73658+}
73659+
73660+static size_t get_gr_arg_wrapper_size_normal(void)
73661+{
73662+ return sizeof(struct gr_arg_wrapper);
73663+}
73664+
73665+#ifdef CONFIG_COMPAT
73666+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
73667+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
73668+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
73669+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
73670+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
73671+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
73672+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
73673+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
73674+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
73675+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
73676+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
73677+extern size_t get_gr_arg_wrapper_size_compat(void);
73678+
73679+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
73680+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
73681+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
73682+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
73683+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
73684+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
73685+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
73686+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
73687+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
73688+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
73689+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
73690+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
73691+
73692+#else
73693+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
73694+#define copy_gr_arg copy_gr_arg_normal
73695+#define copy_gr_hash_struct copy_gr_hash_struct_normal
73696+#define copy_acl_object_label copy_acl_object_label_normal
73697+#define copy_acl_subject_label copy_acl_subject_label_normal
73698+#define copy_acl_role_label copy_acl_role_label_normal
73699+#define copy_acl_ip_label copy_acl_ip_label_normal
73700+#define copy_pointer_from_array copy_pointer_from_array_normal
73701+#define copy_sprole_pw copy_sprole_pw_normal
73702+#define copy_role_transition copy_role_transition_normal
73703+#define copy_role_allowed_ip copy_role_allowed_ip_normal
73704+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
73705+#endif
73706+
73707+static struct acl_subject_label *
73708+lookup_subject_map(const struct acl_subject_label *userp)
73709+{
73710+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
73711+ struct subject_map *match;
73712+
73713+ match = polstate->subj_map_set.s_hash[index];
73714+
73715+ while (match && match->user != userp)
73716+ match = match->next;
73717+
73718+ if (match != NULL)
73719+ return match->kernel;
73720+ else
73721+ return NULL;
73722+}
73723+
73724+static void
73725+insert_subj_map_entry(struct subject_map *subjmap)
73726+{
73727+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
73728+ struct subject_map **curr;
73729+
73730+ subjmap->prev = NULL;
73731+
73732+ curr = &polstate->subj_map_set.s_hash[index];
73733+ if (*curr != NULL)
73734+ (*curr)->prev = subjmap;
73735+
73736+ subjmap->next = *curr;
73737+ *curr = subjmap;
73738+
73739+ return;
73740+}
73741+
73742+static void
73743+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
73744+{
73745+ unsigned int index =
73746+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
73747+ struct acl_role_label **curr;
73748+ struct acl_role_label *tmp, *tmp2;
73749+
73750+ curr = &polstate->acl_role_set.r_hash[index];
73751+
73752+ /* simple case, slot is empty, just set it to our role */
73753+ if (*curr == NULL) {
73754+ *curr = role;
73755+ } else {
73756+ /* example:
73757+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
73758+ 2 -> 3
73759+ */
73760+ /* first check to see if we can already be reached via this slot */
73761+ tmp = *curr;
73762+ while (tmp && tmp != role)
73763+ tmp = tmp->next;
73764+ if (tmp == role) {
73765+ /* we don't need to add ourselves to this slot's chain */
73766+ return;
73767+ }
73768+ /* we need to add ourselves to this chain, two cases */
73769+ if (role->next == NULL) {
73770+ /* simple case, append the current chain to our role */
73771+ role->next = *curr;
73772+ *curr = role;
73773+ } else {
73774+ /* 1 -> 2 -> 3 -> 4
73775+ 2 -> 3 -> 4
73776+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
73777+ */
73778+ /* trickier case: walk our role's chain until we find
73779+ the role for the start of the current slot's chain */
73780+ tmp = role;
73781+ tmp2 = *curr;
73782+ while (tmp->next && tmp->next != tmp2)
73783+ tmp = tmp->next;
73784+ if (tmp->next == tmp2) {
73785+ /* from example above, we found 3, so just
73786+ replace this slot's chain with ours */
73787+ *curr = role;
73788+ } else {
73789+ /* we didn't find a subset of our role's chain
73790+ in the current slot's chain, so append their
73791+ chain to ours, and set us as the first role in
73792+ the slot's chain
73793+
73794+ we could fold this case with the case above,
73795+ but making it explicit for clarity
73796+ */
73797+ tmp->next = tmp2;
73798+ *curr = role;
73799+ }
73800+ }
73801+ }
73802+
73803+ return;
73804+}
73805+
73806+static void
73807+insert_acl_role_label(struct acl_role_label *role)
73808+{
73809+ int i;
73810+
73811+ if (polstate->role_list == NULL) {
73812+ polstate->role_list = role;
73813+ role->prev = NULL;
73814+ } else {
73815+ role->prev = polstate->role_list;
73816+ polstate->role_list = role;
73817+ }
73818+
73819+ /* used for hash chains */
73820+ role->next = NULL;
73821+
73822+ if (role->roletype & GR_ROLE_DOMAIN) {
73823+ for (i = 0; i < role->domain_child_num; i++)
73824+ __insert_acl_role_label(role, role->domain_children[i]);
73825+ } else
73826+ __insert_acl_role_label(role, role->uidgid);
73827+}
73828+
73829+static int
73830+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
73831+{
73832+ struct name_entry **curr, *nentry;
73833+ struct inodev_entry *ientry;
73834+ unsigned int len = strlen(name);
73835+ unsigned int key = full_name_hash(name, len);
73836+ unsigned int index = key % polstate->name_set.n_size;
73837+
73838+ curr = &polstate->name_set.n_hash[index];
73839+
73840+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
73841+ curr = &((*curr)->next);
73842+
73843+ if (*curr != NULL)
73844+ return 1;
73845+
73846+ nentry = acl_alloc(sizeof (struct name_entry));
73847+ if (nentry == NULL)
73848+ return 0;
73849+ ientry = acl_alloc(sizeof (struct inodev_entry));
73850+ if (ientry == NULL)
73851+ return 0;
73852+ ientry->nentry = nentry;
73853+
73854+ nentry->key = key;
73855+ nentry->name = name;
73856+ nentry->inode = inode;
73857+ nentry->device = device;
73858+ nentry->len = len;
73859+ nentry->deleted = deleted;
73860+
73861+ nentry->prev = NULL;
73862+ curr = &polstate->name_set.n_hash[index];
73863+ if (*curr != NULL)
73864+ (*curr)->prev = nentry;
73865+ nentry->next = *curr;
73866+ *curr = nentry;
73867+
73868+ /* insert us into the table searchable by inode/dev */
73869+ __insert_inodev_entry(polstate, ientry);
73870+
73871+ return 1;
73872+}
73873+
73874+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
73875+
73876+static void *
73877+create_table(__u32 * len, int elementsize)
73878+{
73879+ unsigned int table_sizes[] = {
73880+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
73881+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
73882+ 4194301, 8388593, 16777213, 33554393, 67108859
73883+ };
73884+ void *newtable = NULL;
73885+ unsigned int pwr = 0;
73886+
73887+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
73888+ table_sizes[pwr] <= *len)
73889+ pwr++;
73890+
73891+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
73892+ return newtable;
73893+
73894+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
73895+ newtable =
73896+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
73897+ else
73898+ newtable = vmalloc(table_sizes[pwr] * elementsize);
73899+
73900+ *len = table_sizes[pwr];
73901+
73902+ return newtable;
73903+}
73904+
73905+static int
73906+init_variables(const struct gr_arg *arg, bool reload)
73907+{
73908+ struct task_struct *reaper = init_pid_ns.child_reaper;
73909+ unsigned int stacksize;
73910+
73911+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
73912+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
73913+ polstate->name_set.n_size = arg->role_db.num_objects;
73914+ polstate->inodev_set.i_size = arg->role_db.num_objects;
73915+
73916+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
73917+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
73918+ return 1;
73919+
73920+ if (!reload) {
73921+ if (!gr_init_uidset())
73922+ return 1;
73923+ }
73924+
73925+ /* set up the stack that holds allocation info */
73926+
73927+ stacksize = arg->role_db.num_pointers + 5;
73928+
73929+ if (!acl_alloc_stack_init(stacksize))
73930+ return 1;
73931+
73932+ if (!reload) {
73933+ /* grab reference for the real root dentry and vfsmount */
73934+ get_fs_root(reaper->fs, &gr_real_root);
73935+
73936+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
73937+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
73938+#endif
73939+
73940+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
73941+ if (fakefs_obj_rw == NULL)
73942+ return 1;
73943+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
73944+
73945+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
73946+ if (fakefs_obj_rwx == NULL)
73947+ return 1;
73948+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
73949+ }
73950+
73951+ polstate->subj_map_set.s_hash =
73952+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
73953+ polstate->acl_role_set.r_hash =
73954+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
73955+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
73956+ polstate->inodev_set.i_hash =
73957+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
73958+
73959+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
73960+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
73961+ return 1;
73962+
73963+ memset(polstate->subj_map_set.s_hash, 0,
73964+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
73965+ memset(polstate->acl_role_set.r_hash, 0,
73966+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
73967+ memset(polstate->name_set.n_hash, 0,
73968+ sizeof (struct name_entry *) * polstate->name_set.n_size);
73969+ memset(polstate->inodev_set.i_hash, 0,
73970+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
73971+
73972+ return 0;
73973+}
73974+
73975+/* free information not needed after startup
73976+ currently contains user->kernel pointer mappings for subjects
73977+*/
73978+
73979+static void
73980+free_init_variables(void)
73981+{
73982+ __u32 i;
73983+
73984+ if (polstate->subj_map_set.s_hash) {
73985+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
73986+ if (polstate->subj_map_set.s_hash[i]) {
73987+ kfree(polstate->subj_map_set.s_hash[i]);
73988+ polstate->subj_map_set.s_hash[i] = NULL;
73989+ }
73990+ }
73991+
73992+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
73993+ PAGE_SIZE)
73994+ kfree(polstate->subj_map_set.s_hash);
73995+ else
73996+ vfree(polstate->subj_map_set.s_hash);
73997+ }
73998+
73999+ return;
74000+}
74001+
74002+static void
74003+free_variables(bool reload)
74004+{
74005+ struct acl_subject_label *s;
74006+ struct acl_role_label *r;
74007+ struct task_struct *task, *task2;
74008+ unsigned int x;
74009+
74010+ if (!reload) {
74011+ gr_clear_learn_entries();
74012+
74013+ read_lock(&tasklist_lock);
74014+ do_each_thread(task2, task) {
74015+ task->acl_sp_role = 0;
74016+ task->acl_role_id = 0;
74017+ task->inherited = 0;
74018+ task->acl = NULL;
74019+ task->role = NULL;
74020+ } while_each_thread(task2, task);
74021+ read_unlock(&tasklist_lock);
74022+
74023+ kfree(fakefs_obj_rw);
74024+ fakefs_obj_rw = NULL;
74025+ kfree(fakefs_obj_rwx);
74026+ fakefs_obj_rwx = NULL;
74027+
74028+ /* release the reference to the real root dentry and vfsmount */
74029+ path_put(&gr_real_root);
74030+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74031+ }
74032+
74033+ /* free all object hash tables */
74034+
74035+ FOR_EACH_ROLE_START(r)
74036+ if (r->subj_hash == NULL)
74037+ goto next_role;
74038+ FOR_EACH_SUBJECT_START(r, s, x)
74039+ if (s->obj_hash == NULL)
74040+ break;
74041+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74042+ kfree(s->obj_hash);
74043+ else
74044+ vfree(s->obj_hash);
74045+ FOR_EACH_SUBJECT_END(s, x)
74046+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74047+ if (s->obj_hash == NULL)
74048+ break;
74049+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74050+ kfree(s->obj_hash);
74051+ else
74052+ vfree(s->obj_hash);
74053+ FOR_EACH_NESTED_SUBJECT_END(s)
74054+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74055+ kfree(r->subj_hash);
74056+ else
74057+ vfree(r->subj_hash);
74058+ r->subj_hash = NULL;
74059+next_role:
74060+ FOR_EACH_ROLE_END(r)
74061+
74062+ acl_free_all();
74063+
74064+ if (polstate->acl_role_set.r_hash) {
74065+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74066+ PAGE_SIZE)
74067+ kfree(polstate->acl_role_set.r_hash);
74068+ else
74069+ vfree(polstate->acl_role_set.r_hash);
74070+ }
74071+ if (polstate->name_set.n_hash) {
74072+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74073+ PAGE_SIZE)
74074+ kfree(polstate->name_set.n_hash);
74075+ else
74076+ vfree(polstate->name_set.n_hash);
74077+ }
74078+
74079+ if (polstate->inodev_set.i_hash) {
74080+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74081+ PAGE_SIZE)
74082+ kfree(polstate->inodev_set.i_hash);
74083+ else
74084+ vfree(polstate->inodev_set.i_hash);
74085+ }
74086+
74087+ if (!reload)
74088+ gr_free_uidset();
74089+
74090+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74091+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74092+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74093+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74094+
74095+ polstate->default_role = NULL;
74096+ polstate->kernel_role = NULL;
74097+ polstate->role_list = NULL;
74098+
74099+ return;
74100+}
74101+
74102+static struct acl_subject_label *
74103+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74104+
74105+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74106+{
74107+ unsigned int len = strnlen_user(*name, maxlen);
74108+ char *tmp;
74109+
74110+ if (!len || len >= maxlen)
74111+ return -EINVAL;
74112+
74113+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74114+ return -ENOMEM;
74115+
74116+ if (copy_from_user(tmp, *name, len))
74117+ return -EFAULT;
74118+
74119+ tmp[len-1] = '\0';
74120+ *name = tmp;
74121+
74122+ return 0;
74123+}
74124+
74125+static int
74126+copy_user_glob(struct acl_object_label *obj)
74127+{
74128+ struct acl_object_label *g_tmp, **guser;
74129+ int error;
74130+
74131+ if (obj->globbed == NULL)
74132+ return 0;
74133+
74134+ guser = &obj->globbed;
74135+ while (*guser) {
74136+ g_tmp = (struct acl_object_label *)
74137+ acl_alloc(sizeof (struct acl_object_label));
74138+ if (g_tmp == NULL)
74139+ return -ENOMEM;
74140+
74141+ if (copy_acl_object_label(g_tmp, *guser))
74142+ return -EFAULT;
74143+
74144+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74145+ if (error)
74146+ return error;
74147+
74148+ *guser = g_tmp;
74149+ guser = &(g_tmp->next);
74150+ }
74151+
74152+ return 0;
74153+}
74154+
74155+static int
74156+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74157+ struct acl_role_label *role)
74158+{
74159+ struct acl_object_label *o_tmp;
74160+ int ret;
74161+
74162+ while (userp) {
74163+ if ((o_tmp = (struct acl_object_label *)
74164+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74165+ return -ENOMEM;
74166+
74167+ if (copy_acl_object_label(o_tmp, userp))
74168+ return -EFAULT;
74169+
74170+ userp = o_tmp->prev;
74171+
74172+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74173+ if (ret)
74174+ return ret;
74175+
74176+ insert_acl_obj_label(o_tmp, subj);
74177+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74178+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74179+ return -ENOMEM;
74180+
74181+ ret = copy_user_glob(o_tmp);
74182+ if (ret)
74183+ return ret;
74184+
74185+ if (o_tmp->nested) {
74186+ int already_copied;
74187+
74188+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74189+ if (IS_ERR(o_tmp->nested))
74190+ return PTR_ERR(o_tmp->nested);
74191+
74192+ /* insert into nested subject list if we haven't copied this one yet
74193+ to prevent duplicate entries */
74194+ if (!already_copied) {
74195+ o_tmp->nested->next = role->hash->first;
74196+ role->hash->first = o_tmp->nested;
74197+ }
74198+ }
74199+ }
74200+
74201+ return 0;
74202+}
74203+
74204+static __u32
74205+count_user_subjs(struct acl_subject_label *userp)
74206+{
74207+ struct acl_subject_label s_tmp;
74208+ __u32 num = 0;
74209+
74210+ while (userp) {
74211+ if (copy_acl_subject_label(&s_tmp, userp))
74212+ break;
74213+
74214+ userp = s_tmp.prev;
74215+ }
74216+
74217+ return num;
74218+}
74219+
74220+static int
74221+copy_user_allowedips(struct acl_role_label *rolep)
74222+{
74223+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74224+
74225+ ruserip = rolep->allowed_ips;
74226+
74227+ while (ruserip) {
74228+ rlast = rtmp;
74229+
74230+ if ((rtmp = (struct role_allowed_ip *)
74231+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74232+ return -ENOMEM;
74233+
74234+ if (copy_role_allowed_ip(rtmp, ruserip))
74235+ return -EFAULT;
74236+
74237+ ruserip = rtmp->prev;
74238+
74239+ if (!rlast) {
74240+ rtmp->prev = NULL;
74241+ rolep->allowed_ips = rtmp;
74242+ } else {
74243+ rlast->next = rtmp;
74244+ rtmp->prev = rlast;
74245+ }
74246+
74247+ if (!ruserip)
74248+ rtmp->next = NULL;
74249+ }
74250+
74251+ return 0;
74252+}
74253+
74254+static int
74255+copy_user_transitions(struct acl_role_label *rolep)
74256+{
74257+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74258+ int error;
74259+
74260+ rusertp = rolep->transitions;
74261+
74262+ while (rusertp) {
74263+ rlast = rtmp;
74264+
74265+ if ((rtmp = (struct role_transition *)
74266+ acl_alloc(sizeof (struct role_transition))) == NULL)
74267+ return -ENOMEM;
74268+
74269+ if (copy_role_transition(rtmp, rusertp))
74270+ return -EFAULT;
74271+
74272+ rusertp = rtmp->prev;
74273+
74274+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74275+ if (error)
74276+ return error;
74277+
74278+ if (!rlast) {
74279+ rtmp->prev = NULL;
74280+ rolep->transitions = rtmp;
74281+ } else {
74282+ rlast->next = rtmp;
74283+ rtmp->prev = rlast;
74284+ }
74285+
74286+ if (!rusertp)
74287+ rtmp->next = NULL;
74288+ }
74289+
74290+ return 0;
74291+}
74292+
74293+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74294+{
74295+ struct acl_object_label o_tmp;
74296+ __u32 num = 0;
74297+
74298+ while (userp) {
74299+ if (copy_acl_object_label(&o_tmp, userp))
74300+ break;
74301+
74302+ userp = o_tmp.prev;
74303+ num++;
74304+ }
74305+
74306+ return num;
74307+}
74308+
74309+static struct acl_subject_label *
74310+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74311+{
74312+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74313+ __u32 num_objs;
74314+ struct acl_ip_label **i_tmp, *i_utmp2;
74315+ struct gr_hash_struct ghash;
74316+ struct subject_map *subjmap;
74317+ unsigned int i_num;
74318+ int err;
74319+
74320+ if (already_copied != NULL)
74321+ *already_copied = 0;
74322+
74323+ s_tmp = lookup_subject_map(userp);
74324+
74325+ /* we've already copied this subject into the kernel, just return
74326+ the reference to it, and don't copy it over again
74327+ */
74328+ if (s_tmp) {
74329+ if (already_copied != NULL)
74330+ *already_copied = 1;
74331+ return(s_tmp);
74332+ }
74333+
74334+ if ((s_tmp = (struct acl_subject_label *)
74335+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74336+ return ERR_PTR(-ENOMEM);
74337+
74338+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74339+ if (subjmap == NULL)
74340+ return ERR_PTR(-ENOMEM);
74341+
74342+ subjmap->user = userp;
74343+ subjmap->kernel = s_tmp;
74344+ insert_subj_map_entry(subjmap);
74345+
74346+ if (copy_acl_subject_label(s_tmp, userp))
74347+ return ERR_PTR(-EFAULT);
74348+
74349+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74350+ if (err)
74351+ return ERR_PTR(err);
74352+
74353+ if (!strcmp(s_tmp->filename, "/"))
74354+ role->root_label = s_tmp;
74355+
74356+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74357+ return ERR_PTR(-EFAULT);
74358+
74359+ /* copy user and group transition tables */
74360+
74361+ if (s_tmp->user_trans_num) {
74362+ uid_t *uidlist;
74363+
74364+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74365+ if (uidlist == NULL)
74366+ return ERR_PTR(-ENOMEM);
74367+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74368+ return ERR_PTR(-EFAULT);
74369+
74370+ s_tmp->user_transitions = uidlist;
74371+ }
74372+
74373+ if (s_tmp->group_trans_num) {
74374+ gid_t *gidlist;
74375+
74376+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74377+ if (gidlist == NULL)
74378+ return ERR_PTR(-ENOMEM);
74379+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74380+ return ERR_PTR(-EFAULT);
74381+
74382+ s_tmp->group_transitions = gidlist;
74383+ }
74384+
74385+ /* set up object hash table */
74386+ num_objs = count_user_objs(ghash.first);
74387+
74388+ s_tmp->obj_hash_size = num_objs;
74389+ s_tmp->obj_hash =
74390+ (struct acl_object_label **)
74391+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74392+
74393+ if (!s_tmp->obj_hash)
74394+ return ERR_PTR(-ENOMEM);
74395+
74396+ memset(s_tmp->obj_hash, 0,
74397+ s_tmp->obj_hash_size *
74398+ sizeof (struct acl_object_label *));
74399+
74400+ /* add in objects */
74401+ err = copy_user_objs(ghash.first, s_tmp, role);
74402+
74403+ if (err)
74404+ return ERR_PTR(err);
74405+
74406+ /* set pointer for parent subject */
74407+ if (s_tmp->parent_subject) {
74408+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74409+
74410+ if (IS_ERR(s_tmp2))
74411+ return s_tmp2;
74412+
74413+ s_tmp->parent_subject = s_tmp2;
74414+ }
74415+
74416+ /* add in ip acls */
74417+
74418+ if (!s_tmp->ip_num) {
74419+ s_tmp->ips = NULL;
74420+ goto insert;
74421+ }
74422+
74423+ i_tmp =
74424+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74425+ sizeof (struct acl_ip_label *));
74426+
74427+ if (!i_tmp)
74428+ return ERR_PTR(-ENOMEM);
74429+
74430+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74431+ *(i_tmp + i_num) =
74432+ (struct acl_ip_label *)
74433+ acl_alloc(sizeof (struct acl_ip_label));
74434+ if (!*(i_tmp + i_num))
74435+ return ERR_PTR(-ENOMEM);
74436+
74437+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74438+ return ERR_PTR(-EFAULT);
74439+
74440+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74441+ return ERR_PTR(-EFAULT);
74442+
74443+ if ((*(i_tmp + i_num))->iface == NULL)
74444+ continue;
74445+
74446+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74447+ if (err)
74448+ return ERR_PTR(err);
74449+ }
74450+
74451+ s_tmp->ips = i_tmp;
74452+
74453+insert:
74454+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74455+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74456+ return ERR_PTR(-ENOMEM);
74457+
74458+ return s_tmp;
74459+}
74460+
74461+static int
74462+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74463+{
74464+ struct acl_subject_label s_pre;
74465+ struct acl_subject_label * ret;
74466+ int err;
74467+
74468+ while (userp) {
74469+ if (copy_acl_subject_label(&s_pre, userp))
74470+ return -EFAULT;
74471+
74472+ ret = do_copy_user_subj(userp, role, NULL);
74473+
74474+ err = PTR_ERR(ret);
74475+ if (IS_ERR(ret))
74476+ return err;
74477+
74478+ insert_acl_subj_label(ret, role);
74479+
74480+ userp = s_pre.prev;
74481+ }
74482+
74483+ return 0;
74484+}
74485+
74486+static int
74487+copy_user_acl(struct gr_arg *arg)
74488+{
74489+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74490+ struct acl_subject_label *subj_list;
74491+ struct sprole_pw *sptmp;
74492+ struct gr_hash_struct *ghash;
74493+ uid_t *domainlist;
74494+ unsigned int r_num;
74495+ int err = 0;
74496+ __u16 i;
74497+ __u32 num_subjs;
74498+
74499+ /* we need a default and kernel role */
74500+ if (arg->role_db.num_roles < 2)
74501+ return -EINVAL;
74502+
74503+ /* copy special role authentication info from userspace */
74504+
74505+ polstate->num_sprole_pws = arg->num_sprole_pws;
74506+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74507+
74508+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74509+ return -ENOMEM;
74510+
74511+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74512+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74513+ if (!sptmp)
74514+ return -ENOMEM;
74515+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74516+ return -EFAULT;
74517+
74518+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74519+ if (err)
74520+ return err;
74521+
74522+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74523+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74524+#endif
74525+
74526+ polstate->acl_special_roles[i] = sptmp;
74527+ }
74528+
74529+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74530+
74531+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74532+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74533+
74534+ if (!r_tmp)
74535+ return -ENOMEM;
74536+
74537+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74538+ return -EFAULT;
74539+
74540+ if (copy_acl_role_label(r_tmp, r_utmp2))
74541+ return -EFAULT;
74542+
74543+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74544+ if (err)
74545+ return err;
74546+
74547+ if (!strcmp(r_tmp->rolename, "default")
74548+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74549+ polstate->default_role = r_tmp;
74550+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74551+ polstate->kernel_role = r_tmp;
74552+ }
74553+
74554+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74555+ return -ENOMEM;
74556+
74557+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74558+ return -EFAULT;
74559+
74560+ r_tmp->hash = ghash;
74561+
74562+ num_subjs = count_user_subjs(r_tmp->hash->first);
74563+
74564+ r_tmp->subj_hash_size = num_subjs;
74565+ r_tmp->subj_hash =
74566+ (struct acl_subject_label **)
74567+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74568+
74569+ if (!r_tmp->subj_hash)
74570+ return -ENOMEM;
74571+
74572+ err = copy_user_allowedips(r_tmp);
74573+ if (err)
74574+ return err;
74575+
74576+ /* copy domain info */
74577+ if (r_tmp->domain_children != NULL) {
74578+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74579+ if (domainlist == NULL)
74580+ return -ENOMEM;
74581+
74582+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74583+ return -EFAULT;
74584+
74585+ r_tmp->domain_children = domainlist;
74586+ }
74587+
74588+ err = copy_user_transitions(r_tmp);
74589+ if (err)
74590+ return err;
74591+
74592+ memset(r_tmp->subj_hash, 0,
74593+ r_tmp->subj_hash_size *
74594+ sizeof (struct acl_subject_label *));
74595+
74596+ /* acquire the list of subjects, then NULL out
74597+ the list prior to parsing the subjects for this role,
74598+ as during this parsing the list is replaced with a list
74599+ of *nested* subjects for the role
74600+ */
74601+ subj_list = r_tmp->hash->first;
74602+
74603+ /* set nested subject list to null */
74604+ r_tmp->hash->first = NULL;
74605+
74606+ err = copy_user_subjs(subj_list, r_tmp);
74607+
74608+ if (err)
74609+ return err;
74610+
74611+ insert_acl_role_label(r_tmp);
74612+ }
74613+
74614+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74615+ return -EINVAL;
74616+
74617+ return err;
74618+}
74619+
74620+static int gracl_reload_apply_policies(void *reload)
74621+{
74622+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74623+ struct task_struct *task, *task2;
74624+ struct acl_role_label *role, *rtmp;
74625+ struct acl_subject_label *subj;
74626+ const struct cred *cred;
74627+ int role_applied;
74628+ int ret = 0;
74629+
74630+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74631+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74632+
74633+ /* first make sure we'll be able to apply the new policy cleanly */
74634+ do_each_thread(task2, task) {
74635+ if (task->exec_file == NULL)
74636+ continue;
74637+ role_applied = 0;
74638+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74639+ /* preserve special roles */
74640+ FOR_EACH_ROLE_START(role)
74641+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74642+ rtmp = task->role;
74643+ task->role = role;
74644+ role_applied = 1;
74645+ break;
74646+ }
74647+ FOR_EACH_ROLE_END(role)
74648+ }
74649+ if (!role_applied) {
74650+ cred = __task_cred(task);
74651+ rtmp = task->role;
74652+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74653+ }
74654+ /* this handles non-nested inherited subjects, nested subjects will still
74655+ be dropped currently */
74656+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74657+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
74658+ /* change the role back so that we've made no modifications to the policy */
74659+ task->role = rtmp;
74660+
74661+ if (subj == NULL || task->tmpacl == NULL) {
74662+ ret = -EINVAL;
74663+ goto out;
74664+ }
74665+ } while_each_thread(task2, task);
74666+
74667+ /* now actually apply the policy */
74668+
74669+ do_each_thread(task2, task) {
74670+ if (task->exec_file) {
74671+ role_applied = 0;
74672+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74673+ /* preserve special roles */
74674+ FOR_EACH_ROLE_START(role)
74675+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74676+ task->role = role;
74677+ role_applied = 1;
74678+ break;
74679+ }
74680+ FOR_EACH_ROLE_END(role)
74681+ }
74682+ if (!role_applied) {
74683+ cred = __task_cred(task);
74684+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74685+ }
74686+ /* this handles non-nested inherited subjects, nested subjects will still
74687+ be dropped currently */
74688+ if (!reload_state->oldmode && task->inherited)
74689+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74690+ else {
74691+ /* looked up and tagged to the task previously */
74692+ subj = task->tmpacl;
74693+ }
74694+ /* subj will be non-null */
74695+ __gr_apply_subject_to_task(polstate, task, subj);
74696+ if (reload_state->oldmode) {
74697+ task->acl_role_id = 0;
74698+ task->acl_sp_role = 0;
74699+ task->inherited = 0;
74700+ }
74701+ } else {
74702+ // it's a kernel process
74703+ task->role = polstate->kernel_role;
74704+ task->acl = polstate->kernel_role->root_label;
74705+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
74706+ task->acl->mode &= ~GR_PROCFIND;
74707+#endif
74708+ }
74709+ } while_each_thread(task2, task);
74710+
74711+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
74712+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
74713+
74714+out:
74715+
74716+ return ret;
74717+}
74718+
74719+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
74720+{
74721+ struct gr_reload_state new_reload_state = { };
74722+ int err;
74723+
74724+ new_reload_state.oldpolicy_ptr = polstate;
74725+ new_reload_state.oldalloc_ptr = current_alloc_state;
74726+ new_reload_state.oldmode = oldmode;
74727+
74728+ current_alloc_state = &new_reload_state.newalloc;
74729+ polstate = &new_reload_state.newpolicy;
74730+
74731+ /* everything relevant is now saved off, copy in the new policy */
74732+ if (init_variables(args, true)) {
74733+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74734+ err = -ENOMEM;
74735+ goto error;
74736+ }
74737+
74738+ err = copy_user_acl(args);
74739+ free_init_variables();
74740+ if (err)
74741+ goto error;
74742+ /* the new policy is copied in, with the old policy available via saved_state
74743+ first go through applying roles, making sure to preserve special roles
74744+ then apply new subjects, making sure to preserve inherited and nested subjects,
74745+ though currently only inherited subjects will be preserved
74746+ */
74747+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
74748+ if (err)
74749+ goto error;
74750+
74751+ /* we've now applied the new policy, so restore the old policy state to free it */
74752+ polstate = &new_reload_state.oldpolicy;
74753+ current_alloc_state = &new_reload_state.oldalloc;
74754+ free_variables(true);
74755+
74756+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
74757+ to running_polstate/current_alloc_state inside stop_machine
74758+ */
74759+ err = 0;
74760+ goto out;
74761+error:
74762+ /* on error of loading the new policy, we'll just keep the previous
74763+ policy set around
74764+ */
74765+ free_variables(true);
74766+
74767+ /* doesn't affect runtime, but maintains consistent state */
74768+out:
74769+ polstate = new_reload_state.oldpolicy_ptr;
74770+ current_alloc_state = new_reload_state.oldalloc_ptr;
74771+
74772+ return err;
74773+}
74774+
74775+static int
74776+gracl_init(struct gr_arg *args)
74777+{
74778+ int error = 0;
74779+
74780+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
74781+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
74782+
74783+ if (init_variables(args, false)) {
74784+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74785+ error = -ENOMEM;
74786+ goto out;
74787+ }
74788+
74789+ error = copy_user_acl(args);
74790+ free_init_variables();
74791+ if (error)
74792+ goto out;
74793+
74794+ error = gr_set_acls(0);
74795+ if (error)
74796+ goto out;
74797+
74798+ gr_enable_rbac_system();
74799+
74800+ return 0;
74801+
74802+out:
74803+ free_variables(false);
74804+ return error;
74805+}
74806+
74807+static int
74808+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
74809+ unsigned char **sum)
74810+{
74811+ struct acl_role_label *r;
74812+ struct role_allowed_ip *ipp;
74813+ struct role_transition *trans;
74814+ unsigned int i;
74815+ int found = 0;
74816+ u32 curr_ip = current->signal->curr_ip;
74817+
74818+ current->signal->saved_ip = curr_ip;
74819+
74820+ /* check transition table */
74821+
74822+ for (trans = current->role->transitions; trans; trans = trans->next) {
74823+ if (!strcmp(rolename, trans->rolename)) {
74824+ found = 1;
74825+ break;
74826+ }
74827+ }
74828+
74829+ if (!found)
74830+ return 0;
74831+
74832+ /* handle special roles that do not require authentication
74833+ and check ip */
74834+
74835+ FOR_EACH_ROLE_START(r)
74836+ if (!strcmp(rolename, r->rolename) &&
74837+ (r->roletype & GR_ROLE_SPECIAL)) {
74838+ found = 0;
74839+ if (r->allowed_ips != NULL) {
74840+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
74841+ if ((ntohl(curr_ip) & ipp->netmask) ==
74842+ (ntohl(ipp->addr) & ipp->netmask))
74843+ found = 1;
74844+ }
74845+ } else
74846+ found = 2;
74847+ if (!found)
74848+ return 0;
74849+
74850+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
74851+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
74852+ *salt = NULL;
74853+ *sum = NULL;
74854+ return 1;
74855+ }
74856+ }
74857+ FOR_EACH_ROLE_END(r)
74858+
74859+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74860+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
74861+ *salt = polstate->acl_special_roles[i]->salt;
74862+ *sum = polstate->acl_special_roles[i]->sum;
74863+ return 1;
74864+ }
74865+ }
74866+
74867+ return 0;
74868+}
74869+
74870+int gr_check_secure_terminal(struct task_struct *task)
74871+{
74872+ struct task_struct *p, *p2, *p3;
74873+ struct files_struct *files;
74874+ struct fdtable *fdt;
74875+ struct file *our_file = NULL, *file;
74876+ int i;
74877+
74878+ if (task->signal->tty == NULL)
74879+ return 1;
74880+
74881+ files = get_files_struct(task);
74882+ if (files != NULL) {
74883+ rcu_read_lock();
74884+ fdt = files_fdtable(files);
74885+ for (i=0; i < fdt->max_fds; i++) {
74886+ file = fcheck_files(files, i);
74887+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
74888+ get_file(file);
74889+ our_file = file;
74890+ }
74891+ }
74892+ rcu_read_unlock();
74893+ put_files_struct(files);
74894+ }
74895+
74896+ if (our_file == NULL)
74897+ return 1;
74898+
74899+ read_lock(&tasklist_lock);
74900+ do_each_thread(p2, p) {
74901+ files = get_files_struct(p);
74902+ if (files == NULL ||
74903+ (p->signal && p->signal->tty == task->signal->tty)) {
74904+ if (files != NULL)
74905+ put_files_struct(files);
74906+ continue;
74907+ }
74908+ rcu_read_lock();
74909+ fdt = files_fdtable(files);
74910+ for (i=0; i < fdt->max_fds; i++) {
74911+ file = fcheck_files(files, i);
74912+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
74913+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
74914+ p3 = task;
74915+ while (task_pid_nr(p3) > 0) {
74916+ if (p3 == p)
74917+ break;
74918+ p3 = p3->real_parent;
74919+ }
74920+ if (p3 == p)
74921+ break;
74922+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
74923+ gr_handle_alertkill(p);
74924+ rcu_read_unlock();
74925+ put_files_struct(files);
74926+ read_unlock(&tasklist_lock);
74927+ fput(our_file);
74928+ return 0;
74929+ }
74930+ }
74931+ rcu_read_unlock();
74932+ put_files_struct(files);
74933+ } while_each_thread(p2, p);
74934+ read_unlock(&tasklist_lock);
74935+
74936+ fput(our_file);
74937+ return 1;
74938+}
74939+
74940+ssize_t
74941+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
74942+{
74943+ struct gr_arg_wrapper uwrap;
74944+ unsigned char *sprole_salt = NULL;
74945+ unsigned char *sprole_sum = NULL;
74946+ int error = 0;
74947+ int error2 = 0;
74948+ size_t req_count = 0;
74949+ unsigned char oldmode = 0;
74950+
74951+ mutex_lock(&gr_dev_mutex);
74952+
74953+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
74954+ error = -EPERM;
74955+ goto out;
74956+ }
74957+
74958+#ifdef CONFIG_COMPAT
74959+ pax_open_kernel();
74960+ if (is_compat_task()) {
74961+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
74962+ copy_gr_arg = &copy_gr_arg_compat;
74963+ copy_acl_object_label = &copy_acl_object_label_compat;
74964+ copy_acl_subject_label = &copy_acl_subject_label_compat;
74965+ copy_acl_role_label = &copy_acl_role_label_compat;
74966+ copy_acl_ip_label = &copy_acl_ip_label_compat;
74967+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
74968+ copy_role_transition = &copy_role_transition_compat;
74969+ copy_sprole_pw = &copy_sprole_pw_compat;
74970+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
74971+ copy_pointer_from_array = &copy_pointer_from_array_compat;
74972+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
74973+ } else {
74974+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
74975+ copy_gr_arg = &copy_gr_arg_normal;
74976+ copy_acl_object_label = &copy_acl_object_label_normal;
74977+ copy_acl_subject_label = &copy_acl_subject_label_normal;
74978+ copy_acl_role_label = &copy_acl_role_label_normal;
74979+ copy_acl_ip_label = &copy_acl_ip_label_normal;
74980+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
74981+ copy_role_transition = &copy_role_transition_normal;
74982+ copy_sprole_pw = &copy_sprole_pw_normal;
74983+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
74984+ copy_pointer_from_array = &copy_pointer_from_array_normal;
74985+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
74986+ }
74987+ pax_close_kernel();
74988+#endif
74989+
74990+ req_count = get_gr_arg_wrapper_size();
74991+
74992+ if (count != req_count) {
74993+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
74994+ error = -EINVAL;
74995+ goto out;
74996+ }
74997+
74998+
74999+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75000+ gr_auth_expires = 0;
75001+ gr_auth_attempts = 0;
75002+ }
75003+
75004+ error = copy_gr_arg_wrapper(buf, &uwrap);
75005+ if (error)
75006+ goto out;
75007+
75008+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75009+ if (error)
75010+ goto out;
75011+
75012+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75013+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75014+ time_after(gr_auth_expires, get_seconds())) {
75015+ error = -EBUSY;
75016+ goto out;
75017+ }
75018+
75019+ /* if non-root trying to do anything other than use a special role,
75020+ do not attempt authentication, do not count towards authentication
75021+ locking
75022+ */
75023+
75024+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75025+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75026+ gr_is_global_nonroot(current_uid())) {
75027+ error = -EPERM;
75028+ goto out;
75029+ }
75030+
75031+ /* ensure pw and special role name are null terminated */
75032+
75033+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75034+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75035+
75036+ /* Okay.
75037+ * We have our enough of the argument structure..(we have yet
75038+ * to copy_from_user the tables themselves) . Copy the tables
75039+ * only if we need them, i.e. for loading operations. */
75040+
75041+ switch (gr_usermode->mode) {
75042+ case GR_STATUS:
75043+ if (gr_acl_is_enabled()) {
75044+ error = 1;
75045+ if (!gr_check_secure_terminal(current))
75046+ error = 3;
75047+ } else
75048+ error = 2;
75049+ goto out;
75050+ case GR_SHUTDOWN:
75051+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75052+ stop_machine(gr_rbac_disable, NULL, NULL);
75053+ free_variables(false);
75054+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75055+ memset(gr_system_salt, 0, GR_SALT_LEN);
75056+ memset(gr_system_sum, 0, GR_SHA_LEN);
75057+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75058+ } else if (gr_acl_is_enabled()) {
75059+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75060+ error = -EPERM;
75061+ } else {
75062+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75063+ error = -EAGAIN;
75064+ }
75065+ break;
75066+ case GR_ENABLE:
75067+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75068+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75069+ else {
75070+ if (gr_acl_is_enabled())
75071+ error = -EAGAIN;
75072+ else
75073+ error = error2;
75074+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75075+ }
75076+ break;
75077+ case GR_OLDRELOAD:
75078+ oldmode = 1;
75079+ case GR_RELOAD:
75080+ if (!gr_acl_is_enabled()) {
75081+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75082+ error = -EAGAIN;
75083+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75084+ error2 = gracl_reload(gr_usermode, oldmode);
75085+ if (!error2)
75086+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75087+ else {
75088+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75089+ error = error2;
75090+ }
75091+ } else {
75092+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75093+ error = -EPERM;
75094+ }
75095+ break;
75096+ case GR_SEGVMOD:
75097+ if (unlikely(!gr_acl_is_enabled())) {
75098+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75099+ error = -EAGAIN;
75100+ break;
75101+ }
75102+
75103+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75104+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75105+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75106+ struct acl_subject_label *segvacl;
75107+ segvacl =
75108+ lookup_acl_subj_label(gr_usermode->segv_inode,
75109+ gr_usermode->segv_device,
75110+ current->role);
75111+ if (segvacl) {
75112+ segvacl->crashes = 0;
75113+ segvacl->expires = 0;
75114+ }
75115+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75116+ gr_remove_uid(gr_usermode->segv_uid);
75117+ }
75118+ } else {
75119+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75120+ error = -EPERM;
75121+ }
75122+ break;
75123+ case GR_SPROLE:
75124+ case GR_SPROLEPAM:
75125+ if (unlikely(!gr_acl_is_enabled())) {
75126+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75127+ error = -EAGAIN;
75128+ break;
75129+ }
75130+
75131+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75132+ current->role->expires = 0;
75133+ current->role->auth_attempts = 0;
75134+ }
75135+
75136+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75137+ time_after(current->role->expires, get_seconds())) {
75138+ error = -EBUSY;
75139+ goto out;
75140+ }
75141+
75142+ if (lookup_special_role_auth
75143+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75144+ && ((!sprole_salt && !sprole_sum)
75145+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75146+ char *p = "";
75147+ assign_special_role(gr_usermode->sp_role);
75148+ read_lock(&tasklist_lock);
75149+ if (current->real_parent)
75150+ p = current->real_parent->role->rolename;
75151+ read_unlock(&tasklist_lock);
75152+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75153+ p, acl_sp_role_value);
75154+ } else {
75155+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75156+ error = -EPERM;
75157+ if(!(current->role->auth_attempts++))
75158+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75159+
75160+ goto out;
75161+ }
75162+ break;
75163+ case GR_UNSPROLE:
75164+ if (unlikely(!gr_acl_is_enabled())) {
75165+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75166+ error = -EAGAIN;
75167+ break;
75168+ }
75169+
75170+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75171+ char *p = "";
75172+ int i = 0;
75173+
75174+ read_lock(&tasklist_lock);
75175+ if (current->real_parent) {
75176+ p = current->real_parent->role->rolename;
75177+ i = current->real_parent->acl_role_id;
75178+ }
75179+ read_unlock(&tasklist_lock);
75180+
75181+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75182+ gr_set_acls(1);
75183+ } else {
75184+ error = -EPERM;
75185+ goto out;
75186+ }
75187+ break;
75188+ default:
75189+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75190+ error = -EINVAL;
75191+ break;
75192+ }
75193+
75194+ if (error != -EPERM)
75195+ goto out;
75196+
75197+ if(!(gr_auth_attempts++))
75198+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75199+
75200+ out:
75201+ mutex_unlock(&gr_dev_mutex);
75202+
75203+ if (!error)
75204+ error = req_count;
75205+
75206+ return error;
75207+}
75208+
75209+int
75210+gr_set_acls(const int type)
75211+{
75212+ struct task_struct *task, *task2;
75213+ struct acl_role_label *role = current->role;
75214+ struct acl_subject_label *subj;
75215+ __u16 acl_role_id = current->acl_role_id;
75216+ const struct cred *cred;
75217+ int ret;
75218+
75219+ rcu_read_lock();
75220+ read_lock(&tasklist_lock);
75221+ read_lock(&grsec_exec_file_lock);
75222+ do_each_thread(task2, task) {
75223+ /* check to see if we're called from the exit handler,
75224+ if so, only replace ACLs that have inherited the admin
75225+ ACL */
75226+
75227+ if (type && (task->role != role ||
75228+ task->acl_role_id != acl_role_id))
75229+ continue;
75230+
75231+ task->acl_role_id = 0;
75232+ task->acl_sp_role = 0;
75233+ task->inherited = 0;
75234+
75235+ if (task->exec_file) {
75236+ cred = __task_cred(task);
75237+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75238+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75239+ if (subj == NULL) {
75240+ ret = -EINVAL;
75241+ read_unlock(&grsec_exec_file_lock);
75242+ read_unlock(&tasklist_lock);
75243+ rcu_read_unlock();
75244+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75245+ return ret;
75246+ }
75247+ __gr_apply_subject_to_task(polstate, task, subj);
75248+ } else {
75249+ // it's a kernel process
75250+ task->role = polstate->kernel_role;
75251+ task->acl = polstate->kernel_role->root_label;
75252+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75253+ task->acl->mode &= ~GR_PROCFIND;
75254+#endif
75255+ }
75256+ } while_each_thread(task2, task);
75257+ read_unlock(&grsec_exec_file_lock);
75258+ read_unlock(&tasklist_lock);
75259+ rcu_read_unlock();
75260+
75261+ return 0;
75262+}
75263diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75264new file mode 100644
75265index 0000000..39645c9
75266--- /dev/null
75267+++ b/grsecurity/gracl_res.c
75268@@ -0,0 +1,68 @@
75269+#include <linux/kernel.h>
75270+#include <linux/sched.h>
75271+#include <linux/gracl.h>
75272+#include <linux/grinternal.h>
75273+
75274+static const char *restab_log[] = {
75275+ [RLIMIT_CPU] = "RLIMIT_CPU",
75276+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75277+ [RLIMIT_DATA] = "RLIMIT_DATA",
75278+ [RLIMIT_STACK] = "RLIMIT_STACK",
75279+ [RLIMIT_CORE] = "RLIMIT_CORE",
75280+ [RLIMIT_RSS] = "RLIMIT_RSS",
75281+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75282+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75283+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75284+ [RLIMIT_AS] = "RLIMIT_AS",
75285+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75286+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75287+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75288+ [RLIMIT_NICE] = "RLIMIT_NICE",
75289+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75290+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75291+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75292+};
75293+
75294+void
75295+gr_log_resource(const struct task_struct *task,
75296+ const int res, const unsigned long wanted, const int gt)
75297+{
75298+ const struct cred *cred;
75299+ unsigned long rlim;
75300+
75301+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75302+ return;
75303+
75304+ // not yet supported resource
75305+ if (unlikely(!restab_log[res]))
75306+ return;
75307+
75308+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75309+ rlim = task_rlimit_max(task, res);
75310+ else
75311+ rlim = task_rlimit(task, res);
75312+
75313+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75314+ return;
75315+
75316+ rcu_read_lock();
75317+ cred = __task_cred(task);
75318+
75319+ if (res == RLIMIT_NPROC &&
75320+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75321+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75322+ goto out_rcu_unlock;
75323+ else if (res == RLIMIT_MEMLOCK &&
75324+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75325+ goto out_rcu_unlock;
75326+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75327+ goto out_rcu_unlock;
75328+ rcu_read_unlock();
75329+
75330+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75331+
75332+ return;
75333+out_rcu_unlock:
75334+ rcu_read_unlock();
75335+ return;
75336+}
75337diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75338new file mode 100644
75339index 0000000..218b66b
75340--- /dev/null
75341+++ b/grsecurity/gracl_segv.c
75342@@ -0,0 +1,324 @@
75343+#include <linux/kernel.h>
75344+#include <linux/mm.h>
75345+#include <asm/uaccess.h>
75346+#include <asm/errno.h>
75347+#include <asm/mman.h>
75348+#include <net/sock.h>
75349+#include <linux/file.h>
75350+#include <linux/fs.h>
75351+#include <linux/net.h>
75352+#include <linux/in.h>
75353+#include <linux/slab.h>
75354+#include <linux/types.h>
75355+#include <linux/sched.h>
75356+#include <linux/timer.h>
75357+#include <linux/gracl.h>
75358+#include <linux/grsecurity.h>
75359+#include <linux/grinternal.h>
75360+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75361+#include <linux/magic.h>
75362+#include <linux/pagemap.h>
75363+#include "../fs/btrfs/async-thread.h"
75364+#include "../fs/btrfs/ctree.h"
75365+#include "../fs/btrfs/btrfs_inode.h"
75366+#endif
75367+
75368+static struct crash_uid *uid_set;
75369+static unsigned short uid_used;
75370+static DEFINE_SPINLOCK(gr_uid_lock);
75371+extern rwlock_t gr_inode_lock;
75372+extern struct acl_subject_label *
75373+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
75374+ struct acl_role_label *role);
75375+
75376+static inline dev_t __get_dev(const struct dentry *dentry)
75377+{
75378+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75379+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75380+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75381+ else
75382+#endif
75383+ return dentry->d_sb->s_dev;
75384+}
75385+
75386+static inline u64 __get_ino(const struct dentry *dentry)
75387+{
75388+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75389+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75390+ return btrfs_ino(dentry->d_inode);
75391+ else
75392+#endif
75393+ return dentry->d_inode->i_ino;
75394+}
75395+
75396+int
75397+gr_init_uidset(void)
75398+{
75399+ uid_set =
75400+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75401+ uid_used = 0;
75402+
75403+ return uid_set ? 1 : 0;
75404+}
75405+
75406+void
75407+gr_free_uidset(void)
75408+{
75409+ if (uid_set) {
75410+ struct crash_uid *tmpset;
75411+ spin_lock(&gr_uid_lock);
75412+ tmpset = uid_set;
75413+ uid_set = NULL;
75414+ uid_used = 0;
75415+ spin_unlock(&gr_uid_lock);
75416+ if (tmpset)
75417+ kfree(tmpset);
75418+ }
75419+
75420+ return;
75421+}
75422+
75423+int
75424+gr_find_uid(const uid_t uid)
75425+{
75426+ struct crash_uid *tmp = uid_set;
75427+ uid_t buid;
75428+ int low = 0, high = uid_used - 1, mid;
75429+
75430+ while (high >= low) {
75431+ mid = (low + high) >> 1;
75432+ buid = tmp[mid].uid;
75433+ if (buid == uid)
75434+ return mid;
75435+ if (buid > uid)
75436+ high = mid - 1;
75437+ if (buid < uid)
75438+ low = mid + 1;
75439+ }
75440+
75441+ return -1;
75442+}
75443+
75444+static __inline__ void
75445+gr_insertsort(void)
75446+{
75447+ unsigned short i, j;
75448+ struct crash_uid index;
75449+
75450+ for (i = 1; i < uid_used; i++) {
75451+ index = uid_set[i];
75452+ j = i;
75453+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75454+ uid_set[j] = uid_set[j - 1];
75455+ j--;
75456+ }
75457+ uid_set[j] = index;
75458+ }
75459+
75460+ return;
75461+}
75462+
75463+static __inline__ void
75464+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75465+{
75466+ int loc;
75467+ uid_t uid = GR_GLOBAL_UID(kuid);
75468+
75469+ if (uid_used == GR_UIDTABLE_MAX)
75470+ return;
75471+
75472+ loc = gr_find_uid(uid);
75473+
75474+ if (loc >= 0) {
75475+ uid_set[loc].expires = expires;
75476+ return;
75477+ }
75478+
75479+ uid_set[uid_used].uid = uid;
75480+ uid_set[uid_used].expires = expires;
75481+ uid_used++;
75482+
75483+ gr_insertsort();
75484+
75485+ return;
75486+}
75487+
75488+void
75489+gr_remove_uid(const unsigned short loc)
75490+{
75491+ unsigned short i;
75492+
75493+ for (i = loc + 1; i < uid_used; i++)
75494+ uid_set[i - 1] = uid_set[i];
75495+
75496+ uid_used--;
75497+
75498+ return;
75499+}
75500+
75501+int
75502+gr_check_crash_uid(const kuid_t kuid)
75503+{
75504+ int loc;
75505+ int ret = 0;
75506+ uid_t uid;
75507+
75508+ if (unlikely(!gr_acl_is_enabled()))
75509+ return 0;
75510+
75511+ uid = GR_GLOBAL_UID(kuid);
75512+
75513+ spin_lock(&gr_uid_lock);
75514+ loc = gr_find_uid(uid);
75515+
75516+ if (loc < 0)
75517+ goto out_unlock;
75518+
75519+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75520+ gr_remove_uid(loc);
75521+ else
75522+ ret = 1;
75523+
75524+out_unlock:
75525+ spin_unlock(&gr_uid_lock);
75526+ return ret;
75527+}
75528+
75529+static __inline__ int
75530+proc_is_setxid(const struct cred *cred)
75531+{
75532+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75533+ !uid_eq(cred->uid, cred->fsuid))
75534+ return 1;
75535+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75536+ !gid_eq(cred->gid, cred->fsgid))
75537+ return 1;
75538+
75539+ return 0;
75540+}
75541+
75542+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75543+
75544+void
75545+gr_handle_crash(struct task_struct *task, const int sig)
75546+{
75547+ struct acl_subject_label *curr;
75548+ struct task_struct *tsk, *tsk2;
75549+ const struct cred *cred;
75550+ const struct cred *cred2;
75551+
75552+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75553+ return;
75554+
75555+ if (unlikely(!gr_acl_is_enabled()))
75556+ return;
75557+
75558+ curr = task->acl;
75559+
75560+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75561+ return;
75562+
75563+ if (time_before_eq(curr->expires, get_seconds())) {
75564+ curr->expires = 0;
75565+ curr->crashes = 0;
75566+ }
75567+
75568+ curr->crashes++;
75569+
75570+ if (!curr->expires)
75571+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75572+
75573+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75574+ time_after(curr->expires, get_seconds())) {
75575+ rcu_read_lock();
75576+ cred = __task_cred(task);
75577+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75578+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75579+ spin_lock(&gr_uid_lock);
75580+ gr_insert_uid(cred->uid, curr->expires);
75581+ spin_unlock(&gr_uid_lock);
75582+ curr->expires = 0;
75583+ curr->crashes = 0;
75584+ read_lock(&tasklist_lock);
75585+ do_each_thread(tsk2, tsk) {
75586+ cred2 = __task_cred(tsk);
75587+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75588+ gr_fake_force_sig(SIGKILL, tsk);
75589+ } while_each_thread(tsk2, tsk);
75590+ read_unlock(&tasklist_lock);
75591+ } else {
75592+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75593+ read_lock(&tasklist_lock);
75594+ read_lock(&grsec_exec_file_lock);
75595+ do_each_thread(tsk2, tsk) {
75596+ if (likely(tsk != task)) {
75597+ // if this thread has the same subject as the one that triggered
75598+ // RES_CRASH and it's the same binary, kill it
75599+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75600+ gr_fake_force_sig(SIGKILL, tsk);
75601+ }
75602+ } while_each_thread(tsk2, tsk);
75603+ read_unlock(&grsec_exec_file_lock);
75604+ read_unlock(&tasklist_lock);
75605+ }
75606+ rcu_read_unlock();
75607+ }
75608+
75609+ return;
75610+}
75611+
75612+int
75613+gr_check_crash_exec(const struct file *filp)
75614+{
75615+ struct acl_subject_label *curr;
75616+ struct dentry *dentry;
75617+
75618+ if (unlikely(!gr_acl_is_enabled()))
75619+ return 0;
75620+
75621+ read_lock(&gr_inode_lock);
75622+ dentry = filp->f_path.dentry;
75623+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
75624+ current->role);
75625+ read_unlock(&gr_inode_lock);
75626+
75627+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75628+ (!curr->crashes && !curr->expires))
75629+ return 0;
75630+
75631+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75632+ time_after(curr->expires, get_seconds()))
75633+ return 1;
75634+ else if (time_before_eq(curr->expires, get_seconds())) {
75635+ curr->crashes = 0;
75636+ curr->expires = 0;
75637+ }
75638+
75639+ return 0;
75640+}
75641+
75642+void
75643+gr_handle_alertkill(struct task_struct *task)
75644+{
75645+ struct acl_subject_label *curracl;
75646+ __u32 curr_ip;
75647+ struct task_struct *p, *p2;
75648+
75649+ if (unlikely(!gr_acl_is_enabled()))
75650+ return;
75651+
75652+ curracl = task->acl;
75653+ curr_ip = task->signal->curr_ip;
75654+
75655+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75656+ read_lock(&tasklist_lock);
75657+ do_each_thread(p2, p) {
75658+ if (p->signal->curr_ip == curr_ip)
75659+ gr_fake_force_sig(SIGKILL, p);
75660+ } while_each_thread(p2, p);
75661+ read_unlock(&tasklist_lock);
75662+ } else if (curracl->mode & GR_KILLPROC)
75663+ gr_fake_force_sig(SIGKILL, task);
75664+
75665+ return;
75666+}
75667diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75668new file mode 100644
75669index 0000000..6b0c9cc
75670--- /dev/null
75671+++ b/grsecurity/gracl_shm.c
75672@@ -0,0 +1,40 @@
75673+#include <linux/kernel.h>
75674+#include <linux/mm.h>
75675+#include <linux/sched.h>
75676+#include <linux/file.h>
75677+#include <linux/ipc.h>
75678+#include <linux/gracl.h>
75679+#include <linux/grsecurity.h>
75680+#include <linux/grinternal.h>
75681+
75682+int
75683+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
75684+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
75685+{
75686+ struct task_struct *task;
75687+
75688+ if (!gr_acl_is_enabled())
75689+ return 1;
75690+
75691+ rcu_read_lock();
75692+ read_lock(&tasklist_lock);
75693+
75694+ task = find_task_by_vpid(shm_cprid);
75695+
75696+ if (unlikely(!task))
75697+ task = find_task_by_vpid(shm_lapid);
75698+
75699+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
75700+ (task_pid_nr(task) == shm_lapid)) &&
75701+ (task->acl->mode & GR_PROTSHM) &&
75702+ (task->acl != current->acl))) {
75703+ read_unlock(&tasklist_lock);
75704+ rcu_read_unlock();
75705+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
75706+ return 0;
75707+ }
75708+ read_unlock(&tasklist_lock);
75709+ rcu_read_unlock();
75710+
75711+ return 1;
75712+}
75713diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
75714new file mode 100644
75715index 0000000..bc0be01
75716--- /dev/null
75717+++ b/grsecurity/grsec_chdir.c
75718@@ -0,0 +1,19 @@
75719+#include <linux/kernel.h>
75720+#include <linux/sched.h>
75721+#include <linux/fs.h>
75722+#include <linux/file.h>
75723+#include <linux/grsecurity.h>
75724+#include <linux/grinternal.h>
75725+
75726+void
75727+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
75728+{
75729+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75730+ if ((grsec_enable_chdir && grsec_enable_group &&
75731+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
75732+ !grsec_enable_group)) {
75733+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
75734+ }
75735+#endif
75736+ return;
75737+}
75738diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
75739new file mode 100644
75740index 0000000..114ea4f
75741--- /dev/null
75742+++ b/grsecurity/grsec_chroot.c
75743@@ -0,0 +1,467 @@
75744+#include <linux/kernel.h>
75745+#include <linux/module.h>
75746+#include <linux/sched.h>
75747+#include <linux/file.h>
75748+#include <linux/fs.h>
75749+#include <linux/mount.h>
75750+#include <linux/types.h>
75751+#include "../fs/mount.h"
75752+#include <linux/grsecurity.h>
75753+#include <linux/grinternal.h>
75754+
75755+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75756+int gr_init_ran;
75757+#endif
75758+
75759+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75760+{
75761+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75762+ struct dentry *tmpd = dentry;
75763+
75764+ read_seqlock_excl(&mount_lock);
75765+ write_seqlock(&rename_lock);
75766+
75767+ while (tmpd != mnt->mnt_root) {
75768+ atomic_inc(&tmpd->chroot_refcnt);
75769+ tmpd = tmpd->d_parent;
75770+ }
75771+ atomic_inc(&tmpd->chroot_refcnt);
75772+
75773+ write_sequnlock(&rename_lock);
75774+ read_sequnlock_excl(&mount_lock);
75775+#endif
75776+}
75777+
75778+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75779+{
75780+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75781+ struct dentry *tmpd = dentry;
75782+
75783+ read_seqlock_excl(&mount_lock);
75784+ write_seqlock(&rename_lock);
75785+
75786+ while (tmpd != mnt->mnt_root) {
75787+ atomic_dec(&tmpd->chroot_refcnt);
75788+ tmpd = tmpd->d_parent;
75789+ }
75790+ atomic_dec(&tmpd->chroot_refcnt);
75791+
75792+ write_sequnlock(&rename_lock);
75793+ read_sequnlock_excl(&mount_lock);
75794+#endif
75795+}
75796+
75797+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75798+static struct dentry *get_closest_chroot(struct dentry *dentry)
75799+{
75800+ write_seqlock(&rename_lock);
75801+ do {
75802+ if (atomic_read(&dentry->chroot_refcnt)) {
75803+ write_sequnlock(&rename_lock);
75804+ return dentry;
75805+ }
75806+ dentry = dentry->d_parent;
75807+ } while (!IS_ROOT(dentry));
75808+ write_sequnlock(&rename_lock);
75809+ return NULL;
75810+}
75811+#endif
75812+
75813+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
75814+ struct dentry *newdentry, struct vfsmount *newmnt)
75815+{
75816+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75817+ struct dentry *chroot;
75818+
75819+ if (unlikely(!grsec_enable_chroot_rename))
75820+ return 0;
75821+
75822+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
75823+ return 0;
75824+
75825+ chroot = get_closest_chroot(olddentry);
75826+
75827+ if (chroot == NULL)
75828+ return 0;
75829+
75830+ if (is_subdir(newdentry, chroot))
75831+ return 0;
75832+
75833+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
75834+
75835+ return 1;
75836+#else
75837+ return 0;
75838+#endif
75839+}
75840+
75841+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
75842+{
75843+#ifdef CONFIG_GRKERNSEC
75844+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
75845+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
75846+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75847+ && gr_init_ran
75848+#endif
75849+ )
75850+ task->gr_is_chrooted = 1;
75851+ else {
75852+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75853+ if (task_pid_nr(task) == 1 && !gr_init_ran)
75854+ gr_init_ran = 1;
75855+#endif
75856+ task->gr_is_chrooted = 0;
75857+ }
75858+
75859+ task->gr_chroot_dentry = path->dentry;
75860+#endif
75861+ return;
75862+}
75863+
75864+void gr_clear_chroot_entries(struct task_struct *task)
75865+{
75866+#ifdef CONFIG_GRKERNSEC
75867+ task->gr_is_chrooted = 0;
75868+ task->gr_chroot_dentry = NULL;
75869+#endif
75870+ return;
75871+}
75872+
75873+int
75874+gr_handle_chroot_unix(const pid_t pid)
75875+{
75876+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
75877+ struct task_struct *p;
75878+
75879+ if (unlikely(!grsec_enable_chroot_unix))
75880+ return 1;
75881+
75882+ if (likely(!proc_is_chrooted(current)))
75883+ return 1;
75884+
75885+ rcu_read_lock();
75886+ read_lock(&tasklist_lock);
75887+ p = find_task_by_vpid_unrestricted(pid);
75888+ if (unlikely(p && !have_same_root(current, p))) {
75889+ read_unlock(&tasklist_lock);
75890+ rcu_read_unlock();
75891+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
75892+ return 0;
75893+ }
75894+ read_unlock(&tasklist_lock);
75895+ rcu_read_unlock();
75896+#endif
75897+ return 1;
75898+}
75899+
75900+int
75901+gr_handle_chroot_nice(void)
75902+{
75903+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75904+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
75905+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
75906+ return -EPERM;
75907+ }
75908+#endif
75909+ return 0;
75910+}
75911+
75912+int
75913+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
75914+{
75915+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75916+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
75917+ && proc_is_chrooted(current)) {
75918+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
75919+ return -EACCES;
75920+ }
75921+#endif
75922+ return 0;
75923+}
75924+
75925+int
75926+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
75927+{
75928+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75929+ struct task_struct *p;
75930+ int ret = 0;
75931+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
75932+ return ret;
75933+
75934+ read_lock(&tasklist_lock);
75935+ do_each_pid_task(pid, type, p) {
75936+ if (!have_same_root(current, p)) {
75937+ ret = 1;
75938+ goto out;
75939+ }
75940+ } while_each_pid_task(pid, type, p);
75941+out:
75942+ read_unlock(&tasklist_lock);
75943+ return ret;
75944+#endif
75945+ return 0;
75946+}
75947+
75948+int
75949+gr_pid_is_chrooted(struct task_struct *p)
75950+{
75951+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75952+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
75953+ return 0;
75954+
75955+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
75956+ !have_same_root(current, p)) {
75957+ return 1;
75958+ }
75959+#endif
75960+ return 0;
75961+}
75962+
75963+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
75964+
75965+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
75966+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
75967+{
75968+ struct path path, currentroot;
75969+ int ret = 0;
75970+
75971+ path.dentry = (struct dentry *)u_dentry;
75972+ path.mnt = (struct vfsmount *)u_mnt;
75973+ get_fs_root(current->fs, &currentroot);
75974+ if (path_is_under(&path, &currentroot))
75975+ ret = 1;
75976+ path_put(&currentroot);
75977+
75978+ return ret;
75979+}
75980+#endif
75981+
75982+int
75983+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
75984+{
75985+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
75986+ if (!grsec_enable_chroot_fchdir)
75987+ return 1;
75988+
75989+ if (!proc_is_chrooted(current))
75990+ return 1;
75991+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
75992+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
75993+ return 0;
75994+ }
75995+#endif
75996+ return 1;
75997+}
75998+
75999+int
76000+gr_chroot_fhandle(void)
76001+{
76002+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76003+ if (!grsec_enable_chroot_fchdir)
76004+ return 1;
76005+
76006+ if (!proc_is_chrooted(current))
76007+ return 1;
76008+ else {
76009+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76010+ return 0;
76011+ }
76012+#endif
76013+ return 1;
76014+}
76015+
76016+int
76017+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76018+ const u64 shm_createtime)
76019+{
76020+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76021+ struct task_struct *p;
76022+
76023+ if (unlikely(!grsec_enable_chroot_shmat))
76024+ return 1;
76025+
76026+ if (likely(!proc_is_chrooted(current)))
76027+ return 1;
76028+
76029+ rcu_read_lock();
76030+ read_lock(&tasklist_lock);
76031+
76032+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76033+ if (time_before_eq64(p->start_time, shm_createtime)) {
76034+ if (have_same_root(current, p)) {
76035+ goto allow;
76036+ } else {
76037+ read_unlock(&tasklist_lock);
76038+ rcu_read_unlock();
76039+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76040+ return 0;
76041+ }
76042+ }
76043+ /* creator exited, pid reuse, fall through to next check */
76044+ }
76045+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76046+ if (unlikely(!have_same_root(current, p))) {
76047+ read_unlock(&tasklist_lock);
76048+ rcu_read_unlock();
76049+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76050+ return 0;
76051+ }
76052+ }
76053+
76054+allow:
76055+ read_unlock(&tasklist_lock);
76056+ rcu_read_unlock();
76057+#endif
76058+ return 1;
76059+}
76060+
76061+void
76062+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76063+{
76064+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76065+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76066+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76067+#endif
76068+ return;
76069+}
76070+
76071+int
76072+gr_handle_chroot_mknod(const struct dentry *dentry,
76073+ const struct vfsmount *mnt, const int mode)
76074+{
76075+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76076+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76077+ proc_is_chrooted(current)) {
76078+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76079+ return -EPERM;
76080+ }
76081+#endif
76082+ return 0;
76083+}
76084+
76085+int
76086+gr_handle_chroot_mount(const struct dentry *dentry,
76087+ const struct vfsmount *mnt, const char *dev_name)
76088+{
76089+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76090+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76091+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76092+ return -EPERM;
76093+ }
76094+#endif
76095+ return 0;
76096+}
76097+
76098+int
76099+gr_handle_chroot_pivot(void)
76100+{
76101+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76102+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76103+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76104+ return -EPERM;
76105+ }
76106+#endif
76107+ return 0;
76108+}
76109+
76110+int
76111+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76112+{
76113+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76114+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76115+ !gr_is_outside_chroot(dentry, mnt)) {
76116+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76117+ return -EPERM;
76118+ }
76119+#endif
76120+ return 0;
76121+}
76122+
76123+extern const char *captab_log[];
76124+extern int captab_log_entries;
76125+
76126+int
76127+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76128+{
76129+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76130+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76131+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76132+ if (cap_raised(chroot_caps, cap)) {
76133+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76134+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76135+ }
76136+ return 0;
76137+ }
76138+ }
76139+#endif
76140+ return 1;
76141+}
76142+
76143+int
76144+gr_chroot_is_capable(const int cap)
76145+{
76146+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76147+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76148+#endif
76149+ return 1;
76150+}
76151+
76152+int
76153+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76154+{
76155+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76156+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76157+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76158+ if (cap_raised(chroot_caps, cap)) {
76159+ return 0;
76160+ }
76161+ }
76162+#endif
76163+ return 1;
76164+}
76165+
76166+int
76167+gr_chroot_is_capable_nolog(const int cap)
76168+{
76169+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76170+ return gr_task_chroot_is_capable_nolog(current, cap);
76171+#endif
76172+ return 1;
76173+}
76174+
76175+int
76176+gr_handle_chroot_sysctl(const int op)
76177+{
76178+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76179+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76180+ proc_is_chrooted(current))
76181+ return -EACCES;
76182+#endif
76183+ return 0;
76184+}
76185+
76186+void
76187+gr_handle_chroot_chdir(const struct path *path)
76188+{
76189+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76190+ if (grsec_enable_chroot_chdir)
76191+ set_fs_pwd(current->fs, path);
76192+#endif
76193+ return;
76194+}
76195+
76196+int
76197+gr_handle_chroot_chmod(const struct dentry *dentry,
76198+ const struct vfsmount *mnt, const int mode)
76199+{
76200+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76201+ /* allow chmod +s on directories, but not files */
76202+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76203+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76204+ proc_is_chrooted(current)) {
76205+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76206+ return -EPERM;
76207+ }
76208+#endif
76209+ return 0;
76210+}
76211diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76212new file mode 100644
76213index 0000000..946f750
76214--- /dev/null
76215+++ b/grsecurity/grsec_disabled.c
76216@@ -0,0 +1,445 @@
76217+#include <linux/kernel.h>
76218+#include <linux/module.h>
76219+#include <linux/sched.h>
76220+#include <linux/file.h>
76221+#include <linux/fs.h>
76222+#include <linux/kdev_t.h>
76223+#include <linux/net.h>
76224+#include <linux/in.h>
76225+#include <linux/ip.h>
76226+#include <linux/skbuff.h>
76227+#include <linux/sysctl.h>
76228+
76229+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76230+void
76231+pax_set_initial_flags(struct linux_binprm *bprm)
76232+{
76233+ return;
76234+}
76235+#endif
76236+
76237+#ifdef CONFIG_SYSCTL
76238+__u32
76239+gr_handle_sysctl(const struct ctl_table * table, const int op)
76240+{
76241+ return 0;
76242+}
76243+#endif
76244+
76245+#ifdef CONFIG_TASKSTATS
76246+int gr_is_taskstats_denied(int pid)
76247+{
76248+ return 0;
76249+}
76250+#endif
76251+
76252+int
76253+gr_acl_is_enabled(void)
76254+{
76255+ return 0;
76256+}
76257+
76258+int
76259+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76260+{
76261+ return 0;
76262+}
76263+
76264+void
76265+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76266+{
76267+ return;
76268+}
76269+
76270+int
76271+gr_handle_rawio(const struct inode *inode)
76272+{
76273+ return 0;
76274+}
76275+
76276+void
76277+gr_acl_handle_psacct(struct task_struct *task, const long code)
76278+{
76279+ return;
76280+}
76281+
76282+int
76283+gr_handle_ptrace(struct task_struct *task, const long request)
76284+{
76285+ return 0;
76286+}
76287+
76288+int
76289+gr_handle_proc_ptrace(struct task_struct *task)
76290+{
76291+ return 0;
76292+}
76293+
76294+int
76295+gr_set_acls(const int type)
76296+{
76297+ return 0;
76298+}
76299+
76300+int
76301+gr_check_hidden_task(const struct task_struct *tsk)
76302+{
76303+ return 0;
76304+}
76305+
76306+int
76307+gr_check_protected_task(const struct task_struct *task)
76308+{
76309+ return 0;
76310+}
76311+
76312+int
76313+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76314+{
76315+ return 0;
76316+}
76317+
76318+void
76319+gr_copy_label(struct task_struct *tsk)
76320+{
76321+ return;
76322+}
76323+
76324+void
76325+gr_set_pax_flags(struct task_struct *task)
76326+{
76327+ return;
76328+}
76329+
76330+int
76331+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76332+ const int unsafe_share)
76333+{
76334+ return 0;
76335+}
76336+
76337+void
76338+gr_handle_delete(const u64 ino, const dev_t dev)
76339+{
76340+ return;
76341+}
76342+
76343+void
76344+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76345+{
76346+ return;
76347+}
76348+
76349+void
76350+gr_handle_crash(struct task_struct *task, const int sig)
76351+{
76352+ return;
76353+}
76354+
76355+int
76356+gr_check_crash_exec(const struct file *filp)
76357+{
76358+ return 0;
76359+}
76360+
76361+int
76362+gr_check_crash_uid(const kuid_t uid)
76363+{
76364+ return 0;
76365+}
76366+
76367+void
76368+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76369+ struct dentry *old_dentry,
76370+ struct dentry *new_dentry,
76371+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76372+{
76373+ return;
76374+}
76375+
76376+int
76377+gr_search_socket(const int family, const int type, const int protocol)
76378+{
76379+ return 1;
76380+}
76381+
76382+int
76383+gr_search_connectbind(const int mode, const struct socket *sock,
76384+ const struct sockaddr_in *addr)
76385+{
76386+ return 0;
76387+}
76388+
76389+void
76390+gr_handle_alertkill(struct task_struct *task)
76391+{
76392+ return;
76393+}
76394+
76395+__u32
76396+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76397+{
76398+ return 1;
76399+}
76400+
76401+__u32
76402+gr_acl_handle_hidden_file(const struct dentry * dentry,
76403+ const struct vfsmount * mnt)
76404+{
76405+ return 1;
76406+}
76407+
76408+__u32
76409+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76410+ int acc_mode)
76411+{
76412+ return 1;
76413+}
76414+
76415+__u32
76416+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76417+{
76418+ return 1;
76419+}
76420+
76421+__u32
76422+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76423+{
76424+ return 1;
76425+}
76426+
76427+int
76428+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76429+ unsigned int *vm_flags)
76430+{
76431+ return 1;
76432+}
76433+
76434+__u32
76435+gr_acl_handle_truncate(const struct dentry * dentry,
76436+ const struct vfsmount * mnt)
76437+{
76438+ return 1;
76439+}
76440+
76441+__u32
76442+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76443+{
76444+ return 1;
76445+}
76446+
76447+__u32
76448+gr_acl_handle_access(const struct dentry * dentry,
76449+ const struct vfsmount * mnt, const int fmode)
76450+{
76451+ return 1;
76452+}
76453+
76454+__u32
76455+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76456+ umode_t *mode)
76457+{
76458+ return 1;
76459+}
76460+
76461+__u32
76462+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76463+{
76464+ return 1;
76465+}
76466+
76467+__u32
76468+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76469+{
76470+ return 1;
76471+}
76472+
76473+__u32
76474+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76475+{
76476+ return 1;
76477+}
76478+
76479+void
76480+grsecurity_init(void)
76481+{
76482+ return;
76483+}
76484+
76485+umode_t gr_acl_umask(void)
76486+{
76487+ return 0;
76488+}
76489+
76490+__u32
76491+gr_acl_handle_mknod(const struct dentry * new_dentry,
76492+ const struct dentry * parent_dentry,
76493+ const struct vfsmount * parent_mnt,
76494+ const int mode)
76495+{
76496+ return 1;
76497+}
76498+
76499+__u32
76500+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76501+ const struct dentry * parent_dentry,
76502+ const struct vfsmount * parent_mnt)
76503+{
76504+ return 1;
76505+}
76506+
76507+__u32
76508+gr_acl_handle_symlink(const struct dentry * new_dentry,
76509+ const struct dentry * parent_dentry,
76510+ const struct vfsmount * parent_mnt, const struct filename *from)
76511+{
76512+ return 1;
76513+}
76514+
76515+__u32
76516+gr_acl_handle_link(const struct dentry * new_dentry,
76517+ const struct dentry * parent_dentry,
76518+ const struct vfsmount * parent_mnt,
76519+ const struct dentry * old_dentry,
76520+ const struct vfsmount * old_mnt, const struct filename *to)
76521+{
76522+ return 1;
76523+}
76524+
76525+int
76526+gr_acl_handle_rename(const struct dentry *new_dentry,
76527+ const struct dentry *parent_dentry,
76528+ const struct vfsmount *parent_mnt,
76529+ const struct dentry *old_dentry,
76530+ const struct inode *old_parent_inode,
76531+ const struct vfsmount *old_mnt, const struct filename *newname,
76532+ unsigned int flags)
76533+{
76534+ return 0;
76535+}
76536+
76537+int
76538+gr_acl_handle_filldir(const struct file *file, const char *name,
76539+ const int namelen, const u64 ino)
76540+{
76541+ return 1;
76542+}
76543+
76544+int
76545+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76546+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76547+{
76548+ return 1;
76549+}
76550+
76551+int
76552+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76553+{
76554+ return 0;
76555+}
76556+
76557+int
76558+gr_search_accept(const struct socket *sock)
76559+{
76560+ return 0;
76561+}
76562+
76563+int
76564+gr_search_listen(const struct socket *sock)
76565+{
76566+ return 0;
76567+}
76568+
76569+int
76570+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76571+{
76572+ return 0;
76573+}
76574+
76575+__u32
76576+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76577+{
76578+ return 1;
76579+}
76580+
76581+__u32
76582+gr_acl_handle_creat(const struct dentry * dentry,
76583+ const struct dentry * p_dentry,
76584+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76585+ const int imode)
76586+{
76587+ return 1;
76588+}
76589+
76590+void
76591+gr_acl_handle_exit(void)
76592+{
76593+ return;
76594+}
76595+
76596+int
76597+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76598+{
76599+ return 1;
76600+}
76601+
76602+void
76603+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76604+{
76605+ return;
76606+}
76607+
76608+int
76609+gr_acl_handle_procpidmem(const struct task_struct *task)
76610+{
76611+ return 0;
76612+}
76613+
76614+int
76615+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76616+{
76617+ return 0;
76618+}
76619+
76620+int
76621+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76622+{
76623+ return 0;
76624+}
76625+
76626+int
76627+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76628+{
76629+ return 0;
76630+}
76631+
76632+int
76633+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76634+{
76635+ return 0;
76636+}
76637+
76638+int gr_acl_enable_at_secure(void)
76639+{
76640+ return 0;
76641+}
76642+
76643+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76644+{
76645+ return dentry->d_sb->s_dev;
76646+}
76647+
76648+u64 gr_get_ino_from_dentry(struct dentry *dentry)
76649+{
76650+ return dentry->d_inode->i_ino;
76651+}
76652+
76653+void gr_put_exec_file(struct task_struct *task)
76654+{
76655+ return;
76656+}
76657+
76658+#ifdef CONFIG_SECURITY
76659+EXPORT_SYMBOL_GPL(gr_check_user_change);
76660+EXPORT_SYMBOL_GPL(gr_check_group_change);
76661+#endif
76662diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76663new file mode 100644
76664index 0000000..fb7531e
76665--- /dev/null
76666+++ b/grsecurity/grsec_exec.c
76667@@ -0,0 +1,189 @@
76668+#include <linux/kernel.h>
76669+#include <linux/sched.h>
76670+#include <linux/file.h>
76671+#include <linux/binfmts.h>
76672+#include <linux/fs.h>
76673+#include <linux/types.h>
76674+#include <linux/grdefs.h>
76675+#include <linux/grsecurity.h>
76676+#include <linux/grinternal.h>
76677+#include <linux/capability.h>
76678+#include <linux/module.h>
76679+#include <linux/compat.h>
76680+
76681+#include <asm/uaccess.h>
76682+
76683+#ifdef CONFIG_GRKERNSEC_EXECLOG
76684+static char gr_exec_arg_buf[132];
76685+static DEFINE_MUTEX(gr_exec_arg_mutex);
76686+#endif
76687+
76688+struct user_arg_ptr {
76689+#ifdef CONFIG_COMPAT
76690+ bool is_compat;
76691+#endif
76692+ union {
76693+ const char __user *const __user *native;
76694+#ifdef CONFIG_COMPAT
76695+ const compat_uptr_t __user *compat;
76696+#endif
76697+ } ptr;
76698+};
76699+
76700+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76701+
76702+void
76703+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76704+{
76705+#ifdef CONFIG_GRKERNSEC_EXECLOG
76706+ char *grarg = gr_exec_arg_buf;
76707+ unsigned int i, x, execlen = 0;
76708+ char c;
76709+
76710+ if (!((grsec_enable_execlog && grsec_enable_group &&
76711+ in_group_p(grsec_audit_gid))
76712+ || (grsec_enable_execlog && !grsec_enable_group)))
76713+ return;
76714+
76715+ mutex_lock(&gr_exec_arg_mutex);
76716+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76717+
76718+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76719+ const char __user *p;
76720+ unsigned int len;
76721+
76722+ p = get_user_arg_ptr(argv, i);
76723+ if (IS_ERR(p))
76724+ goto log;
76725+
76726+ len = strnlen_user(p, 128 - execlen);
76727+ if (len > 128 - execlen)
76728+ len = 128 - execlen;
76729+ else if (len > 0)
76730+ len--;
76731+ if (copy_from_user(grarg + execlen, p, len))
76732+ goto log;
76733+
76734+ /* rewrite unprintable characters */
76735+ for (x = 0; x < len; x++) {
76736+ c = *(grarg + execlen + x);
76737+ if (c < 32 || c > 126)
76738+ *(grarg + execlen + x) = ' ';
76739+ }
76740+
76741+ execlen += len;
76742+ *(grarg + execlen) = ' ';
76743+ *(grarg + execlen + 1) = '\0';
76744+ execlen++;
76745+ }
76746+
76747+ log:
76748+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76749+ bprm->file->f_path.mnt, grarg);
76750+ mutex_unlock(&gr_exec_arg_mutex);
76751+#endif
76752+ return;
76753+}
76754+
76755+#ifdef CONFIG_GRKERNSEC
76756+extern int gr_acl_is_capable(const int cap);
76757+extern int gr_acl_is_capable_nolog(const int cap);
76758+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76759+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
76760+extern int gr_chroot_is_capable(const int cap);
76761+extern int gr_chroot_is_capable_nolog(const int cap);
76762+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76763+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
76764+#endif
76765+
76766+const char *captab_log[] = {
76767+ "CAP_CHOWN",
76768+ "CAP_DAC_OVERRIDE",
76769+ "CAP_DAC_READ_SEARCH",
76770+ "CAP_FOWNER",
76771+ "CAP_FSETID",
76772+ "CAP_KILL",
76773+ "CAP_SETGID",
76774+ "CAP_SETUID",
76775+ "CAP_SETPCAP",
76776+ "CAP_LINUX_IMMUTABLE",
76777+ "CAP_NET_BIND_SERVICE",
76778+ "CAP_NET_BROADCAST",
76779+ "CAP_NET_ADMIN",
76780+ "CAP_NET_RAW",
76781+ "CAP_IPC_LOCK",
76782+ "CAP_IPC_OWNER",
76783+ "CAP_SYS_MODULE",
76784+ "CAP_SYS_RAWIO",
76785+ "CAP_SYS_CHROOT",
76786+ "CAP_SYS_PTRACE",
76787+ "CAP_SYS_PACCT",
76788+ "CAP_SYS_ADMIN",
76789+ "CAP_SYS_BOOT",
76790+ "CAP_SYS_NICE",
76791+ "CAP_SYS_RESOURCE",
76792+ "CAP_SYS_TIME",
76793+ "CAP_SYS_TTY_CONFIG",
76794+ "CAP_MKNOD",
76795+ "CAP_LEASE",
76796+ "CAP_AUDIT_WRITE",
76797+ "CAP_AUDIT_CONTROL",
76798+ "CAP_SETFCAP",
76799+ "CAP_MAC_OVERRIDE",
76800+ "CAP_MAC_ADMIN",
76801+ "CAP_SYSLOG",
76802+ "CAP_WAKE_ALARM",
76803+ "CAP_BLOCK_SUSPEND",
76804+ "CAP_AUDIT_READ"
76805+};
76806+
76807+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
76808+
76809+int gr_is_capable(const int cap)
76810+{
76811+#ifdef CONFIG_GRKERNSEC
76812+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
76813+ return 1;
76814+ return 0;
76815+#else
76816+ return 1;
76817+#endif
76818+}
76819+
76820+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76821+{
76822+#ifdef CONFIG_GRKERNSEC
76823+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
76824+ return 1;
76825+ return 0;
76826+#else
76827+ return 1;
76828+#endif
76829+}
76830+
76831+int gr_is_capable_nolog(const int cap)
76832+{
76833+#ifdef CONFIG_GRKERNSEC
76834+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
76835+ return 1;
76836+ return 0;
76837+#else
76838+ return 1;
76839+#endif
76840+}
76841+
76842+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
76843+{
76844+#ifdef CONFIG_GRKERNSEC
76845+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
76846+ return 1;
76847+ return 0;
76848+#else
76849+ return 1;
76850+#endif
76851+}
76852+
76853+EXPORT_SYMBOL_GPL(gr_is_capable);
76854+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
76855+EXPORT_SYMBOL_GPL(gr_task_is_capable);
76856+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
76857diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
76858new file mode 100644
76859index 0000000..06cc6ea
76860--- /dev/null
76861+++ b/grsecurity/grsec_fifo.c
76862@@ -0,0 +1,24 @@
76863+#include <linux/kernel.h>
76864+#include <linux/sched.h>
76865+#include <linux/fs.h>
76866+#include <linux/file.h>
76867+#include <linux/grinternal.h>
76868+
76869+int
76870+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
76871+ const struct dentry *dir, const int flag, const int acc_mode)
76872+{
76873+#ifdef CONFIG_GRKERNSEC_FIFO
76874+ const struct cred *cred = current_cred();
76875+
76876+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
76877+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
76878+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
76879+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
76880+ if (!inode_permission(dentry->d_inode, acc_mode))
76881+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
76882+ return -EACCES;
76883+ }
76884+#endif
76885+ return 0;
76886+}
76887diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
76888new file mode 100644
76889index 0000000..8ca18bf
76890--- /dev/null
76891+++ b/grsecurity/grsec_fork.c
76892@@ -0,0 +1,23 @@
76893+#include <linux/kernel.h>
76894+#include <linux/sched.h>
76895+#include <linux/grsecurity.h>
76896+#include <linux/grinternal.h>
76897+#include <linux/errno.h>
76898+
76899+void
76900+gr_log_forkfail(const int retval)
76901+{
76902+#ifdef CONFIG_GRKERNSEC_FORKFAIL
76903+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
76904+ switch (retval) {
76905+ case -EAGAIN:
76906+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
76907+ break;
76908+ case -ENOMEM:
76909+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
76910+ break;
76911+ }
76912+ }
76913+#endif
76914+ return;
76915+}
76916diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
76917new file mode 100644
76918index 0000000..4ed9e7d
76919--- /dev/null
76920+++ b/grsecurity/grsec_init.c
76921@@ -0,0 +1,290 @@
76922+#include <linux/kernel.h>
76923+#include <linux/sched.h>
76924+#include <linux/mm.h>
76925+#include <linux/gracl.h>
76926+#include <linux/slab.h>
76927+#include <linux/vmalloc.h>
76928+#include <linux/percpu.h>
76929+#include <linux/module.h>
76930+
76931+int grsec_enable_ptrace_readexec;
76932+int grsec_enable_setxid;
76933+int grsec_enable_symlinkown;
76934+kgid_t grsec_symlinkown_gid;
76935+int grsec_enable_brute;
76936+int grsec_enable_link;
76937+int grsec_enable_dmesg;
76938+int grsec_enable_harden_ptrace;
76939+int grsec_enable_harden_ipc;
76940+int grsec_enable_fifo;
76941+int grsec_enable_execlog;
76942+int grsec_enable_signal;
76943+int grsec_enable_forkfail;
76944+int grsec_enable_audit_ptrace;
76945+int grsec_enable_time;
76946+int grsec_enable_group;
76947+kgid_t grsec_audit_gid;
76948+int grsec_enable_chdir;
76949+int grsec_enable_mount;
76950+int grsec_enable_rofs;
76951+int grsec_deny_new_usb;
76952+int grsec_enable_chroot_findtask;
76953+int grsec_enable_chroot_mount;
76954+int grsec_enable_chroot_shmat;
76955+int grsec_enable_chroot_fchdir;
76956+int grsec_enable_chroot_double;
76957+int grsec_enable_chroot_pivot;
76958+int grsec_enable_chroot_chdir;
76959+int grsec_enable_chroot_chmod;
76960+int grsec_enable_chroot_mknod;
76961+int grsec_enable_chroot_nice;
76962+int grsec_enable_chroot_execlog;
76963+int grsec_enable_chroot_caps;
76964+int grsec_enable_chroot_rename;
76965+int grsec_enable_chroot_sysctl;
76966+int grsec_enable_chroot_unix;
76967+int grsec_enable_tpe;
76968+kgid_t grsec_tpe_gid;
76969+int grsec_enable_blackhole;
76970+#ifdef CONFIG_IPV6_MODULE
76971+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
76972+#endif
76973+int grsec_lastack_retries;
76974+int grsec_enable_tpe_all;
76975+int grsec_enable_tpe_invert;
76976+int grsec_enable_socket_all;
76977+kgid_t grsec_socket_all_gid;
76978+int grsec_enable_socket_client;
76979+kgid_t grsec_socket_client_gid;
76980+int grsec_enable_socket_server;
76981+kgid_t grsec_socket_server_gid;
76982+int grsec_resource_logging;
76983+int grsec_disable_privio;
76984+int grsec_enable_log_rwxmaps;
76985+int grsec_lock;
76986+
76987+DEFINE_SPINLOCK(grsec_alert_lock);
76988+unsigned long grsec_alert_wtime = 0;
76989+unsigned long grsec_alert_fyet = 0;
76990+
76991+DEFINE_SPINLOCK(grsec_audit_lock);
76992+
76993+DEFINE_RWLOCK(grsec_exec_file_lock);
76994+
76995+char *gr_shared_page[4];
76996+
76997+char *gr_alert_log_fmt;
76998+char *gr_audit_log_fmt;
76999+char *gr_alert_log_buf;
77000+char *gr_audit_log_buf;
77001+
77002+extern struct gr_arg *gr_usermode;
77003+extern unsigned char *gr_system_salt;
77004+extern unsigned char *gr_system_sum;
77005+
77006+void __init
77007+grsecurity_init(void)
77008+{
77009+ int j;
77010+ /* create the per-cpu shared pages */
77011+
77012+#ifdef CONFIG_X86
77013+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77014+#endif
77015+
77016+ for (j = 0; j < 4; j++) {
77017+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77018+ if (gr_shared_page[j] == NULL) {
77019+ panic("Unable to allocate grsecurity shared page");
77020+ return;
77021+ }
77022+ }
77023+
77024+ /* allocate log buffers */
77025+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77026+ if (!gr_alert_log_fmt) {
77027+ panic("Unable to allocate grsecurity alert log format buffer");
77028+ return;
77029+ }
77030+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77031+ if (!gr_audit_log_fmt) {
77032+ panic("Unable to allocate grsecurity audit log format buffer");
77033+ return;
77034+ }
77035+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77036+ if (!gr_alert_log_buf) {
77037+ panic("Unable to allocate grsecurity alert log buffer");
77038+ return;
77039+ }
77040+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77041+ if (!gr_audit_log_buf) {
77042+ panic("Unable to allocate grsecurity audit log buffer");
77043+ return;
77044+ }
77045+
77046+ /* allocate memory for authentication structure */
77047+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77048+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77049+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77050+
77051+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77052+ panic("Unable to allocate grsecurity authentication structure");
77053+ return;
77054+ }
77055+
77056+#ifdef CONFIG_GRKERNSEC_IO
77057+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77058+ grsec_disable_privio = 1;
77059+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77060+ grsec_disable_privio = 1;
77061+#else
77062+ grsec_disable_privio = 0;
77063+#endif
77064+#endif
77065+
77066+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77067+ /* for backward compatibility, tpe_invert always defaults to on if
77068+ enabled in the kernel
77069+ */
77070+ grsec_enable_tpe_invert = 1;
77071+#endif
77072+
77073+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77074+#ifndef CONFIG_GRKERNSEC_SYSCTL
77075+ grsec_lock = 1;
77076+#endif
77077+
77078+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77079+ grsec_enable_log_rwxmaps = 1;
77080+#endif
77081+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77082+ grsec_enable_group = 1;
77083+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77084+#endif
77085+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77086+ grsec_enable_ptrace_readexec = 1;
77087+#endif
77088+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77089+ grsec_enable_chdir = 1;
77090+#endif
77091+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77092+ grsec_enable_harden_ptrace = 1;
77093+#endif
77094+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77095+ grsec_enable_harden_ipc = 1;
77096+#endif
77097+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77098+ grsec_enable_mount = 1;
77099+#endif
77100+#ifdef CONFIG_GRKERNSEC_LINK
77101+ grsec_enable_link = 1;
77102+#endif
77103+#ifdef CONFIG_GRKERNSEC_BRUTE
77104+ grsec_enable_brute = 1;
77105+#endif
77106+#ifdef CONFIG_GRKERNSEC_DMESG
77107+ grsec_enable_dmesg = 1;
77108+#endif
77109+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77110+ grsec_enable_blackhole = 1;
77111+ grsec_lastack_retries = 4;
77112+#endif
77113+#ifdef CONFIG_GRKERNSEC_FIFO
77114+ grsec_enable_fifo = 1;
77115+#endif
77116+#ifdef CONFIG_GRKERNSEC_EXECLOG
77117+ grsec_enable_execlog = 1;
77118+#endif
77119+#ifdef CONFIG_GRKERNSEC_SETXID
77120+ grsec_enable_setxid = 1;
77121+#endif
77122+#ifdef CONFIG_GRKERNSEC_SIGNAL
77123+ grsec_enable_signal = 1;
77124+#endif
77125+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77126+ grsec_enable_forkfail = 1;
77127+#endif
77128+#ifdef CONFIG_GRKERNSEC_TIME
77129+ grsec_enable_time = 1;
77130+#endif
77131+#ifdef CONFIG_GRKERNSEC_RESLOG
77132+ grsec_resource_logging = 1;
77133+#endif
77134+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77135+ grsec_enable_chroot_findtask = 1;
77136+#endif
77137+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77138+ grsec_enable_chroot_unix = 1;
77139+#endif
77140+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77141+ grsec_enable_chroot_mount = 1;
77142+#endif
77143+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77144+ grsec_enable_chroot_fchdir = 1;
77145+#endif
77146+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77147+ grsec_enable_chroot_shmat = 1;
77148+#endif
77149+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77150+ grsec_enable_audit_ptrace = 1;
77151+#endif
77152+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77153+ grsec_enable_chroot_double = 1;
77154+#endif
77155+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77156+ grsec_enable_chroot_pivot = 1;
77157+#endif
77158+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77159+ grsec_enable_chroot_chdir = 1;
77160+#endif
77161+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77162+ grsec_enable_chroot_chmod = 1;
77163+#endif
77164+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77165+ grsec_enable_chroot_mknod = 1;
77166+#endif
77167+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77168+ grsec_enable_chroot_nice = 1;
77169+#endif
77170+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77171+ grsec_enable_chroot_execlog = 1;
77172+#endif
77173+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77174+ grsec_enable_chroot_caps = 1;
77175+#endif
77176+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77177+ grsec_enable_chroot_rename = 1;
77178+#endif
77179+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77180+ grsec_enable_chroot_sysctl = 1;
77181+#endif
77182+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77183+ grsec_enable_symlinkown = 1;
77184+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77185+#endif
77186+#ifdef CONFIG_GRKERNSEC_TPE
77187+ grsec_enable_tpe = 1;
77188+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77189+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77190+ grsec_enable_tpe_all = 1;
77191+#endif
77192+#endif
77193+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77194+ grsec_enable_socket_all = 1;
77195+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77196+#endif
77197+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77198+ grsec_enable_socket_client = 1;
77199+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77200+#endif
77201+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77202+ grsec_enable_socket_server = 1;
77203+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77204+#endif
77205+#endif
77206+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77207+ grsec_deny_new_usb = 1;
77208+#endif
77209+
77210+ return;
77211+}
77212diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77213new file mode 100644
77214index 0000000..1773300
77215--- /dev/null
77216+++ b/grsecurity/grsec_ipc.c
77217@@ -0,0 +1,48 @@
77218+#include <linux/kernel.h>
77219+#include <linux/mm.h>
77220+#include <linux/sched.h>
77221+#include <linux/file.h>
77222+#include <linux/ipc.h>
77223+#include <linux/ipc_namespace.h>
77224+#include <linux/grsecurity.h>
77225+#include <linux/grinternal.h>
77226+
77227+int
77228+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77229+{
77230+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77231+ int write;
77232+ int orig_granted_mode;
77233+ kuid_t euid;
77234+ kgid_t egid;
77235+
77236+ if (!grsec_enable_harden_ipc)
77237+ return 1;
77238+
77239+ euid = current_euid();
77240+ egid = current_egid();
77241+
77242+ write = requested_mode & 00002;
77243+ orig_granted_mode = ipcp->mode;
77244+
77245+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77246+ orig_granted_mode >>= 6;
77247+ else {
77248+ /* if likely wrong permissions, lock to user */
77249+ if (orig_granted_mode & 0007)
77250+ orig_granted_mode = 0;
77251+ /* otherwise do a egid-only check */
77252+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77253+ orig_granted_mode >>= 3;
77254+ /* otherwise, no access */
77255+ else
77256+ orig_granted_mode = 0;
77257+ }
77258+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77259+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77260+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77261+ return 0;
77262+ }
77263+#endif
77264+ return 1;
77265+}
77266diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77267new file mode 100644
77268index 0000000..5e05e20
77269--- /dev/null
77270+++ b/grsecurity/grsec_link.c
77271@@ -0,0 +1,58 @@
77272+#include <linux/kernel.h>
77273+#include <linux/sched.h>
77274+#include <linux/fs.h>
77275+#include <linux/file.h>
77276+#include <linux/grinternal.h>
77277+
77278+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77279+{
77280+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77281+ const struct inode *link_inode = link->dentry->d_inode;
77282+
77283+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77284+ /* ignore root-owned links, e.g. /proc/self */
77285+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77286+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77287+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77288+ return 1;
77289+ }
77290+#endif
77291+ return 0;
77292+}
77293+
77294+int
77295+gr_handle_follow_link(const struct inode *parent,
77296+ const struct inode *inode,
77297+ const struct dentry *dentry, const struct vfsmount *mnt)
77298+{
77299+#ifdef CONFIG_GRKERNSEC_LINK
77300+ const struct cred *cred = current_cred();
77301+
77302+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77303+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77304+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77305+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77306+ return -EACCES;
77307+ }
77308+#endif
77309+ return 0;
77310+}
77311+
77312+int
77313+gr_handle_hardlink(const struct dentry *dentry,
77314+ const struct vfsmount *mnt,
77315+ struct inode *inode, const int mode, const struct filename *to)
77316+{
77317+#ifdef CONFIG_GRKERNSEC_LINK
77318+ const struct cred *cred = current_cred();
77319+
77320+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77321+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77322+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77323+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77324+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77325+ return -EPERM;
77326+ }
77327+#endif
77328+ return 0;
77329+}
77330diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77331new file mode 100644
77332index 0000000..dbe0a6b
77333--- /dev/null
77334+++ b/grsecurity/grsec_log.c
77335@@ -0,0 +1,341 @@
77336+#include <linux/kernel.h>
77337+#include <linux/sched.h>
77338+#include <linux/file.h>
77339+#include <linux/tty.h>
77340+#include <linux/fs.h>
77341+#include <linux/mm.h>
77342+#include <linux/grinternal.h>
77343+
77344+#ifdef CONFIG_TREE_PREEMPT_RCU
77345+#define DISABLE_PREEMPT() preempt_disable()
77346+#define ENABLE_PREEMPT() preempt_enable()
77347+#else
77348+#define DISABLE_PREEMPT()
77349+#define ENABLE_PREEMPT()
77350+#endif
77351+
77352+#define BEGIN_LOCKS(x) \
77353+ DISABLE_PREEMPT(); \
77354+ rcu_read_lock(); \
77355+ read_lock(&tasklist_lock); \
77356+ read_lock(&grsec_exec_file_lock); \
77357+ if (x != GR_DO_AUDIT) \
77358+ spin_lock(&grsec_alert_lock); \
77359+ else \
77360+ spin_lock(&grsec_audit_lock)
77361+
77362+#define END_LOCKS(x) \
77363+ if (x != GR_DO_AUDIT) \
77364+ spin_unlock(&grsec_alert_lock); \
77365+ else \
77366+ spin_unlock(&grsec_audit_lock); \
77367+ read_unlock(&grsec_exec_file_lock); \
77368+ read_unlock(&tasklist_lock); \
77369+ rcu_read_unlock(); \
77370+ ENABLE_PREEMPT(); \
77371+ if (x == GR_DONT_AUDIT) \
77372+ gr_handle_alertkill(current)
77373+
77374+enum {
77375+ FLOODING,
77376+ NO_FLOODING
77377+};
77378+
77379+extern char *gr_alert_log_fmt;
77380+extern char *gr_audit_log_fmt;
77381+extern char *gr_alert_log_buf;
77382+extern char *gr_audit_log_buf;
77383+
77384+static int gr_log_start(int audit)
77385+{
77386+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77387+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77388+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77389+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77390+ unsigned long curr_secs = get_seconds();
77391+
77392+ if (audit == GR_DO_AUDIT)
77393+ goto set_fmt;
77394+
77395+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77396+ grsec_alert_wtime = curr_secs;
77397+ grsec_alert_fyet = 0;
77398+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77399+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77400+ grsec_alert_fyet++;
77401+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77402+ grsec_alert_wtime = curr_secs;
77403+ grsec_alert_fyet++;
77404+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77405+ return FLOODING;
77406+ }
77407+ else return FLOODING;
77408+
77409+set_fmt:
77410+#endif
77411+ memset(buf, 0, PAGE_SIZE);
77412+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77413+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77414+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77415+ } else if (current->signal->curr_ip) {
77416+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77417+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77418+ } else if (gr_acl_is_enabled()) {
77419+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77420+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77421+ } else {
77422+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77423+ strcpy(buf, fmt);
77424+ }
77425+
77426+ return NO_FLOODING;
77427+}
77428+
77429+static void gr_log_middle(int audit, const char *msg, va_list ap)
77430+ __attribute__ ((format (printf, 2, 0)));
77431+
77432+static void gr_log_middle(int audit, const char *msg, va_list ap)
77433+{
77434+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77435+ unsigned int len = strlen(buf);
77436+
77437+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77438+
77439+ return;
77440+}
77441+
77442+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77443+ __attribute__ ((format (printf, 2, 3)));
77444+
77445+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77446+{
77447+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77448+ unsigned int len = strlen(buf);
77449+ va_list ap;
77450+
77451+ va_start(ap, msg);
77452+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77453+ va_end(ap);
77454+
77455+ return;
77456+}
77457+
77458+static void gr_log_end(int audit, int append_default)
77459+{
77460+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77461+ if (append_default) {
77462+ struct task_struct *task = current;
77463+ struct task_struct *parent = task->real_parent;
77464+ const struct cred *cred = __task_cred(task);
77465+ const struct cred *pcred = __task_cred(parent);
77466+ unsigned int len = strlen(buf);
77467+
77468+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77469+ }
77470+
77471+ printk("%s\n", buf);
77472+
77473+ return;
77474+}
77475+
77476+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77477+{
77478+ int logtype;
77479+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77480+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77481+ void *voidptr = NULL;
77482+ int num1 = 0, num2 = 0;
77483+ unsigned long ulong1 = 0, ulong2 = 0;
77484+ struct dentry *dentry = NULL;
77485+ struct vfsmount *mnt = NULL;
77486+ struct file *file = NULL;
77487+ struct task_struct *task = NULL;
77488+ struct vm_area_struct *vma = NULL;
77489+ const struct cred *cred, *pcred;
77490+ va_list ap;
77491+
77492+ BEGIN_LOCKS(audit);
77493+ logtype = gr_log_start(audit);
77494+ if (logtype == FLOODING) {
77495+ END_LOCKS(audit);
77496+ return;
77497+ }
77498+ va_start(ap, argtypes);
77499+ switch (argtypes) {
77500+ case GR_TTYSNIFF:
77501+ task = va_arg(ap, struct task_struct *);
77502+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77503+ break;
77504+ case GR_SYSCTL_HIDDEN:
77505+ str1 = va_arg(ap, char *);
77506+ gr_log_middle_varargs(audit, msg, result, str1);
77507+ break;
77508+ case GR_RBAC:
77509+ dentry = va_arg(ap, struct dentry *);
77510+ mnt = va_arg(ap, struct vfsmount *);
77511+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77512+ break;
77513+ case GR_RBAC_STR:
77514+ dentry = va_arg(ap, struct dentry *);
77515+ mnt = va_arg(ap, struct vfsmount *);
77516+ str1 = va_arg(ap, char *);
77517+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77518+ break;
77519+ case GR_STR_RBAC:
77520+ str1 = va_arg(ap, char *);
77521+ dentry = va_arg(ap, struct dentry *);
77522+ mnt = va_arg(ap, struct vfsmount *);
77523+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77524+ break;
77525+ case GR_RBAC_MODE2:
77526+ dentry = va_arg(ap, struct dentry *);
77527+ mnt = va_arg(ap, struct vfsmount *);
77528+ str1 = va_arg(ap, char *);
77529+ str2 = va_arg(ap, char *);
77530+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77531+ break;
77532+ case GR_RBAC_MODE3:
77533+ dentry = va_arg(ap, struct dentry *);
77534+ mnt = va_arg(ap, struct vfsmount *);
77535+ str1 = va_arg(ap, char *);
77536+ str2 = va_arg(ap, char *);
77537+ str3 = va_arg(ap, char *);
77538+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77539+ break;
77540+ case GR_FILENAME:
77541+ dentry = va_arg(ap, struct dentry *);
77542+ mnt = va_arg(ap, struct vfsmount *);
77543+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77544+ break;
77545+ case GR_STR_FILENAME:
77546+ str1 = va_arg(ap, char *);
77547+ dentry = va_arg(ap, struct dentry *);
77548+ mnt = va_arg(ap, struct vfsmount *);
77549+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77550+ break;
77551+ case GR_FILENAME_STR:
77552+ dentry = va_arg(ap, struct dentry *);
77553+ mnt = va_arg(ap, struct vfsmount *);
77554+ str1 = va_arg(ap, char *);
77555+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77556+ break;
77557+ case GR_FILENAME_TWO_INT:
77558+ dentry = va_arg(ap, struct dentry *);
77559+ mnt = va_arg(ap, struct vfsmount *);
77560+ num1 = va_arg(ap, int);
77561+ num2 = va_arg(ap, int);
77562+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77563+ break;
77564+ case GR_FILENAME_TWO_INT_STR:
77565+ dentry = va_arg(ap, struct dentry *);
77566+ mnt = va_arg(ap, struct vfsmount *);
77567+ num1 = va_arg(ap, int);
77568+ num2 = va_arg(ap, int);
77569+ str1 = va_arg(ap, char *);
77570+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77571+ break;
77572+ case GR_TEXTREL:
77573+ file = va_arg(ap, struct file *);
77574+ ulong1 = va_arg(ap, unsigned long);
77575+ ulong2 = va_arg(ap, unsigned long);
77576+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77577+ break;
77578+ case GR_PTRACE:
77579+ task = va_arg(ap, struct task_struct *);
77580+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77581+ break;
77582+ case GR_RESOURCE:
77583+ task = va_arg(ap, struct task_struct *);
77584+ cred = __task_cred(task);
77585+ pcred = __task_cred(task->real_parent);
77586+ ulong1 = va_arg(ap, unsigned long);
77587+ str1 = va_arg(ap, char *);
77588+ ulong2 = va_arg(ap, unsigned long);
77589+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77590+ break;
77591+ case GR_CAP:
77592+ task = va_arg(ap, struct task_struct *);
77593+ cred = __task_cred(task);
77594+ pcred = __task_cred(task->real_parent);
77595+ str1 = va_arg(ap, char *);
77596+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77597+ break;
77598+ case GR_SIG:
77599+ str1 = va_arg(ap, char *);
77600+ voidptr = va_arg(ap, void *);
77601+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77602+ break;
77603+ case GR_SIG2:
77604+ task = va_arg(ap, struct task_struct *);
77605+ cred = __task_cred(task);
77606+ pcred = __task_cred(task->real_parent);
77607+ num1 = va_arg(ap, int);
77608+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77609+ break;
77610+ case GR_CRASH1:
77611+ task = va_arg(ap, struct task_struct *);
77612+ cred = __task_cred(task);
77613+ pcred = __task_cred(task->real_parent);
77614+ ulong1 = va_arg(ap, unsigned long);
77615+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77616+ break;
77617+ case GR_CRASH2:
77618+ task = va_arg(ap, struct task_struct *);
77619+ cred = __task_cred(task);
77620+ pcred = __task_cred(task->real_parent);
77621+ ulong1 = va_arg(ap, unsigned long);
77622+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77623+ break;
77624+ case GR_RWXMAP:
77625+ file = va_arg(ap, struct file *);
77626+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77627+ break;
77628+ case GR_RWXMAPVMA:
77629+ vma = va_arg(ap, struct vm_area_struct *);
77630+ if (vma->vm_file)
77631+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77632+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77633+ str1 = "<stack>";
77634+ else if (vma->vm_start <= current->mm->brk &&
77635+ vma->vm_end >= current->mm->start_brk)
77636+ str1 = "<heap>";
77637+ else
77638+ str1 = "<anonymous mapping>";
77639+ gr_log_middle_varargs(audit, msg, str1);
77640+ break;
77641+ case GR_PSACCT:
77642+ {
77643+ unsigned int wday, cday;
77644+ __u8 whr, chr;
77645+ __u8 wmin, cmin;
77646+ __u8 wsec, csec;
77647+ char cur_tty[64] = { 0 };
77648+ char parent_tty[64] = { 0 };
77649+
77650+ task = va_arg(ap, struct task_struct *);
77651+ wday = va_arg(ap, unsigned int);
77652+ cday = va_arg(ap, unsigned int);
77653+ whr = va_arg(ap, int);
77654+ chr = va_arg(ap, int);
77655+ wmin = va_arg(ap, int);
77656+ cmin = va_arg(ap, int);
77657+ wsec = va_arg(ap, int);
77658+ csec = va_arg(ap, int);
77659+ ulong1 = va_arg(ap, unsigned long);
77660+ cred = __task_cred(task);
77661+ pcred = __task_cred(task->real_parent);
77662+
77663+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77664+ }
77665+ break;
77666+ default:
77667+ gr_log_middle(audit, msg, ap);
77668+ }
77669+ va_end(ap);
77670+ // these don't need DEFAULTSECARGS printed on the end
77671+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77672+ gr_log_end(audit, 0);
77673+ else
77674+ gr_log_end(audit, 1);
77675+ END_LOCKS(audit);
77676+}
77677diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77678new file mode 100644
77679index 0000000..0e39d8c
77680--- /dev/null
77681+++ b/grsecurity/grsec_mem.c
77682@@ -0,0 +1,48 @@
77683+#include <linux/kernel.h>
77684+#include <linux/sched.h>
77685+#include <linux/mm.h>
77686+#include <linux/mman.h>
77687+#include <linux/module.h>
77688+#include <linux/grinternal.h>
77689+
77690+void gr_handle_msr_write(void)
77691+{
77692+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77693+ return;
77694+}
77695+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77696+
77697+void
77698+gr_handle_ioperm(void)
77699+{
77700+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77701+ return;
77702+}
77703+
77704+void
77705+gr_handle_iopl(void)
77706+{
77707+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77708+ return;
77709+}
77710+
77711+void
77712+gr_handle_mem_readwrite(u64 from, u64 to)
77713+{
77714+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77715+ return;
77716+}
77717+
77718+void
77719+gr_handle_vm86(void)
77720+{
77721+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77722+ return;
77723+}
77724+
77725+void
77726+gr_log_badprocpid(const char *entry)
77727+{
77728+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77729+ return;
77730+}
77731diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77732new file mode 100644
77733index 0000000..6f9eb73
77734--- /dev/null
77735+++ b/grsecurity/grsec_mount.c
77736@@ -0,0 +1,65 @@
77737+#include <linux/kernel.h>
77738+#include <linux/sched.h>
77739+#include <linux/mount.h>
77740+#include <linux/major.h>
77741+#include <linux/grsecurity.h>
77742+#include <linux/grinternal.h>
77743+
77744+void
77745+gr_log_remount(const char *devname, const int retval)
77746+{
77747+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77748+ if (grsec_enable_mount && (retval >= 0))
77749+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77750+#endif
77751+ return;
77752+}
77753+
77754+void
77755+gr_log_unmount(const char *devname, const int retval)
77756+{
77757+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77758+ if (grsec_enable_mount && (retval >= 0))
77759+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77760+#endif
77761+ return;
77762+}
77763+
77764+void
77765+gr_log_mount(const char *from, struct path *to, const int retval)
77766+{
77767+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77768+ if (grsec_enable_mount && (retval >= 0))
77769+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
77770+#endif
77771+ return;
77772+}
77773+
77774+int
77775+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
77776+{
77777+#ifdef CONFIG_GRKERNSEC_ROFS
77778+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
77779+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
77780+ return -EPERM;
77781+ } else
77782+ return 0;
77783+#endif
77784+ return 0;
77785+}
77786+
77787+int
77788+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
77789+{
77790+#ifdef CONFIG_GRKERNSEC_ROFS
77791+ struct inode *inode = dentry->d_inode;
77792+
77793+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
77794+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
77795+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
77796+ return -EPERM;
77797+ } else
77798+ return 0;
77799+#endif
77800+ return 0;
77801+}
77802diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
77803new file mode 100644
77804index 0000000..6ee9d50
77805--- /dev/null
77806+++ b/grsecurity/grsec_pax.c
77807@@ -0,0 +1,45 @@
77808+#include <linux/kernel.h>
77809+#include <linux/sched.h>
77810+#include <linux/mm.h>
77811+#include <linux/file.h>
77812+#include <linux/grinternal.h>
77813+#include <linux/grsecurity.h>
77814+
77815+void
77816+gr_log_textrel(struct vm_area_struct * vma)
77817+{
77818+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77819+ if (grsec_enable_log_rwxmaps)
77820+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
77821+#endif
77822+ return;
77823+}
77824+
77825+void gr_log_ptgnustack(struct file *file)
77826+{
77827+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77828+ if (grsec_enable_log_rwxmaps)
77829+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
77830+#endif
77831+ return;
77832+}
77833+
77834+void
77835+gr_log_rwxmmap(struct file *file)
77836+{
77837+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77838+ if (grsec_enable_log_rwxmaps)
77839+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
77840+#endif
77841+ return;
77842+}
77843+
77844+void
77845+gr_log_rwxmprotect(struct vm_area_struct *vma)
77846+{
77847+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77848+ if (grsec_enable_log_rwxmaps)
77849+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
77850+#endif
77851+ return;
77852+}
77853diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
77854new file mode 100644
77855index 0000000..2005a3a
77856--- /dev/null
77857+++ b/grsecurity/grsec_proc.c
77858@@ -0,0 +1,20 @@
77859+#include <linux/kernel.h>
77860+#include <linux/sched.h>
77861+#include <linux/grsecurity.h>
77862+#include <linux/grinternal.h>
77863+
77864+int gr_proc_is_restricted(void)
77865+{
77866+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77867+ const struct cred *cred = current_cred();
77868+#endif
77869+
77870+#ifdef CONFIG_GRKERNSEC_PROC_USER
77871+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
77872+ return -EACCES;
77873+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77874+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
77875+ return -EACCES;
77876+#endif
77877+ return 0;
77878+}
77879diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
77880new file mode 100644
77881index 0000000..f7f29aa
77882--- /dev/null
77883+++ b/grsecurity/grsec_ptrace.c
77884@@ -0,0 +1,30 @@
77885+#include <linux/kernel.h>
77886+#include <linux/sched.h>
77887+#include <linux/grinternal.h>
77888+#include <linux/security.h>
77889+
77890+void
77891+gr_audit_ptrace(struct task_struct *task)
77892+{
77893+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77894+ if (grsec_enable_audit_ptrace)
77895+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
77896+#endif
77897+ return;
77898+}
77899+
77900+int
77901+gr_ptrace_readexec(struct file *file, int unsafe_flags)
77902+{
77903+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77904+ const struct dentry *dentry = file->f_path.dentry;
77905+ const struct vfsmount *mnt = file->f_path.mnt;
77906+
77907+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
77908+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
77909+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
77910+ return -EACCES;
77911+ }
77912+#endif
77913+ return 0;
77914+}
77915diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
77916new file mode 100644
77917index 0000000..3860c7e
77918--- /dev/null
77919+++ b/grsecurity/grsec_sig.c
77920@@ -0,0 +1,236 @@
77921+#include <linux/kernel.h>
77922+#include <linux/sched.h>
77923+#include <linux/fs.h>
77924+#include <linux/delay.h>
77925+#include <linux/grsecurity.h>
77926+#include <linux/grinternal.h>
77927+#include <linux/hardirq.h>
77928+
77929+char *signames[] = {
77930+ [SIGSEGV] = "Segmentation fault",
77931+ [SIGILL] = "Illegal instruction",
77932+ [SIGABRT] = "Abort",
77933+ [SIGBUS] = "Invalid alignment/Bus error"
77934+};
77935+
77936+void
77937+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
77938+{
77939+#ifdef CONFIG_GRKERNSEC_SIGNAL
77940+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
77941+ (sig == SIGABRT) || (sig == SIGBUS))) {
77942+ if (task_pid_nr(t) == task_pid_nr(current)) {
77943+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
77944+ } else {
77945+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
77946+ }
77947+ }
77948+#endif
77949+ return;
77950+}
77951+
77952+int
77953+gr_handle_signal(const struct task_struct *p, const int sig)
77954+{
77955+#ifdef CONFIG_GRKERNSEC
77956+ /* ignore the 0 signal for protected task checks */
77957+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
77958+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
77959+ return -EPERM;
77960+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
77961+ return -EPERM;
77962+ }
77963+#endif
77964+ return 0;
77965+}
77966+
77967+#ifdef CONFIG_GRKERNSEC
77968+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
77969+
77970+int gr_fake_force_sig(int sig, struct task_struct *t)
77971+{
77972+ unsigned long int flags;
77973+ int ret, blocked, ignored;
77974+ struct k_sigaction *action;
77975+
77976+ spin_lock_irqsave(&t->sighand->siglock, flags);
77977+ action = &t->sighand->action[sig-1];
77978+ ignored = action->sa.sa_handler == SIG_IGN;
77979+ blocked = sigismember(&t->blocked, sig);
77980+ if (blocked || ignored) {
77981+ action->sa.sa_handler = SIG_DFL;
77982+ if (blocked) {
77983+ sigdelset(&t->blocked, sig);
77984+ recalc_sigpending_and_wake(t);
77985+ }
77986+ }
77987+ if (action->sa.sa_handler == SIG_DFL)
77988+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
77989+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
77990+
77991+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
77992+
77993+ return ret;
77994+}
77995+#endif
77996+
77997+#define GR_USER_BAN_TIME (15 * 60)
77998+#define GR_DAEMON_BRUTE_TIME (30 * 60)
77999+
78000+void gr_handle_brute_attach(int dumpable)
78001+{
78002+#ifdef CONFIG_GRKERNSEC_BRUTE
78003+ struct task_struct *p = current;
78004+ kuid_t uid = GLOBAL_ROOT_UID;
78005+ int daemon = 0;
78006+
78007+ if (!grsec_enable_brute)
78008+ return;
78009+
78010+ rcu_read_lock();
78011+ read_lock(&tasklist_lock);
78012+ read_lock(&grsec_exec_file_lock);
78013+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78014+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78015+ p->real_parent->brute = 1;
78016+ daemon = 1;
78017+ } else {
78018+ const struct cred *cred = __task_cred(p), *cred2;
78019+ struct task_struct *tsk, *tsk2;
78020+
78021+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78022+ struct user_struct *user;
78023+
78024+ uid = cred->uid;
78025+
78026+ /* this is put upon execution past expiration */
78027+ user = find_user(uid);
78028+ if (user == NULL)
78029+ goto unlock;
78030+ user->suid_banned = 1;
78031+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78032+ if (user->suid_ban_expires == ~0UL)
78033+ user->suid_ban_expires--;
78034+
78035+ /* only kill other threads of the same binary, from the same user */
78036+ do_each_thread(tsk2, tsk) {
78037+ cred2 = __task_cred(tsk);
78038+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78039+ gr_fake_force_sig(SIGKILL, tsk);
78040+ } while_each_thread(tsk2, tsk);
78041+ }
78042+ }
78043+unlock:
78044+ read_unlock(&grsec_exec_file_lock);
78045+ read_unlock(&tasklist_lock);
78046+ rcu_read_unlock();
78047+
78048+ if (gr_is_global_nonroot(uid))
78049+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78050+ else if (daemon)
78051+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78052+
78053+#endif
78054+ return;
78055+}
78056+
78057+void gr_handle_brute_check(void)
78058+{
78059+#ifdef CONFIG_GRKERNSEC_BRUTE
78060+ struct task_struct *p = current;
78061+
78062+ if (unlikely(p->brute)) {
78063+ if (!grsec_enable_brute)
78064+ p->brute = 0;
78065+ else if (time_before(get_seconds(), p->brute_expires))
78066+ msleep(30 * 1000);
78067+ }
78068+#endif
78069+ return;
78070+}
78071+
78072+void gr_handle_kernel_exploit(void)
78073+{
78074+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78075+ const struct cred *cred;
78076+ struct task_struct *tsk, *tsk2;
78077+ struct user_struct *user;
78078+ kuid_t uid;
78079+
78080+ if (in_irq() || in_serving_softirq() || in_nmi())
78081+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78082+
78083+ uid = current_uid();
78084+
78085+ if (gr_is_global_root(uid))
78086+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78087+ else {
78088+ /* kill all the processes of this user, hold a reference
78089+ to their creds struct, and prevent them from creating
78090+ another process until system reset
78091+ */
78092+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78093+ GR_GLOBAL_UID(uid));
78094+ /* we intentionally leak this ref */
78095+ user = get_uid(current->cred->user);
78096+ if (user)
78097+ user->kernel_banned = 1;
78098+
78099+ /* kill all processes of this user */
78100+ read_lock(&tasklist_lock);
78101+ do_each_thread(tsk2, tsk) {
78102+ cred = __task_cred(tsk);
78103+ if (uid_eq(cred->uid, uid))
78104+ gr_fake_force_sig(SIGKILL, tsk);
78105+ } while_each_thread(tsk2, tsk);
78106+ read_unlock(&tasklist_lock);
78107+ }
78108+#endif
78109+}
78110+
78111+#ifdef CONFIG_GRKERNSEC_BRUTE
78112+static bool suid_ban_expired(struct user_struct *user)
78113+{
78114+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78115+ user->suid_banned = 0;
78116+ user->suid_ban_expires = 0;
78117+ free_uid(user);
78118+ return true;
78119+ }
78120+
78121+ return false;
78122+}
78123+#endif
78124+
78125+int gr_process_kernel_exec_ban(void)
78126+{
78127+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78128+ if (unlikely(current->cred->user->kernel_banned))
78129+ return -EPERM;
78130+#endif
78131+ return 0;
78132+}
78133+
78134+int gr_process_kernel_setuid_ban(struct user_struct *user)
78135+{
78136+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78137+ if (unlikely(user->kernel_banned))
78138+ gr_fake_force_sig(SIGKILL, current);
78139+#endif
78140+ return 0;
78141+}
78142+
78143+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78144+{
78145+#ifdef CONFIG_GRKERNSEC_BRUTE
78146+ struct user_struct *user = current->cred->user;
78147+ if (unlikely(user->suid_banned)) {
78148+ if (suid_ban_expired(user))
78149+ return 0;
78150+ /* disallow execution of suid binaries only */
78151+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78152+ return -EPERM;
78153+ }
78154+#endif
78155+ return 0;
78156+}
78157diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78158new file mode 100644
78159index 0000000..e3650b6
78160--- /dev/null
78161+++ b/grsecurity/grsec_sock.c
78162@@ -0,0 +1,244 @@
78163+#include <linux/kernel.h>
78164+#include <linux/module.h>
78165+#include <linux/sched.h>
78166+#include <linux/file.h>
78167+#include <linux/net.h>
78168+#include <linux/in.h>
78169+#include <linux/ip.h>
78170+#include <net/sock.h>
78171+#include <net/inet_sock.h>
78172+#include <linux/grsecurity.h>
78173+#include <linux/grinternal.h>
78174+#include <linux/gracl.h>
78175+
78176+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78177+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78178+
78179+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78180+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78181+
78182+#ifdef CONFIG_UNIX_MODULE
78183+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78184+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78185+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78186+EXPORT_SYMBOL_GPL(gr_handle_create);
78187+#endif
78188+
78189+#ifdef CONFIG_GRKERNSEC
78190+#define gr_conn_table_size 32749
78191+struct conn_table_entry {
78192+ struct conn_table_entry *next;
78193+ struct signal_struct *sig;
78194+};
78195+
78196+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78197+DEFINE_SPINLOCK(gr_conn_table_lock);
78198+
78199+extern const char * gr_socktype_to_name(unsigned char type);
78200+extern const char * gr_proto_to_name(unsigned char proto);
78201+extern const char * gr_sockfamily_to_name(unsigned char family);
78202+
78203+static __inline__ int
78204+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78205+{
78206+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78207+}
78208+
78209+static __inline__ int
78210+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78211+ __u16 sport, __u16 dport)
78212+{
78213+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78214+ sig->gr_sport == sport && sig->gr_dport == dport))
78215+ return 1;
78216+ else
78217+ return 0;
78218+}
78219+
78220+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78221+{
78222+ struct conn_table_entry **match;
78223+ unsigned int index;
78224+
78225+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78226+ sig->gr_sport, sig->gr_dport,
78227+ gr_conn_table_size);
78228+
78229+ newent->sig = sig;
78230+
78231+ match = &gr_conn_table[index];
78232+ newent->next = *match;
78233+ *match = newent;
78234+
78235+ return;
78236+}
78237+
78238+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78239+{
78240+ struct conn_table_entry *match, *last = NULL;
78241+ unsigned int index;
78242+
78243+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78244+ sig->gr_sport, sig->gr_dport,
78245+ gr_conn_table_size);
78246+
78247+ match = gr_conn_table[index];
78248+ while (match && !conn_match(match->sig,
78249+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78250+ sig->gr_dport)) {
78251+ last = match;
78252+ match = match->next;
78253+ }
78254+
78255+ if (match) {
78256+ if (last)
78257+ last->next = match->next;
78258+ else
78259+ gr_conn_table[index] = NULL;
78260+ kfree(match);
78261+ }
78262+
78263+ return;
78264+}
78265+
78266+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78267+ __u16 sport, __u16 dport)
78268+{
78269+ struct conn_table_entry *match;
78270+ unsigned int index;
78271+
78272+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78273+
78274+ match = gr_conn_table[index];
78275+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78276+ match = match->next;
78277+
78278+ if (match)
78279+ return match->sig;
78280+ else
78281+ return NULL;
78282+}
78283+
78284+#endif
78285+
78286+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78287+{
78288+#ifdef CONFIG_GRKERNSEC
78289+ struct signal_struct *sig = current->signal;
78290+ struct conn_table_entry *newent;
78291+
78292+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78293+ if (newent == NULL)
78294+ return;
78295+ /* no bh lock needed since we are called with bh disabled */
78296+ spin_lock(&gr_conn_table_lock);
78297+ gr_del_task_from_ip_table_nolock(sig);
78298+ sig->gr_saddr = inet->inet_rcv_saddr;
78299+ sig->gr_daddr = inet->inet_daddr;
78300+ sig->gr_sport = inet->inet_sport;
78301+ sig->gr_dport = inet->inet_dport;
78302+ gr_add_to_task_ip_table_nolock(sig, newent);
78303+ spin_unlock(&gr_conn_table_lock);
78304+#endif
78305+ return;
78306+}
78307+
78308+void gr_del_task_from_ip_table(struct task_struct *task)
78309+{
78310+#ifdef CONFIG_GRKERNSEC
78311+ spin_lock_bh(&gr_conn_table_lock);
78312+ gr_del_task_from_ip_table_nolock(task->signal);
78313+ spin_unlock_bh(&gr_conn_table_lock);
78314+#endif
78315+ return;
78316+}
78317+
78318+void
78319+gr_attach_curr_ip(const struct sock *sk)
78320+{
78321+#ifdef CONFIG_GRKERNSEC
78322+ struct signal_struct *p, *set;
78323+ const struct inet_sock *inet = inet_sk(sk);
78324+
78325+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78326+ return;
78327+
78328+ set = current->signal;
78329+
78330+ spin_lock_bh(&gr_conn_table_lock);
78331+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78332+ inet->inet_dport, inet->inet_sport);
78333+ if (unlikely(p != NULL)) {
78334+ set->curr_ip = p->curr_ip;
78335+ set->used_accept = 1;
78336+ gr_del_task_from_ip_table_nolock(p);
78337+ spin_unlock_bh(&gr_conn_table_lock);
78338+ return;
78339+ }
78340+ spin_unlock_bh(&gr_conn_table_lock);
78341+
78342+ set->curr_ip = inet->inet_daddr;
78343+ set->used_accept = 1;
78344+#endif
78345+ return;
78346+}
78347+
78348+int
78349+gr_handle_sock_all(const int family, const int type, const int protocol)
78350+{
78351+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78352+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78353+ (family != AF_UNIX)) {
78354+ if (family == AF_INET)
78355+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78356+ else
78357+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78358+ return -EACCES;
78359+ }
78360+#endif
78361+ return 0;
78362+}
78363+
78364+int
78365+gr_handle_sock_server(const struct sockaddr *sck)
78366+{
78367+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78368+ if (grsec_enable_socket_server &&
78369+ in_group_p(grsec_socket_server_gid) &&
78370+ sck && (sck->sa_family != AF_UNIX) &&
78371+ (sck->sa_family != AF_LOCAL)) {
78372+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78373+ return -EACCES;
78374+ }
78375+#endif
78376+ return 0;
78377+}
78378+
78379+int
78380+gr_handle_sock_server_other(const struct sock *sck)
78381+{
78382+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78383+ if (grsec_enable_socket_server &&
78384+ in_group_p(grsec_socket_server_gid) &&
78385+ sck && (sck->sk_family != AF_UNIX) &&
78386+ (sck->sk_family != AF_LOCAL)) {
78387+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78388+ return -EACCES;
78389+ }
78390+#endif
78391+ return 0;
78392+}
78393+
78394+int
78395+gr_handle_sock_client(const struct sockaddr *sck)
78396+{
78397+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78398+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78399+ sck && (sck->sa_family != AF_UNIX) &&
78400+ (sck->sa_family != AF_LOCAL)) {
78401+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78402+ return -EACCES;
78403+ }
78404+#endif
78405+ return 0;
78406+}
78407diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78408new file mode 100644
78409index 0000000..cce889e
78410--- /dev/null
78411+++ b/grsecurity/grsec_sysctl.c
78412@@ -0,0 +1,488 @@
78413+#include <linux/kernel.h>
78414+#include <linux/sched.h>
78415+#include <linux/sysctl.h>
78416+#include <linux/grsecurity.h>
78417+#include <linux/grinternal.h>
78418+
78419+int
78420+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78421+{
78422+#ifdef CONFIG_GRKERNSEC_SYSCTL
78423+ if (dirname == NULL || name == NULL)
78424+ return 0;
78425+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78426+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78427+ return -EACCES;
78428+ }
78429+#endif
78430+ return 0;
78431+}
78432+
78433+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78434+static int __maybe_unused __read_only one = 1;
78435+#endif
78436+
78437+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78438+ defined(CONFIG_GRKERNSEC_DENYUSB)
78439+struct ctl_table grsecurity_table[] = {
78440+#ifdef CONFIG_GRKERNSEC_SYSCTL
78441+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78442+#ifdef CONFIG_GRKERNSEC_IO
78443+ {
78444+ .procname = "disable_priv_io",
78445+ .data = &grsec_disable_privio,
78446+ .maxlen = sizeof(int),
78447+ .mode = 0600,
78448+ .proc_handler = &proc_dointvec,
78449+ },
78450+#endif
78451+#endif
78452+#ifdef CONFIG_GRKERNSEC_LINK
78453+ {
78454+ .procname = "linking_restrictions",
78455+ .data = &grsec_enable_link,
78456+ .maxlen = sizeof(int),
78457+ .mode = 0600,
78458+ .proc_handler = &proc_dointvec,
78459+ },
78460+#endif
78461+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78462+ {
78463+ .procname = "enforce_symlinksifowner",
78464+ .data = &grsec_enable_symlinkown,
78465+ .maxlen = sizeof(int),
78466+ .mode = 0600,
78467+ .proc_handler = &proc_dointvec,
78468+ },
78469+ {
78470+ .procname = "symlinkown_gid",
78471+ .data = &grsec_symlinkown_gid,
78472+ .maxlen = sizeof(int),
78473+ .mode = 0600,
78474+ .proc_handler = &proc_dointvec,
78475+ },
78476+#endif
78477+#ifdef CONFIG_GRKERNSEC_BRUTE
78478+ {
78479+ .procname = "deter_bruteforce",
78480+ .data = &grsec_enable_brute,
78481+ .maxlen = sizeof(int),
78482+ .mode = 0600,
78483+ .proc_handler = &proc_dointvec,
78484+ },
78485+#endif
78486+#ifdef CONFIG_GRKERNSEC_FIFO
78487+ {
78488+ .procname = "fifo_restrictions",
78489+ .data = &grsec_enable_fifo,
78490+ .maxlen = sizeof(int),
78491+ .mode = 0600,
78492+ .proc_handler = &proc_dointvec,
78493+ },
78494+#endif
78495+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78496+ {
78497+ .procname = "ptrace_readexec",
78498+ .data = &grsec_enable_ptrace_readexec,
78499+ .maxlen = sizeof(int),
78500+ .mode = 0600,
78501+ .proc_handler = &proc_dointvec,
78502+ },
78503+#endif
78504+#ifdef CONFIG_GRKERNSEC_SETXID
78505+ {
78506+ .procname = "consistent_setxid",
78507+ .data = &grsec_enable_setxid,
78508+ .maxlen = sizeof(int),
78509+ .mode = 0600,
78510+ .proc_handler = &proc_dointvec,
78511+ },
78512+#endif
78513+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78514+ {
78515+ .procname = "ip_blackhole",
78516+ .data = &grsec_enable_blackhole,
78517+ .maxlen = sizeof(int),
78518+ .mode = 0600,
78519+ .proc_handler = &proc_dointvec,
78520+ },
78521+ {
78522+ .procname = "lastack_retries",
78523+ .data = &grsec_lastack_retries,
78524+ .maxlen = sizeof(int),
78525+ .mode = 0600,
78526+ .proc_handler = &proc_dointvec,
78527+ },
78528+#endif
78529+#ifdef CONFIG_GRKERNSEC_EXECLOG
78530+ {
78531+ .procname = "exec_logging",
78532+ .data = &grsec_enable_execlog,
78533+ .maxlen = sizeof(int),
78534+ .mode = 0600,
78535+ .proc_handler = &proc_dointvec,
78536+ },
78537+#endif
78538+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78539+ {
78540+ .procname = "rwxmap_logging",
78541+ .data = &grsec_enable_log_rwxmaps,
78542+ .maxlen = sizeof(int),
78543+ .mode = 0600,
78544+ .proc_handler = &proc_dointvec,
78545+ },
78546+#endif
78547+#ifdef CONFIG_GRKERNSEC_SIGNAL
78548+ {
78549+ .procname = "signal_logging",
78550+ .data = &grsec_enable_signal,
78551+ .maxlen = sizeof(int),
78552+ .mode = 0600,
78553+ .proc_handler = &proc_dointvec,
78554+ },
78555+#endif
78556+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78557+ {
78558+ .procname = "forkfail_logging",
78559+ .data = &grsec_enable_forkfail,
78560+ .maxlen = sizeof(int),
78561+ .mode = 0600,
78562+ .proc_handler = &proc_dointvec,
78563+ },
78564+#endif
78565+#ifdef CONFIG_GRKERNSEC_TIME
78566+ {
78567+ .procname = "timechange_logging",
78568+ .data = &grsec_enable_time,
78569+ .maxlen = sizeof(int),
78570+ .mode = 0600,
78571+ .proc_handler = &proc_dointvec,
78572+ },
78573+#endif
78574+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78575+ {
78576+ .procname = "chroot_deny_shmat",
78577+ .data = &grsec_enable_chroot_shmat,
78578+ .maxlen = sizeof(int),
78579+ .mode = 0600,
78580+ .proc_handler = &proc_dointvec,
78581+ },
78582+#endif
78583+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78584+ {
78585+ .procname = "chroot_deny_unix",
78586+ .data = &grsec_enable_chroot_unix,
78587+ .maxlen = sizeof(int),
78588+ .mode = 0600,
78589+ .proc_handler = &proc_dointvec,
78590+ },
78591+#endif
78592+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78593+ {
78594+ .procname = "chroot_deny_mount",
78595+ .data = &grsec_enable_chroot_mount,
78596+ .maxlen = sizeof(int),
78597+ .mode = 0600,
78598+ .proc_handler = &proc_dointvec,
78599+ },
78600+#endif
78601+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78602+ {
78603+ .procname = "chroot_deny_fchdir",
78604+ .data = &grsec_enable_chroot_fchdir,
78605+ .maxlen = sizeof(int),
78606+ .mode = 0600,
78607+ .proc_handler = &proc_dointvec,
78608+ },
78609+#endif
78610+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78611+ {
78612+ .procname = "chroot_deny_chroot",
78613+ .data = &grsec_enable_chroot_double,
78614+ .maxlen = sizeof(int),
78615+ .mode = 0600,
78616+ .proc_handler = &proc_dointvec,
78617+ },
78618+#endif
78619+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78620+ {
78621+ .procname = "chroot_deny_pivot",
78622+ .data = &grsec_enable_chroot_pivot,
78623+ .maxlen = sizeof(int),
78624+ .mode = 0600,
78625+ .proc_handler = &proc_dointvec,
78626+ },
78627+#endif
78628+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78629+ {
78630+ .procname = "chroot_enforce_chdir",
78631+ .data = &grsec_enable_chroot_chdir,
78632+ .maxlen = sizeof(int),
78633+ .mode = 0600,
78634+ .proc_handler = &proc_dointvec,
78635+ },
78636+#endif
78637+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78638+ {
78639+ .procname = "chroot_deny_chmod",
78640+ .data = &grsec_enable_chroot_chmod,
78641+ .maxlen = sizeof(int),
78642+ .mode = 0600,
78643+ .proc_handler = &proc_dointvec,
78644+ },
78645+#endif
78646+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78647+ {
78648+ .procname = "chroot_deny_mknod",
78649+ .data = &grsec_enable_chroot_mknod,
78650+ .maxlen = sizeof(int),
78651+ .mode = 0600,
78652+ .proc_handler = &proc_dointvec,
78653+ },
78654+#endif
78655+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78656+ {
78657+ .procname = "chroot_restrict_nice",
78658+ .data = &grsec_enable_chroot_nice,
78659+ .maxlen = sizeof(int),
78660+ .mode = 0600,
78661+ .proc_handler = &proc_dointvec,
78662+ },
78663+#endif
78664+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78665+ {
78666+ .procname = "chroot_execlog",
78667+ .data = &grsec_enable_chroot_execlog,
78668+ .maxlen = sizeof(int),
78669+ .mode = 0600,
78670+ .proc_handler = &proc_dointvec,
78671+ },
78672+#endif
78673+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78674+ {
78675+ .procname = "chroot_caps",
78676+ .data = &grsec_enable_chroot_caps,
78677+ .maxlen = sizeof(int),
78678+ .mode = 0600,
78679+ .proc_handler = &proc_dointvec,
78680+ },
78681+#endif
78682+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78683+ {
78684+ .procname = "chroot_deny_bad_rename",
78685+ .data = &grsec_enable_chroot_rename,
78686+ .maxlen = sizeof(int),
78687+ .mode = 0600,
78688+ .proc_handler = &proc_dointvec,
78689+ },
78690+#endif
78691+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78692+ {
78693+ .procname = "chroot_deny_sysctl",
78694+ .data = &grsec_enable_chroot_sysctl,
78695+ .maxlen = sizeof(int),
78696+ .mode = 0600,
78697+ .proc_handler = &proc_dointvec,
78698+ },
78699+#endif
78700+#ifdef CONFIG_GRKERNSEC_TPE
78701+ {
78702+ .procname = "tpe",
78703+ .data = &grsec_enable_tpe,
78704+ .maxlen = sizeof(int),
78705+ .mode = 0600,
78706+ .proc_handler = &proc_dointvec,
78707+ },
78708+ {
78709+ .procname = "tpe_gid",
78710+ .data = &grsec_tpe_gid,
78711+ .maxlen = sizeof(int),
78712+ .mode = 0600,
78713+ .proc_handler = &proc_dointvec,
78714+ },
78715+#endif
78716+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78717+ {
78718+ .procname = "tpe_invert",
78719+ .data = &grsec_enable_tpe_invert,
78720+ .maxlen = sizeof(int),
78721+ .mode = 0600,
78722+ .proc_handler = &proc_dointvec,
78723+ },
78724+#endif
78725+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78726+ {
78727+ .procname = "tpe_restrict_all",
78728+ .data = &grsec_enable_tpe_all,
78729+ .maxlen = sizeof(int),
78730+ .mode = 0600,
78731+ .proc_handler = &proc_dointvec,
78732+ },
78733+#endif
78734+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78735+ {
78736+ .procname = "socket_all",
78737+ .data = &grsec_enable_socket_all,
78738+ .maxlen = sizeof(int),
78739+ .mode = 0600,
78740+ .proc_handler = &proc_dointvec,
78741+ },
78742+ {
78743+ .procname = "socket_all_gid",
78744+ .data = &grsec_socket_all_gid,
78745+ .maxlen = sizeof(int),
78746+ .mode = 0600,
78747+ .proc_handler = &proc_dointvec,
78748+ },
78749+#endif
78750+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78751+ {
78752+ .procname = "socket_client",
78753+ .data = &grsec_enable_socket_client,
78754+ .maxlen = sizeof(int),
78755+ .mode = 0600,
78756+ .proc_handler = &proc_dointvec,
78757+ },
78758+ {
78759+ .procname = "socket_client_gid",
78760+ .data = &grsec_socket_client_gid,
78761+ .maxlen = sizeof(int),
78762+ .mode = 0600,
78763+ .proc_handler = &proc_dointvec,
78764+ },
78765+#endif
78766+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78767+ {
78768+ .procname = "socket_server",
78769+ .data = &grsec_enable_socket_server,
78770+ .maxlen = sizeof(int),
78771+ .mode = 0600,
78772+ .proc_handler = &proc_dointvec,
78773+ },
78774+ {
78775+ .procname = "socket_server_gid",
78776+ .data = &grsec_socket_server_gid,
78777+ .maxlen = sizeof(int),
78778+ .mode = 0600,
78779+ .proc_handler = &proc_dointvec,
78780+ },
78781+#endif
78782+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78783+ {
78784+ .procname = "audit_group",
78785+ .data = &grsec_enable_group,
78786+ .maxlen = sizeof(int),
78787+ .mode = 0600,
78788+ .proc_handler = &proc_dointvec,
78789+ },
78790+ {
78791+ .procname = "audit_gid",
78792+ .data = &grsec_audit_gid,
78793+ .maxlen = sizeof(int),
78794+ .mode = 0600,
78795+ .proc_handler = &proc_dointvec,
78796+ },
78797+#endif
78798+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78799+ {
78800+ .procname = "audit_chdir",
78801+ .data = &grsec_enable_chdir,
78802+ .maxlen = sizeof(int),
78803+ .mode = 0600,
78804+ .proc_handler = &proc_dointvec,
78805+ },
78806+#endif
78807+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78808+ {
78809+ .procname = "audit_mount",
78810+ .data = &grsec_enable_mount,
78811+ .maxlen = sizeof(int),
78812+ .mode = 0600,
78813+ .proc_handler = &proc_dointvec,
78814+ },
78815+#endif
78816+#ifdef CONFIG_GRKERNSEC_DMESG
78817+ {
78818+ .procname = "dmesg",
78819+ .data = &grsec_enable_dmesg,
78820+ .maxlen = sizeof(int),
78821+ .mode = 0600,
78822+ .proc_handler = &proc_dointvec,
78823+ },
78824+#endif
78825+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78826+ {
78827+ .procname = "chroot_findtask",
78828+ .data = &grsec_enable_chroot_findtask,
78829+ .maxlen = sizeof(int),
78830+ .mode = 0600,
78831+ .proc_handler = &proc_dointvec,
78832+ },
78833+#endif
78834+#ifdef CONFIG_GRKERNSEC_RESLOG
78835+ {
78836+ .procname = "resource_logging",
78837+ .data = &grsec_resource_logging,
78838+ .maxlen = sizeof(int),
78839+ .mode = 0600,
78840+ .proc_handler = &proc_dointvec,
78841+ },
78842+#endif
78843+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78844+ {
78845+ .procname = "audit_ptrace",
78846+ .data = &grsec_enable_audit_ptrace,
78847+ .maxlen = sizeof(int),
78848+ .mode = 0600,
78849+ .proc_handler = &proc_dointvec,
78850+ },
78851+#endif
78852+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78853+ {
78854+ .procname = "harden_ptrace",
78855+ .data = &grsec_enable_harden_ptrace,
78856+ .maxlen = sizeof(int),
78857+ .mode = 0600,
78858+ .proc_handler = &proc_dointvec,
78859+ },
78860+#endif
78861+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78862+ {
78863+ .procname = "harden_ipc",
78864+ .data = &grsec_enable_harden_ipc,
78865+ .maxlen = sizeof(int),
78866+ .mode = 0600,
78867+ .proc_handler = &proc_dointvec,
78868+ },
78869+#endif
78870+ {
78871+ .procname = "grsec_lock",
78872+ .data = &grsec_lock,
78873+ .maxlen = sizeof(int),
78874+ .mode = 0600,
78875+ .proc_handler = &proc_dointvec,
78876+ },
78877+#endif
78878+#ifdef CONFIG_GRKERNSEC_ROFS
78879+ {
78880+ .procname = "romount_protect",
78881+ .data = &grsec_enable_rofs,
78882+ .maxlen = sizeof(int),
78883+ .mode = 0600,
78884+ .proc_handler = &proc_dointvec_minmax,
78885+ .extra1 = &one,
78886+ .extra2 = &one,
78887+ },
78888+#endif
78889+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
78890+ {
78891+ .procname = "deny_new_usb",
78892+ .data = &grsec_deny_new_usb,
78893+ .maxlen = sizeof(int),
78894+ .mode = 0600,
78895+ .proc_handler = &proc_dointvec,
78896+ },
78897+#endif
78898+ { }
78899+};
78900+#endif
78901diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
78902new file mode 100644
78903index 0000000..61b514e
78904--- /dev/null
78905+++ b/grsecurity/grsec_time.c
78906@@ -0,0 +1,16 @@
78907+#include <linux/kernel.h>
78908+#include <linux/sched.h>
78909+#include <linux/grinternal.h>
78910+#include <linux/module.h>
78911+
78912+void
78913+gr_log_timechange(void)
78914+{
78915+#ifdef CONFIG_GRKERNSEC_TIME
78916+ if (grsec_enable_time)
78917+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
78918+#endif
78919+ return;
78920+}
78921+
78922+EXPORT_SYMBOL_GPL(gr_log_timechange);
78923diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
78924new file mode 100644
78925index 0000000..d1953de
78926--- /dev/null
78927+++ b/grsecurity/grsec_tpe.c
78928@@ -0,0 +1,78 @@
78929+#include <linux/kernel.h>
78930+#include <linux/sched.h>
78931+#include <linux/file.h>
78932+#include <linux/fs.h>
78933+#include <linux/grinternal.h>
78934+
78935+extern int gr_acl_tpe_check(void);
78936+
78937+int
78938+gr_tpe_allow(const struct file *file)
78939+{
78940+#ifdef CONFIG_GRKERNSEC
78941+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
78942+ struct inode *file_inode = file->f_path.dentry->d_inode;
78943+ const struct cred *cred = current_cred();
78944+ char *msg = NULL;
78945+ char *msg2 = NULL;
78946+
78947+ // never restrict root
78948+ if (gr_is_global_root(cred->uid))
78949+ return 1;
78950+
78951+ if (grsec_enable_tpe) {
78952+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78953+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
78954+ msg = "not being in trusted group";
78955+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
78956+ msg = "being in untrusted group";
78957+#else
78958+ if (in_group_p(grsec_tpe_gid))
78959+ msg = "being in untrusted group";
78960+#endif
78961+ }
78962+ if (!msg && gr_acl_tpe_check())
78963+ msg = "being in untrusted role";
78964+
78965+ // not in any affected group/role
78966+ if (!msg)
78967+ goto next_check;
78968+
78969+ if (gr_is_global_nonroot(inode->i_uid))
78970+ msg2 = "file in non-root-owned directory";
78971+ else if (inode->i_mode & S_IWOTH)
78972+ msg2 = "file in world-writable directory";
78973+ else if (inode->i_mode & S_IWGRP)
78974+ msg2 = "file in group-writable directory";
78975+ else if (file_inode->i_mode & S_IWOTH)
78976+ msg2 = "file is world-writable";
78977+
78978+ if (msg && msg2) {
78979+ char fullmsg[70] = {0};
78980+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
78981+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
78982+ return 0;
78983+ }
78984+ msg = NULL;
78985+next_check:
78986+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78987+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
78988+ return 1;
78989+
78990+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
78991+ msg = "directory not owned by user";
78992+ else if (inode->i_mode & S_IWOTH)
78993+ msg = "file in world-writable directory";
78994+ else if (inode->i_mode & S_IWGRP)
78995+ msg = "file in group-writable directory";
78996+ else if (file_inode->i_mode & S_IWOTH)
78997+ msg = "file is world-writable";
78998+
78999+ if (msg) {
79000+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79001+ return 0;
79002+ }
79003+#endif
79004+#endif
79005+ return 1;
79006+}
79007diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79008new file mode 100644
79009index 0000000..ae02d8e
79010--- /dev/null
79011+++ b/grsecurity/grsec_usb.c
79012@@ -0,0 +1,15 @@
79013+#include <linux/kernel.h>
79014+#include <linux/grinternal.h>
79015+#include <linux/module.h>
79016+
79017+int gr_handle_new_usb(void)
79018+{
79019+#ifdef CONFIG_GRKERNSEC_DENYUSB
79020+ if (grsec_deny_new_usb) {
79021+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79022+ return 1;
79023+ }
79024+#endif
79025+ return 0;
79026+}
79027+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79028diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79029new file mode 100644
79030index 0000000..158b330
79031--- /dev/null
79032+++ b/grsecurity/grsum.c
79033@@ -0,0 +1,64 @@
79034+#include <linux/err.h>
79035+#include <linux/kernel.h>
79036+#include <linux/sched.h>
79037+#include <linux/mm.h>
79038+#include <linux/scatterlist.h>
79039+#include <linux/crypto.h>
79040+#include <linux/gracl.h>
79041+
79042+
79043+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79044+#error "crypto and sha256 must be built into the kernel"
79045+#endif
79046+
79047+int
79048+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79049+{
79050+ struct crypto_hash *tfm;
79051+ struct hash_desc desc;
79052+ struct scatterlist sg[2];
79053+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79054+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79055+ unsigned long *sumptr = (unsigned long *)sum;
79056+ int cryptres;
79057+ int retval = 1;
79058+ volatile int mismatched = 0;
79059+ volatile int dummy = 0;
79060+ unsigned int i;
79061+
79062+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79063+ if (IS_ERR(tfm)) {
79064+ /* should never happen, since sha256 should be built in */
79065+ memset(entry->pw, 0, GR_PW_LEN);
79066+ return 1;
79067+ }
79068+
79069+ sg_init_table(sg, 2);
79070+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79071+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79072+
79073+ desc.tfm = tfm;
79074+ desc.flags = 0;
79075+
79076+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79077+ temp_sum);
79078+
79079+ memset(entry->pw, 0, GR_PW_LEN);
79080+
79081+ if (cryptres)
79082+ goto out;
79083+
79084+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79085+ if (sumptr[i] != tmpsumptr[i])
79086+ mismatched = 1;
79087+ else
79088+ dummy = 1; // waste a cycle
79089+
79090+ if (!mismatched)
79091+ retval = dummy - 1;
79092+
79093+out:
79094+ crypto_free_hash(tfm);
79095+
79096+ return retval;
79097+}
79098diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79099index 77ff547..181834f 100644
79100--- a/include/asm-generic/4level-fixup.h
79101+++ b/include/asm-generic/4level-fixup.h
79102@@ -13,8 +13,10 @@
79103 #define pmd_alloc(mm, pud, address) \
79104 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79105 NULL: pmd_offset(pud, address))
79106+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79107
79108 #define pud_alloc(mm, pgd, address) (pgd)
79109+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79110 #define pud_offset(pgd, start) (pgd)
79111 #define pud_none(pud) 0
79112 #define pud_bad(pud) 0
79113diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79114index b7babf0..1e4b4f1 100644
79115--- a/include/asm-generic/atomic-long.h
79116+++ b/include/asm-generic/atomic-long.h
79117@@ -22,6 +22,12 @@
79118
79119 typedef atomic64_t atomic_long_t;
79120
79121+#ifdef CONFIG_PAX_REFCOUNT
79122+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79123+#else
79124+typedef atomic64_t atomic_long_unchecked_t;
79125+#endif
79126+
79127 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79128
79129 static inline long atomic_long_read(atomic_long_t *l)
79130@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79131 return (long)atomic64_read(v);
79132 }
79133
79134+#ifdef CONFIG_PAX_REFCOUNT
79135+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79136+{
79137+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79138+
79139+ return (long)atomic64_read_unchecked(v);
79140+}
79141+#endif
79142+
79143 static inline void atomic_long_set(atomic_long_t *l, long i)
79144 {
79145 atomic64_t *v = (atomic64_t *)l;
79146@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79147 atomic64_set(v, i);
79148 }
79149
79150+#ifdef CONFIG_PAX_REFCOUNT
79151+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79152+{
79153+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79154+
79155+ atomic64_set_unchecked(v, i);
79156+}
79157+#endif
79158+
79159 static inline void atomic_long_inc(atomic_long_t *l)
79160 {
79161 atomic64_t *v = (atomic64_t *)l;
79162@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79163 atomic64_inc(v);
79164 }
79165
79166+#ifdef CONFIG_PAX_REFCOUNT
79167+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79168+{
79169+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79170+
79171+ atomic64_inc_unchecked(v);
79172+}
79173+#endif
79174+
79175 static inline void atomic_long_dec(atomic_long_t *l)
79176 {
79177 atomic64_t *v = (atomic64_t *)l;
79178@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79179 atomic64_dec(v);
79180 }
79181
79182+#ifdef CONFIG_PAX_REFCOUNT
79183+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79184+{
79185+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79186+
79187+ atomic64_dec_unchecked(v);
79188+}
79189+#endif
79190+
79191 static inline void atomic_long_add(long i, atomic_long_t *l)
79192 {
79193 atomic64_t *v = (atomic64_t *)l;
79194@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79195 atomic64_add(i, v);
79196 }
79197
79198+#ifdef CONFIG_PAX_REFCOUNT
79199+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79200+{
79201+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79202+
79203+ atomic64_add_unchecked(i, v);
79204+}
79205+#endif
79206+
79207 static inline void atomic_long_sub(long i, atomic_long_t *l)
79208 {
79209 atomic64_t *v = (atomic64_t *)l;
79210@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79211 atomic64_sub(i, v);
79212 }
79213
79214+#ifdef CONFIG_PAX_REFCOUNT
79215+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79216+{
79217+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79218+
79219+ atomic64_sub_unchecked(i, v);
79220+}
79221+#endif
79222+
79223 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79224 {
79225 atomic64_t *v = (atomic64_t *)l;
79226@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79227 return atomic64_add_negative(i, v);
79228 }
79229
79230-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79231+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79232 {
79233 atomic64_t *v = (atomic64_t *)l;
79234
79235 return (long)atomic64_add_return(i, v);
79236 }
79237
79238+#ifdef CONFIG_PAX_REFCOUNT
79239+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79240+{
79241+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79242+
79243+ return (long)atomic64_add_return_unchecked(i, v);
79244+}
79245+#endif
79246+
79247 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79248 {
79249 atomic64_t *v = (atomic64_t *)l;
79250@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79251 return (long)atomic64_inc_return(v);
79252 }
79253
79254+#ifdef CONFIG_PAX_REFCOUNT
79255+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79256+{
79257+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79258+
79259+ return (long)atomic64_inc_return_unchecked(v);
79260+}
79261+#endif
79262+
79263 static inline long atomic_long_dec_return(atomic_long_t *l)
79264 {
79265 atomic64_t *v = (atomic64_t *)l;
79266@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79267
79268 typedef atomic_t atomic_long_t;
79269
79270+#ifdef CONFIG_PAX_REFCOUNT
79271+typedef atomic_unchecked_t atomic_long_unchecked_t;
79272+#else
79273+typedef atomic_t atomic_long_unchecked_t;
79274+#endif
79275+
79276 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79277 static inline long atomic_long_read(atomic_long_t *l)
79278 {
79279@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79280 return (long)atomic_read(v);
79281 }
79282
79283+#ifdef CONFIG_PAX_REFCOUNT
79284+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79285+{
79286+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79287+
79288+ return (long)atomic_read_unchecked(v);
79289+}
79290+#endif
79291+
79292 static inline void atomic_long_set(atomic_long_t *l, long i)
79293 {
79294 atomic_t *v = (atomic_t *)l;
79295@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79296 atomic_set(v, i);
79297 }
79298
79299+#ifdef CONFIG_PAX_REFCOUNT
79300+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79301+{
79302+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79303+
79304+ atomic_set_unchecked(v, i);
79305+}
79306+#endif
79307+
79308 static inline void atomic_long_inc(atomic_long_t *l)
79309 {
79310 atomic_t *v = (atomic_t *)l;
79311@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79312 atomic_inc(v);
79313 }
79314
79315+#ifdef CONFIG_PAX_REFCOUNT
79316+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79317+{
79318+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79319+
79320+ atomic_inc_unchecked(v);
79321+}
79322+#endif
79323+
79324 static inline void atomic_long_dec(atomic_long_t *l)
79325 {
79326 atomic_t *v = (atomic_t *)l;
79327@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79328 atomic_dec(v);
79329 }
79330
79331+#ifdef CONFIG_PAX_REFCOUNT
79332+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79333+{
79334+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79335+
79336+ atomic_dec_unchecked(v);
79337+}
79338+#endif
79339+
79340 static inline void atomic_long_add(long i, atomic_long_t *l)
79341 {
79342 atomic_t *v = (atomic_t *)l;
79343@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79344 atomic_add(i, v);
79345 }
79346
79347+#ifdef CONFIG_PAX_REFCOUNT
79348+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79349+{
79350+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79351+
79352+ atomic_add_unchecked(i, v);
79353+}
79354+#endif
79355+
79356 static inline void atomic_long_sub(long i, atomic_long_t *l)
79357 {
79358 atomic_t *v = (atomic_t *)l;
79359@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79360 atomic_sub(i, v);
79361 }
79362
79363+#ifdef CONFIG_PAX_REFCOUNT
79364+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79365+{
79366+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79367+
79368+ atomic_sub_unchecked(i, v);
79369+}
79370+#endif
79371+
79372 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79373 {
79374 atomic_t *v = (atomic_t *)l;
79375@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79376 return atomic_add_negative(i, v);
79377 }
79378
79379-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79380+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79381 {
79382 atomic_t *v = (atomic_t *)l;
79383
79384 return (long)atomic_add_return(i, v);
79385 }
79386
79387+#ifdef CONFIG_PAX_REFCOUNT
79388+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79389+{
79390+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79391+
79392+ return (long)atomic_add_return_unchecked(i, v);
79393+}
79394+
79395+#endif
79396+
79397 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79398 {
79399 atomic_t *v = (atomic_t *)l;
79400@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79401 return (long)atomic_inc_return(v);
79402 }
79403
79404+#ifdef CONFIG_PAX_REFCOUNT
79405+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79406+{
79407+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79408+
79409+ return (long)atomic_inc_return_unchecked(v);
79410+}
79411+#endif
79412+
79413 static inline long atomic_long_dec_return(atomic_long_t *l)
79414 {
79415 atomic_t *v = (atomic_t *)l;
79416@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79417
79418 #endif /* BITS_PER_LONG == 64 */
79419
79420+#ifdef CONFIG_PAX_REFCOUNT
79421+static inline void pax_refcount_needs_these_functions(void)
79422+{
79423+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79424+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79425+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79426+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79427+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79428+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79429+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79430+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79431+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79432+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79433+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79434+#ifdef CONFIG_X86
79435+ atomic_clear_mask_unchecked(0, NULL);
79436+ atomic_set_mask_unchecked(0, NULL);
79437+#endif
79438+
79439+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79440+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79441+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79442+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79443+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79444+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79445+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79446+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79447+}
79448+#else
79449+#define atomic_read_unchecked(v) atomic_read(v)
79450+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79451+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79452+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79453+#define atomic_inc_unchecked(v) atomic_inc(v)
79454+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79455+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79456+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79457+#define atomic_dec_unchecked(v) atomic_dec(v)
79458+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79459+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79460+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79461+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79462+
79463+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79464+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79465+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79466+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79467+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79468+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79469+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79470+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79471+#endif
79472+
79473 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
79474diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79475index 30ad9c8..c70c170 100644
79476--- a/include/asm-generic/atomic64.h
79477+++ b/include/asm-generic/atomic64.h
79478@@ -16,6 +16,8 @@ typedef struct {
79479 long long counter;
79480 } atomic64_t;
79481
79482+typedef atomic64_t atomic64_unchecked_t;
79483+
79484 #define ATOMIC64_INIT(i) { (i) }
79485
79486 extern long long atomic64_read(const atomic64_t *v);
79487@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79488 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79489 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79490
79491+#define atomic64_read_unchecked(v) atomic64_read(v)
79492+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79493+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79494+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79495+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79496+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79497+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79498+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79499+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79500+
79501 #endif /* _ASM_GENERIC_ATOMIC64_H */
79502diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79503index f5c40b0..e902f9d 100644
79504--- a/include/asm-generic/barrier.h
79505+++ b/include/asm-generic/barrier.h
79506@@ -82,7 +82,7 @@
79507 do { \
79508 compiletime_assert_atomic_type(*p); \
79509 smp_mb(); \
79510- ACCESS_ONCE(*p) = (v); \
79511+ ACCESS_ONCE_RW(*p) = (v); \
79512 } while (0)
79513
79514 #define smp_load_acquire(p) \
79515diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79516index a60a7cc..0fe12f2 100644
79517--- a/include/asm-generic/bitops/__fls.h
79518+++ b/include/asm-generic/bitops/__fls.h
79519@@ -9,7 +9,7 @@
79520 *
79521 * Undefined if no set bit exists, so code should check against 0 first.
79522 */
79523-static __always_inline unsigned long __fls(unsigned long word)
79524+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79525 {
79526 int num = BITS_PER_LONG - 1;
79527
79528diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79529index 0576d1f..dad6c71 100644
79530--- a/include/asm-generic/bitops/fls.h
79531+++ b/include/asm-generic/bitops/fls.h
79532@@ -9,7 +9,7 @@
79533 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79534 */
79535
79536-static __always_inline int fls(int x)
79537+static __always_inline int __intentional_overflow(-1) fls(int x)
79538 {
79539 int r = 32;
79540
79541diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79542index b097cf8..3d40e14 100644
79543--- a/include/asm-generic/bitops/fls64.h
79544+++ b/include/asm-generic/bitops/fls64.h
79545@@ -15,7 +15,7 @@
79546 * at position 64.
79547 */
79548 #if BITS_PER_LONG == 32
79549-static __always_inline int fls64(__u64 x)
79550+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79551 {
79552 __u32 h = x >> 32;
79553 if (h)
79554@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79555 return fls(x);
79556 }
79557 #elif BITS_PER_LONG == 64
79558-static __always_inline int fls64(__u64 x)
79559+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79560 {
79561 if (x == 0)
79562 return 0;
79563diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79564index 1bfcfe5..e04c5c9 100644
79565--- a/include/asm-generic/cache.h
79566+++ b/include/asm-generic/cache.h
79567@@ -6,7 +6,7 @@
79568 * cache lines need to provide their own cache.h.
79569 */
79570
79571-#define L1_CACHE_SHIFT 5
79572-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79573+#define L1_CACHE_SHIFT 5UL
79574+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79575
79576 #endif /* __ASM_GENERIC_CACHE_H */
79577diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79578index 0d68a1e..b74a761 100644
79579--- a/include/asm-generic/emergency-restart.h
79580+++ b/include/asm-generic/emergency-restart.h
79581@@ -1,7 +1,7 @@
79582 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79583 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79584
79585-static inline void machine_emergency_restart(void)
79586+static inline __noreturn void machine_emergency_restart(void)
79587 {
79588 machine_restart(NULL);
79589 }
79590diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79591index 90f99c7..00ce236 100644
79592--- a/include/asm-generic/kmap_types.h
79593+++ b/include/asm-generic/kmap_types.h
79594@@ -2,9 +2,9 @@
79595 #define _ASM_GENERIC_KMAP_TYPES_H
79596
79597 #ifdef __WITH_KM_FENCE
79598-# define KM_TYPE_NR 41
79599+# define KM_TYPE_NR 42
79600 #else
79601-# define KM_TYPE_NR 20
79602+# define KM_TYPE_NR 21
79603 #endif
79604
79605 #endif
79606diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79607index 9ceb03b..62b0b8f 100644
79608--- a/include/asm-generic/local.h
79609+++ b/include/asm-generic/local.h
79610@@ -23,24 +23,37 @@ typedef struct
79611 atomic_long_t a;
79612 } local_t;
79613
79614+typedef struct {
79615+ atomic_long_unchecked_t a;
79616+} local_unchecked_t;
79617+
79618 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79619
79620 #define local_read(l) atomic_long_read(&(l)->a)
79621+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79622 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79623+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79624 #define local_inc(l) atomic_long_inc(&(l)->a)
79625+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79626 #define local_dec(l) atomic_long_dec(&(l)->a)
79627+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79628 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79629+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79630 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79631+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79632
79633 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79634 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79635 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79636 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79637 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79638+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79639 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79640 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79641+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79642
79643 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79644+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79645 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79646 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79647 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
79648diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79649index 725612b..9cc513a 100644
79650--- a/include/asm-generic/pgtable-nopmd.h
79651+++ b/include/asm-generic/pgtable-nopmd.h
79652@@ -1,14 +1,19 @@
79653 #ifndef _PGTABLE_NOPMD_H
79654 #define _PGTABLE_NOPMD_H
79655
79656-#ifndef __ASSEMBLY__
79657-
79658 #include <asm-generic/pgtable-nopud.h>
79659
79660-struct mm_struct;
79661-
79662 #define __PAGETABLE_PMD_FOLDED
79663
79664+#define PMD_SHIFT PUD_SHIFT
79665+#define PTRS_PER_PMD 1
79666+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79667+#define PMD_MASK (~(PMD_SIZE-1))
79668+
79669+#ifndef __ASSEMBLY__
79670+
79671+struct mm_struct;
79672+
79673 /*
79674 * Having the pmd type consist of a pud gets the size right, and allows
79675 * us to conceptually access the pud entry that this pmd is folded into
79676@@ -16,11 +21,6 @@ struct mm_struct;
79677 */
79678 typedef struct { pud_t pud; } pmd_t;
79679
79680-#define PMD_SHIFT PUD_SHIFT
79681-#define PTRS_PER_PMD 1
79682-#define PMD_SIZE (1UL << PMD_SHIFT)
79683-#define PMD_MASK (~(PMD_SIZE-1))
79684-
79685 /*
79686 * The "pud_xxx()" functions here are trivial for a folded two-level
79687 * setup: the pmd is never bad, and a pmd always exists (as it's folded
79688diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79689index 810431d..0ec4804f 100644
79690--- a/include/asm-generic/pgtable-nopud.h
79691+++ b/include/asm-generic/pgtable-nopud.h
79692@@ -1,10 +1,15 @@
79693 #ifndef _PGTABLE_NOPUD_H
79694 #define _PGTABLE_NOPUD_H
79695
79696-#ifndef __ASSEMBLY__
79697-
79698 #define __PAGETABLE_PUD_FOLDED
79699
79700+#define PUD_SHIFT PGDIR_SHIFT
79701+#define PTRS_PER_PUD 1
79702+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79703+#define PUD_MASK (~(PUD_SIZE-1))
79704+
79705+#ifndef __ASSEMBLY__
79706+
79707 /*
79708 * Having the pud type consist of a pgd gets the size right, and allows
79709 * us to conceptually access the pgd entry that this pud is folded into
79710@@ -12,11 +17,6 @@
79711 */
79712 typedef struct { pgd_t pgd; } pud_t;
79713
79714-#define PUD_SHIFT PGDIR_SHIFT
79715-#define PTRS_PER_PUD 1
79716-#define PUD_SIZE (1UL << PUD_SHIFT)
79717-#define PUD_MASK (~(PUD_SIZE-1))
79718-
79719 /*
79720 * The "pgd_xxx()" functions here are trivial for a folded two-level
79721 * setup: the pud is never bad, and a pud always exists (as it's folded
79722@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79723 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79724
79725 #define pgd_populate(mm, pgd, pud) do { } while (0)
79726+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79727 /*
79728 * (puds are folded into pgds so this doesn't get actually called,
79729 * but the define is needed for a generic inline function.)
79730diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79731index 177d597..2826237 100644
79732--- a/include/asm-generic/pgtable.h
79733+++ b/include/asm-generic/pgtable.h
79734@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79735 }
79736 #endif /* CONFIG_NUMA_BALANCING */
79737
79738+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79739+#ifdef CONFIG_PAX_KERNEXEC
79740+#error KERNEXEC requires pax_open_kernel
79741+#else
79742+static inline unsigned long pax_open_kernel(void) { return 0; }
79743+#endif
79744+#endif
79745+
79746+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
79747+#ifdef CONFIG_PAX_KERNEXEC
79748+#error KERNEXEC requires pax_close_kernel
79749+#else
79750+static inline unsigned long pax_close_kernel(void) { return 0; }
79751+#endif
79752+#endif
79753+
79754 #endif /* CONFIG_MMU */
79755
79756 #endif /* !__ASSEMBLY__ */
79757diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
79758index 72d8803..cb9749c 100644
79759--- a/include/asm-generic/uaccess.h
79760+++ b/include/asm-generic/uaccess.h
79761@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
79762 return __clear_user(to, n);
79763 }
79764
79765+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
79766+#ifdef CONFIG_PAX_MEMORY_UDEREF
79767+#error UDEREF requires pax_open_userland
79768+#else
79769+static inline unsigned long pax_open_userland(void) { return 0; }
79770+#endif
79771+#endif
79772+
79773+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
79774+#ifdef CONFIG_PAX_MEMORY_UDEREF
79775+#error UDEREF requires pax_close_userland
79776+#else
79777+static inline unsigned long pax_close_userland(void) { return 0; }
79778+#endif
79779+#endif
79780+
79781 #endif /* __ASM_GENERIC_UACCESS_H */
79782diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
79783index bee5d68..8d362d1 100644
79784--- a/include/asm-generic/vmlinux.lds.h
79785+++ b/include/asm-generic/vmlinux.lds.h
79786@@ -234,6 +234,7 @@
79787 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
79788 VMLINUX_SYMBOL(__start_rodata) = .; \
79789 *(.rodata) *(.rodata.*) \
79790+ *(.data..read_only) \
79791 *(__vermagic) /* Kernel version magic */ \
79792 . = ALIGN(8); \
79793 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
79794@@ -726,17 +727,18 @@
79795 * section in the linker script will go there too. @phdr should have
79796 * a leading colon.
79797 *
79798- * Note that this macros defines __per_cpu_load as an absolute symbol.
79799+ * Note that this macros defines per_cpu_load as an absolute symbol.
79800 * If there is no need to put the percpu section at a predetermined
79801 * address, use PERCPU_SECTION.
79802 */
79803 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
79804- VMLINUX_SYMBOL(__per_cpu_load) = .; \
79805- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
79806+ per_cpu_load = .; \
79807+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
79808 - LOAD_OFFSET) { \
79809+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
79810 PERCPU_INPUT(cacheline) \
79811 } phdr \
79812- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
79813+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
79814
79815 /**
79816 * PERCPU_SECTION - define output section for percpu area, simple version
79817diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
79818index 623a59c..1e79ab9 100644
79819--- a/include/crypto/algapi.h
79820+++ b/include/crypto/algapi.h
79821@@ -34,7 +34,7 @@ struct crypto_type {
79822 unsigned int maskclear;
79823 unsigned int maskset;
79824 unsigned int tfmsize;
79825-};
79826+} __do_const;
79827
79828 struct crypto_instance {
79829 struct crypto_alg alg;
79830diff --git a/include/drm/drmP.h b/include/drm/drmP.h
79831index e1b2e8b..2697bd2 100644
79832--- a/include/drm/drmP.h
79833+++ b/include/drm/drmP.h
79834@@ -59,6 +59,7 @@
79835
79836 #include <asm/mman.h>
79837 #include <asm/pgalloc.h>
79838+#include <asm/local.h>
79839 #include <asm/uaccess.h>
79840
79841 #include <uapi/drm/drm.h>
79842@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
79843 * \param cmd command.
79844 * \param arg argument.
79845 */
79846-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
79847+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
79848+ struct drm_file *file_priv);
79849+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
79850 struct drm_file *file_priv);
79851
79852-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79853+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
79854 unsigned long arg);
79855
79856 #define DRM_IOCTL_NR(n) _IOC_NR(n)
79857@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79858 struct drm_ioctl_desc {
79859 unsigned int cmd;
79860 int flags;
79861- drm_ioctl_t *func;
79862+ drm_ioctl_t func;
79863 unsigned int cmd_drv;
79864 const char *name;
79865-};
79866+} __do_const;
79867
79868 /**
79869 * Creates a driver or general drm_ioctl_desc array entry for the given
79870@@ -629,7 +632,8 @@ struct drm_info_list {
79871 int (*show)(struct seq_file*, void*); /** show callback */
79872 u32 driver_features; /**< Required driver features for this entry */
79873 void *data;
79874-};
79875+} __do_const;
79876+typedef struct drm_info_list __no_const drm_info_list_no_const;
79877
79878 /**
79879 * debugfs node structure. This structure represents a debugfs file.
79880@@ -713,7 +717,7 @@ struct drm_device {
79881
79882 /** \name Usage Counters */
79883 /*@{ */
79884- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79885+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79886 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
79887 int buf_use; /**< Buffers in use -- cannot alloc */
79888 atomic_t buf_alloc; /**< Buffer allocation in progress */
79889diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
79890index 7adbb65..2a1eb1f 100644
79891--- a/include/drm/drm_crtc_helper.h
79892+++ b/include/drm/drm_crtc_helper.h
79893@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
79894 struct drm_connector *connector);
79895 /* disable encoder when not in use - more explicit than dpms off */
79896 void (*disable)(struct drm_encoder *encoder);
79897-};
79898+} __no_const;
79899
79900 /**
79901 * drm_connector_helper_funcs - helper operations for connectors
79902diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
79903index d016dc5..3951fe0 100644
79904--- a/include/drm/i915_pciids.h
79905+++ b/include/drm/i915_pciids.h
79906@@ -37,7 +37,7 @@
79907 */
79908 #define INTEL_VGA_DEVICE(id, info) { \
79909 0x8086, id, \
79910- ~0, ~0, \
79911+ PCI_ANY_ID, PCI_ANY_ID, \
79912 0x030000, 0xff0000, \
79913 (unsigned long) info }
79914
79915diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
79916index 72dcbe8..8db58d7 100644
79917--- a/include/drm/ttm/ttm_memory.h
79918+++ b/include/drm/ttm/ttm_memory.h
79919@@ -48,7 +48,7 @@
79920
79921 struct ttm_mem_shrink {
79922 int (*do_shrink) (struct ttm_mem_shrink *);
79923-};
79924+} __no_const;
79925
79926 /**
79927 * struct ttm_mem_global - Global memory accounting structure.
79928diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
79929index 49a8284..9643967 100644
79930--- a/include/drm/ttm/ttm_page_alloc.h
79931+++ b/include/drm/ttm/ttm_page_alloc.h
79932@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
79933 */
79934 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
79935
79936+struct device;
79937 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
79938 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
79939
79940diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
79941index 4b840e8..155d235 100644
79942--- a/include/keys/asymmetric-subtype.h
79943+++ b/include/keys/asymmetric-subtype.h
79944@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
79945 /* Verify the signature on a key of this subtype (optional) */
79946 int (*verify_signature)(const struct key *key,
79947 const struct public_key_signature *sig);
79948-};
79949+} __do_const;
79950
79951 /**
79952 * asymmetric_key_subtype - Get the subtype from an asymmetric key
79953diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
79954index c1da539..1dcec55 100644
79955--- a/include/linux/atmdev.h
79956+++ b/include/linux/atmdev.h
79957@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
79958 #endif
79959
79960 struct k_atm_aal_stats {
79961-#define __HANDLE_ITEM(i) atomic_t i
79962+#define __HANDLE_ITEM(i) atomic_unchecked_t i
79963 __AAL_STAT_ITEMS
79964 #undef __HANDLE_ITEM
79965 };
79966@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
79967 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
79968 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
79969 struct module *owner;
79970-};
79971+} __do_const ;
79972
79973 struct atmphy_ops {
79974 int (*start)(struct atm_dev *dev);
79975diff --git a/include/linux/atomic.h b/include/linux/atomic.h
79976index 5b08a85..60922fb 100644
79977--- a/include/linux/atomic.h
79978+++ b/include/linux/atomic.h
79979@@ -12,7 +12,7 @@
79980 * Atomically adds @a to @v, so long as @v was not already @u.
79981 * Returns non-zero if @v was not @u, and zero otherwise.
79982 */
79983-static inline int atomic_add_unless(atomic_t *v, int a, int u)
79984+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
79985 {
79986 return __atomic_add_unless(v, a, u) != u;
79987 }
79988diff --git a/include/linux/audit.h b/include/linux/audit.h
79989index af84234..4177a40 100644
79990--- a/include/linux/audit.h
79991+++ b/include/linux/audit.h
79992@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
79993 extern unsigned int audit_serial(void);
79994 extern int auditsc_get_stamp(struct audit_context *ctx,
79995 struct timespec *t, unsigned int *serial);
79996-extern int audit_set_loginuid(kuid_t loginuid);
79997+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
79998
79999 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80000 {
80001diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80002index 576e463..28fd926 100644
80003--- a/include/linux/binfmts.h
80004+++ b/include/linux/binfmts.h
80005@@ -44,7 +44,7 @@ struct linux_binprm {
80006 unsigned interp_flags;
80007 unsigned interp_data;
80008 unsigned long loader, exec;
80009-};
80010+} __randomize_layout;
80011
80012 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80013 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80014@@ -77,8 +77,10 @@ struct linux_binfmt {
80015 int (*load_binary)(struct linux_binprm *);
80016 int (*load_shlib)(struct file *);
80017 int (*core_dump)(struct coredump_params *cprm);
80018+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80019+ void (*handle_mmap)(struct file *);
80020 unsigned long min_coredump; /* minimal dump size */
80021-};
80022+} __do_const __randomize_layout;
80023
80024 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80025
80026diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80027index 202e403..16e6617 100644
80028--- a/include/linux/bitmap.h
80029+++ b/include/linux/bitmap.h
80030@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80031 return __bitmap_full(src, nbits);
80032 }
80033
80034-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80035+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80036 {
80037 if (small_const_nbits(nbits))
80038 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80039diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80040index 5d858e0..336c1d9 100644
80041--- a/include/linux/bitops.h
80042+++ b/include/linux/bitops.h
80043@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80044 * @word: value to rotate
80045 * @shift: bits to roll
80046 */
80047-static inline __u32 rol32(__u32 word, unsigned int shift)
80048+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80049 {
80050 return (word << shift) | (word >> (32 - shift));
80051 }
80052@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80053 * @word: value to rotate
80054 * @shift: bits to roll
80055 */
80056-static inline __u32 ror32(__u32 word, unsigned int shift)
80057+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80058 {
80059 return (word >> shift) | (word << (32 - shift));
80060 }
80061@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80062 return (__s32)(value << shift) >> shift;
80063 }
80064
80065-static inline unsigned fls_long(unsigned long l)
80066+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80067 {
80068 if (sizeof(l) == 4)
80069 return fls(l);
80070diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80071index 92f4b4b..483d537 100644
80072--- a/include/linux/blkdev.h
80073+++ b/include/linux/blkdev.h
80074@@ -1613,7 +1613,7 @@ struct block_device_operations {
80075 /* this callback is with swap_lock and sometimes page table lock held */
80076 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80077 struct module *owner;
80078-};
80079+} __do_const;
80080
80081 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80082 unsigned long);
80083diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80084index afc1343..9735539 100644
80085--- a/include/linux/blktrace_api.h
80086+++ b/include/linux/blktrace_api.h
80087@@ -25,7 +25,7 @@ struct blk_trace {
80088 struct dentry *dropped_file;
80089 struct dentry *msg_file;
80090 struct list_head running_list;
80091- atomic_t dropped;
80092+ atomic_unchecked_t dropped;
80093 };
80094
80095 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80096diff --git a/include/linux/cache.h b/include/linux/cache.h
80097index 17e7e82..1d7da26 100644
80098--- a/include/linux/cache.h
80099+++ b/include/linux/cache.h
80100@@ -16,6 +16,14 @@
80101 #define __read_mostly
80102 #endif
80103
80104+#ifndef __read_only
80105+#ifdef CONFIG_PAX_KERNEXEC
80106+#error KERNEXEC requires __read_only
80107+#else
80108+#define __read_only __read_mostly
80109+#endif
80110+#endif
80111+
80112 #ifndef ____cacheline_aligned
80113 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80114 #endif
80115diff --git a/include/linux/capability.h b/include/linux/capability.h
80116index aa93e5e..985a1b0 100644
80117--- a/include/linux/capability.h
80118+++ b/include/linux/capability.h
80119@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80120 extern bool capable(int cap);
80121 extern bool ns_capable(struct user_namespace *ns, int cap);
80122 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80123+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80124 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80125+extern bool capable_nolog(int cap);
80126+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80127
80128 /* audit system wants to get cap info from files as well */
80129 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80130
80131+extern int is_privileged_binary(const struct dentry *dentry);
80132+
80133 #endif /* !_LINUX_CAPABILITY_H */
80134diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80135index 8609d57..86e4d79 100644
80136--- a/include/linux/cdrom.h
80137+++ b/include/linux/cdrom.h
80138@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80139
80140 /* driver specifications */
80141 const int capability; /* capability flags */
80142- int n_minors; /* number of active minor devices */
80143 /* handle uniform packets for scsi type devices (scsi,atapi) */
80144 int (*generic_packet) (struct cdrom_device_info *,
80145 struct packet_command *);
80146diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80147index 4ce9056..86caac6 100644
80148--- a/include/linux/cleancache.h
80149+++ b/include/linux/cleancache.h
80150@@ -31,7 +31,7 @@ struct cleancache_ops {
80151 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80152 void (*invalidate_inode)(int, struct cleancache_filekey);
80153 void (*invalidate_fs)(int);
80154-};
80155+} __no_const;
80156
80157 extern struct cleancache_ops *
80158 cleancache_register_ops(struct cleancache_ops *ops);
80159diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80160index d936409..ce9f842 100644
80161--- a/include/linux/clk-provider.h
80162+++ b/include/linux/clk-provider.h
80163@@ -191,6 +191,7 @@ struct clk_ops {
80164 void (*init)(struct clk_hw *hw);
80165 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80166 };
80167+typedef struct clk_ops __no_const clk_ops_no_const;
80168
80169 /**
80170 * struct clk_init_data - holds init data that's common to all clocks and is
80171diff --git a/include/linux/compat.h b/include/linux/compat.h
80172index 7450ca2..a824b81 100644
80173--- a/include/linux/compat.h
80174+++ b/include/linux/compat.h
80175@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80176 compat_size_t __user *len_ptr);
80177
80178 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80179-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80180+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80181 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80182 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80183 compat_ssize_t msgsz, int msgflg);
80184@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80185 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80186 compat_ulong_t addr, compat_ulong_t data);
80187 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80188- compat_long_t addr, compat_long_t data);
80189+ compat_ulong_t addr, compat_ulong_t data);
80190
80191 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80192 /*
80193diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80194index d1a5582..4424efa 100644
80195--- a/include/linux/compiler-gcc4.h
80196+++ b/include/linux/compiler-gcc4.h
80197@@ -39,9 +39,34 @@
80198 # define __compiletime_warning(message) __attribute__((warning(message)))
80199 # define __compiletime_error(message) __attribute__((error(message)))
80200 #endif /* __CHECKER__ */
80201+
80202+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80203+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80204+#define __bos0(ptr) __bos((ptr), 0)
80205+#define __bos1(ptr) __bos((ptr), 1)
80206 #endif /* GCC_VERSION >= 40300 */
80207
80208 #if GCC_VERSION >= 40500
80209+
80210+#ifdef RANDSTRUCT_PLUGIN
80211+#define __randomize_layout __attribute__((randomize_layout))
80212+#define __no_randomize_layout __attribute__((no_randomize_layout))
80213+#endif
80214+
80215+#ifdef CONSTIFY_PLUGIN
80216+#define __no_const __attribute__((no_const))
80217+#define __do_const __attribute__((do_const))
80218+#endif
80219+
80220+#ifdef SIZE_OVERFLOW_PLUGIN
80221+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80222+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80223+#endif
80224+
80225+#ifdef LATENT_ENTROPY_PLUGIN
80226+#define __latent_entropy __attribute__((latent_entropy))
80227+#endif
80228+
80229 /*
80230 * Mark a position in code as unreachable. This can be used to
80231 * suppress control flow warnings after asm blocks that transfer
80232diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80233index c8c5659..d09f2ad 100644
80234--- a/include/linux/compiler-gcc5.h
80235+++ b/include/linux/compiler-gcc5.h
80236@@ -28,6 +28,28 @@
80237 # define __compiletime_error(message) __attribute__((error(message)))
80238 #endif /* __CHECKER__ */
80239
80240+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80241+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80242+#define __bos0(ptr) __bos((ptr), 0)
80243+#define __bos1(ptr) __bos((ptr), 1)
80244+
80245+#ifdef CONSTIFY_PLUGIN
80246+#error not yet
80247+#define __no_const __attribute__((no_const))
80248+#define __do_const __attribute__((do_const))
80249+#endif
80250+
80251+#ifdef SIZE_OVERFLOW_PLUGIN
80252+#error not yet
80253+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80254+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80255+#endif
80256+
80257+#ifdef LATENT_ENTROPY_PLUGIN
80258+#error not yet
80259+#define __latent_entropy __attribute__((latent_entropy))
80260+#endif
80261+
80262 /*
80263 * Mark a position in code as unreachable. This can be used to
80264 * suppress control flow warnings after asm blocks that transfer
80265diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80266index fa6a314..752a6ef 100644
80267--- a/include/linux/compiler.h
80268+++ b/include/linux/compiler.h
80269@@ -5,11 +5,14 @@
80270
80271 #ifdef __CHECKER__
80272 # define __user __attribute__((noderef, address_space(1)))
80273+# define __force_user __force __user
80274 # define __kernel __attribute__((address_space(0)))
80275+# define __force_kernel __force __kernel
80276 # define __safe __attribute__((safe))
80277 # define __force __attribute__((force))
80278 # define __nocast __attribute__((nocast))
80279 # define __iomem __attribute__((noderef, address_space(2)))
80280+# define __force_iomem __force __iomem
80281 # define __must_hold(x) __attribute__((context(x,1,1)))
80282 # define __acquires(x) __attribute__((context(x,0,1)))
80283 # define __releases(x) __attribute__((context(x,1,0)))
80284@@ -17,20 +20,37 @@
80285 # define __release(x) __context__(x,-1)
80286 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80287 # define __percpu __attribute__((noderef, address_space(3)))
80288+# define __force_percpu __force __percpu
80289 #ifdef CONFIG_SPARSE_RCU_POINTER
80290 # define __rcu __attribute__((noderef, address_space(4)))
80291+# define __force_rcu __force __rcu
80292 #else
80293 # define __rcu
80294+# define __force_rcu
80295 #endif
80296 extern void __chk_user_ptr(const volatile void __user *);
80297 extern void __chk_io_ptr(const volatile void __iomem *);
80298 #else
80299-# define __user
80300-# define __kernel
80301+# ifdef CHECKER_PLUGIN
80302+//# define __user
80303+//# define __force_user
80304+//# define __kernel
80305+//# define __force_kernel
80306+# else
80307+# ifdef STRUCTLEAK_PLUGIN
80308+# define __user __attribute__((user))
80309+# else
80310+# define __user
80311+# endif
80312+# define __force_user
80313+# define __kernel
80314+# define __force_kernel
80315+# endif
80316 # define __safe
80317 # define __force
80318 # define __nocast
80319 # define __iomem
80320+# define __force_iomem
80321 # define __chk_user_ptr(x) (void)0
80322 # define __chk_io_ptr(x) (void)0
80323 # define __builtin_warning(x, y...) (1)
80324@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80325 # define __release(x) (void)0
80326 # define __cond_lock(x,c) (c)
80327 # define __percpu
80328+# define __force_percpu
80329 # define __rcu
80330+# define __force_rcu
80331 #endif
80332
80333 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80334@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80335 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80336 {
80337 switch (size) {
80338- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80339- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80340- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80341+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80342+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80343+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80344 #ifdef CONFIG_64BIT
80345- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80346+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80347 #endif
80348 default:
80349 barrier();
80350- __builtin_memcpy((void *)res, (const void *)p, size);
80351+ __builtin_memcpy(res, (const void *)p, size);
80352 data_access_exceeds_word_size();
80353 barrier();
80354 }
80355 }
80356
80357-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80358+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80359 {
80360 switch (size) {
80361- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80362- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80363- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80364+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80365+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80366+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
80367 #ifdef CONFIG_64BIT
80368- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
80369+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
80370 #endif
80371 default:
80372 barrier();
80373- __builtin_memcpy((void *)p, (const void *)res, size);
80374+ __builtin_memcpy((void *)p, res, size);
80375 data_access_exceeds_word_size();
80376 barrier();
80377 }
80378@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80379 # define __attribute_const__ /* unimplemented */
80380 #endif
80381
80382+#ifndef __randomize_layout
80383+# define __randomize_layout
80384+#endif
80385+
80386+#ifndef __no_randomize_layout
80387+# define __no_randomize_layout
80388+#endif
80389+
80390+#ifndef __no_const
80391+# define __no_const
80392+#endif
80393+
80394+#ifndef __do_const
80395+# define __do_const
80396+#endif
80397+
80398+#ifndef __size_overflow
80399+# define __size_overflow(...)
80400+#endif
80401+
80402+#ifndef __intentional_overflow
80403+# define __intentional_overflow(...)
80404+#endif
80405+
80406+#ifndef __latent_entropy
80407+# define __latent_entropy
80408+#endif
80409+
80410 /*
80411 * Tell gcc if a function is cold. The compiler will assume any path
80412 * directly leading to the call is unlikely.
80413@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80414 #define __cold
80415 #endif
80416
80417+#ifndef __alloc_size
80418+#define __alloc_size(...)
80419+#endif
80420+
80421+#ifndef __bos
80422+#define __bos(ptr, arg)
80423+#endif
80424+
80425+#ifndef __bos0
80426+#define __bos0(ptr)
80427+#endif
80428+
80429+#ifndef __bos1
80430+#define __bos1(ptr)
80431+#endif
80432+
80433 /* Simple shorthand for a section definition */
80434 #ifndef __section
80435 # define __section(S) __attribute__ ((__section__(#S)))
80436@@ -462,8 +528,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80437 */
80438 #define __ACCESS_ONCE(x) ({ \
80439 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
80440- (volatile typeof(x) *)&(x); })
80441+ (volatile const typeof(x) *)&(x); })
80442 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
80443+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80444
80445 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80446 #ifdef CONFIG_KPROBES
80447diff --git a/include/linux/completion.h b/include/linux/completion.h
80448index 5d5aaae..0ea9b84 100644
80449--- a/include/linux/completion.h
80450+++ b/include/linux/completion.h
80451@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80452
80453 extern void wait_for_completion(struct completion *);
80454 extern void wait_for_completion_io(struct completion *);
80455-extern int wait_for_completion_interruptible(struct completion *x);
80456-extern int wait_for_completion_killable(struct completion *x);
80457+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80458+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80459 extern unsigned long wait_for_completion_timeout(struct completion *x,
80460- unsigned long timeout);
80461+ unsigned long timeout) __intentional_overflow(-1);
80462 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80463- unsigned long timeout);
80464+ unsigned long timeout) __intentional_overflow(-1);
80465 extern long wait_for_completion_interruptible_timeout(
80466- struct completion *x, unsigned long timeout);
80467+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80468 extern long wait_for_completion_killable_timeout(
80469- struct completion *x, unsigned long timeout);
80470+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80471 extern bool try_wait_for_completion(struct completion *x);
80472 extern bool completion_done(struct completion *x);
80473
80474diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80475index 34025df..d94bbbc 100644
80476--- a/include/linux/configfs.h
80477+++ b/include/linux/configfs.h
80478@@ -125,7 +125,7 @@ struct configfs_attribute {
80479 const char *ca_name;
80480 struct module *ca_owner;
80481 umode_t ca_mode;
80482-};
80483+} __do_const;
80484
80485 /*
80486 * Users often need to create attribute structures for their configurable
80487diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80488index 4d078ce..c970f4d 100644
80489--- a/include/linux/cpufreq.h
80490+++ b/include/linux/cpufreq.h
80491@@ -206,6 +206,7 @@ struct global_attr {
80492 ssize_t (*store)(struct kobject *a, struct attribute *b,
80493 const char *c, size_t count);
80494 };
80495+typedef struct global_attr __no_const global_attr_no_const;
80496
80497 #define define_one_global_ro(_name) \
80498 static struct global_attr _name = \
80499@@ -277,7 +278,7 @@ struct cpufreq_driver {
80500 bool boost_supported;
80501 bool boost_enabled;
80502 int (*set_boost)(int state);
80503-};
80504+} __do_const;
80505
80506 /* flags */
80507 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80508diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80509index ab70f3b..3ef7771 100644
80510--- a/include/linux/cpuidle.h
80511+++ b/include/linux/cpuidle.h
80512@@ -50,7 +50,8 @@ struct cpuidle_state {
80513 int index);
80514
80515 int (*enter_dead) (struct cpuidle_device *dev, int index);
80516-};
80517+} __do_const;
80518+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80519
80520 /* Idle State Flags */
80521 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
80522@@ -206,7 +207,7 @@ struct cpuidle_governor {
80523 void (*reflect) (struct cpuidle_device *dev, int index);
80524
80525 struct module *owner;
80526-};
80527+} __do_const;
80528
80529 #ifdef CONFIG_CPU_IDLE
80530 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80531diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80532index b950e9d..63810aa 100644
80533--- a/include/linux/cpumask.h
80534+++ b/include/linux/cpumask.h
80535@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80536 }
80537
80538 /* Valid inputs for n are -1 and 0. */
80539-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80540+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80541 {
80542 return n+1;
80543 }
80544
80545-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80546+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80547 {
80548 return n+1;
80549 }
80550
80551-static inline unsigned int cpumask_next_and(int n,
80552+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80553 const struct cpumask *srcp,
80554 const struct cpumask *andp)
80555 {
80556@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80557 *
80558 * Returns >= nr_cpu_ids if no further cpus set.
80559 */
80560-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80561+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80562 {
80563 /* -1 is a legal arg here. */
80564 if (n != -1)
80565@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80566 *
80567 * Returns >= nr_cpu_ids if no further cpus unset.
80568 */
80569-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80570+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80571 {
80572 /* -1 is a legal arg here. */
80573 if (n != -1)
80574@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80575 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80576 }
80577
80578-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80579+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80580 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80581 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80582
80583@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
80584 * cpumask_weight - Count of bits in *srcp
80585 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
80586 */
80587-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
80588+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
80589 {
80590 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
80591 }
80592diff --git a/include/linux/cred.h b/include/linux/cred.h
80593index 2fb2ca2..d6a3340 100644
80594--- a/include/linux/cred.h
80595+++ b/include/linux/cred.h
80596@@ -35,7 +35,7 @@ struct group_info {
80597 int nblocks;
80598 kgid_t small_block[NGROUPS_SMALL];
80599 kgid_t *blocks[0];
80600-};
80601+} __randomize_layout;
80602
80603 /**
80604 * get_group_info - Get a reference to a group info structure
80605@@ -137,7 +137,7 @@ struct cred {
80606 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80607 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80608 struct rcu_head rcu; /* RCU deletion hook */
80609-};
80610+} __randomize_layout;
80611
80612 extern void __put_cred(struct cred *);
80613 extern void exit_creds(struct task_struct *);
80614@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80615 static inline void validate_process_creds(void)
80616 {
80617 }
80618+static inline void validate_task_creds(struct task_struct *task)
80619+{
80620+}
80621 #endif
80622
80623 /**
80624@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
80625
80626 #define task_uid(task) (task_cred_xxx((task), uid))
80627 #define task_euid(task) (task_cred_xxx((task), euid))
80628+#define task_securebits(task) (task_cred_xxx((task), securebits))
80629
80630 #define current_cred_xxx(xxx) \
80631 ({ \
80632diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80633index 9c8776d..8c526c2 100644
80634--- a/include/linux/crypto.h
80635+++ b/include/linux/crypto.h
80636@@ -626,7 +626,7 @@ struct cipher_tfm {
80637 const u8 *key, unsigned int keylen);
80638 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80639 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80640-};
80641+} __no_const;
80642
80643 struct hash_tfm {
80644 int (*init)(struct hash_desc *desc);
80645@@ -647,13 +647,13 @@ struct compress_tfm {
80646 int (*cot_decompress)(struct crypto_tfm *tfm,
80647 const u8 *src, unsigned int slen,
80648 u8 *dst, unsigned int *dlen);
80649-};
80650+} __no_const;
80651
80652 struct rng_tfm {
80653 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80654 unsigned int dlen);
80655 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80656-};
80657+} __no_const;
80658
80659 #define crt_ablkcipher crt_u.ablkcipher
80660 #define crt_aead crt_u.aead
80661diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80662index 653589e..4ef254a 100644
80663--- a/include/linux/ctype.h
80664+++ b/include/linux/ctype.h
80665@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80666 * Fast implementation of tolower() for internal usage. Do not use in your
80667 * code.
80668 */
80669-static inline char _tolower(const char c)
80670+static inline unsigned char _tolower(const unsigned char c)
80671 {
80672 return c | 0x20;
80673 }
80674diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80675index 5a81398..6bbee30 100644
80676--- a/include/linux/dcache.h
80677+++ b/include/linux/dcache.h
80678@@ -123,6 +123,9 @@ struct dentry {
80679 unsigned long d_time; /* used by d_revalidate */
80680 void *d_fsdata; /* fs-specific data */
80681
80682+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80683+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
80684+#endif
80685 struct list_head d_lru; /* LRU list */
80686 struct list_head d_child; /* child of parent list */
80687 struct list_head d_subdirs; /* our children */
80688@@ -133,7 +136,7 @@ struct dentry {
80689 struct hlist_node d_alias; /* inode alias list */
80690 struct rcu_head d_rcu;
80691 } d_u;
80692-};
80693+} __randomize_layout;
80694
80695 /*
80696 * dentry->d_lock spinlock nesting subclasses:
80697diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80698index 7925bf0..d5143d2 100644
80699--- a/include/linux/decompress/mm.h
80700+++ b/include/linux/decompress/mm.h
80701@@ -77,7 +77,7 @@ static void free(void *where)
80702 * warnings when not needed (indeed large_malloc / large_free are not
80703 * needed by inflate */
80704
80705-#define malloc(a) kmalloc(a, GFP_KERNEL)
80706+#define malloc(a) kmalloc((a), GFP_KERNEL)
80707 #define free(a) kfree(a)
80708
80709 #define large_malloc(a) vmalloc(a)
80710diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80711index ce447f0..83c66bd 100644
80712--- a/include/linux/devfreq.h
80713+++ b/include/linux/devfreq.h
80714@@ -114,7 +114,7 @@ struct devfreq_governor {
80715 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80716 int (*event_handler)(struct devfreq *devfreq,
80717 unsigned int event, void *data);
80718-};
80719+} __do_const;
80720
80721 /**
80722 * struct devfreq - Device devfreq structure
80723diff --git a/include/linux/device.h b/include/linux/device.h
80724index fb50673..ec0b35b 100644
80725--- a/include/linux/device.h
80726+++ b/include/linux/device.h
80727@@ -311,7 +311,7 @@ struct subsys_interface {
80728 struct list_head node;
80729 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80730 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80731-};
80732+} __do_const;
80733
80734 int subsys_interface_register(struct subsys_interface *sif);
80735 void subsys_interface_unregister(struct subsys_interface *sif);
80736@@ -507,7 +507,7 @@ struct device_type {
80737 void (*release)(struct device *dev);
80738
80739 const struct dev_pm_ops *pm;
80740-};
80741+} __do_const;
80742
80743 /* interface for exporting device attributes */
80744 struct device_attribute {
80745@@ -517,11 +517,12 @@ struct device_attribute {
80746 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80747 const char *buf, size_t count);
80748 };
80749+typedef struct device_attribute __no_const device_attribute_no_const;
80750
80751 struct dev_ext_attribute {
80752 struct device_attribute attr;
80753 void *var;
80754-};
80755+} __do_const;
80756
80757 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80758 char *buf);
80759diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80760index c3007cb..43efc8c 100644
80761--- a/include/linux/dma-mapping.h
80762+++ b/include/linux/dma-mapping.h
80763@@ -60,7 +60,7 @@ struct dma_map_ops {
80764 u64 (*get_required_mask)(struct device *dev);
80765 #endif
80766 int is_phys;
80767-};
80768+} __do_const;
80769
80770 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80771
80772diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80773index 40cd75e..38572a9 100644
80774--- a/include/linux/dmaengine.h
80775+++ b/include/linux/dmaengine.h
80776@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
80777 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80778 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80779
80780-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80781+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80782 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80783-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80784+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80785 struct dma_pinned_list *pinned_list, struct page *page,
80786 unsigned int offset, size_t len);
80787
80788diff --git a/include/linux/efi.h b/include/linux/efi.h
80789index 0238d61..34a758f 100644
80790--- a/include/linux/efi.h
80791+++ b/include/linux/efi.h
80792@@ -1054,6 +1054,7 @@ struct efivar_operations {
80793 efi_set_variable_nonblocking_t *set_variable_nonblocking;
80794 efi_query_variable_store_t *query_variable_store;
80795 };
80796+typedef struct efivar_operations __no_const efivar_operations_no_const;
80797
80798 struct efivars {
80799 /*
80800diff --git a/include/linux/elf.h b/include/linux/elf.h
80801index 20fa8d8..3d0dd18 100644
80802--- a/include/linux/elf.h
80803+++ b/include/linux/elf.h
80804@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
80805 #define elf_note elf32_note
80806 #define elf_addr_t Elf32_Off
80807 #define Elf_Half Elf32_Half
80808+#define elf_dyn Elf32_Dyn
80809
80810 #else
80811
80812@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
80813 #define elf_note elf64_note
80814 #define elf_addr_t Elf64_Off
80815 #define Elf_Half Elf64_Half
80816+#define elf_dyn Elf64_Dyn
80817
80818 #endif
80819
80820diff --git a/include/linux/err.h b/include/linux/err.h
80821index a729120..6ede2c9 100644
80822--- a/include/linux/err.h
80823+++ b/include/linux/err.h
80824@@ -20,12 +20,12 @@
80825
80826 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
80827
80828-static inline void * __must_check ERR_PTR(long error)
80829+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
80830 {
80831 return (void *) error;
80832 }
80833
80834-static inline long __must_check PTR_ERR(__force const void *ptr)
80835+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
80836 {
80837 return (long) ptr;
80838 }
80839diff --git a/include/linux/extcon.h b/include/linux/extcon.h
80840index 36f49c4..a2a1f4c 100644
80841--- a/include/linux/extcon.h
80842+++ b/include/linux/extcon.h
80843@@ -135,7 +135,7 @@ struct extcon_dev {
80844 /* /sys/class/extcon/.../mutually_exclusive/... */
80845 struct attribute_group attr_g_muex;
80846 struct attribute **attrs_muex;
80847- struct device_attribute *d_attrs_muex;
80848+ device_attribute_no_const *d_attrs_muex;
80849 };
80850
80851 /**
80852diff --git a/include/linux/fb.h b/include/linux/fb.h
80853index 09bb7a1..d98870a 100644
80854--- a/include/linux/fb.h
80855+++ b/include/linux/fb.h
80856@@ -305,7 +305,7 @@ struct fb_ops {
80857 /* called at KDB enter and leave time to prepare the console */
80858 int (*fb_debug_enter)(struct fb_info *info);
80859 int (*fb_debug_leave)(struct fb_info *info);
80860-};
80861+} __do_const;
80862
80863 #ifdef CONFIG_FB_TILEBLITTING
80864 #define FB_TILE_CURSOR_NONE 0
80865diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
80866index 230f87b..1fd0485 100644
80867--- a/include/linux/fdtable.h
80868+++ b/include/linux/fdtable.h
80869@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
80870 void put_files_struct(struct files_struct *fs);
80871 void reset_files_struct(struct files_struct *);
80872 int unshare_files(struct files_struct **);
80873-struct files_struct *dup_fd(struct files_struct *, int *);
80874+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
80875 void do_close_on_exec(struct files_struct *);
80876 int iterate_fd(struct files_struct *, unsigned,
80877 int (*)(const void *, struct file *, unsigned),
80878diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
80879index 8293262..2b3b8bd 100644
80880--- a/include/linux/frontswap.h
80881+++ b/include/linux/frontswap.h
80882@@ -11,7 +11,7 @@ struct frontswap_ops {
80883 int (*load)(unsigned, pgoff_t, struct page *);
80884 void (*invalidate_page)(unsigned, pgoff_t);
80885 void (*invalidate_area)(unsigned);
80886-};
80887+} __no_const;
80888
80889 extern bool frontswap_enabled;
80890 extern struct frontswap_ops *
80891diff --git a/include/linux/fs.h b/include/linux/fs.h
80892index 42efe13..72d42ee 100644
80893--- a/include/linux/fs.h
80894+++ b/include/linux/fs.h
80895@@ -413,7 +413,7 @@ struct address_space {
80896 spinlock_t private_lock; /* for use by the address_space */
80897 struct list_head private_list; /* ditto */
80898 void *private_data; /* ditto */
80899-} __attribute__((aligned(sizeof(long))));
80900+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
80901 /*
80902 * On most architectures that alignment is already the case; but
80903 * must be enforced here for CRIS, to let the least significant bit
80904@@ -456,7 +456,7 @@ struct block_device {
80905 int bd_fsfreeze_count;
80906 /* Mutex for freeze */
80907 struct mutex bd_fsfreeze_mutex;
80908-};
80909+} __randomize_layout;
80910
80911 /*
80912 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
80913@@ -642,7 +642,7 @@ struct inode {
80914 #endif
80915
80916 void *i_private; /* fs or device private pointer */
80917-};
80918+} __randomize_layout;
80919
80920 static inline int inode_unhashed(struct inode *inode)
80921 {
80922@@ -837,7 +837,7 @@ struct file {
80923 struct list_head f_tfile_llink;
80924 #endif /* #ifdef CONFIG_EPOLL */
80925 struct address_space *f_mapping;
80926-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
80927+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
80928
80929 struct file_handle {
80930 __u32 handle_bytes;
80931@@ -962,7 +962,7 @@ struct file_lock {
80932 int state; /* state of grant or error if -ve */
80933 } afs;
80934 } fl_u;
80935-};
80936+} __randomize_layout;
80937
80938 /* The following constant reflects the upper bound of the file/locking space */
80939 #ifndef OFFSET_MAX
80940@@ -1305,7 +1305,7 @@ struct super_block {
80941 * Indicates how deep in a filesystem stack this SB is
80942 */
80943 int s_stack_depth;
80944-};
80945+} __randomize_layout;
80946
80947 extern struct timespec current_fs_time(struct super_block *sb);
80948
80949@@ -1536,7 +1536,8 @@ struct file_operations {
80950 long (*fallocate)(struct file *file, int mode, loff_t offset,
80951 loff_t len);
80952 void (*show_fdinfo)(struct seq_file *m, struct file *f);
80953-};
80954+} __do_const __randomize_layout;
80955+typedef struct file_operations __no_const file_operations_no_const;
80956
80957 struct inode_operations {
80958 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
80959@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
80960 return !IS_DEADDIR(inode);
80961 }
80962
80963+static inline bool is_sidechannel_device(const struct inode *inode)
80964+{
80965+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
80966+ umode_t mode = inode->i_mode;
80967+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
80968+#else
80969+ return false;
80970+#endif
80971+}
80972+
80973 #endif /* _LINUX_FS_H */
80974diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
80975index 0efc3e6..fd23610 100644
80976--- a/include/linux/fs_struct.h
80977+++ b/include/linux/fs_struct.h
80978@@ -6,13 +6,13 @@
80979 #include <linux/seqlock.h>
80980
80981 struct fs_struct {
80982- int users;
80983+ atomic_t users;
80984 spinlock_t lock;
80985 seqcount_t seq;
80986 int umask;
80987 int in_exec;
80988 struct path root, pwd;
80989-};
80990+} __randomize_layout;
80991
80992 extern struct kmem_cache *fs_cachep;
80993
80994diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
80995index 7714849..a4a5c7a 100644
80996--- a/include/linux/fscache-cache.h
80997+++ b/include/linux/fscache-cache.h
80998@@ -113,7 +113,7 @@ struct fscache_operation {
80999 fscache_operation_release_t release;
81000 };
81001
81002-extern atomic_t fscache_op_debug_id;
81003+extern atomic_unchecked_t fscache_op_debug_id;
81004 extern void fscache_op_work_func(struct work_struct *work);
81005
81006 extern void fscache_enqueue_operation(struct fscache_operation *);
81007@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81008 INIT_WORK(&op->work, fscache_op_work_func);
81009 atomic_set(&op->usage, 1);
81010 op->state = FSCACHE_OP_ST_INITIALISED;
81011- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81012+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81013 op->processor = processor;
81014 op->release = release;
81015 INIT_LIST_HEAD(&op->pend_link);
81016diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81017index 115bb81..e7b812b 100644
81018--- a/include/linux/fscache.h
81019+++ b/include/linux/fscache.h
81020@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81021 * - this is mandatory for any object that may have data
81022 */
81023 void (*now_uncached)(void *cookie_netfs_data);
81024-};
81025+} __do_const;
81026
81027 /*
81028 * fscache cached network filesystem type
81029diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81030index 7ee1774..72505b8 100644
81031--- a/include/linux/fsnotify.h
81032+++ b/include/linux/fsnotify.h
81033@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81034 struct inode *inode = file_inode(file);
81035 __u32 mask = FS_ACCESS;
81036
81037+ if (is_sidechannel_device(inode))
81038+ return;
81039+
81040 if (S_ISDIR(inode->i_mode))
81041 mask |= FS_ISDIR;
81042
81043@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81044 struct inode *inode = file_inode(file);
81045 __u32 mask = FS_MODIFY;
81046
81047+ if (is_sidechannel_device(inode))
81048+ return;
81049+
81050 if (S_ISDIR(inode->i_mode))
81051 mask |= FS_ISDIR;
81052
81053@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81054 */
81055 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81056 {
81057- return kstrdup(name, GFP_KERNEL);
81058+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81059 }
81060
81061 /*
81062diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81063index ec274e0..e678159 100644
81064--- a/include/linux/genhd.h
81065+++ b/include/linux/genhd.h
81066@@ -194,7 +194,7 @@ struct gendisk {
81067 struct kobject *slave_dir;
81068
81069 struct timer_rand_state *random;
81070- atomic_t sync_io; /* RAID */
81071+ atomic_unchecked_t sync_io; /* RAID */
81072 struct disk_events *ev;
81073 #ifdef CONFIG_BLK_DEV_INTEGRITY
81074 struct blk_integrity *integrity;
81075@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81076 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81077
81078 /* drivers/char/random.c */
81079-extern void add_disk_randomness(struct gendisk *disk);
81080+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81081 extern void rand_initialize_disk(struct gendisk *disk);
81082
81083 static inline sector_t get_start_sect(struct block_device *bdev)
81084diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81085index 667c311..abac2a7 100644
81086--- a/include/linux/genl_magic_func.h
81087+++ b/include/linux/genl_magic_func.h
81088@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81089 },
81090
81091 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81092-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81093+static struct genl_ops ZZZ_genl_ops[] = {
81094 #include GENL_MAGIC_INCLUDE_FILE
81095 };
81096
81097diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81098index b840e3b..aeaeef9 100644
81099--- a/include/linux/gfp.h
81100+++ b/include/linux/gfp.h
81101@@ -34,6 +34,13 @@ struct vm_area_struct;
81102 #define ___GFP_NO_KSWAPD 0x400000u
81103 #define ___GFP_OTHER_NODE 0x800000u
81104 #define ___GFP_WRITE 0x1000000u
81105+
81106+#ifdef CONFIG_PAX_USERCOPY_SLABS
81107+#define ___GFP_USERCOPY 0x2000000u
81108+#else
81109+#define ___GFP_USERCOPY 0
81110+#endif
81111+
81112 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81113
81114 /*
81115@@ -90,6 +97,7 @@ struct vm_area_struct;
81116 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81117 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81118 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81119+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81120
81121 /*
81122 * This may seem redundant, but it's a way of annotating false positives vs.
81123@@ -97,7 +105,7 @@ struct vm_area_struct;
81124 */
81125 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81126
81127-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81128+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81129 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81130
81131 /* This equals 0, but use constants in case they ever change */
81132@@ -152,6 +160,8 @@ struct vm_area_struct;
81133 /* 4GB DMA on some platforms */
81134 #define GFP_DMA32 __GFP_DMA32
81135
81136+#define GFP_USERCOPY __GFP_USERCOPY
81137+
81138 /* Convert GFP flags to their corresponding migrate type */
81139 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81140 {
81141diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81142new file mode 100644
81143index 0000000..91858e4
81144--- /dev/null
81145+++ b/include/linux/gracl.h
81146@@ -0,0 +1,342 @@
81147+#ifndef GR_ACL_H
81148+#define GR_ACL_H
81149+
81150+#include <linux/grdefs.h>
81151+#include <linux/resource.h>
81152+#include <linux/capability.h>
81153+#include <linux/dcache.h>
81154+#include <asm/resource.h>
81155+
81156+/* Major status information */
81157+
81158+#define GR_VERSION "grsecurity 3.1"
81159+#define GRSECURITY_VERSION 0x3100
81160+
81161+enum {
81162+ GR_SHUTDOWN = 0,
81163+ GR_ENABLE = 1,
81164+ GR_SPROLE = 2,
81165+ GR_OLDRELOAD = 3,
81166+ GR_SEGVMOD = 4,
81167+ GR_STATUS = 5,
81168+ GR_UNSPROLE = 6,
81169+ GR_PASSSET = 7,
81170+ GR_SPROLEPAM = 8,
81171+ GR_RELOAD = 9,
81172+};
81173+
81174+/* Password setup definitions
81175+ * kernel/grhash.c */
81176+enum {
81177+ GR_PW_LEN = 128,
81178+ GR_SALT_LEN = 16,
81179+ GR_SHA_LEN = 32,
81180+};
81181+
81182+enum {
81183+ GR_SPROLE_LEN = 64,
81184+};
81185+
81186+enum {
81187+ GR_NO_GLOB = 0,
81188+ GR_REG_GLOB,
81189+ GR_CREATE_GLOB
81190+};
81191+
81192+#define GR_NLIMITS 32
81193+
81194+/* Begin Data Structures */
81195+
81196+struct sprole_pw {
81197+ unsigned char *rolename;
81198+ unsigned char salt[GR_SALT_LEN];
81199+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81200+};
81201+
81202+struct name_entry {
81203+ __u32 key;
81204+ u64 inode;
81205+ dev_t device;
81206+ char *name;
81207+ __u16 len;
81208+ __u8 deleted;
81209+ struct name_entry *prev;
81210+ struct name_entry *next;
81211+};
81212+
81213+struct inodev_entry {
81214+ struct name_entry *nentry;
81215+ struct inodev_entry *prev;
81216+ struct inodev_entry *next;
81217+};
81218+
81219+struct acl_role_db {
81220+ struct acl_role_label **r_hash;
81221+ __u32 r_size;
81222+};
81223+
81224+struct inodev_db {
81225+ struct inodev_entry **i_hash;
81226+ __u32 i_size;
81227+};
81228+
81229+struct name_db {
81230+ struct name_entry **n_hash;
81231+ __u32 n_size;
81232+};
81233+
81234+struct crash_uid {
81235+ uid_t uid;
81236+ unsigned long expires;
81237+};
81238+
81239+struct gr_hash_struct {
81240+ void **table;
81241+ void **nametable;
81242+ void *first;
81243+ __u32 table_size;
81244+ __u32 used_size;
81245+ int type;
81246+};
81247+
81248+/* Userspace Grsecurity ACL data structures */
81249+
81250+struct acl_subject_label {
81251+ char *filename;
81252+ u64 inode;
81253+ dev_t device;
81254+ __u32 mode;
81255+ kernel_cap_t cap_mask;
81256+ kernel_cap_t cap_lower;
81257+ kernel_cap_t cap_invert_audit;
81258+
81259+ struct rlimit res[GR_NLIMITS];
81260+ __u32 resmask;
81261+
81262+ __u8 user_trans_type;
81263+ __u8 group_trans_type;
81264+ uid_t *user_transitions;
81265+ gid_t *group_transitions;
81266+ __u16 user_trans_num;
81267+ __u16 group_trans_num;
81268+
81269+ __u32 sock_families[2];
81270+ __u32 ip_proto[8];
81271+ __u32 ip_type;
81272+ struct acl_ip_label **ips;
81273+ __u32 ip_num;
81274+ __u32 inaddr_any_override;
81275+
81276+ __u32 crashes;
81277+ unsigned long expires;
81278+
81279+ struct acl_subject_label *parent_subject;
81280+ struct gr_hash_struct *hash;
81281+ struct acl_subject_label *prev;
81282+ struct acl_subject_label *next;
81283+
81284+ struct acl_object_label **obj_hash;
81285+ __u32 obj_hash_size;
81286+ __u16 pax_flags;
81287+};
81288+
81289+struct role_allowed_ip {
81290+ __u32 addr;
81291+ __u32 netmask;
81292+
81293+ struct role_allowed_ip *prev;
81294+ struct role_allowed_ip *next;
81295+};
81296+
81297+struct role_transition {
81298+ char *rolename;
81299+
81300+ struct role_transition *prev;
81301+ struct role_transition *next;
81302+};
81303+
81304+struct acl_role_label {
81305+ char *rolename;
81306+ uid_t uidgid;
81307+ __u16 roletype;
81308+
81309+ __u16 auth_attempts;
81310+ unsigned long expires;
81311+
81312+ struct acl_subject_label *root_label;
81313+ struct gr_hash_struct *hash;
81314+
81315+ struct acl_role_label *prev;
81316+ struct acl_role_label *next;
81317+
81318+ struct role_transition *transitions;
81319+ struct role_allowed_ip *allowed_ips;
81320+ uid_t *domain_children;
81321+ __u16 domain_child_num;
81322+
81323+ umode_t umask;
81324+
81325+ struct acl_subject_label **subj_hash;
81326+ __u32 subj_hash_size;
81327+};
81328+
81329+struct user_acl_role_db {
81330+ struct acl_role_label **r_table;
81331+ __u32 num_pointers; /* Number of allocations to track */
81332+ __u32 num_roles; /* Number of roles */
81333+ __u32 num_domain_children; /* Number of domain children */
81334+ __u32 num_subjects; /* Number of subjects */
81335+ __u32 num_objects; /* Number of objects */
81336+};
81337+
81338+struct acl_object_label {
81339+ char *filename;
81340+ u64 inode;
81341+ dev_t device;
81342+ __u32 mode;
81343+
81344+ struct acl_subject_label *nested;
81345+ struct acl_object_label *globbed;
81346+
81347+ /* next two structures not used */
81348+
81349+ struct acl_object_label *prev;
81350+ struct acl_object_label *next;
81351+};
81352+
81353+struct acl_ip_label {
81354+ char *iface;
81355+ __u32 addr;
81356+ __u32 netmask;
81357+ __u16 low, high;
81358+ __u8 mode;
81359+ __u32 type;
81360+ __u32 proto[8];
81361+
81362+ /* next two structures not used */
81363+
81364+ struct acl_ip_label *prev;
81365+ struct acl_ip_label *next;
81366+};
81367+
81368+struct gr_arg {
81369+ struct user_acl_role_db role_db;
81370+ unsigned char pw[GR_PW_LEN];
81371+ unsigned char salt[GR_SALT_LEN];
81372+ unsigned char sum[GR_SHA_LEN];
81373+ unsigned char sp_role[GR_SPROLE_LEN];
81374+ struct sprole_pw *sprole_pws;
81375+ dev_t segv_device;
81376+ u64 segv_inode;
81377+ uid_t segv_uid;
81378+ __u16 num_sprole_pws;
81379+ __u16 mode;
81380+};
81381+
81382+struct gr_arg_wrapper {
81383+ struct gr_arg *arg;
81384+ __u32 version;
81385+ __u32 size;
81386+};
81387+
81388+struct subject_map {
81389+ struct acl_subject_label *user;
81390+ struct acl_subject_label *kernel;
81391+ struct subject_map *prev;
81392+ struct subject_map *next;
81393+};
81394+
81395+struct acl_subj_map_db {
81396+ struct subject_map **s_hash;
81397+ __u32 s_size;
81398+};
81399+
81400+struct gr_policy_state {
81401+ struct sprole_pw **acl_special_roles;
81402+ __u16 num_sprole_pws;
81403+ struct acl_role_label *kernel_role;
81404+ struct acl_role_label *role_list;
81405+ struct acl_role_label *default_role;
81406+ struct acl_role_db acl_role_set;
81407+ struct acl_subj_map_db subj_map_set;
81408+ struct name_db name_set;
81409+ struct inodev_db inodev_set;
81410+};
81411+
81412+struct gr_alloc_state {
81413+ unsigned long alloc_stack_next;
81414+ unsigned long alloc_stack_size;
81415+ void **alloc_stack;
81416+};
81417+
81418+struct gr_reload_state {
81419+ struct gr_policy_state oldpolicy;
81420+ struct gr_alloc_state oldalloc;
81421+ struct gr_policy_state newpolicy;
81422+ struct gr_alloc_state newalloc;
81423+ struct gr_policy_state *oldpolicy_ptr;
81424+ struct gr_alloc_state *oldalloc_ptr;
81425+ unsigned char oldmode;
81426+};
81427+
81428+/* End Data Structures Section */
81429+
81430+/* Hash functions generated by empirical testing by Brad Spengler
81431+ Makes good use of the low bits of the inode. Generally 0-1 times
81432+ in loop for successful match. 0-3 for unsuccessful match.
81433+ Shift/add algorithm with modulus of table size and an XOR*/
81434+
81435+static __inline__ unsigned int
81436+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81437+{
81438+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81439+}
81440+
81441+ static __inline__ unsigned int
81442+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81443+{
81444+ return ((const unsigned long)userp % sz);
81445+}
81446+
81447+static __inline__ unsigned int
81448+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
81449+{
81450+ unsigned int rem;
81451+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
81452+ return rem;
81453+}
81454+
81455+static __inline__ unsigned int
81456+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81457+{
81458+ return full_name_hash((const unsigned char *)name, len) % sz;
81459+}
81460+
81461+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81462+ subj = NULL; \
81463+ iter = 0; \
81464+ while (iter < role->subj_hash_size) { \
81465+ if (subj == NULL) \
81466+ subj = role->subj_hash[iter]; \
81467+ if (subj == NULL) { \
81468+ iter++; \
81469+ continue; \
81470+ }
81471+
81472+#define FOR_EACH_SUBJECT_END(subj,iter) \
81473+ subj = subj->next; \
81474+ if (subj == NULL) \
81475+ iter++; \
81476+ }
81477+
81478+
81479+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81480+ subj = role->hash->first; \
81481+ while (subj != NULL) {
81482+
81483+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81484+ subj = subj->next; \
81485+ }
81486+
81487+#endif
81488+
81489diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81490new file mode 100644
81491index 0000000..af64092
81492--- /dev/null
81493+++ b/include/linux/gracl_compat.h
81494@@ -0,0 +1,156 @@
81495+#ifndef GR_ACL_COMPAT_H
81496+#define GR_ACL_COMPAT_H
81497+
81498+#include <linux/resource.h>
81499+#include <asm/resource.h>
81500+
81501+struct sprole_pw_compat {
81502+ compat_uptr_t rolename;
81503+ unsigned char salt[GR_SALT_LEN];
81504+ unsigned char sum[GR_SHA_LEN];
81505+};
81506+
81507+struct gr_hash_struct_compat {
81508+ compat_uptr_t table;
81509+ compat_uptr_t nametable;
81510+ compat_uptr_t first;
81511+ __u32 table_size;
81512+ __u32 used_size;
81513+ int type;
81514+};
81515+
81516+struct acl_subject_label_compat {
81517+ compat_uptr_t filename;
81518+ compat_u64 inode;
81519+ __u32 device;
81520+ __u32 mode;
81521+ kernel_cap_t cap_mask;
81522+ kernel_cap_t cap_lower;
81523+ kernel_cap_t cap_invert_audit;
81524+
81525+ struct compat_rlimit res[GR_NLIMITS];
81526+ __u32 resmask;
81527+
81528+ __u8 user_trans_type;
81529+ __u8 group_trans_type;
81530+ compat_uptr_t user_transitions;
81531+ compat_uptr_t group_transitions;
81532+ __u16 user_trans_num;
81533+ __u16 group_trans_num;
81534+
81535+ __u32 sock_families[2];
81536+ __u32 ip_proto[8];
81537+ __u32 ip_type;
81538+ compat_uptr_t ips;
81539+ __u32 ip_num;
81540+ __u32 inaddr_any_override;
81541+
81542+ __u32 crashes;
81543+ compat_ulong_t expires;
81544+
81545+ compat_uptr_t parent_subject;
81546+ compat_uptr_t hash;
81547+ compat_uptr_t prev;
81548+ compat_uptr_t next;
81549+
81550+ compat_uptr_t obj_hash;
81551+ __u32 obj_hash_size;
81552+ __u16 pax_flags;
81553+};
81554+
81555+struct role_allowed_ip_compat {
81556+ __u32 addr;
81557+ __u32 netmask;
81558+
81559+ compat_uptr_t prev;
81560+ compat_uptr_t next;
81561+};
81562+
81563+struct role_transition_compat {
81564+ compat_uptr_t rolename;
81565+
81566+ compat_uptr_t prev;
81567+ compat_uptr_t next;
81568+};
81569+
81570+struct acl_role_label_compat {
81571+ compat_uptr_t rolename;
81572+ uid_t uidgid;
81573+ __u16 roletype;
81574+
81575+ __u16 auth_attempts;
81576+ compat_ulong_t expires;
81577+
81578+ compat_uptr_t root_label;
81579+ compat_uptr_t hash;
81580+
81581+ compat_uptr_t prev;
81582+ compat_uptr_t next;
81583+
81584+ compat_uptr_t transitions;
81585+ compat_uptr_t allowed_ips;
81586+ compat_uptr_t domain_children;
81587+ __u16 domain_child_num;
81588+
81589+ umode_t umask;
81590+
81591+ compat_uptr_t subj_hash;
81592+ __u32 subj_hash_size;
81593+};
81594+
81595+struct user_acl_role_db_compat {
81596+ compat_uptr_t r_table;
81597+ __u32 num_pointers;
81598+ __u32 num_roles;
81599+ __u32 num_domain_children;
81600+ __u32 num_subjects;
81601+ __u32 num_objects;
81602+};
81603+
81604+struct acl_object_label_compat {
81605+ compat_uptr_t filename;
81606+ compat_u64 inode;
81607+ __u32 device;
81608+ __u32 mode;
81609+
81610+ compat_uptr_t nested;
81611+ compat_uptr_t globbed;
81612+
81613+ compat_uptr_t prev;
81614+ compat_uptr_t next;
81615+};
81616+
81617+struct acl_ip_label_compat {
81618+ compat_uptr_t iface;
81619+ __u32 addr;
81620+ __u32 netmask;
81621+ __u16 low, high;
81622+ __u8 mode;
81623+ __u32 type;
81624+ __u32 proto[8];
81625+
81626+ compat_uptr_t prev;
81627+ compat_uptr_t next;
81628+};
81629+
81630+struct gr_arg_compat {
81631+ struct user_acl_role_db_compat role_db;
81632+ unsigned char pw[GR_PW_LEN];
81633+ unsigned char salt[GR_SALT_LEN];
81634+ unsigned char sum[GR_SHA_LEN];
81635+ unsigned char sp_role[GR_SPROLE_LEN];
81636+ compat_uptr_t sprole_pws;
81637+ __u32 segv_device;
81638+ compat_u64 segv_inode;
81639+ uid_t segv_uid;
81640+ __u16 num_sprole_pws;
81641+ __u16 mode;
81642+};
81643+
81644+struct gr_arg_wrapper_compat {
81645+ compat_uptr_t arg;
81646+ __u32 version;
81647+ __u32 size;
81648+};
81649+
81650+#endif
81651diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81652new file mode 100644
81653index 0000000..323ecf2
81654--- /dev/null
81655+++ b/include/linux/gralloc.h
81656@@ -0,0 +1,9 @@
81657+#ifndef __GRALLOC_H
81658+#define __GRALLOC_H
81659+
81660+void acl_free_all(void);
81661+int acl_alloc_stack_init(unsigned long size);
81662+void *acl_alloc(unsigned long len);
81663+void *acl_alloc_num(unsigned long num, unsigned long len);
81664+
81665+#endif
81666diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81667new file mode 100644
81668index 0000000..be66033
81669--- /dev/null
81670+++ b/include/linux/grdefs.h
81671@@ -0,0 +1,140 @@
81672+#ifndef GRDEFS_H
81673+#define GRDEFS_H
81674+
81675+/* Begin grsecurity status declarations */
81676+
81677+enum {
81678+ GR_READY = 0x01,
81679+ GR_STATUS_INIT = 0x00 // disabled state
81680+};
81681+
81682+/* Begin ACL declarations */
81683+
81684+/* Role flags */
81685+
81686+enum {
81687+ GR_ROLE_USER = 0x0001,
81688+ GR_ROLE_GROUP = 0x0002,
81689+ GR_ROLE_DEFAULT = 0x0004,
81690+ GR_ROLE_SPECIAL = 0x0008,
81691+ GR_ROLE_AUTH = 0x0010,
81692+ GR_ROLE_NOPW = 0x0020,
81693+ GR_ROLE_GOD = 0x0040,
81694+ GR_ROLE_LEARN = 0x0080,
81695+ GR_ROLE_TPE = 0x0100,
81696+ GR_ROLE_DOMAIN = 0x0200,
81697+ GR_ROLE_PAM = 0x0400,
81698+ GR_ROLE_PERSIST = 0x0800
81699+};
81700+
81701+/* ACL Subject and Object mode flags */
81702+enum {
81703+ GR_DELETED = 0x80000000
81704+};
81705+
81706+/* ACL Object-only mode flags */
81707+enum {
81708+ GR_READ = 0x00000001,
81709+ GR_APPEND = 0x00000002,
81710+ GR_WRITE = 0x00000004,
81711+ GR_EXEC = 0x00000008,
81712+ GR_FIND = 0x00000010,
81713+ GR_INHERIT = 0x00000020,
81714+ GR_SETID = 0x00000040,
81715+ GR_CREATE = 0x00000080,
81716+ GR_DELETE = 0x00000100,
81717+ GR_LINK = 0x00000200,
81718+ GR_AUDIT_READ = 0x00000400,
81719+ GR_AUDIT_APPEND = 0x00000800,
81720+ GR_AUDIT_WRITE = 0x00001000,
81721+ GR_AUDIT_EXEC = 0x00002000,
81722+ GR_AUDIT_FIND = 0x00004000,
81723+ GR_AUDIT_INHERIT= 0x00008000,
81724+ GR_AUDIT_SETID = 0x00010000,
81725+ GR_AUDIT_CREATE = 0x00020000,
81726+ GR_AUDIT_DELETE = 0x00040000,
81727+ GR_AUDIT_LINK = 0x00080000,
81728+ GR_PTRACERD = 0x00100000,
81729+ GR_NOPTRACE = 0x00200000,
81730+ GR_SUPPRESS = 0x00400000,
81731+ GR_NOLEARN = 0x00800000,
81732+ GR_INIT_TRANSFER= 0x01000000
81733+};
81734+
81735+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81736+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81737+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81738+
81739+/* ACL subject-only mode flags */
81740+enum {
81741+ GR_KILL = 0x00000001,
81742+ GR_VIEW = 0x00000002,
81743+ GR_PROTECTED = 0x00000004,
81744+ GR_LEARN = 0x00000008,
81745+ GR_OVERRIDE = 0x00000010,
81746+ /* just a placeholder, this mode is only used in userspace */
81747+ GR_DUMMY = 0x00000020,
81748+ GR_PROTSHM = 0x00000040,
81749+ GR_KILLPROC = 0x00000080,
81750+ GR_KILLIPPROC = 0x00000100,
81751+ /* just a placeholder, this mode is only used in userspace */
81752+ GR_NOTROJAN = 0x00000200,
81753+ GR_PROTPROCFD = 0x00000400,
81754+ GR_PROCACCT = 0x00000800,
81755+ GR_RELAXPTRACE = 0x00001000,
81756+ //GR_NESTED = 0x00002000,
81757+ GR_INHERITLEARN = 0x00004000,
81758+ GR_PROCFIND = 0x00008000,
81759+ GR_POVERRIDE = 0x00010000,
81760+ GR_KERNELAUTH = 0x00020000,
81761+ GR_ATSECURE = 0x00040000,
81762+ GR_SHMEXEC = 0x00080000
81763+};
81764+
81765+enum {
81766+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
81767+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
81768+ GR_PAX_ENABLE_MPROTECT = 0x0004,
81769+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
81770+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
81771+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
81772+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
81773+ GR_PAX_DISABLE_MPROTECT = 0x0400,
81774+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
81775+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
81776+};
81777+
81778+enum {
81779+ GR_ID_USER = 0x01,
81780+ GR_ID_GROUP = 0x02,
81781+};
81782+
81783+enum {
81784+ GR_ID_ALLOW = 0x01,
81785+ GR_ID_DENY = 0x02,
81786+};
81787+
81788+#define GR_CRASH_RES 31
81789+#define GR_UIDTABLE_MAX 500
81790+
81791+/* begin resource learning section */
81792+enum {
81793+ GR_RLIM_CPU_BUMP = 60,
81794+ GR_RLIM_FSIZE_BUMP = 50000,
81795+ GR_RLIM_DATA_BUMP = 10000,
81796+ GR_RLIM_STACK_BUMP = 1000,
81797+ GR_RLIM_CORE_BUMP = 10000,
81798+ GR_RLIM_RSS_BUMP = 500000,
81799+ GR_RLIM_NPROC_BUMP = 1,
81800+ GR_RLIM_NOFILE_BUMP = 5,
81801+ GR_RLIM_MEMLOCK_BUMP = 50000,
81802+ GR_RLIM_AS_BUMP = 500000,
81803+ GR_RLIM_LOCKS_BUMP = 2,
81804+ GR_RLIM_SIGPENDING_BUMP = 5,
81805+ GR_RLIM_MSGQUEUE_BUMP = 10000,
81806+ GR_RLIM_NICE_BUMP = 1,
81807+ GR_RLIM_RTPRIO_BUMP = 1,
81808+ GR_RLIM_RTTIME_BUMP = 1000000
81809+};
81810+
81811+#endif
81812diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
81813new file mode 100644
81814index 0000000..fb1de5d
81815--- /dev/null
81816+++ b/include/linux/grinternal.h
81817@@ -0,0 +1,230 @@
81818+#ifndef __GRINTERNAL_H
81819+#define __GRINTERNAL_H
81820+
81821+#ifdef CONFIG_GRKERNSEC
81822+
81823+#include <linux/fs.h>
81824+#include <linux/mnt_namespace.h>
81825+#include <linux/nsproxy.h>
81826+#include <linux/gracl.h>
81827+#include <linux/grdefs.h>
81828+#include <linux/grmsg.h>
81829+
81830+void gr_add_learn_entry(const char *fmt, ...)
81831+ __attribute__ ((format (printf, 1, 2)));
81832+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
81833+ const struct vfsmount *mnt);
81834+__u32 gr_check_create(const struct dentry *new_dentry,
81835+ const struct dentry *parent,
81836+ const struct vfsmount *mnt, const __u32 mode);
81837+int gr_check_protected_task(const struct task_struct *task);
81838+__u32 to_gr_audit(const __u32 reqmode);
81839+int gr_set_acls(const int type);
81840+int gr_acl_is_enabled(void);
81841+char gr_roletype_to_char(void);
81842+
81843+void gr_handle_alertkill(struct task_struct *task);
81844+char *gr_to_filename(const struct dentry *dentry,
81845+ const struct vfsmount *mnt);
81846+char *gr_to_filename1(const struct dentry *dentry,
81847+ const struct vfsmount *mnt);
81848+char *gr_to_filename2(const struct dentry *dentry,
81849+ const struct vfsmount *mnt);
81850+char *gr_to_filename3(const struct dentry *dentry,
81851+ const struct vfsmount *mnt);
81852+
81853+extern int grsec_enable_ptrace_readexec;
81854+extern int grsec_enable_harden_ptrace;
81855+extern int grsec_enable_link;
81856+extern int grsec_enable_fifo;
81857+extern int grsec_enable_execve;
81858+extern int grsec_enable_shm;
81859+extern int grsec_enable_execlog;
81860+extern int grsec_enable_signal;
81861+extern int grsec_enable_audit_ptrace;
81862+extern int grsec_enable_forkfail;
81863+extern int grsec_enable_time;
81864+extern int grsec_enable_rofs;
81865+extern int grsec_deny_new_usb;
81866+extern int grsec_enable_chroot_shmat;
81867+extern int grsec_enable_chroot_mount;
81868+extern int grsec_enable_chroot_double;
81869+extern int grsec_enable_chroot_pivot;
81870+extern int grsec_enable_chroot_chdir;
81871+extern int grsec_enable_chroot_chmod;
81872+extern int grsec_enable_chroot_mknod;
81873+extern int grsec_enable_chroot_fchdir;
81874+extern int grsec_enable_chroot_nice;
81875+extern int grsec_enable_chroot_execlog;
81876+extern int grsec_enable_chroot_caps;
81877+extern int grsec_enable_chroot_rename;
81878+extern int grsec_enable_chroot_sysctl;
81879+extern int grsec_enable_chroot_unix;
81880+extern int grsec_enable_symlinkown;
81881+extern kgid_t grsec_symlinkown_gid;
81882+extern int grsec_enable_tpe;
81883+extern kgid_t grsec_tpe_gid;
81884+extern int grsec_enable_tpe_all;
81885+extern int grsec_enable_tpe_invert;
81886+extern int grsec_enable_socket_all;
81887+extern kgid_t grsec_socket_all_gid;
81888+extern int grsec_enable_socket_client;
81889+extern kgid_t grsec_socket_client_gid;
81890+extern int grsec_enable_socket_server;
81891+extern kgid_t grsec_socket_server_gid;
81892+extern kgid_t grsec_audit_gid;
81893+extern int grsec_enable_group;
81894+extern int grsec_enable_log_rwxmaps;
81895+extern int grsec_enable_mount;
81896+extern int grsec_enable_chdir;
81897+extern int grsec_resource_logging;
81898+extern int grsec_enable_blackhole;
81899+extern int grsec_lastack_retries;
81900+extern int grsec_enable_brute;
81901+extern int grsec_enable_harden_ipc;
81902+extern int grsec_lock;
81903+
81904+extern spinlock_t grsec_alert_lock;
81905+extern unsigned long grsec_alert_wtime;
81906+extern unsigned long grsec_alert_fyet;
81907+
81908+extern spinlock_t grsec_audit_lock;
81909+
81910+extern rwlock_t grsec_exec_file_lock;
81911+
81912+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
81913+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
81914+ (tsk)->exec_file->f_path.mnt) : "/")
81915+
81916+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
81917+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
81918+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
81919+
81920+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
81921+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
81922+ (tsk)->exec_file->f_path.mnt) : "/")
81923+
81924+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
81925+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
81926+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
81927+
81928+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
81929+
81930+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
81931+
81932+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
81933+{
81934+ if (file1 && file2) {
81935+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
81936+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
81937+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
81938+ return true;
81939+ }
81940+
81941+ return false;
81942+}
81943+
81944+#define GR_CHROOT_CAPS {{ \
81945+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
81946+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
81947+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
81948+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
81949+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
81950+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
81951+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
81952+
81953+#define security_learn(normal_msg,args...) \
81954+({ \
81955+ read_lock(&grsec_exec_file_lock); \
81956+ gr_add_learn_entry(normal_msg "\n", ## args); \
81957+ read_unlock(&grsec_exec_file_lock); \
81958+})
81959+
81960+enum {
81961+ GR_DO_AUDIT,
81962+ GR_DONT_AUDIT,
81963+ /* used for non-audit messages that we shouldn't kill the task on */
81964+ GR_DONT_AUDIT_GOOD
81965+};
81966+
81967+enum {
81968+ GR_TTYSNIFF,
81969+ GR_RBAC,
81970+ GR_RBAC_STR,
81971+ GR_STR_RBAC,
81972+ GR_RBAC_MODE2,
81973+ GR_RBAC_MODE3,
81974+ GR_FILENAME,
81975+ GR_SYSCTL_HIDDEN,
81976+ GR_NOARGS,
81977+ GR_ONE_INT,
81978+ GR_ONE_INT_TWO_STR,
81979+ GR_ONE_STR,
81980+ GR_STR_INT,
81981+ GR_TWO_STR_INT,
81982+ GR_TWO_INT,
81983+ GR_TWO_U64,
81984+ GR_THREE_INT,
81985+ GR_FIVE_INT_TWO_STR,
81986+ GR_TWO_STR,
81987+ GR_THREE_STR,
81988+ GR_FOUR_STR,
81989+ GR_STR_FILENAME,
81990+ GR_FILENAME_STR,
81991+ GR_FILENAME_TWO_INT,
81992+ GR_FILENAME_TWO_INT_STR,
81993+ GR_TEXTREL,
81994+ GR_PTRACE,
81995+ GR_RESOURCE,
81996+ GR_CAP,
81997+ GR_SIG,
81998+ GR_SIG2,
81999+ GR_CRASH1,
82000+ GR_CRASH2,
82001+ GR_PSACCT,
82002+ GR_RWXMAP,
82003+ GR_RWXMAPVMA
82004+};
82005+
82006+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82007+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82008+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82009+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82010+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82011+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82012+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82013+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82014+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82015+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82016+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82017+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82018+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82019+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82020+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82021+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82022+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82023+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82024+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82025+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82026+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82027+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82028+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82029+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82030+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82031+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82032+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82033+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82034+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82035+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82036+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82037+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82038+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82039+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82040+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82041+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82042+
82043+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82044+
82045+#endif
82046+
82047+#endif
82048diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82049new file mode 100644
82050index 0000000..26ef560
82051--- /dev/null
82052+++ b/include/linux/grmsg.h
82053@@ -0,0 +1,118 @@
82054+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82055+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82056+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82057+#define GR_STOPMOD_MSG "denied modification of module state by "
82058+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82059+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82060+#define GR_IOPERM_MSG "denied use of ioperm() by "
82061+#define GR_IOPL_MSG "denied use of iopl() by "
82062+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82063+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82064+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82065+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82066+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82067+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82068+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82069+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82070+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82071+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82072+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82073+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82074+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82075+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82076+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82077+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82078+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82079+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82080+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82081+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82082+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82083+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82084+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82085+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82086+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82087+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82088+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82089+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82090+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82091+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82092+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82093+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82094+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82095+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82096+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82097+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82098+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82099+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82100+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82101+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82102+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82103+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82104+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82105+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82106+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82107+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82108+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82109+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82110+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82111+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82112+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82113+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82114+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82115+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82116+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82117+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82118+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82119+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82120+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82121+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82122+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82123+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82124+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82125+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82126+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82127+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82128+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82129+#define GR_NICE_CHROOT_MSG "denied priority change by "
82130+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82131+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82132+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82133+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82134+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82135+#define GR_TIME_MSG "time set by "
82136+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82137+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82138+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82139+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82140+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82141+#define GR_BIND_MSG "denied bind() by "
82142+#define GR_CONNECT_MSG "denied connect() by "
82143+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82144+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82145+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82146+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82147+#define GR_CAP_ACL_MSG "use of %s denied for "
82148+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82149+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82150+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82151+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82152+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82153+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82154+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82155+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82156+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82157+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82158+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82159+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82160+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82161+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82162+#define GR_VM86_MSG "denied use of vm86 by "
82163+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82164+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82165+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82166+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82167+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82168+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82169+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82170+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82171+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
82172diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82173new file mode 100644
82174index 0000000..63c1850
82175--- /dev/null
82176+++ b/include/linux/grsecurity.h
82177@@ -0,0 +1,250 @@
82178+#ifndef GR_SECURITY_H
82179+#define GR_SECURITY_H
82180+#include <linux/fs.h>
82181+#include <linux/fs_struct.h>
82182+#include <linux/binfmts.h>
82183+#include <linux/gracl.h>
82184+
82185+/* notify of brain-dead configs */
82186+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82187+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82188+#endif
82189+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82190+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82191+#endif
82192+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82193+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82194+#endif
82195+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82196+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82197+#endif
82198+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82199+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82200+#endif
82201+
82202+int gr_handle_new_usb(void);
82203+
82204+void gr_handle_brute_attach(int dumpable);
82205+void gr_handle_brute_check(void);
82206+void gr_handle_kernel_exploit(void);
82207+
82208+char gr_roletype_to_char(void);
82209+
82210+int gr_proc_is_restricted(void);
82211+
82212+int gr_acl_enable_at_secure(void);
82213+
82214+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82215+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82216+
82217+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82218+
82219+void gr_del_task_from_ip_table(struct task_struct *p);
82220+
82221+int gr_pid_is_chrooted(struct task_struct *p);
82222+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82223+int gr_handle_chroot_nice(void);
82224+int gr_handle_chroot_sysctl(const int op);
82225+int gr_handle_chroot_setpriority(struct task_struct *p,
82226+ const int niceval);
82227+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82228+int gr_chroot_fhandle(void);
82229+int gr_handle_chroot_chroot(const struct dentry *dentry,
82230+ const struct vfsmount *mnt);
82231+void gr_handle_chroot_chdir(const struct path *path);
82232+int gr_handle_chroot_chmod(const struct dentry *dentry,
82233+ const struct vfsmount *mnt, const int mode);
82234+int gr_handle_chroot_mknod(const struct dentry *dentry,
82235+ const struct vfsmount *mnt, const int mode);
82236+int gr_handle_chroot_mount(const struct dentry *dentry,
82237+ const struct vfsmount *mnt,
82238+ const char *dev_name);
82239+int gr_handle_chroot_pivot(void);
82240+int gr_handle_chroot_unix(const pid_t pid);
82241+
82242+int gr_handle_rawio(const struct inode *inode);
82243+
82244+void gr_handle_ioperm(void);
82245+void gr_handle_iopl(void);
82246+void gr_handle_msr_write(void);
82247+
82248+umode_t gr_acl_umask(void);
82249+
82250+int gr_tpe_allow(const struct file *file);
82251+
82252+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82253+void gr_clear_chroot_entries(struct task_struct *task);
82254+
82255+void gr_log_forkfail(const int retval);
82256+void gr_log_timechange(void);
82257+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82258+void gr_log_chdir(const struct dentry *dentry,
82259+ const struct vfsmount *mnt);
82260+void gr_log_chroot_exec(const struct dentry *dentry,
82261+ const struct vfsmount *mnt);
82262+void gr_log_remount(const char *devname, const int retval);
82263+void gr_log_unmount(const char *devname, const int retval);
82264+void gr_log_mount(const char *from, struct path *to, const int retval);
82265+void gr_log_textrel(struct vm_area_struct *vma);
82266+void gr_log_ptgnustack(struct file *file);
82267+void gr_log_rwxmmap(struct file *file);
82268+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82269+
82270+int gr_handle_follow_link(const struct inode *parent,
82271+ const struct inode *inode,
82272+ const struct dentry *dentry,
82273+ const struct vfsmount *mnt);
82274+int gr_handle_fifo(const struct dentry *dentry,
82275+ const struct vfsmount *mnt,
82276+ const struct dentry *dir, const int flag,
82277+ const int acc_mode);
82278+int gr_handle_hardlink(const struct dentry *dentry,
82279+ const struct vfsmount *mnt,
82280+ struct inode *inode,
82281+ const int mode, const struct filename *to);
82282+
82283+int gr_is_capable(const int cap);
82284+int gr_is_capable_nolog(const int cap);
82285+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82286+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82287+
82288+void gr_copy_label(struct task_struct *tsk);
82289+void gr_handle_crash(struct task_struct *task, const int sig);
82290+int gr_handle_signal(const struct task_struct *p, const int sig);
82291+int gr_check_crash_uid(const kuid_t uid);
82292+int gr_check_protected_task(const struct task_struct *task);
82293+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82294+int gr_acl_handle_mmap(const struct file *file,
82295+ const unsigned long prot);
82296+int gr_acl_handle_mprotect(const struct file *file,
82297+ const unsigned long prot);
82298+int gr_check_hidden_task(const struct task_struct *tsk);
82299+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82300+ const struct vfsmount *mnt);
82301+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82302+ const struct vfsmount *mnt);
82303+__u32 gr_acl_handle_access(const struct dentry *dentry,
82304+ const struct vfsmount *mnt, const int fmode);
82305+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82306+ const struct vfsmount *mnt, umode_t *mode);
82307+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82308+ const struct vfsmount *mnt);
82309+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82310+ const struct vfsmount *mnt);
82311+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82312+ const struct vfsmount *mnt);
82313+int gr_handle_ptrace(struct task_struct *task, const long request);
82314+int gr_handle_proc_ptrace(struct task_struct *task);
82315+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82316+ const struct vfsmount *mnt);
82317+int gr_check_crash_exec(const struct file *filp);
82318+int gr_acl_is_enabled(void);
82319+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82320+ const kgid_t gid);
82321+int gr_set_proc_label(const struct dentry *dentry,
82322+ const struct vfsmount *mnt,
82323+ const int unsafe_flags);
82324+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82325+ const struct vfsmount *mnt);
82326+__u32 gr_acl_handle_open(const struct dentry *dentry,
82327+ const struct vfsmount *mnt, int acc_mode);
82328+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82329+ const struct dentry *p_dentry,
82330+ const struct vfsmount *p_mnt,
82331+ int open_flags, int acc_mode, const int imode);
82332+void gr_handle_create(const struct dentry *dentry,
82333+ const struct vfsmount *mnt);
82334+void gr_handle_proc_create(const struct dentry *dentry,
82335+ const struct inode *inode);
82336+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82337+ const struct dentry *parent_dentry,
82338+ const struct vfsmount *parent_mnt,
82339+ const int mode);
82340+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82341+ const struct dentry *parent_dentry,
82342+ const struct vfsmount *parent_mnt);
82343+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82344+ const struct vfsmount *mnt);
82345+void gr_handle_delete(const u64 ino, const dev_t dev);
82346+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82347+ const struct vfsmount *mnt);
82348+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82349+ const struct dentry *parent_dentry,
82350+ const struct vfsmount *parent_mnt,
82351+ const struct filename *from);
82352+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82353+ const struct dentry *parent_dentry,
82354+ const struct vfsmount *parent_mnt,
82355+ const struct dentry *old_dentry,
82356+ const struct vfsmount *old_mnt, const struct filename *to);
82357+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82358+int gr_acl_handle_rename(struct dentry *new_dentry,
82359+ struct dentry *parent_dentry,
82360+ const struct vfsmount *parent_mnt,
82361+ struct dentry *old_dentry,
82362+ struct inode *old_parent_inode,
82363+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82364+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82365+ struct dentry *old_dentry,
82366+ struct dentry *new_dentry,
82367+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82368+__u32 gr_check_link(const struct dentry *new_dentry,
82369+ const struct dentry *parent_dentry,
82370+ const struct vfsmount *parent_mnt,
82371+ const struct dentry *old_dentry,
82372+ const struct vfsmount *old_mnt);
82373+int gr_acl_handle_filldir(const struct file *file, const char *name,
82374+ const unsigned int namelen, const u64 ino);
82375+
82376+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82377+ const struct vfsmount *mnt);
82378+void gr_acl_handle_exit(void);
82379+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82380+int gr_acl_handle_procpidmem(const struct task_struct *task);
82381+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82382+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82383+void gr_audit_ptrace(struct task_struct *task);
82384+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82385+u64 gr_get_ino_from_dentry(struct dentry *dentry);
82386+void gr_put_exec_file(struct task_struct *task);
82387+
82388+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82389+
82390+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82391+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82392+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
82393+ struct dentry *newdentry, struct vfsmount *newmnt);
82394+
82395+#ifdef CONFIG_GRKERNSEC_RESLOG
82396+extern void gr_log_resource(const struct task_struct *task, const int res,
82397+ const unsigned long wanted, const int gt);
82398+#else
82399+static inline void gr_log_resource(const struct task_struct *task, const int res,
82400+ const unsigned long wanted, const int gt)
82401+{
82402+}
82403+#endif
82404+
82405+#ifdef CONFIG_GRKERNSEC
82406+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82407+void gr_handle_vm86(void);
82408+void gr_handle_mem_readwrite(u64 from, u64 to);
82409+
82410+void gr_log_badprocpid(const char *entry);
82411+
82412+extern int grsec_enable_dmesg;
82413+extern int grsec_disable_privio;
82414+
82415+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82416+extern kgid_t grsec_proc_gid;
82417+#endif
82418+
82419+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82420+extern int grsec_enable_chroot_findtask;
82421+#endif
82422+#ifdef CONFIG_GRKERNSEC_SETXID
82423+extern int grsec_enable_setxid;
82424+#endif
82425+#endif
82426+
82427+#endif
82428diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82429new file mode 100644
82430index 0000000..e7ffaaf
82431--- /dev/null
82432+++ b/include/linux/grsock.h
82433@@ -0,0 +1,19 @@
82434+#ifndef __GRSOCK_H
82435+#define __GRSOCK_H
82436+
82437+extern void gr_attach_curr_ip(const struct sock *sk);
82438+extern int gr_handle_sock_all(const int family, const int type,
82439+ const int protocol);
82440+extern int gr_handle_sock_server(const struct sockaddr *sck);
82441+extern int gr_handle_sock_server_other(const struct sock *sck);
82442+extern int gr_handle_sock_client(const struct sockaddr *sck);
82443+extern int gr_search_connect(struct socket * sock,
82444+ struct sockaddr_in * addr);
82445+extern int gr_search_bind(struct socket * sock,
82446+ struct sockaddr_in * addr);
82447+extern int gr_search_listen(struct socket * sock);
82448+extern int gr_search_accept(struct socket * sock);
82449+extern int gr_search_socket(const int domain, const int type,
82450+ const int protocol);
82451+
82452+#endif
82453diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82454index 9286a46..373f27f 100644
82455--- a/include/linux/highmem.h
82456+++ b/include/linux/highmem.h
82457@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82458 kunmap_atomic(kaddr);
82459 }
82460
82461+static inline void sanitize_highpage(struct page *page)
82462+{
82463+ void *kaddr;
82464+ unsigned long flags;
82465+
82466+ local_irq_save(flags);
82467+ kaddr = kmap_atomic(page);
82468+ clear_page(kaddr);
82469+ kunmap_atomic(kaddr);
82470+ local_irq_restore(flags);
82471+}
82472+
82473 static inline void zero_user_segments(struct page *page,
82474 unsigned start1, unsigned end1,
82475 unsigned start2, unsigned end2)
82476diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82477index 1c7b89a..7dda400 100644
82478--- a/include/linux/hwmon-sysfs.h
82479+++ b/include/linux/hwmon-sysfs.h
82480@@ -25,7 +25,8 @@
82481 struct sensor_device_attribute{
82482 struct device_attribute dev_attr;
82483 int index;
82484-};
82485+} __do_const;
82486+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82487 #define to_sensor_dev_attr(_dev_attr) \
82488 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82489
82490@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82491 struct device_attribute dev_attr;
82492 u8 index;
82493 u8 nr;
82494-};
82495+} __do_const;
82496+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82497 #define to_sensor_dev_attr_2(_dev_attr) \
82498 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82499
82500diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82501index 7c76959..153e597 100644
82502--- a/include/linux/i2c.h
82503+++ b/include/linux/i2c.h
82504@@ -413,6 +413,7 @@ struct i2c_algorithm {
82505 int (*unreg_slave)(struct i2c_client *client);
82506 #endif
82507 };
82508+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82509
82510 /**
82511 * struct i2c_bus_recovery_info - I2C bus recovery information
82512diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82513index d23c3c2..eb63c81 100644
82514--- a/include/linux/i2o.h
82515+++ b/include/linux/i2o.h
82516@@ -565,7 +565,7 @@ struct i2o_controller {
82517 struct i2o_device *exec; /* Executive */
82518 #if BITS_PER_LONG == 64
82519 spinlock_t context_list_lock; /* lock for context_list */
82520- atomic_t context_list_counter; /* needed for unique contexts */
82521+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82522 struct list_head context_list; /* list of context id's
82523 and pointers */
82524 #endif
82525diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82526index aff7ad8..3942bbd 100644
82527--- a/include/linux/if_pppox.h
82528+++ b/include/linux/if_pppox.h
82529@@ -76,7 +76,7 @@ struct pppox_proto {
82530 int (*ioctl)(struct socket *sock, unsigned int cmd,
82531 unsigned long arg);
82532 struct module *owner;
82533-};
82534+} __do_const;
82535
82536 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82537 extern void unregister_pppox_proto(int proto_num);
82538diff --git a/include/linux/init.h b/include/linux/init.h
82539index 2df8e8d..3e1280d 100644
82540--- a/include/linux/init.h
82541+++ b/include/linux/init.h
82542@@ -37,9 +37,17 @@
82543 * section.
82544 */
82545
82546+#define add_init_latent_entropy __latent_entropy
82547+
82548+#ifdef CONFIG_MEMORY_HOTPLUG
82549+#define add_meminit_latent_entropy
82550+#else
82551+#define add_meminit_latent_entropy __latent_entropy
82552+#endif
82553+
82554 /* These are for everybody (although not all archs will actually
82555 discard it in modules) */
82556-#define __init __section(.init.text) __cold notrace
82557+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82558 #define __initdata __section(.init.data)
82559 #define __initconst __constsection(.init.rodata)
82560 #define __exitdata __section(.exit.data)
82561@@ -100,7 +108,7 @@
82562 #define __cpuexitconst
82563
82564 /* Used for MEMORY_HOTPLUG */
82565-#define __meminit __section(.meminit.text) __cold notrace
82566+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82567 #define __meminitdata __section(.meminit.data)
82568 #define __meminitconst __constsection(.meminit.rodata)
82569 #define __memexit __section(.memexit.text) __exitused __cold notrace
82570diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82571index 3037fc0..c6527ce 100644
82572--- a/include/linux/init_task.h
82573+++ b/include/linux/init_task.h
82574@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
82575
82576 #define INIT_TASK_COMM "swapper"
82577
82578+#ifdef CONFIG_X86
82579+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82580+#else
82581+#define INIT_TASK_THREAD_INFO
82582+#endif
82583+
82584 #ifdef CONFIG_RT_MUTEXES
82585 # define INIT_RT_MUTEXES(tsk) \
82586 .pi_waiters = RB_ROOT, \
82587@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
82588 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82589 .comm = INIT_TASK_COMM, \
82590 .thread = INIT_THREAD, \
82591+ INIT_TASK_THREAD_INFO \
82592 .fs = &init_fs, \
82593 .files = &init_files, \
82594 .signal = &init_signals, \
82595diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82596index d9b05b5..e5f5b7b 100644
82597--- a/include/linux/interrupt.h
82598+++ b/include/linux/interrupt.h
82599@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82600
82601 struct softirq_action
82602 {
82603- void (*action)(struct softirq_action *);
82604-};
82605+ void (*action)(void);
82606+} __no_const;
82607
82608 asmlinkage void do_softirq(void);
82609 asmlinkage void __do_softirq(void);
82610@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
82611 }
82612 #endif
82613
82614-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82615+extern void open_softirq(int nr, void (*action)(void));
82616 extern void softirq_init(void);
82617 extern void __raise_softirq_irqoff(unsigned int nr);
82618
82619diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82620index 38daa45..4de4317 100644
82621--- a/include/linux/iommu.h
82622+++ b/include/linux/iommu.h
82623@@ -147,7 +147,7 @@ struct iommu_ops {
82624
82625 unsigned long pgsize_bitmap;
82626 void *priv;
82627-};
82628+} __do_const;
82629
82630 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82631 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82632diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82633index 2c525022..345b106 100644
82634--- a/include/linux/ioport.h
82635+++ b/include/linux/ioport.h
82636@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82637 int adjust_resource(struct resource *res, resource_size_t start,
82638 resource_size_t size);
82639 resource_size_t resource_alignment(struct resource *res);
82640-static inline resource_size_t resource_size(const struct resource *res)
82641+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82642 {
82643 return res->end - res->start + 1;
82644 }
82645diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82646index 1eee6bc..9cf4912 100644
82647--- a/include/linux/ipc_namespace.h
82648+++ b/include/linux/ipc_namespace.h
82649@@ -60,7 +60,7 @@ struct ipc_namespace {
82650 struct user_namespace *user_ns;
82651
82652 struct ns_common ns;
82653-};
82654+} __randomize_layout;
82655
82656 extern struct ipc_namespace init_ipc_ns;
82657 extern atomic_t nr_ipc_ns;
82658diff --git a/include/linux/irq.h b/include/linux/irq.h
82659index d09ec7a..f373eb5 100644
82660--- a/include/linux/irq.h
82661+++ b/include/linux/irq.h
82662@@ -364,7 +364,8 @@ struct irq_chip {
82663 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
82664
82665 unsigned long flags;
82666-};
82667+} __do_const;
82668+typedef struct irq_chip __no_const irq_chip_no_const;
82669
82670 /*
82671 * irq_chip specific flags
82672diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82673index 71d706d..817cdec 100644
82674--- a/include/linux/irqchip/arm-gic.h
82675+++ b/include/linux/irqchip/arm-gic.h
82676@@ -95,7 +95,7 @@
82677
82678 struct device_node;
82679
82680-extern struct irq_chip gic_arch_extn;
82681+extern irq_chip_no_const gic_arch_extn;
82682
82683 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82684 u32 offset, struct device_node *);
82685diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
82686index faf433a..7dcb186 100644
82687--- a/include/linux/irqdesc.h
82688+++ b/include/linux/irqdesc.h
82689@@ -61,7 +61,7 @@ struct irq_desc {
82690 unsigned int irq_count; /* For detecting broken IRQs */
82691 unsigned long last_unhandled; /* Aging timer for unhandled count */
82692 unsigned int irqs_unhandled;
82693- atomic_t threads_handled;
82694+ atomic_unchecked_t threads_handled;
82695 int threads_handled_last;
82696 raw_spinlock_t lock;
82697 struct cpumask *percpu_enabled;
82698diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82699index c367cbd..c9b79e6 100644
82700--- a/include/linux/jiffies.h
82701+++ b/include/linux/jiffies.h
82702@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82703 /*
82704 * Convert various time units to each other:
82705 */
82706-extern unsigned int jiffies_to_msecs(const unsigned long j);
82707-extern unsigned int jiffies_to_usecs(const unsigned long j);
82708+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82709+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82710
82711-static inline u64 jiffies_to_nsecs(const unsigned long j)
82712+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82713 {
82714 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82715 }
82716
82717-extern unsigned long msecs_to_jiffies(const unsigned int m);
82718-extern unsigned long usecs_to_jiffies(const unsigned int u);
82719+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82720+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82721 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82722 extern void jiffies_to_timespec(const unsigned long jiffies,
82723- struct timespec *value);
82724-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82725+ struct timespec *value) __intentional_overflow(-1);
82726+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82727 extern void jiffies_to_timeval(const unsigned long jiffies,
82728 struct timeval *value);
82729
82730diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82731index 6883e19..e854fcb 100644
82732--- a/include/linux/kallsyms.h
82733+++ b/include/linux/kallsyms.h
82734@@ -15,7 +15,8 @@
82735
82736 struct module;
82737
82738-#ifdef CONFIG_KALLSYMS
82739+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82740+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82741 /* Lookup the address for a symbol. Returns 0 if not found. */
82742 unsigned long kallsyms_lookup_name(const char *name);
82743
82744@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82745 /* Stupid that this does nothing, but I didn't create this mess. */
82746 #define __print_symbol(fmt, addr)
82747 #endif /*CONFIG_KALLSYMS*/
82748+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82749+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82750+extern unsigned long kallsyms_lookup_name(const char *name);
82751+extern void __print_symbol(const char *fmt, unsigned long address);
82752+extern int sprint_backtrace(char *buffer, unsigned long address);
82753+extern int sprint_symbol(char *buffer, unsigned long address);
82754+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82755+const char *kallsyms_lookup(unsigned long addr,
82756+ unsigned long *symbolsize,
82757+ unsigned long *offset,
82758+ char **modname, char *namebuf);
82759+extern int kallsyms_lookup_size_offset(unsigned long addr,
82760+ unsigned long *symbolsize,
82761+ unsigned long *offset);
82762+#endif
82763
82764 /* This macro allows us to keep printk typechecking */
82765 static __printf(1, 2)
82766diff --git a/include/linux/kernel.h b/include/linux/kernel.h
82767index 64ce58b..6bcdbfa 100644
82768--- a/include/linux/kernel.h
82769+++ b/include/linux/kernel.h
82770@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
82771 /* Obsolete, do not use. Use kstrto<foo> instead */
82772
82773 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
82774-extern long simple_strtol(const char *,char **,unsigned int);
82775+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
82776 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
82777 extern long long simple_strtoll(const char *,char **,unsigned int);
82778
82779diff --git a/include/linux/key-type.h b/include/linux/key-type.h
82780index ff9f1d3..6712be5 100644
82781--- a/include/linux/key-type.h
82782+++ b/include/linux/key-type.h
82783@@ -152,7 +152,7 @@ struct key_type {
82784 /* internal fields */
82785 struct list_head link; /* link in types list */
82786 struct lock_class_key lock_class; /* key->sem lock class */
82787-};
82788+} __do_const;
82789
82790 extern struct key_type key_type_keyring;
82791
82792diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
82793index e465bb1..19f605fd 100644
82794--- a/include/linux/kgdb.h
82795+++ b/include/linux/kgdb.h
82796@@ -52,7 +52,7 @@ extern int kgdb_connected;
82797 extern int kgdb_io_module_registered;
82798
82799 extern atomic_t kgdb_setting_breakpoint;
82800-extern atomic_t kgdb_cpu_doing_single_step;
82801+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
82802
82803 extern struct task_struct *kgdb_usethread;
82804 extern struct task_struct *kgdb_contthread;
82805@@ -254,7 +254,7 @@ struct kgdb_arch {
82806 void (*correct_hw_break)(void);
82807
82808 void (*enable_nmi)(bool on);
82809-};
82810+} __do_const;
82811
82812 /**
82813 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
82814@@ -279,7 +279,7 @@ struct kgdb_io {
82815 void (*pre_exception) (void);
82816 void (*post_exception) (void);
82817 int is_console;
82818-};
82819+} __do_const;
82820
82821 extern struct kgdb_arch arch_kgdb_ops;
82822
82823diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
82824index e705467..a92471d 100644
82825--- a/include/linux/kmemleak.h
82826+++ b/include/linux/kmemleak.h
82827@@ -27,7 +27,7 @@
82828
82829 extern void kmemleak_init(void) __ref;
82830 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82831- gfp_t gfp) __ref;
82832+ gfp_t gfp) __ref __size_overflow(2);
82833 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
82834 extern void kmemleak_free(const void *ptr) __ref;
82835 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
82836@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
82837 static inline void kmemleak_init(void)
82838 {
82839 }
82840-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82841+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
82842 gfp_t gfp)
82843 {
82844 }
82845diff --git a/include/linux/kmod.h b/include/linux/kmod.h
82846index 0555cc6..40116ce 100644
82847--- a/include/linux/kmod.h
82848+++ b/include/linux/kmod.h
82849@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
82850 * usually useless though. */
82851 extern __printf(2, 3)
82852 int __request_module(bool wait, const char *name, ...);
82853+extern __printf(3, 4)
82854+int ___request_module(bool wait, char *param_name, const char *name, ...);
82855 #define request_module(mod...) __request_module(true, mod)
82856 #define request_module_nowait(mod...) __request_module(false, mod)
82857 #define try_then_request_module(x, mod...) \
82858@@ -57,6 +59,9 @@ struct subprocess_info {
82859 struct work_struct work;
82860 struct completion *complete;
82861 char *path;
82862+#ifdef CONFIG_GRKERNSEC
82863+ char *origpath;
82864+#endif
82865 char **argv;
82866 char **envp;
82867 int wait;
82868diff --git a/include/linux/kobject.h b/include/linux/kobject.h
82869index 2d61b90..a1d0a13 100644
82870--- a/include/linux/kobject.h
82871+++ b/include/linux/kobject.h
82872@@ -118,7 +118,7 @@ struct kobj_type {
82873 struct attribute **default_attrs;
82874 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
82875 const void *(*namespace)(struct kobject *kobj);
82876-};
82877+} __do_const;
82878
82879 struct kobj_uevent_env {
82880 char *argv[3];
82881@@ -142,6 +142,7 @@ struct kobj_attribute {
82882 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
82883 const char *buf, size_t count);
82884 };
82885+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
82886
82887 extern const struct sysfs_ops kobj_sysfs_ops;
82888
82889@@ -169,7 +170,7 @@ struct kset {
82890 spinlock_t list_lock;
82891 struct kobject kobj;
82892 const struct kset_uevent_ops *uevent_ops;
82893-};
82894+} __randomize_layout;
82895
82896 extern void kset_init(struct kset *kset);
82897 extern int __must_check kset_register(struct kset *kset);
82898diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
82899index df32d25..fb52e27 100644
82900--- a/include/linux/kobject_ns.h
82901+++ b/include/linux/kobject_ns.h
82902@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
82903 const void *(*netlink_ns)(struct sock *sk);
82904 const void *(*initial_ns)(void);
82905 void (*drop_ns)(void *);
82906-};
82907+} __do_const;
82908
82909 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
82910 int kobj_ns_type_registered(enum kobj_ns_type type);
82911diff --git a/include/linux/kref.h b/include/linux/kref.h
82912index 484604d..0f6c5b6 100644
82913--- a/include/linux/kref.h
82914+++ b/include/linux/kref.h
82915@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
82916 static inline int kref_sub(struct kref *kref, unsigned int count,
82917 void (*release)(struct kref *kref))
82918 {
82919- WARN_ON(release == NULL);
82920+ BUG_ON(release == NULL);
82921
82922 if (atomic_sub_and_test((int) count, &kref->refcount)) {
82923 release(kref);
82924diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
82925index 26f1060..bafc04a 100644
82926--- a/include/linux/kvm_host.h
82927+++ b/include/linux/kvm_host.h
82928@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
82929 {
82930 }
82931 #endif
82932-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
82933+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
82934 struct module *module);
82935 void kvm_exit(void);
82936
82937@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
82938 struct kvm_guest_debug *dbg);
82939 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
82940
82941-int kvm_arch_init(void *opaque);
82942+int kvm_arch_init(const void *opaque);
82943 void kvm_arch_exit(void);
82944
82945 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
82946diff --git a/include/linux/libata.h b/include/linux/libata.h
82947index 91f705d..24be831 100644
82948--- a/include/linux/libata.h
82949+++ b/include/linux/libata.h
82950@@ -979,7 +979,7 @@ struct ata_port_operations {
82951 * fields must be pointers.
82952 */
82953 const struct ata_port_operations *inherits;
82954-};
82955+} __do_const;
82956
82957 struct ata_port_info {
82958 unsigned long flags;
82959diff --git a/include/linux/linkage.h b/include/linux/linkage.h
82960index a6a42dd..6c5ebce 100644
82961--- a/include/linux/linkage.h
82962+++ b/include/linux/linkage.h
82963@@ -36,6 +36,7 @@
82964 #endif
82965
82966 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
82967+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
82968 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
82969
82970 /*
82971diff --git a/include/linux/list.h b/include/linux/list.h
82972index feb773c..98f3075 100644
82973--- a/include/linux/list.h
82974+++ b/include/linux/list.h
82975@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
82976 extern void list_del(struct list_head *entry);
82977 #endif
82978
82979+extern void __pax_list_add(struct list_head *new,
82980+ struct list_head *prev,
82981+ struct list_head *next);
82982+static inline void pax_list_add(struct list_head *new, struct list_head *head)
82983+{
82984+ __pax_list_add(new, head, head->next);
82985+}
82986+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
82987+{
82988+ __pax_list_add(new, head->prev, head);
82989+}
82990+extern void pax_list_del(struct list_head *entry);
82991+
82992 /**
82993 * list_replace - replace old entry by new one
82994 * @old : the element to be replaced
82995@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
82996 INIT_LIST_HEAD(entry);
82997 }
82998
82999+extern void pax_list_del_init(struct list_head *entry);
83000+
83001 /**
83002 * list_move - delete from one list and add as another's head
83003 * @list: the entry to move
83004diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83005index 4bfde0e..d6e2e09 100644
83006--- a/include/linux/lockref.h
83007+++ b/include/linux/lockref.h
83008@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83009 return ((int)l->count < 0);
83010 }
83011
83012+static inline unsigned int __lockref_read(struct lockref *lockref)
83013+{
83014+ return lockref->count;
83015+}
83016+
83017+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83018+{
83019+ lockref->count = count;
83020+}
83021+
83022+static inline void __lockref_inc(struct lockref *lockref)
83023+{
83024+
83025+#ifdef CONFIG_PAX_REFCOUNT
83026+ atomic_inc((atomic_t *)&lockref->count);
83027+#else
83028+ lockref->count++;
83029+#endif
83030+
83031+}
83032+
83033+static inline void __lockref_dec(struct lockref *lockref)
83034+{
83035+
83036+#ifdef CONFIG_PAX_REFCOUNT
83037+ atomic_dec((atomic_t *)&lockref->count);
83038+#else
83039+ lockref->count--;
83040+#endif
83041+
83042+}
83043+
83044 #endif /* __LINUX_LOCKREF_H */
83045diff --git a/include/linux/math64.h b/include/linux/math64.h
83046index c45c089..298841c 100644
83047--- a/include/linux/math64.h
83048+++ b/include/linux/math64.h
83049@@ -15,7 +15,7 @@
83050 * This is commonly provided by 32bit archs to provide an optimized 64bit
83051 * divide.
83052 */
83053-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83054+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83055 {
83056 *remainder = dividend % divisor;
83057 return dividend / divisor;
83058@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83059 /**
83060 * div64_u64 - unsigned 64bit divide with 64bit divisor
83061 */
83062-static inline u64 div64_u64(u64 dividend, u64 divisor)
83063+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83064 {
83065 return dividend / divisor;
83066 }
83067@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83068 #define div64_ul(x, y) div_u64((x), (y))
83069
83070 #ifndef div_u64_rem
83071-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83072+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83073 {
83074 *remainder = do_div(dividend, divisor);
83075 return dividend;
83076@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83077 #endif
83078
83079 #ifndef div64_u64
83080-extern u64 div64_u64(u64 dividend, u64 divisor);
83081+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83082 #endif
83083
83084 #ifndef div64_s64
83085@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83086 * divide.
83087 */
83088 #ifndef div_u64
83089-static inline u64 div_u64(u64 dividend, u32 divisor)
83090+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83091 {
83092 u32 remainder;
83093 return div_u64_rem(dividend, divisor, &remainder);
83094diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83095index 3d385c8..deacb6a 100644
83096--- a/include/linux/mempolicy.h
83097+++ b/include/linux/mempolicy.h
83098@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83099 }
83100
83101 #define vma_policy(vma) ((vma)->vm_policy)
83102+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83103+{
83104+ vma->vm_policy = pol;
83105+}
83106
83107 static inline void mpol_get(struct mempolicy *pol)
83108 {
83109@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83110 }
83111
83112 #define vma_policy(vma) NULL
83113+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83114+{
83115+}
83116
83117 static inline int
83118 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
83119diff --git a/include/linux/mm.h b/include/linux/mm.h
83120index dd5ea30..cf81cd1 100644
83121--- a/include/linux/mm.h
83122+++ b/include/linux/mm.h
83123@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83124
83125 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83126 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83127+
83128+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83129+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83130+#endif
83131+
83132 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83133 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83134 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83135@@ -256,8 +261,8 @@ struct vm_operations_struct {
83136 /* called by access_process_vm when get_user_pages() fails, typically
83137 * for use by special VMAs that can switch between memory and hardware
83138 */
83139- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83140- void *buf, int len, int write);
83141+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83142+ void *buf, size_t len, int write);
83143
83144 /* Called by the /proc/PID/maps code to ask the vma whether it
83145 * has a special name. Returning non-NULL will also cause this
83146@@ -291,6 +296,7 @@ struct vm_operations_struct {
83147 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83148 unsigned long size, pgoff_t pgoff);
83149 };
83150+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83151
83152 struct mmu_gather;
83153 struct inode;
83154@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83155 unsigned long *pfn);
83156 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83157 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83158-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83159- void *buf, int len, int write);
83160+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83161+ void *buf, size_t len, int write);
83162
83163 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83164 loff_t const holebegin, loff_t const holelen)
83165@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83166 }
83167 #endif
83168
83169-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83170-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83171- void *buf, int len, int write);
83172+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83173+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83174+ void *buf, size_t len, int write);
83175
83176 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83177 unsigned long start, unsigned long nr_pages,
83178@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
83179 int clear_page_dirty_for_io(struct page *page);
83180 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83181
83182-/* Is the vma a continuation of the stack vma above it? */
83183-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83184-{
83185- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83186-}
83187-
83188-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83189- unsigned long addr)
83190-{
83191- return (vma->vm_flags & VM_GROWSDOWN) &&
83192- (vma->vm_start == addr) &&
83193- !vma_growsdown(vma->vm_prev, addr);
83194-}
83195-
83196-/* Is the vma a continuation of the stack vma below it? */
83197-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83198-{
83199- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83200-}
83201-
83202-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83203- unsigned long addr)
83204-{
83205- return (vma->vm_flags & VM_GROWSUP) &&
83206- (vma->vm_end == addr) &&
83207- !vma_growsup(vma->vm_next, addr);
83208-}
83209-
83210 extern struct task_struct *task_of_stack(struct task_struct *task,
83211 struct vm_area_struct *vma, bool in_group);
83212
83213@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83214 {
83215 return 0;
83216 }
83217+
83218+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83219+ unsigned long address)
83220+{
83221+ return 0;
83222+}
83223 #else
83224 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83225+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83226 #endif
83227
83228 #ifdef __PAGETABLE_PMD_FOLDED
83229@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83230 {
83231 return 0;
83232 }
83233+
83234+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83235+ unsigned long address)
83236+{
83237+ return 0;
83238+}
83239 #else
83240 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83241+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83242 #endif
83243
83244 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83245@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83246 NULL: pud_offset(pgd, address);
83247 }
83248
83249+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83250+{
83251+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83252+ NULL: pud_offset(pgd, address);
83253+}
83254+
83255 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83256 {
83257 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83258 NULL: pmd_offset(pud, address);
83259 }
83260+
83261+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83262+{
83263+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83264+ NULL: pmd_offset(pud, address);
83265+}
83266 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83267
83268 #if USE_SPLIT_PTE_PTLOCKS
83269@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83270 bool *need_rmap_locks);
83271 extern void exit_mmap(struct mm_struct *);
83272
83273+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83274+extern void gr_learn_resource(const struct task_struct *task, const int res,
83275+ const unsigned long wanted, const int gt);
83276+#else
83277+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83278+ const unsigned long wanted, const int gt)
83279+{
83280+}
83281+#endif
83282+
83283 static inline int check_data_rlimit(unsigned long rlim,
83284 unsigned long new,
83285 unsigned long start,
83286 unsigned long end_data,
83287 unsigned long start_data)
83288 {
83289+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83290 if (rlim < RLIM_INFINITY) {
83291 if (((new - start) + (end_data - start_data)) > rlim)
83292 return -ENOSPC;
83293@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83294 unsigned long addr, unsigned long len,
83295 unsigned long flags, struct page **pages);
83296
83297-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83298+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83299
83300 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83301 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83302@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83303 unsigned long len, unsigned long prot, unsigned long flags,
83304 unsigned long pgoff, unsigned long *populate);
83305 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83306+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83307
83308 #ifdef CONFIG_MMU
83309 extern int __mm_populate(unsigned long addr, unsigned long len,
83310@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
83311 unsigned long high_limit;
83312 unsigned long align_mask;
83313 unsigned long align_offset;
83314+ unsigned long threadstack_offset;
83315 };
83316
83317-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83318-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83319+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83320+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83321
83322 /*
83323 * Search for an unmapped address range.
83324@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83325 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83326 */
83327 static inline unsigned long
83328-vm_unmapped_area(struct vm_unmapped_area_info *info)
83329+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83330 {
83331 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83332 return unmapped_area(info);
83333@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83334 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83335 struct vm_area_struct **pprev);
83336
83337+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83338+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83339+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83340+
83341 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83342 NULL if none. Assume start_addr < end_addr. */
83343 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83344@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83345 }
83346
83347 #ifdef CONFIG_MMU
83348-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83349+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83350 void vma_set_page_prot(struct vm_area_struct *vma);
83351 #else
83352-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83353+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83354 {
83355 return __pgprot(0);
83356 }
83357@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83358 static inline void vm_stat_account(struct mm_struct *mm,
83359 unsigned long flags, struct file *file, long pages)
83360 {
83361+
83362+#ifdef CONFIG_PAX_RANDMMAP
83363+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83364+#endif
83365+
83366 mm->total_vm += pages;
83367 }
83368 #endif /* CONFIG_PROC_FS */
83369@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
83370 extern int sysctl_memory_failure_early_kill;
83371 extern int sysctl_memory_failure_recovery;
83372 extern void shake_page(struct page *p, int access);
83373-extern atomic_long_t num_poisoned_pages;
83374+extern atomic_long_unchecked_t num_poisoned_pages;
83375 extern int soft_offline_page(struct page *page, int flags);
83376
83377 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83378@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
83379 static inline void setup_nr_node_ids(void) {}
83380 #endif
83381
83382+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83383+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83384+#else
83385+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83386+#endif
83387+
83388 #endif /* __KERNEL__ */
83389 #endif /* _LINUX_MM_H */
83390diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83391index 6d34aa2..d73d848 100644
83392--- a/include/linux/mm_types.h
83393+++ b/include/linux/mm_types.h
83394@@ -309,7 +309,9 @@ struct vm_area_struct {
83395 #ifdef CONFIG_NUMA
83396 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83397 #endif
83398-};
83399+
83400+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83401+} __randomize_layout;
83402
83403 struct core_thread {
83404 struct task_struct *task;
83405@@ -459,7 +461,25 @@ struct mm_struct {
83406 /* address of the bounds directory */
83407 void __user *bd_addr;
83408 #endif
83409-};
83410+
83411+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83412+ unsigned long pax_flags;
83413+#endif
83414+
83415+#ifdef CONFIG_PAX_DLRESOLVE
83416+ unsigned long call_dl_resolve;
83417+#endif
83418+
83419+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83420+ unsigned long call_syscall;
83421+#endif
83422+
83423+#ifdef CONFIG_PAX_ASLR
83424+ unsigned long delta_mmap; /* randomized offset */
83425+ unsigned long delta_stack; /* randomized offset */
83426+#endif
83427+
83428+} __randomize_layout;
83429
83430 static inline void mm_init_cpumask(struct mm_struct *mm)
83431 {
83432diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83433index c5d5278..f0b68c8 100644
83434--- a/include/linux/mmiotrace.h
83435+++ b/include/linux/mmiotrace.h
83436@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83437 /* Called from ioremap.c */
83438 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83439 void __iomem *addr);
83440-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83441+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83442
83443 /* For anyone to insert markers. Remember trailing newline. */
83444 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83445@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83446 {
83447 }
83448
83449-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83450+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83451 {
83452 }
83453
83454diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83455index 2f0856d..5a4bc1e 100644
83456--- a/include/linux/mmzone.h
83457+++ b/include/linux/mmzone.h
83458@@ -527,7 +527,7 @@ struct zone {
83459
83460 ZONE_PADDING(_pad3_)
83461 /* Zone statistics */
83462- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83463+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83464 } ____cacheline_internodealigned_in_smp;
83465
83466 enum zone_flags {
83467diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83468index 745def8..08a820b 100644
83469--- a/include/linux/mod_devicetable.h
83470+++ b/include/linux/mod_devicetable.h
83471@@ -139,7 +139,7 @@ struct usb_device_id {
83472 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83473 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83474
83475-#define HID_ANY_ID (~0)
83476+#define HID_ANY_ID (~0U)
83477 #define HID_BUS_ANY 0xffff
83478 #define HID_GROUP_ANY 0x0000
83479
83480@@ -475,7 +475,7 @@ struct dmi_system_id {
83481 const char *ident;
83482 struct dmi_strmatch matches[4];
83483 void *driver_data;
83484-};
83485+} __do_const;
83486 /*
83487 * struct dmi_device_id appears during expansion of
83488 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83489diff --git a/include/linux/module.h b/include/linux/module.h
83490index b653d7c..22a238f 100644
83491--- a/include/linux/module.h
83492+++ b/include/linux/module.h
83493@@ -17,9 +17,11 @@
83494 #include <linux/moduleparam.h>
83495 #include <linux/jump_label.h>
83496 #include <linux/export.h>
83497+#include <linux/fs.h>
83498
83499 #include <linux/percpu.h>
83500 #include <asm/module.h>
83501+#include <asm/pgtable.h>
83502
83503 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83504 #define MODULE_SIG_STRING "~Module signature appended~\n"
83505@@ -42,7 +44,7 @@ struct module_kobject {
83506 struct kobject *drivers_dir;
83507 struct module_param_attrs *mp;
83508 struct completion *kobj_completion;
83509-};
83510+} __randomize_layout;
83511
83512 struct module_attribute {
83513 struct attribute attr;
83514@@ -54,12 +56,13 @@ struct module_attribute {
83515 int (*test)(struct module *);
83516 void (*free)(struct module *);
83517 };
83518+typedef struct module_attribute __no_const module_attribute_no_const;
83519
83520 struct module_version_attribute {
83521 struct module_attribute mattr;
83522 const char *module_name;
83523 const char *version;
83524-} __attribute__ ((__aligned__(sizeof(void *))));
83525+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83526
83527 extern ssize_t __modver_version_show(struct module_attribute *,
83528 struct module_kobject *, char *);
83529@@ -221,7 +224,7 @@ struct module {
83530
83531 /* Sysfs stuff. */
83532 struct module_kobject mkobj;
83533- struct module_attribute *modinfo_attrs;
83534+ module_attribute_no_const *modinfo_attrs;
83535 const char *version;
83536 const char *srcversion;
83537 struct kobject *holders_dir;
83538@@ -270,19 +273,16 @@ struct module {
83539 int (*init)(void);
83540
83541 /* If this is non-NULL, vfree after init() returns */
83542- void *module_init;
83543+ void *module_init_rx, *module_init_rw;
83544
83545 /* Here is the actual code + data, vfree'd on unload. */
83546- void *module_core;
83547+ void *module_core_rx, *module_core_rw;
83548
83549 /* Here are the sizes of the init and core sections */
83550- unsigned int init_size, core_size;
83551+ unsigned int init_size_rw, core_size_rw;
83552
83553 /* The size of the executable code in each section. */
83554- unsigned int init_text_size, core_text_size;
83555-
83556- /* Size of RO sections of the module (text+rodata) */
83557- unsigned int init_ro_size, core_ro_size;
83558+ unsigned int init_size_rx, core_size_rx;
83559
83560 /* Arch-specific module values */
83561 struct mod_arch_specific arch;
83562@@ -338,6 +338,10 @@ struct module {
83563 #ifdef CONFIG_EVENT_TRACING
83564 struct ftrace_event_call **trace_events;
83565 unsigned int num_trace_events;
83566+ struct file_operations trace_id;
83567+ struct file_operations trace_enable;
83568+ struct file_operations trace_format;
83569+ struct file_operations trace_filter;
83570 #endif
83571 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83572 unsigned int num_ftrace_callsites;
83573@@ -361,7 +365,7 @@ struct module {
83574 ctor_fn_t *ctors;
83575 unsigned int num_ctors;
83576 #endif
83577-};
83578+} __randomize_layout;
83579 #ifndef MODULE_ARCH_INIT
83580 #define MODULE_ARCH_INIT {}
83581 #endif
83582@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
83583 bool is_module_percpu_address(unsigned long addr);
83584 bool is_module_text_address(unsigned long addr);
83585
83586+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83587+{
83588+
83589+#ifdef CONFIG_PAX_KERNEXEC
83590+ if (ktla_ktva(addr) >= (unsigned long)start &&
83591+ ktla_ktva(addr) < (unsigned long)start + size)
83592+ return 1;
83593+#endif
83594+
83595+ return ((void *)addr >= start && (void *)addr < start + size);
83596+}
83597+
83598+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83599+{
83600+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83601+}
83602+
83603+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83604+{
83605+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83606+}
83607+
83608+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83609+{
83610+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83611+}
83612+
83613+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83614+{
83615+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83616+}
83617+
83618 static inline bool within_module_core(unsigned long addr,
83619 const struct module *mod)
83620 {
83621- return (unsigned long)mod->module_core <= addr &&
83622- addr < (unsigned long)mod->module_core + mod->core_size;
83623+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83624 }
83625
83626 static inline bool within_module_init(unsigned long addr,
83627 const struct module *mod)
83628 {
83629- return (unsigned long)mod->module_init <= addr &&
83630- addr < (unsigned long)mod->module_init + mod->init_size;
83631+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83632 }
83633
83634 static inline bool within_module(unsigned long addr, const struct module *mod)
83635diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83636index f755626..641f822 100644
83637--- a/include/linux/moduleloader.h
83638+++ b/include/linux/moduleloader.h
83639@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83640 sections. Returns NULL on failure. */
83641 void *module_alloc(unsigned long size);
83642
83643+#ifdef CONFIG_PAX_KERNEXEC
83644+void *module_alloc_exec(unsigned long size);
83645+#else
83646+#define module_alloc_exec(x) module_alloc(x)
83647+#endif
83648+
83649 /* Free memory returned from module_alloc. */
83650 void module_memfree(void *module_region);
83651
83652+#ifdef CONFIG_PAX_KERNEXEC
83653+void module_memfree_exec(void *module_region);
83654+#else
83655+#define module_memfree_exec(x) module_memfree((x))
83656+#endif
83657+
83658 /*
83659 * Apply the given relocation to the (simplified) ELF. Return -error
83660 * or 0.
83661@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83662 unsigned int relsec,
83663 struct module *me)
83664 {
83665+#ifdef CONFIG_MODULES
83666 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83667 module_name(me));
83668+#endif
83669 return -ENOEXEC;
83670 }
83671 #endif
83672@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83673 unsigned int relsec,
83674 struct module *me)
83675 {
83676+#ifdef CONFIG_MODULES
83677 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83678 module_name(me));
83679+#endif
83680 return -ENOEXEC;
83681 }
83682 #endif
83683diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83684index 1c9effa..1160bdd 100644
83685--- a/include/linux/moduleparam.h
83686+++ b/include/linux/moduleparam.h
83687@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
83688 * @len is usually just sizeof(string).
83689 */
83690 #define module_param_string(name, string, len, perm) \
83691- static const struct kparam_string __param_string_##name \
83692+ static const struct kparam_string __param_string_##name __used \
83693 = { len, string }; \
83694 __module_param_call(MODULE_PARAM_PREFIX, name, \
83695 &param_ops_string, \
83696@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83697 */
83698 #define module_param_array_named(name, array, type, nump, perm) \
83699 param_check_##type(name, &(array)[0]); \
83700- static const struct kparam_array __param_arr_##name \
83701+ static const struct kparam_array __param_arr_##name __used \
83702 = { .max = ARRAY_SIZE(array), .num = nump, \
83703 .ops = &param_ops_##type, \
83704 .elemsize = sizeof(array[0]), .elem = array }; \
83705diff --git a/include/linux/mount.h b/include/linux/mount.h
83706index c2c561d..a5f2a8c 100644
83707--- a/include/linux/mount.h
83708+++ b/include/linux/mount.h
83709@@ -66,7 +66,7 @@ struct vfsmount {
83710 struct dentry *mnt_root; /* root of the mounted tree */
83711 struct super_block *mnt_sb; /* pointer to superblock */
83712 int mnt_flags;
83713-};
83714+} __randomize_layout;
83715
83716 struct file; /* forward dec */
83717 struct path;
83718diff --git a/include/linux/namei.h b/include/linux/namei.h
83719index c899077..b9a2010 100644
83720--- a/include/linux/namei.h
83721+++ b/include/linux/namei.h
83722@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
83723 extern void unlock_rename(struct dentry *, struct dentry *);
83724
83725 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83726-extern void nd_set_link(struct nameidata *nd, char *path);
83727-extern char *nd_get_link(struct nameidata *nd);
83728+extern void nd_set_link(struct nameidata *nd, const char *path);
83729+extern const char *nd_get_link(const struct nameidata *nd);
83730
83731 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
83732 {
83733diff --git a/include/linux/net.h b/include/linux/net.h
83734index 17d8339..81656c0 100644
83735--- a/include/linux/net.h
83736+++ b/include/linux/net.h
83737@@ -192,7 +192,7 @@ struct net_proto_family {
83738 int (*create)(struct net *net, struct socket *sock,
83739 int protocol, int kern);
83740 struct module *owner;
83741-};
83742+} __do_const;
83743
83744 struct iovec;
83745 struct kvec;
83746diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83747index 52fd8e8..19430a1 100644
83748--- a/include/linux/netdevice.h
83749+++ b/include/linux/netdevice.h
83750@@ -1191,6 +1191,7 @@ struct net_device_ops {
83751 u8 state);
83752 #endif
83753 };
83754+typedef struct net_device_ops __no_const net_device_ops_no_const;
83755
83756 /**
83757 * enum net_device_priv_flags - &struct net_device priv_flags
83758@@ -1537,10 +1538,10 @@ struct net_device {
83759
83760 struct net_device_stats stats;
83761
83762- atomic_long_t rx_dropped;
83763- atomic_long_t tx_dropped;
83764+ atomic_long_unchecked_t rx_dropped;
83765+ atomic_long_unchecked_t tx_dropped;
83766
83767- atomic_t carrier_changes;
83768+ atomic_unchecked_t carrier_changes;
83769
83770 #ifdef CONFIG_WIRELESS_EXT
83771 const struct iw_handler_def * wireless_handlers;
83772diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83773index 2517ece..0bbfcfb 100644
83774--- a/include/linux/netfilter.h
83775+++ b/include/linux/netfilter.h
83776@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83777 #endif
83778 /* Use the module struct to lock set/get code in place */
83779 struct module *owner;
83780-};
83781+} __do_const;
83782
83783 /* Function to register/unregister hook points. */
83784 int nf_register_hook(struct nf_hook_ops *reg);
83785diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
83786index e955d47..04a5338 100644
83787--- a/include/linux/netfilter/nfnetlink.h
83788+++ b/include/linux/netfilter/nfnetlink.h
83789@@ -19,7 +19,7 @@ struct nfnl_callback {
83790 const struct nlattr * const cda[]);
83791 const struct nla_policy *policy; /* netlink attribute policy */
83792 const u_int16_t attr_count; /* number of nlattr's */
83793-};
83794+} __do_const;
83795
83796 struct nfnetlink_subsystem {
83797 const char *name;
83798diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
83799new file mode 100644
83800index 0000000..33f4af8
83801--- /dev/null
83802+++ b/include/linux/netfilter/xt_gradm.h
83803@@ -0,0 +1,9 @@
83804+#ifndef _LINUX_NETFILTER_XT_GRADM_H
83805+#define _LINUX_NETFILTER_XT_GRADM_H 1
83806+
83807+struct xt_gradm_mtinfo {
83808+ __u16 flags;
83809+ __u16 invflags;
83810+};
83811+
83812+#endif
83813diff --git a/include/linux/nls.h b/include/linux/nls.h
83814index 520681b..2b7fabb 100644
83815--- a/include/linux/nls.h
83816+++ b/include/linux/nls.h
83817@@ -31,7 +31,7 @@ struct nls_table {
83818 const unsigned char *charset2upper;
83819 struct module *owner;
83820 struct nls_table *next;
83821-};
83822+} __do_const;
83823
83824 /* this value hold the maximum octet of charset */
83825 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
83826@@ -46,7 +46,7 @@ enum utf16_endian {
83827 /* nls_base.c */
83828 extern int __register_nls(struct nls_table *, struct module *);
83829 extern int unregister_nls(struct nls_table *);
83830-extern struct nls_table *load_nls(char *);
83831+extern struct nls_table *load_nls(const char *);
83832 extern void unload_nls(struct nls_table *);
83833 extern struct nls_table *load_nls_default(void);
83834 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
83835diff --git a/include/linux/notifier.h b/include/linux/notifier.h
83836index d14a4c3..a078786 100644
83837--- a/include/linux/notifier.h
83838+++ b/include/linux/notifier.h
83839@@ -54,7 +54,8 @@ struct notifier_block {
83840 notifier_fn_t notifier_call;
83841 struct notifier_block __rcu *next;
83842 int priority;
83843-};
83844+} __do_const;
83845+typedef struct notifier_block __no_const notifier_block_no_const;
83846
83847 struct atomic_notifier_head {
83848 spinlock_t lock;
83849diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
83850index b2a0f15..4d7da32 100644
83851--- a/include/linux/oprofile.h
83852+++ b/include/linux/oprofile.h
83853@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
83854 int oprofilefs_create_ro_ulong(struct dentry * root,
83855 char const * name, ulong * val);
83856
83857-/** Create a file for read-only access to an atomic_t. */
83858+/** Create a file for read-only access to an atomic_unchecked_t. */
83859 int oprofilefs_create_ro_atomic(struct dentry * root,
83860- char const * name, atomic_t * val);
83861+ char const * name, atomic_unchecked_t * val);
83862
83863 /** create a directory */
83864 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
83865diff --git a/include/linux/padata.h b/include/linux/padata.h
83866index 4386946..f50c615 100644
83867--- a/include/linux/padata.h
83868+++ b/include/linux/padata.h
83869@@ -129,7 +129,7 @@ struct parallel_data {
83870 struct padata_serial_queue __percpu *squeue;
83871 atomic_t reorder_objects;
83872 atomic_t refcnt;
83873- atomic_t seq_nr;
83874+ atomic_unchecked_t seq_nr;
83875 struct padata_cpumask cpumask;
83876 spinlock_t lock ____cacheline_aligned;
83877 unsigned int processed;
83878diff --git a/include/linux/path.h b/include/linux/path.h
83879index d137218..be0c176 100644
83880--- a/include/linux/path.h
83881+++ b/include/linux/path.h
83882@@ -1,13 +1,15 @@
83883 #ifndef _LINUX_PATH_H
83884 #define _LINUX_PATH_H
83885
83886+#include <linux/compiler.h>
83887+
83888 struct dentry;
83889 struct vfsmount;
83890
83891 struct path {
83892 struct vfsmount *mnt;
83893 struct dentry *dentry;
83894-};
83895+} __randomize_layout;
83896
83897 extern void path_get(const struct path *);
83898 extern void path_put(const struct path *);
83899diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
83900index 8c78950..0d74ed9 100644
83901--- a/include/linux/pci_hotplug.h
83902+++ b/include/linux/pci_hotplug.h
83903@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
83904 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
83905 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
83906 int (*reset_slot) (struct hotplug_slot *slot, int probe);
83907-};
83908+} __do_const;
83909+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
83910
83911 /**
83912 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
83913diff --git a/include/linux/percpu.h b/include/linux/percpu.h
83914index caebf2a..4c3ae9d 100644
83915--- a/include/linux/percpu.h
83916+++ b/include/linux/percpu.h
83917@@ -34,7 +34,7 @@
83918 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
83919 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
83920 */
83921-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
83922+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
83923 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
83924
83925 /*
83926diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
83927index 664de5a..b3e1bf4 100644
83928--- a/include/linux/perf_event.h
83929+++ b/include/linux/perf_event.h
83930@@ -336,8 +336,8 @@ struct perf_event {
83931
83932 enum perf_event_active_state state;
83933 unsigned int attach_state;
83934- local64_t count;
83935- atomic64_t child_count;
83936+ local64_t count; /* PaX: fix it one day */
83937+ atomic64_unchecked_t child_count;
83938
83939 /*
83940 * These are the total time in nanoseconds that the event
83941@@ -388,8 +388,8 @@ struct perf_event {
83942 * These accumulate total time (in nanoseconds) that children
83943 * events have been enabled and running, respectively.
83944 */
83945- atomic64_t child_total_time_enabled;
83946- atomic64_t child_total_time_running;
83947+ atomic64_unchecked_t child_total_time_enabled;
83948+ atomic64_unchecked_t child_total_time_running;
83949
83950 /*
83951 * Protect attach/detach and child_list:
83952@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
83953 entry->ip[entry->nr++] = ip;
83954 }
83955
83956-extern int sysctl_perf_event_paranoid;
83957+extern int sysctl_perf_event_legitimately_concerned;
83958 extern int sysctl_perf_event_mlock;
83959 extern int sysctl_perf_event_sample_rate;
83960 extern int sysctl_perf_cpu_time_max_percent;
83961@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
83962 loff_t *ppos);
83963
83964
83965+static inline bool perf_paranoid_any(void)
83966+{
83967+ return sysctl_perf_event_legitimately_concerned > 2;
83968+}
83969+
83970 static inline bool perf_paranoid_tracepoint_raw(void)
83971 {
83972- return sysctl_perf_event_paranoid > -1;
83973+ return sysctl_perf_event_legitimately_concerned > -1;
83974 }
83975
83976 static inline bool perf_paranoid_cpu(void)
83977 {
83978- return sysctl_perf_event_paranoid > 0;
83979+ return sysctl_perf_event_legitimately_concerned > 0;
83980 }
83981
83982 static inline bool perf_paranoid_kernel(void)
83983 {
83984- return sysctl_perf_event_paranoid > 1;
83985+ return sysctl_perf_event_legitimately_concerned > 1;
83986 }
83987
83988 extern void perf_event_init(void);
83989@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
83990 struct device_attribute attr;
83991 u64 id;
83992 const char *event_str;
83993-};
83994+} __do_const;
83995
83996 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
83997 static struct perf_pmu_events_attr _var = { \
83998diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
83999index b9cf6c5..5462472 100644
84000--- a/include/linux/pid_namespace.h
84001+++ b/include/linux/pid_namespace.h
84002@@ -45,7 +45,7 @@ struct pid_namespace {
84003 int hide_pid;
84004 int reboot; /* group exit code if this pidns was rebooted */
84005 struct ns_common ns;
84006-};
84007+} __randomize_layout;
84008
84009 extern struct pid_namespace init_pid_ns;
84010
84011diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84012index eb8b8ac..62649e1 100644
84013--- a/include/linux/pipe_fs_i.h
84014+++ b/include/linux/pipe_fs_i.h
84015@@ -47,10 +47,10 @@ struct pipe_inode_info {
84016 struct mutex mutex;
84017 wait_queue_head_t wait;
84018 unsigned int nrbufs, curbuf, buffers;
84019- unsigned int readers;
84020- unsigned int writers;
84021- unsigned int files;
84022- unsigned int waiting_writers;
84023+ atomic_t readers;
84024+ atomic_t writers;
84025+ atomic_t files;
84026+ atomic_t waiting_writers;
84027 unsigned int r_counter;
84028 unsigned int w_counter;
84029 struct page *tmp_page;
84030diff --git a/include/linux/pm.h b/include/linux/pm.h
84031index 8b59763..8a05939 100644
84032--- a/include/linux/pm.h
84033+++ b/include/linux/pm.h
84034@@ -608,6 +608,7 @@ struct dev_pm_domain {
84035 struct dev_pm_ops ops;
84036 void (*detach)(struct device *dev, bool power_off);
84037 };
84038+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84039
84040 /*
84041 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84042diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84043index a9edab2..8bada56 100644
84044--- a/include/linux/pm_domain.h
84045+++ b/include/linux/pm_domain.h
84046@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84047 int (*save_state)(struct device *dev);
84048 int (*restore_state)(struct device *dev);
84049 bool (*active_wakeup)(struct device *dev);
84050-};
84051+} __no_const;
84052
84053 struct gpd_cpuidle_data {
84054 unsigned int saved_exit_latency;
84055- struct cpuidle_state *idle_state;
84056+ cpuidle_state_no_const *idle_state;
84057 };
84058
84059 struct generic_pm_domain {
84060diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84061index 30e84d4..22278b4 100644
84062--- a/include/linux/pm_runtime.h
84063+++ b/include/linux/pm_runtime.h
84064@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84065
84066 static inline void pm_runtime_mark_last_busy(struct device *dev)
84067 {
84068- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84069+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84070 }
84071
84072 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84073diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84074index 195aafc..49a7bc2 100644
84075--- a/include/linux/pnp.h
84076+++ b/include/linux/pnp.h
84077@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84078 struct pnp_fixup {
84079 char id[7];
84080 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84081-};
84082+} __do_const;
84083
84084 /* config parameters */
84085 #define PNP_CONFIG_NORMAL 0x0001
84086diff --git a/include/linux/poison.h b/include/linux/poison.h
84087index 2110a81..13a11bb 100644
84088--- a/include/linux/poison.h
84089+++ b/include/linux/poison.h
84090@@ -19,8 +19,8 @@
84091 * under normal circumstances, used to verify that nobody uses
84092 * non-initialized list entries.
84093 */
84094-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84095-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84096+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84097+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84098
84099 /********** include/linux/timer.h **********/
84100 /*
84101diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84102index d8b187c3..9a9257a 100644
84103--- a/include/linux/power/smartreflex.h
84104+++ b/include/linux/power/smartreflex.h
84105@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84106 int (*notify)(struct omap_sr *sr, u32 status);
84107 u8 notify_flags;
84108 u8 class_type;
84109-};
84110+} __do_const;
84111
84112 /**
84113 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84114diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84115index 4ea1d37..80f4b33 100644
84116--- a/include/linux/ppp-comp.h
84117+++ b/include/linux/ppp-comp.h
84118@@ -84,7 +84,7 @@ struct compressor {
84119 struct module *owner;
84120 /* Extra skb space needed by the compressor algorithm */
84121 unsigned int comp_extra;
84122-};
84123+} __do_const;
84124
84125 /*
84126 * The return value from decompress routine is the length of the
84127diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84128index de83b4e..c4b997d 100644
84129--- a/include/linux/preempt.h
84130+++ b/include/linux/preempt.h
84131@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84132 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84133 #endif
84134
84135+#define raw_preempt_count_add(val) __preempt_count_add(val)
84136+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84137+
84138 #define __preempt_count_inc() __preempt_count_add(1)
84139 #define __preempt_count_dec() __preempt_count_sub(1)
84140
84141 #define preempt_count_inc() preempt_count_add(1)
84142+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84143 #define preempt_count_dec() preempt_count_sub(1)
84144+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84145
84146 #ifdef CONFIG_PREEMPT_COUNT
84147
84148@@ -41,6 +46,12 @@ do { \
84149 barrier(); \
84150 } while (0)
84151
84152+#define raw_preempt_disable() \
84153+do { \
84154+ raw_preempt_count_inc(); \
84155+ barrier(); \
84156+} while (0)
84157+
84158 #define sched_preempt_enable_no_resched() \
84159 do { \
84160 barrier(); \
84161@@ -49,6 +60,12 @@ do { \
84162
84163 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84164
84165+#define raw_preempt_enable_no_resched() \
84166+do { \
84167+ barrier(); \
84168+ raw_preempt_count_dec(); \
84169+} while (0)
84170+
84171 #ifdef CONFIG_PREEMPT
84172 #define preempt_enable() \
84173 do { \
84174@@ -113,8 +130,10 @@ do { \
84175 * region.
84176 */
84177 #define preempt_disable() barrier()
84178+#define raw_preempt_disable() barrier()
84179 #define sched_preempt_enable_no_resched() barrier()
84180 #define preempt_enable_no_resched() barrier()
84181+#define raw_preempt_enable_no_resched() barrier()
84182 #define preempt_enable() barrier()
84183 #define preempt_check_resched() do { } while (0)
84184
84185@@ -128,11 +147,13 @@ do { \
84186 /*
84187 * Modules have no business playing preemption tricks.
84188 */
84189+#ifndef CONFIG_PAX_KERNEXEC
84190 #undef sched_preempt_enable_no_resched
84191 #undef preempt_enable_no_resched
84192 #undef preempt_enable_no_resched_notrace
84193 #undef preempt_check_resched
84194 #endif
84195+#endif
84196
84197 #define preempt_set_need_resched() \
84198 do { \
84199diff --git a/include/linux/printk.h b/include/linux/printk.h
84200index 4d5bf57..d94eccf 100644
84201--- a/include/linux/printk.h
84202+++ b/include/linux/printk.h
84203@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84204 #endif
84205
84206 typedef int(*printk_func_t)(const char *fmt, va_list args);
84207+extern int kptr_restrict;
84208
84209 #ifdef CONFIG_PRINTK
84210 asmlinkage __printf(5, 0)
84211@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84212
84213 extern int printk_delay_msec;
84214 extern int dmesg_restrict;
84215-extern int kptr_restrict;
84216
84217 extern void wake_up_klogd(void);
84218
84219diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84220index b97bf2e..f14c92d4 100644
84221--- a/include/linux/proc_fs.h
84222+++ b/include/linux/proc_fs.h
84223@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84224 extern struct proc_dir_entry *proc_symlink(const char *,
84225 struct proc_dir_entry *, const char *);
84226 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84227+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84228 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84229 struct proc_dir_entry *, void *);
84230+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84231+ struct proc_dir_entry *, void *);
84232 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84233 struct proc_dir_entry *);
84234
84235@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84236 return proc_create_data(name, mode, parent, proc_fops, NULL);
84237 }
84238
84239+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84240+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84241+{
84242+#ifdef CONFIG_GRKERNSEC_PROC_USER
84243+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84244+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84245+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84246+#else
84247+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84248+#endif
84249+}
84250+
84251+
84252 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84253 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84254 extern void *PDE_DATA(const struct inode *);
84255@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84256 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84257 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84258 struct proc_dir_entry *parent) {return NULL;}
84259+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84260+ struct proc_dir_entry *parent) { return NULL; }
84261 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84262 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84263+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84264+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84265 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84266 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84267 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84268@@ -79,7 +99,7 @@ struct net;
84269 static inline struct proc_dir_entry *proc_net_mkdir(
84270 struct net *net, const char *name, struct proc_dir_entry *parent)
84271 {
84272- return proc_mkdir_data(name, 0, parent, net);
84273+ return proc_mkdir_data_restrict(name, 0, parent, net);
84274 }
84275
84276 #endif /* _LINUX_PROC_FS_H */
84277diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84278index 42dfc61..8113a99 100644
84279--- a/include/linux/proc_ns.h
84280+++ b/include/linux/proc_ns.h
84281@@ -16,7 +16,7 @@ struct proc_ns_operations {
84282 struct ns_common *(*get)(struct task_struct *task);
84283 void (*put)(struct ns_common *ns);
84284 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84285-};
84286+} __do_const __randomize_layout;
84287
84288 extern const struct proc_ns_operations netns_operations;
84289 extern const struct proc_ns_operations utsns_operations;
84290diff --git a/include/linux/quota.h b/include/linux/quota.h
84291index b86df49..8002997 100644
84292--- a/include/linux/quota.h
84293+++ b/include/linux/quota.h
84294@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84295
84296 extern bool qid_eq(struct kqid left, struct kqid right);
84297 extern bool qid_lt(struct kqid left, struct kqid right);
84298-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84299+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84300 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84301 extern bool qid_valid(struct kqid qid);
84302
84303diff --git a/include/linux/random.h b/include/linux/random.h
84304index b05856e..0a9f14e 100644
84305--- a/include/linux/random.h
84306+++ b/include/linux/random.h
84307@@ -9,9 +9,19 @@
84308 #include <uapi/linux/random.h>
84309
84310 extern void add_device_randomness(const void *, unsigned int);
84311+
84312+static inline void add_latent_entropy(void)
84313+{
84314+
84315+#ifdef LATENT_ENTROPY_PLUGIN
84316+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84317+#endif
84318+
84319+}
84320+
84321 extern void add_input_randomness(unsigned int type, unsigned int code,
84322- unsigned int value);
84323-extern void add_interrupt_randomness(int irq, int irq_flags);
84324+ unsigned int value) __latent_entropy;
84325+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84326
84327 extern void get_random_bytes(void *buf, int nbytes);
84328 extern void get_random_bytes_arch(void *buf, int nbytes);
84329@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84330 extern const struct file_operations random_fops, urandom_fops;
84331 #endif
84332
84333-unsigned int get_random_int(void);
84334+unsigned int __intentional_overflow(-1) get_random_int(void);
84335 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84336
84337-u32 prandom_u32(void);
84338+u32 prandom_u32(void) __intentional_overflow(-1);
84339 void prandom_bytes(void *buf, size_t nbytes);
84340 void prandom_seed(u32 seed);
84341 void prandom_reseed_late(void);
84342@@ -37,6 +47,11 @@ struct rnd_state {
84343 u32 prandom_u32_state(struct rnd_state *state);
84344 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84345
84346+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84347+{
84348+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84349+}
84350+
84351 /**
84352 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84353 * @ep_ro: right open interval endpoint
84354@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84355 *
84356 * Returns: pseudo-random number in interval [0, ep_ro)
84357 */
84358-static inline u32 prandom_u32_max(u32 ep_ro)
84359+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84360 {
84361 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84362 }
84363diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84364index 378c5ee..aa84a47 100644
84365--- a/include/linux/rbtree_augmented.h
84366+++ b/include/linux/rbtree_augmented.h
84367@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84368 old->rbaugmented = rbcompute(old); \
84369 } \
84370 rbstatic const struct rb_augment_callbacks rbname = { \
84371- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84372+ .propagate = rbname ## _propagate, \
84373+ .copy = rbname ## _copy, \
84374+ .rotate = rbname ## _rotate \
84375 };
84376
84377
84378diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84379index 529bc94..82ce778 100644
84380--- a/include/linux/rculist.h
84381+++ b/include/linux/rculist.h
84382@@ -29,8 +29,8 @@
84383 */
84384 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84385 {
84386- ACCESS_ONCE(list->next) = list;
84387- ACCESS_ONCE(list->prev) = list;
84388+ ACCESS_ONCE_RW(list->next) = list;
84389+ ACCESS_ONCE_RW(list->prev) = list;
84390 }
84391
84392 /*
84393@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84394 struct list_head *prev, struct list_head *next);
84395 #endif
84396
84397+void __pax_list_add_rcu(struct list_head *new,
84398+ struct list_head *prev, struct list_head *next);
84399+
84400 /**
84401 * list_add_rcu - add a new entry to rcu-protected list
84402 * @new: new entry to be added
84403@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84404 __list_add_rcu(new, head, head->next);
84405 }
84406
84407+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84408+{
84409+ __pax_list_add_rcu(new, head, head->next);
84410+}
84411+
84412 /**
84413 * list_add_tail_rcu - add a new entry to rcu-protected list
84414 * @new: new entry to be added
84415@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84416 __list_add_rcu(new, head->prev, head);
84417 }
84418
84419+static inline void pax_list_add_tail_rcu(struct list_head *new,
84420+ struct list_head *head)
84421+{
84422+ __pax_list_add_rcu(new, head->prev, head);
84423+}
84424+
84425 /**
84426 * list_del_rcu - deletes entry from list without re-initialization
84427 * @entry: the element to delete from the list.
84428@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84429 entry->prev = LIST_POISON2;
84430 }
84431
84432+extern void pax_list_del_rcu(struct list_head *entry);
84433+
84434 /**
84435 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84436 * @n: the element to delete from the hash list.
84437diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
84438index ed4f593..8a51501 100644
84439--- a/include/linux/rcupdate.h
84440+++ b/include/linux/rcupdate.h
84441@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
84442 #define rcu_note_voluntary_context_switch(t) \
84443 do { \
84444 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
84445- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
84446+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
84447 } while (0)
84448 #else /* #ifdef CONFIG_TASKS_RCU */
84449 #define TASKS_RCU(x) do { } while (0)
84450diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84451index 67fc8fc..a90f7d8 100644
84452--- a/include/linux/reboot.h
84453+++ b/include/linux/reboot.h
84454@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
84455 */
84456
84457 extern void migrate_to_reboot_cpu(void);
84458-extern void machine_restart(char *cmd);
84459-extern void machine_halt(void);
84460-extern void machine_power_off(void);
84461+extern void machine_restart(char *cmd) __noreturn;
84462+extern void machine_halt(void) __noreturn;
84463+extern void machine_power_off(void) __noreturn;
84464
84465 extern void machine_shutdown(void);
84466 struct pt_regs;
84467@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84468 */
84469
84470 extern void kernel_restart_prepare(char *cmd);
84471-extern void kernel_restart(char *cmd);
84472-extern void kernel_halt(void);
84473-extern void kernel_power_off(void);
84474+extern void kernel_restart(char *cmd) __noreturn;
84475+extern void kernel_halt(void) __noreturn;
84476+extern void kernel_power_off(void) __noreturn;
84477
84478 extern int C_A_D; /* for sysctl */
84479 void ctrl_alt_del(void);
84480@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
84481 * Emergency restart, callable from an interrupt handler.
84482 */
84483
84484-extern void emergency_restart(void);
84485+extern void emergency_restart(void) __noreturn;
84486 #include <asm/emergency-restart.h>
84487
84488 #endif /* _LINUX_REBOOT_H */
84489diff --git a/include/linux/regset.h b/include/linux/regset.h
84490index 8e0c9fe..ac4d221 100644
84491--- a/include/linux/regset.h
84492+++ b/include/linux/regset.h
84493@@ -161,7 +161,8 @@ struct user_regset {
84494 unsigned int align;
84495 unsigned int bias;
84496 unsigned int core_note_type;
84497-};
84498+} __do_const;
84499+typedef struct user_regset __no_const user_regset_no_const;
84500
84501 /**
84502 * struct user_regset_view - available regsets
84503diff --git a/include/linux/relay.h b/include/linux/relay.h
84504index d7c8359..818daf5 100644
84505--- a/include/linux/relay.h
84506+++ b/include/linux/relay.h
84507@@ -157,7 +157,7 @@ struct rchan_callbacks
84508 * The callback should return 0 if successful, negative if not.
84509 */
84510 int (*remove_buf_file)(struct dentry *dentry);
84511-};
84512+} __no_const;
84513
84514 /*
84515 * CONFIG_RELAY kernel API, kernel/relay.c
84516diff --git a/include/linux/rio.h b/include/linux/rio.h
84517index 6bda06f..bf39a9b 100644
84518--- a/include/linux/rio.h
84519+++ b/include/linux/rio.h
84520@@ -358,7 +358,7 @@ struct rio_ops {
84521 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84522 u64 rstart, u32 size, u32 flags);
84523 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84524-};
84525+} __no_const;
84526
84527 #define RIO_RESOURCE_MEM 0x00000100
84528 #define RIO_RESOURCE_DOORBELL 0x00000200
84529diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84530index d9d7e7e..86f47ac 100644
84531--- a/include/linux/rmap.h
84532+++ b/include/linux/rmap.h
84533@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84534 void anon_vma_init(void); /* create anon_vma_cachep */
84535 int anon_vma_prepare(struct vm_area_struct *);
84536 void unlink_anon_vmas(struct vm_area_struct *);
84537-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84538-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84539+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84540+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84541
84542 static inline void anon_vma_merge(struct vm_area_struct *vma,
84543 struct vm_area_struct *next)
84544diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84545index ed8f9e7..999bc96 100644
84546--- a/include/linux/scatterlist.h
84547+++ b/include/linux/scatterlist.h
84548@@ -1,6 +1,7 @@
84549 #ifndef _LINUX_SCATTERLIST_H
84550 #define _LINUX_SCATTERLIST_H
84551
84552+#include <linux/sched.h>
84553 #include <linux/string.h>
84554 #include <linux/bug.h>
84555 #include <linux/mm.h>
84556@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84557 #ifdef CONFIG_DEBUG_SG
84558 BUG_ON(!virt_addr_valid(buf));
84559 #endif
84560+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84561+ if (object_starts_on_stack(buf)) {
84562+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84563+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84564+ } else
84565+#endif
84566 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84567 }
84568
84569diff --git a/include/linux/sched.h b/include/linux/sched.h
84570index 8db31ef..0af1f81 100644
84571--- a/include/linux/sched.h
84572+++ b/include/linux/sched.h
84573@@ -133,6 +133,7 @@ struct fs_struct;
84574 struct perf_event_context;
84575 struct blk_plug;
84576 struct filename;
84577+struct linux_binprm;
84578
84579 #define VMACACHE_BITS 2
84580 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84581@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
84582 extern int in_sched_functions(unsigned long addr);
84583
84584 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84585-extern signed long schedule_timeout(signed long timeout);
84586+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84587 extern signed long schedule_timeout_interruptible(signed long timeout);
84588 extern signed long schedule_timeout_killable(signed long timeout);
84589 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84590@@ -426,6 +427,19 @@ struct nsproxy;
84591 struct user_namespace;
84592
84593 #ifdef CONFIG_MMU
84594+
84595+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84596+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84597+#else
84598+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84599+{
84600+ return 0;
84601+}
84602+#endif
84603+
84604+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84605+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84606+
84607 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84608 extern unsigned long
84609 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84610@@ -724,6 +738,17 @@ struct signal_struct {
84611 #ifdef CONFIG_TASKSTATS
84612 struct taskstats *stats;
84613 #endif
84614+
84615+#ifdef CONFIG_GRKERNSEC
84616+ u32 curr_ip;
84617+ u32 saved_ip;
84618+ u32 gr_saddr;
84619+ u32 gr_daddr;
84620+ u16 gr_sport;
84621+ u16 gr_dport;
84622+ u8 used_accept:1;
84623+#endif
84624+
84625 #ifdef CONFIG_AUDIT
84626 unsigned audit_tty;
84627 unsigned audit_tty_log_passwd;
84628@@ -750,7 +775,7 @@ struct signal_struct {
84629 struct mutex cred_guard_mutex; /* guard against foreign influences on
84630 * credential calculations
84631 * (notably. ptrace) */
84632-};
84633+} __randomize_layout;
84634
84635 /*
84636 * Bits in flags field of signal_struct.
84637@@ -803,6 +828,14 @@ struct user_struct {
84638 struct key *session_keyring; /* UID's default session keyring */
84639 #endif
84640
84641+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84642+ unsigned char kernel_banned;
84643+#endif
84644+#ifdef CONFIG_GRKERNSEC_BRUTE
84645+ unsigned char suid_banned;
84646+ unsigned long suid_ban_expires;
84647+#endif
84648+
84649 /* Hash table maintenance information */
84650 struct hlist_node uidhash_node;
84651 kuid_t uid;
84652@@ -810,7 +843,7 @@ struct user_struct {
84653 #ifdef CONFIG_PERF_EVENTS
84654 atomic_long_t locked_vm;
84655 #endif
84656-};
84657+} __randomize_layout;
84658
84659 extern int uids_sysfs_init(void);
84660
84661@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
84662 struct task_struct {
84663 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84664 void *stack;
84665+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84666+ void *lowmem_stack;
84667+#endif
84668 atomic_t usage;
84669 unsigned int flags; /* per process flags, defined below */
84670 unsigned int ptrace;
84671@@ -1405,8 +1441,8 @@ struct task_struct {
84672 struct list_head thread_node;
84673
84674 struct completion *vfork_done; /* for vfork() */
84675- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84676- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84677+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84678+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84679
84680 cputime_t utime, stime, utimescaled, stimescaled;
84681 cputime_t gtime;
84682@@ -1431,11 +1467,6 @@ struct task_struct {
84683 struct task_cputime cputime_expires;
84684 struct list_head cpu_timers[3];
84685
84686-/* process credentials */
84687- const struct cred __rcu *real_cred; /* objective and real subjective task
84688- * credentials (COW) */
84689- const struct cred __rcu *cred; /* effective (overridable) subjective task
84690- * credentials (COW) */
84691 char comm[TASK_COMM_LEN]; /* executable name excluding path
84692 - access with [gs]et_task_comm (which lock
84693 it with task_lock())
84694@@ -1453,6 +1484,10 @@ struct task_struct {
84695 #endif
84696 /* CPU-specific state of this task */
84697 struct thread_struct thread;
84698+/* thread_info moved to task_struct */
84699+#ifdef CONFIG_X86
84700+ struct thread_info tinfo;
84701+#endif
84702 /* filesystem information */
84703 struct fs_struct *fs;
84704 /* open file information */
84705@@ -1527,6 +1562,10 @@ struct task_struct {
84706 gfp_t lockdep_reclaim_gfp;
84707 #endif
84708
84709+/* process credentials */
84710+ const struct cred __rcu *real_cred; /* objective and real subjective task
84711+ * credentials (COW) */
84712+
84713 /* journalling filesystem info */
84714 void *journal_info;
84715
84716@@ -1565,6 +1604,10 @@ struct task_struct {
84717 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84718 struct list_head cg_list;
84719 #endif
84720+
84721+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84722+ * credentials (COW) */
84723+
84724 #ifdef CONFIG_FUTEX
84725 struct robust_list_head __user *robust_list;
84726 #ifdef CONFIG_COMPAT
84727@@ -1673,7 +1716,7 @@ struct task_struct {
84728 * Number of functions that haven't been traced
84729 * because of depth overrun.
84730 */
84731- atomic_t trace_overrun;
84732+ atomic_unchecked_t trace_overrun;
84733 /* Pause for the tracing */
84734 atomic_t tracing_graph_pause;
84735 #endif
84736@@ -1701,7 +1744,78 @@ struct task_struct {
84737 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
84738 unsigned long task_state_change;
84739 #endif
84740-};
84741+
84742+#ifdef CONFIG_GRKERNSEC
84743+ /* grsecurity */
84744+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84745+ u64 exec_id;
84746+#endif
84747+#ifdef CONFIG_GRKERNSEC_SETXID
84748+ const struct cred *delayed_cred;
84749+#endif
84750+ struct dentry *gr_chroot_dentry;
84751+ struct acl_subject_label *acl;
84752+ struct acl_subject_label *tmpacl;
84753+ struct acl_role_label *role;
84754+ struct file *exec_file;
84755+ unsigned long brute_expires;
84756+ u16 acl_role_id;
84757+ u8 inherited;
84758+ /* is this the task that authenticated to the special role */
84759+ u8 acl_sp_role;
84760+ u8 is_writable;
84761+ u8 brute;
84762+ u8 gr_is_chrooted;
84763+#endif
84764+
84765+} __randomize_layout;
84766+
84767+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84768+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84769+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84770+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84771+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84772+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84773+
84774+#ifdef CONFIG_PAX_SOFTMODE
84775+extern int pax_softmode;
84776+#endif
84777+
84778+extern int pax_check_flags(unsigned long *);
84779+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84780+
84781+/* if tsk != current then task_lock must be held on it */
84782+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84783+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84784+{
84785+ if (likely(tsk->mm))
84786+ return tsk->mm->pax_flags;
84787+ else
84788+ return 0UL;
84789+}
84790+
84791+/* if tsk != current then task_lock must be held on it */
84792+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84793+{
84794+ if (likely(tsk->mm)) {
84795+ tsk->mm->pax_flags = flags;
84796+ return 0;
84797+ }
84798+ return -EINVAL;
84799+}
84800+#endif
84801+
84802+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84803+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84804+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84805+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84806+#endif
84807+
84808+struct path;
84809+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
84810+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
84811+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
84812+extern void pax_report_refcount_overflow(struct pt_regs *regs);
84813
84814 /* Future-safe accessor for struct task_struct's cpus_allowed. */
84815 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
84816@@ -1783,7 +1897,7 @@ struct pid_namespace;
84817 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
84818 struct pid_namespace *ns);
84819
84820-static inline pid_t task_pid_nr(struct task_struct *tsk)
84821+static inline pid_t task_pid_nr(const struct task_struct *tsk)
84822 {
84823 return tsk->pid;
84824 }
84825@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
84826
84827 extern void sched_clock_init(void);
84828
84829+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84830+static inline void populate_stack(void)
84831+{
84832+ struct task_struct *curtask = current;
84833+ int c;
84834+ int *ptr = curtask->stack;
84835+ int *end = curtask->stack + THREAD_SIZE;
84836+
84837+ while (ptr < end) {
84838+ c = *(volatile int *)ptr;
84839+ ptr += PAGE_SIZE/sizeof(int);
84840+ }
84841+}
84842+#else
84843+static inline void populate_stack(void)
84844+{
84845+}
84846+#endif
84847+
84848 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
84849 static inline void sched_clock_tick(void)
84850 {
84851@@ -2283,7 +2416,9 @@ void yield(void);
84852 extern struct exec_domain default_exec_domain;
84853
84854 union thread_union {
84855+#ifndef CONFIG_X86
84856 struct thread_info thread_info;
84857+#endif
84858 unsigned long stack[THREAD_SIZE/sizeof(long)];
84859 };
84860
84861@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
84862 */
84863
84864 extern struct task_struct *find_task_by_vpid(pid_t nr);
84865+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
84866 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
84867 struct pid_namespace *ns);
84868
84869@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
84870 extern void exit_itimers(struct signal_struct *);
84871 extern void flush_itimer_signals(void);
84872
84873-extern void do_group_exit(int);
84874+extern __noreturn void do_group_exit(int);
84875
84876 extern int do_execve(struct filename *,
84877 const char __user * const __user *,
84878@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
84879 #define task_stack_end_corrupted(task) \
84880 (*(end_of_stack(task)) != STACK_END_MAGIC)
84881
84882-static inline int object_is_on_stack(void *obj)
84883+static inline int object_starts_on_stack(const void *obj)
84884 {
84885- void *stack = task_stack_page(current);
84886+ const void *stack = task_stack_page(current);
84887
84888 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
84889 }
84890diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
84891index 596a0e0..bea77ec 100644
84892--- a/include/linux/sched/sysctl.h
84893+++ b/include/linux/sched/sysctl.h
84894@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
84895 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
84896
84897 extern int sysctl_max_map_count;
84898+extern unsigned long sysctl_heap_stack_gap;
84899
84900 extern unsigned int sysctl_sched_latency;
84901 extern unsigned int sysctl_sched_min_granularity;
84902diff --git a/include/linux/security.h b/include/linux/security.h
84903index ba96471..74fb3f6 100644
84904--- a/include/linux/security.h
84905+++ b/include/linux/security.h
84906@@ -27,6 +27,7 @@
84907 #include <linux/slab.h>
84908 #include <linux/err.h>
84909 #include <linux/string.h>
84910+#include <linux/grsecurity.h>
84911
84912 struct linux_binprm;
84913 struct cred;
84914@@ -116,8 +117,6 @@ struct seq_file;
84915
84916 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
84917
84918-void reset_security_ops(void);
84919-
84920 #ifdef CONFIG_MMU
84921 extern unsigned long mmap_min_addr;
84922 extern unsigned long dac_mmap_min_addr;
84923@@ -1729,7 +1728,7 @@ struct security_operations {
84924 struct audit_context *actx);
84925 void (*audit_rule_free) (void *lsmrule);
84926 #endif /* CONFIG_AUDIT */
84927-};
84928+} __randomize_layout;
84929
84930 /* prototypes */
84931 extern int security_init(void);
84932diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
84933index dc368b8..e895209 100644
84934--- a/include/linux/semaphore.h
84935+++ b/include/linux/semaphore.h
84936@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
84937 }
84938
84939 extern void down(struct semaphore *sem);
84940-extern int __must_check down_interruptible(struct semaphore *sem);
84941+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
84942 extern int __must_check down_killable(struct semaphore *sem);
84943 extern int __must_check down_trylock(struct semaphore *sem);
84944 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
84945diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
84946index cf6a9da..bd86b1f 100644
84947--- a/include/linux/seq_file.h
84948+++ b/include/linux/seq_file.h
84949@@ -27,6 +27,9 @@ struct seq_file {
84950 struct mutex lock;
84951 const struct seq_operations *op;
84952 int poll_event;
84953+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84954+ u64 exec_id;
84955+#endif
84956 #ifdef CONFIG_USER_NS
84957 struct user_namespace *user_ns;
84958 #endif
84959@@ -39,6 +42,7 @@ struct seq_operations {
84960 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
84961 int (*show) (struct seq_file *m, void *v);
84962 };
84963+typedef struct seq_operations __no_const seq_operations_no_const;
84964
84965 #define SEQ_SKIP 1
84966
84967@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
84968
84969 char *mangle_path(char *s, const char *p, const char *esc);
84970 int seq_open(struct file *, const struct seq_operations *);
84971+int seq_open_restrict(struct file *, const struct seq_operations *);
84972 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
84973 loff_t seq_lseek(struct file *, loff_t, int);
84974 int seq_release(struct inode *, struct file *);
84975@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
84976 }
84977
84978 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
84979+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
84980 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
84981 int single_release(struct inode *, struct file *);
84982 void *__seq_open_private(struct file *, const struct seq_operations *, int);
84983diff --git a/include/linux/shm.h b/include/linux/shm.h
84984index 6fb8016..ab4465e 100644
84985--- a/include/linux/shm.h
84986+++ b/include/linux/shm.h
84987@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
84988 /* The task created the shm object. NULL if the task is dead. */
84989 struct task_struct *shm_creator;
84990 struct list_head shm_clist; /* list by creator */
84991+#ifdef CONFIG_GRKERNSEC
84992+ u64 shm_createtime;
84993+ pid_t shm_lapid;
84994+#endif
84995 };
84996
84997 /* shm_mode upper byte flags */
84998diff --git a/include/linux/signal.h b/include/linux/signal.h
84999index ab1e039..ad4229e 100644
85000--- a/include/linux/signal.h
85001+++ b/include/linux/signal.h
85002@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85003 * know it'll be handled, so that they don't get converted to
85004 * SIGKILL or just silently dropped.
85005 */
85006- kernel_sigaction(sig, (__force __sighandler_t)2);
85007+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85008 }
85009
85010 static inline void disallow_signal(int sig)
85011diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85012index 85ab7d7..eb1585a 100644
85013--- a/include/linux/skbuff.h
85014+++ b/include/linux/skbuff.h
85015@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85016 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85017 int node);
85018 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85019-static inline struct sk_buff *alloc_skb(unsigned int size,
85020+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85021 gfp_t priority)
85022 {
85023 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85024@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85025 return skb->inner_transport_header - skb->inner_network_header;
85026 }
85027
85028-static inline int skb_network_offset(const struct sk_buff *skb)
85029+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85030 {
85031 return skb_network_header(skb) - skb->data;
85032 }
85033@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85034 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85035 */
85036 #ifndef NET_SKB_PAD
85037-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85038+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85039 #endif
85040
85041 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85042@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85043 int *err);
85044 unsigned int datagram_poll(struct file *file, struct socket *sock,
85045 struct poll_table_struct *wait);
85046-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85047+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85048 struct iov_iter *to, int size);
85049-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85050+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85051 struct msghdr *msg, int size)
85052 {
85053 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85054@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
85055 nf_bridge_put(skb->nf_bridge);
85056 skb->nf_bridge = NULL;
85057 #endif
85058+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85059+ skb->nf_trace = 0;
85060+#endif
85061 }
85062
85063 static inline void nf_reset_trace(struct sk_buff *skb)
85064diff --git a/include/linux/slab.h b/include/linux/slab.h
85065index 9a139b6..aab37b4 100644
85066--- a/include/linux/slab.h
85067+++ b/include/linux/slab.h
85068@@ -14,15 +14,29 @@
85069 #include <linux/gfp.h>
85070 #include <linux/types.h>
85071 #include <linux/workqueue.h>
85072-
85073+#include <linux/err.h>
85074
85075 /*
85076 * Flags to pass to kmem_cache_create().
85077 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85078 */
85079 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85080+
85081+#ifdef CONFIG_PAX_USERCOPY_SLABS
85082+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85083+#else
85084+#define SLAB_USERCOPY 0x00000000UL
85085+#endif
85086+
85087 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85088 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85089+
85090+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85091+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85092+#else
85093+#define SLAB_NO_SANITIZE 0x00000000UL
85094+#endif
85095+
85096 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85097 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85098 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85099@@ -98,10 +112,13 @@
85100 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85101 * Both make kfree a no-op.
85102 */
85103-#define ZERO_SIZE_PTR ((void *)16)
85104+#define ZERO_SIZE_PTR \
85105+({ \
85106+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85107+ (void *)(-MAX_ERRNO-1L); \
85108+})
85109
85110-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85111- (unsigned long)ZERO_SIZE_PTR)
85112+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85113
85114 #include <linux/kmemleak.h>
85115
85116@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85117 void kfree(const void *);
85118 void kzfree(const void *);
85119 size_t ksize(const void *);
85120+const char *check_heap_object(const void *ptr, unsigned long n);
85121+bool is_usercopy_object(const void *ptr);
85122
85123 /*
85124 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85125@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85126 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85127 #endif
85128
85129+#ifdef CONFIG_PAX_USERCOPY_SLABS
85130+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85131+#endif
85132+
85133 /*
85134 * Figure out which kmalloc slab an allocation of a certain size
85135 * belongs to.
85136@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85137 * 2 = 120 .. 192 bytes
85138 * n = 2^(n-1) .. 2^n -1
85139 */
85140-static __always_inline int kmalloc_index(size_t size)
85141+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85142 {
85143 if (!size)
85144 return 0;
85145@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
85146 }
85147 #endif /* !CONFIG_SLOB */
85148
85149-void *__kmalloc(size_t size, gfp_t flags);
85150+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85151 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85152
85153 #ifdef CONFIG_NUMA
85154-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85155+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85156 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85157 #else
85158-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85159+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85160 {
85161 return __kmalloc(size, flags);
85162 }
85163diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85164index b869d16..1453c73 100644
85165--- a/include/linux/slab_def.h
85166+++ b/include/linux/slab_def.h
85167@@ -40,7 +40,7 @@ struct kmem_cache {
85168 /* 4) cache creation/removal */
85169 const char *name;
85170 struct list_head list;
85171- int refcount;
85172+ atomic_t refcount;
85173 int object_size;
85174 int align;
85175
85176@@ -56,10 +56,14 @@ struct kmem_cache {
85177 unsigned long node_allocs;
85178 unsigned long node_frees;
85179 unsigned long node_overflow;
85180- atomic_t allochit;
85181- atomic_t allocmiss;
85182- atomic_t freehit;
85183- atomic_t freemiss;
85184+ atomic_unchecked_t allochit;
85185+ atomic_unchecked_t allocmiss;
85186+ atomic_unchecked_t freehit;
85187+ atomic_unchecked_t freemiss;
85188+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85189+ atomic_unchecked_t sanitized;
85190+ atomic_unchecked_t not_sanitized;
85191+#endif
85192
85193 /*
85194 * If debugging is enabled, then the allocator can add additional
85195diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85196index d82abd4..408c3a0 100644
85197--- a/include/linux/slub_def.h
85198+++ b/include/linux/slub_def.h
85199@@ -74,7 +74,7 @@ struct kmem_cache {
85200 struct kmem_cache_order_objects max;
85201 struct kmem_cache_order_objects min;
85202 gfp_t allocflags; /* gfp flags to use on each alloc */
85203- int refcount; /* Refcount for slab cache destroy */
85204+ atomic_t refcount; /* Refcount for slab cache destroy */
85205 void (*ctor)(void *);
85206 int inuse; /* Offset to metadata */
85207 int align; /* Alignment */
85208diff --git a/include/linux/smp.h b/include/linux/smp.h
85209index 93dff5f..933c561 100644
85210--- a/include/linux/smp.h
85211+++ b/include/linux/smp.h
85212@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) { }
85213 #endif
85214
85215 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85216+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85217 #define put_cpu() preempt_enable()
85218+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85219
85220 /*
85221 * Callback to arch code if there's nosmp or maxcpus=0 on the
85222diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85223index 46cca4c..3323536 100644
85224--- a/include/linux/sock_diag.h
85225+++ b/include/linux/sock_diag.h
85226@@ -11,7 +11,7 @@ struct sock;
85227 struct sock_diag_handler {
85228 __u8 family;
85229 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85230-};
85231+} __do_const;
85232
85233 int sock_diag_register(const struct sock_diag_handler *h);
85234 void sock_diag_unregister(const struct sock_diag_handler *h);
85235diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85236index 680f9a3..f13aeb0 100644
85237--- a/include/linux/sonet.h
85238+++ b/include/linux/sonet.h
85239@@ -7,7 +7,7 @@
85240 #include <uapi/linux/sonet.h>
85241
85242 struct k_sonet_stats {
85243-#define __HANDLE_ITEM(i) atomic_t i
85244+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85245 __SONET_ITEMS
85246 #undef __HANDLE_ITEM
85247 };
85248diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85249index 07d8e53..dc934c9 100644
85250--- a/include/linux/sunrpc/addr.h
85251+++ b/include/linux/sunrpc/addr.h
85252@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85253 {
85254 switch (sap->sa_family) {
85255 case AF_INET:
85256- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85257+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85258 case AF_INET6:
85259- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85260+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85261 }
85262 return 0;
85263 }
85264@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85265 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85266 const struct sockaddr *src)
85267 {
85268- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85269+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85270 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85271
85272 dsin->sin_family = ssin->sin_family;
85273@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85274 if (sa->sa_family != AF_INET6)
85275 return 0;
85276
85277- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85278+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85279 }
85280
85281 #endif /* _LINUX_SUNRPC_ADDR_H */
85282diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85283index 598ba80..d90cba6 100644
85284--- a/include/linux/sunrpc/clnt.h
85285+++ b/include/linux/sunrpc/clnt.h
85286@@ -100,7 +100,7 @@ struct rpc_procinfo {
85287 unsigned int p_timer; /* Which RTT timer to use */
85288 u32 p_statidx; /* Which procedure to account */
85289 const char * p_name; /* name of procedure */
85290-};
85291+} __do_const;
85292
85293 #ifdef __KERNEL__
85294
85295diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85296index 6f22cfe..9fd0909 100644
85297--- a/include/linux/sunrpc/svc.h
85298+++ b/include/linux/sunrpc/svc.h
85299@@ -420,7 +420,7 @@ struct svc_procedure {
85300 unsigned int pc_count; /* call count */
85301 unsigned int pc_cachetype; /* cache info (NFS) */
85302 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85303-};
85304+} __do_const;
85305
85306 /*
85307 * Function prototypes.
85308diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85309index 975da75..318c083 100644
85310--- a/include/linux/sunrpc/svc_rdma.h
85311+++ b/include/linux/sunrpc/svc_rdma.h
85312@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85313 extern unsigned int svcrdma_max_requests;
85314 extern unsigned int svcrdma_max_req_size;
85315
85316-extern atomic_t rdma_stat_recv;
85317-extern atomic_t rdma_stat_read;
85318-extern atomic_t rdma_stat_write;
85319-extern atomic_t rdma_stat_sq_starve;
85320-extern atomic_t rdma_stat_rq_starve;
85321-extern atomic_t rdma_stat_rq_poll;
85322-extern atomic_t rdma_stat_rq_prod;
85323-extern atomic_t rdma_stat_sq_poll;
85324-extern atomic_t rdma_stat_sq_prod;
85325+extern atomic_unchecked_t rdma_stat_recv;
85326+extern atomic_unchecked_t rdma_stat_read;
85327+extern atomic_unchecked_t rdma_stat_write;
85328+extern atomic_unchecked_t rdma_stat_sq_starve;
85329+extern atomic_unchecked_t rdma_stat_rq_starve;
85330+extern atomic_unchecked_t rdma_stat_rq_poll;
85331+extern atomic_unchecked_t rdma_stat_rq_prod;
85332+extern atomic_unchecked_t rdma_stat_sq_poll;
85333+extern atomic_unchecked_t rdma_stat_sq_prod;
85334
85335 #define RPCRDMA_VERSION 1
85336
85337diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85338index 8d71d65..f79586e 100644
85339--- a/include/linux/sunrpc/svcauth.h
85340+++ b/include/linux/sunrpc/svcauth.h
85341@@ -120,7 +120,7 @@ struct auth_ops {
85342 int (*release)(struct svc_rqst *rq);
85343 void (*domain_release)(struct auth_domain *);
85344 int (*set_client)(struct svc_rqst *rq);
85345-};
85346+} __do_const;
85347
85348 #define SVC_GARBAGE 1
85349 #define SVC_SYSERR 2
85350diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85351index e7a018e..49f8b17 100644
85352--- a/include/linux/swiotlb.h
85353+++ b/include/linux/swiotlb.h
85354@@ -60,7 +60,8 @@ extern void
85355
85356 extern void
85357 swiotlb_free_coherent(struct device *hwdev, size_t size,
85358- void *vaddr, dma_addr_t dma_handle);
85359+ void *vaddr, dma_addr_t dma_handle,
85360+ struct dma_attrs *attrs);
85361
85362 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85363 unsigned long offset, size_t size,
85364diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85365index 85893d7..4923581 100644
85366--- a/include/linux/syscalls.h
85367+++ b/include/linux/syscalls.h
85368@@ -99,10 +99,16 @@ union bpf_attr;
85369 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
85370
85371 #define __SC_DECL(t, a) t a
85372+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
85373 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85374 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85375 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85376-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85377+#define __SC_LONG(t, a) __typeof( \
85378+ __builtin_choose_expr( \
85379+ sizeof(t) > sizeof(int), \
85380+ (t) 0, \
85381+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
85382+ )) a
85383 #define __SC_CAST(t, a) (t) a
85384 #define __SC_ARGS(t, a) a
85385 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85386@@ -384,11 +390,11 @@ asmlinkage long sys_sync(void);
85387 asmlinkage long sys_fsync(unsigned int fd);
85388 asmlinkage long sys_fdatasync(unsigned int fd);
85389 asmlinkage long sys_bdflush(int func, long data);
85390-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85391- char __user *type, unsigned long flags,
85392+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85393+ const char __user *type, unsigned long flags,
85394 void __user *data);
85395-asmlinkage long sys_umount(char __user *name, int flags);
85396-asmlinkage long sys_oldumount(char __user *name);
85397+asmlinkage long sys_umount(const char __user *name, int flags);
85398+asmlinkage long sys_oldumount(const char __user *name);
85399 asmlinkage long sys_truncate(const char __user *path, long length);
85400 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85401 asmlinkage long sys_stat(const char __user *filename,
85402@@ -600,7 +606,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85403 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85404 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85405 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85406- struct sockaddr __user *, int);
85407+ struct sockaddr __user *, int) __intentional_overflow(0);
85408 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
85409 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85410 unsigned int vlen, unsigned flags);
85411diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85412index 27b3b0b..e093dd9 100644
85413--- a/include/linux/syscore_ops.h
85414+++ b/include/linux/syscore_ops.h
85415@@ -16,7 +16,7 @@ struct syscore_ops {
85416 int (*suspend)(void);
85417 void (*resume)(void);
85418 void (*shutdown)(void);
85419-};
85420+} __do_const;
85421
85422 extern void register_syscore_ops(struct syscore_ops *ops);
85423 extern void unregister_syscore_ops(struct syscore_ops *ops);
85424diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85425index b7361f8..341a15a 100644
85426--- a/include/linux/sysctl.h
85427+++ b/include/linux/sysctl.h
85428@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85429
85430 extern int proc_dostring(struct ctl_table *, int,
85431 void __user *, size_t *, loff_t *);
85432+extern int proc_dostring_modpriv(struct ctl_table *, int,
85433+ void __user *, size_t *, loff_t *);
85434 extern int proc_dointvec(struct ctl_table *, int,
85435 void __user *, size_t *, loff_t *);
85436 extern int proc_dointvec_minmax(struct ctl_table *, int,
85437@@ -113,7 +115,8 @@ struct ctl_table
85438 struct ctl_table_poll *poll;
85439 void *extra1;
85440 void *extra2;
85441-};
85442+} __do_const __randomize_layout;
85443+typedef struct ctl_table __no_const ctl_table_no_const;
85444
85445 struct ctl_node {
85446 struct rb_node node;
85447diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85448index ddad161..a3efd26 100644
85449--- a/include/linux/sysfs.h
85450+++ b/include/linux/sysfs.h
85451@@ -34,7 +34,8 @@ struct attribute {
85452 struct lock_class_key *key;
85453 struct lock_class_key skey;
85454 #endif
85455-};
85456+} __do_const;
85457+typedef struct attribute __no_const attribute_no_const;
85458
85459 /**
85460 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85461@@ -63,7 +64,8 @@ struct attribute_group {
85462 struct attribute *, int);
85463 struct attribute **attrs;
85464 struct bin_attribute **bin_attrs;
85465-};
85466+} __do_const;
85467+typedef struct attribute_group __no_const attribute_group_no_const;
85468
85469 /**
85470 * Use these macros to make defining attributes easier. See include/linux/device.h
85471@@ -137,7 +139,8 @@ struct bin_attribute {
85472 char *, loff_t, size_t);
85473 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85474 struct vm_area_struct *vma);
85475-};
85476+} __do_const;
85477+typedef struct bin_attribute __no_const bin_attribute_no_const;
85478
85479 /**
85480 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85481diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85482index 387fa7d..3fcde6b 100644
85483--- a/include/linux/sysrq.h
85484+++ b/include/linux/sysrq.h
85485@@ -16,6 +16,7 @@
85486
85487 #include <linux/errno.h>
85488 #include <linux/types.h>
85489+#include <linux/compiler.h>
85490
85491 /* Possible values of bitmask for enabling sysrq functions */
85492 /* 0x0001 is reserved for enable everything */
85493@@ -33,7 +34,7 @@ struct sysrq_key_op {
85494 char *help_msg;
85495 char *action_msg;
85496 int enable_mask;
85497-};
85498+} __do_const;
85499
85500 #ifdef CONFIG_MAGIC_SYSRQ
85501
85502diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85503index ff307b5..f1a4468 100644
85504--- a/include/linux/thread_info.h
85505+++ b/include/linux/thread_info.h
85506@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85507 #error "no set_restore_sigmask() provided and default one won't work"
85508 #endif
85509
85510+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85511+
85512+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85513+{
85514+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85515+}
85516+
85517 #endif /* __KERNEL__ */
85518
85519 #endif /* _LINUX_THREAD_INFO_H */
85520diff --git a/include/linux/tty.h b/include/linux/tty.h
85521index 7d66ae5..0327149 100644
85522--- a/include/linux/tty.h
85523+++ b/include/linux/tty.h
85524@@ -202,7 +202,7 @@ struct tty_port {
85525 const struct tty_port_operations *ops; /* Port operations */
85526 spinlock_t lock; /* Lock protecting tty field */
85527 int blocked_open; /* Waiting to open */
85528- int count; /* Usage count */
85529+ atomic_t count; /* Usage count */
85530 wait_queue_head_t open_wait; /* Open waiters */
85531 wait_queue_head_t close_wait; /* Close waiters */
85532 wait_queue_head_t delta_msr_wait; /* Modem status change */
85533@@ -290,7 +290,7 @@ struct tty_struct {
85534 /* If the tty has a pending do_SAK, queue it here - akpm */
85535 struct work_struct SAK_work;
85536 struct tty_port *port;
85537-};
85538+} __randomize_layout;
85539
85540 /* Each of a tty's open files has private_data pointing to tty_file_private */
85541 struct tty_file_private {
85542@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
85543 struct tty_struct *tty, struct file *filp);
85544 static inline int tty_port_users(struct tty_port *port)
85545 {
85546- return port->count + port->blocked_open;
85547+ return atomic_read(&port->count) + port->blocked_open;
85548 }
85549
85550 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
85551diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85552index 92e337c..f46757b 100644
85553--- a/include/linux/tty_driver.h
85554+++ b/include/linux/tty_driver.h
85555@@ -291,7 +291,7 @@ struct tty_operations {
85556 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85557 #endif
85558 const struct file_operations *proc_fops;
85559-};
85560+} __do_const __randomize_layout;
85561
85562 struct tty_driver {
85563 int magic; /* magic number for this structure */
85564@@ -325,7 +325,7 @@ struct tty_driver {
85565
85566 const struct tty_operations *ops;
85567 struct list_head tty_drivers;
85568-};
85569+} __randomize_layout;
85570
85571 extern struct list_head tty_drivers;
85572
85573diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85574index 00c9d68..bc0188b 100644
85575--- a/include/linux/tty_ldisc.h
85576+++ b/include/linux/tty_ldisc.h
85577@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85578
85579 struct module *owner;
85580
85581- int refcount;
85582+ atomic_t refcount;
85583 };
85584
85585 struct tty_ldisc {
85586diff --git a/include/linux/types.h b/include/linux/types.h
85587index a0bb704..f511c77 100644
85588--- a/include/linux/types.h
85589+++ b/include/linux/types.h
85590@@ -177,10 +177,26 @@ typedef struct {
85591 int counter;
85592 } atomic_t;
85593
85594+#ifdef CONFIG_PAX_REFCOUNT
85595+typedef struct {
85596+ int counter;
85597+} atomic_unchecked_t;
85598+#else
85599+typedef atomic_t atomic_unchecked_t;
85600+#endif
85601+
85602 #ifdef CONFIG_64BIT
85603 typedef struct {
85604 long counter;
85605 } atomic64_t;
85606+
85607+#ifdef CONFIG_PAX_REFCOUNT
85608+typedef struct {
85609+ long counter;
85610+} atomic64_unchecked_t;
85611+#else
85612+typedef atomic64_t atomic64_unchecked_t;
85613+#endif
85614 #endif
85615
85616 struct list_head {
85617diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85618index ecd3319..8a36ded 100644
85619--- a/include/linux/uaccess.h
85620+++ b/include/linux/uaccess.h
85621@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85622 long ret; \
85623 mm_segment_t old_fs = get_fs(); \
85624 \
85625- set_fs(KERNEL_DS); \
85626 pagefault_disable(); \
85627- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85628- pagefault_enable(); \
85629+ set_fs(KERNEL_DS); \
85630+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85631 set_fs(old_fs); \
85632+ pagefault_enable(); \
85633 ret; \
85634 })
85635
85636diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85637index 2d1f9b6..d7a9fce 100644
85638--- a/include/linux/uidgid.h
85639+++ b/include/linux/uidgid.h
85640@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85641
85642 #endif /* CONFIG_USER_NS */
85643
85644+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85645+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85646+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85647+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85648+
85649 #endif /* _LINUX_UIDGID_H */
85650diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
85651index 32c0e83..671eb35 100644
85652--- a/include/linux/uio_driver.h
85653+++ b/include/linux/uio_driver.h
85654@@ -67,7 +67,7 @@ struct uio_device {
85655 struct module *owner;
85656 struct device *dev;
85657 int minor;
85658- atomic_t event;
85659+ atomic_unchecked_t event;
85660 struct fasync_struct *async_queue;
85661 wait_queue_head_t wait;
85662 struct uio_info *info;
85663diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85664index 99c1b4d..562e6f3 100644
85665--- a/include/linux/unaligned/access_ok.h
85666+++ b/include/linux/unaligned/access_ok.h
85667@@ -4,34 +4,34 @@
85668 #include <linux/kernel.h>
85669 #include <asm/byteorder.h>
85670
85671-static inline u16 get_unaligned_le16(const void *p)
85672+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85673 {
85674- return le16_to_cpup((__le16 *)p);
85675+ return le16_to_cpup((const __le16 *)p);
85676 }
85677
85678-static inline u32 get_unaligned_le32(const void *p)
85679+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85680 {
85681- return le32_to_cpup((__le32 *)p);
85682+ return le32_to_cpup((const __le32 *)p);
85683 }
85684
85685-static inline u64 get_unaligned_le64(const void *p)
85686+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85687 {
85688- return le64_to_cpup((__le64 *)p);
85689+ return le64_to_cpup((const __le64 *)p);
85690 }
85691
85692-static inline u16 get_unaligned_be16(const void *p)
85693+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85694 {
85695- return be16_to_cpup((__be16 *)p);
85696+ return be16_to_cpup((const __be16 *)p);
85697 }
85698
85699-static inline u32 get_unaligned_be32(const void *p)
85700+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85701 {
85702- return be32_to_cpup((__be32 *)p);
85703+ return be32_to_cpup((const __be32 *)p);
85704 }
85705
85706-static inline u64 get_unaligned_be64(const void *p)
85707+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85708 {
85709- return be64_to_cpup((__be64 *)p);
85710+ return be64_to_cpup((const __be64 *)p);
85711 }
85712
85713 static inline void put_unaligned_le16(u16 val, void *p)
85714diff --git a/include/linux/usb.h b/include/linux/usb.h
85715index 058a769..c17a1c2c 100644
85716--- a/include/linux/usb.h
85717+++ b/include/linux/usb.h
85718@@ -566,7 +566,7 @@ struct usb_device {
85719 int maxchild;
85720
85721 u32 quirks;
85722- atomic_t urbnum;
85723+ atomic_unchecked_t urbnum;
85724
85725 unsigned long active_duration;
85726
85727@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85728
85729 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85730 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85731- void *data, __u16 size, int timeout);
85732+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85733 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85734 void *data, int len, int *actual_length, int timeout);
85735 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85736diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85737index 9fd9e48..e2c5f35 100644
85738--- a/include/linux/usb/renesas_usbhs.h
85739+++ b/include/linux/usb/renesas_usbhs.h
85740@@ -39,7 +39,7 @@ enum {
85741 */
85742 struct renesas_usbhs_driver_callback {
85743 int (*notify_hotplug)(struct platform_device *pdev);
85744-};
85745+} __no_const;
85746
85747 /*
85748 * callback functions for platform
85749diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85750index 8297e5b..0dfae27 100644
85751--- a/include/linux/user_namespace.h
85752+++ b/include/linux/user_namespace.h
85753@@ -39,7 +39,7 @@ struct user_namespace {
85754 struct key *persistent_keyring_register;
85755 struct rw_semaphore persistent_keyring_register_sem;
85756 #endif
85757-};
85758+} __randomize_layout;
85759
85760 extern struct user_namespace init_user_ns;
85761
85762diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85763index 5093f58..c103e58 100644
85764--- a/include/linux/utsname.h
85765+++ b/include/linux/utsname.h
85766@@ -25,7 +25,7 @@ struct uts_namespace {
85767 struct new_utsname name;
85768 struct user_namespace *user_ns;
85769 struct ns_common ns;
85770-};
85771+} __randomize_layout;
85772 extern struct uts_namespace init_uts_ns;
85773
85774 #ifdef CONFIG_UTS_NS
85775diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85776index 6f8fbcf..4efc177 100644
85777--- a/include/linux/vermagic.h
85778+++ b/include/linux/vermagic.h
85779@@ -25,9 +25,42 @@
85780 #define MODULE_ARCH_VERMAGIC ""
85781 #endif
85782
85783+#ifdef CONFIG_PAX_REFCOUNT
85784+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85785+#else
85786+#define MODULE_PAX_REFCOUNT ""
85787+#endif
85788+
85789+#ifdef CONSTIFY_PLUGIN
85790+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85791+#else
85792+#define MODULE_CONSTIFY_PLUGIN ""
85793+#endif
85794+
85795+#ifdef STACKLEAK_PLUGIN
85796+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
85797+#else
85798+#define MODULE_STACKLEAK_PLUGIN ""
85799+#endif
85800+
85801+#ifdef RANDSTRUCT_PLUGIN
85802+#include <generated/randomize_layout_hash.h>
85803+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
85804+#else
85805+#define MODULE_RANDSTRUCT_PLUGIN
85806+#endif
85807+
85808+#ifdef CONFIG_GRKERNSEC
85809+#define MODULE_GRSEC "GRSEC "
85810+#else
85811+#define MODULE_GRSEC ""
85812+#endif
85813+
85814 #define VERMAGIC_STRING \
85815 UTS_RELEASE " " \
85816 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
85817 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
85818- MODULE_ARCH_VERMAGIC
85819+ MODULE_ARCH_VERMAGIC \
85820+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
85821+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
85822
85823diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
85824index b483abd..af305ad 100644
85825--- a/include/linux/vga_switcheroo.h
85826+++ b/include/linux/vga_switcheroo.h
85827@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
85828
85829 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
85830
85831-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
85832+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
85833 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
85834-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
85835+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
85836 #else
85837
85838 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
85839@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
85840
85841 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
85842
85843-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85844+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85845 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
85846-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85847+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85848
85849 #endif
85850 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
85851diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
85852index b87696f..1d11de7 100644
85853--- a/include/linux/vmalloc.h
85854+++ b/include/linux/vmalloc.h
85855@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
85856 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
85857 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
85858 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
85859+
85860+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85861+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
85862+#endif
85863+
85864 /* bits [20..32] reserved for arch specific ioremap internals */
85865
85866 /*
85867@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
85868 unsigned long flags, pgprot_t prot);
85869 extern void vunmap(const void *addr);
85870
85871+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85872+extern void unmap_process_stacks(struct task_struct *task);
85873+#endif
85874+
85875 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
85876 unsigned long uaddr, void *kaddr,
85877 unsigned long size);
85878@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
85879
85880 /* for /dev/kmem */
85881 extern long vread(char *buf, char *addr, unsigned long count);
85882-extern long vwrite(char *buf, char *addr, unsigned long count);
85883+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
85884
85885 /*
85886 * Internals. Dont't use..
85887diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
85888index 82e7db7..f8ce3d0 100644
85889--- a/include/linux/vmstat.h
85890+++ b/include/linux/vmstat.h
85891@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
85892 /*
85893 * Zone based page accounting with per cpu differentials.
85894 */
85895-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85896+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85897
85898 static inline void zone_page_state_add(long x, struct zone *zone,
85899 enum zone_stat_item item)
85900 {
85901- atomic_long_add(x, &zone->vm_stat[item]);
85902- atomic_long_add(x, &vm_stat[item]);
85903+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
85904+ atomic_long_add_unchecked(x, &vm_stat[item]);
85905 }
85906
85907-static inline unsigned long global_page_state(enum zone_stat_item item)
85908+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
85909 {
85910- long x = atomic_long_read(&vm_stat[item]);
85911+ long x = atomic_long_read_unchecked(&vm_stat[item]);
85912 #ifdef CONFIG_SMP
85913 if (x < 0)
85914 x = 0;
85915@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
85916 return x;
85917 }
85918
85919-static inline unsigned long zone_page_state(struct zone *zone,
85920+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
85921 enum zone_stat_item item)
85922 {
85923- long x = atomic_long_read(&zone->vm_stat[item]);
85924+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
85925 #ifdef CONFIG_SMP
85926 if (x < 0)
85927 x = 0;
85928@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
85929 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
85930 enum zone_stat_item item)
85931 {
85932- long x = atomic_long_read(&zone->vm_stat[item]);
85933+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
85934
85935 #ifdef CONFIG_SMP
85936 int cpu;
85937@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
85938
85939 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
85940 {
85941- atomic_long_inc(&zone->vm_stat[item]);
85942- atomic_long_inc(&vm_stat[item]);
85943+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
85944+ atomic_long_inc_unchecked(&vm_stat[item]);
85945 }
85946
85947 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
85948 {
85949- atomic_long_dec(&zone->vm_stat[item]);
85950- atomic_long_dec(&vm_stat[item]);
85951+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
85952+ atomic_long_dec_unchecked(&vm_stat[item]);
85953 }
85954
85955 static inline void __inc_zone_page_state(struct page *page,
85956diff --git a/include/linux/xattr.h b/include/linux/xattr.h
85957index 91b0a68..0e9adf6 100644
85958--- a/include/linux/xattr.h
85959+++ b/include/linux/xattr.h
85960@@ -28,7 +28,7 @@ struct xattr_handler {
85961 size_t size, int handler_flags);
85962 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
85963 size_t size, int flags, int handler_flags);
85964-};
85965+} __do_const;
85966
85967 struct xattr {
85968 const char *name;
85969@@ -37,6 +37,9 @@ struct xattr {
85970 };
85971
85972 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
85973+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
85974+ssize_t pax_getxattr(struct dentry *, void *, size_t);
85975+#endif
85976 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
85977 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
85978 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
85979diff --git a/include/linux/zlib.h b/include/linux/zlib.h
85980index 92dbbd3..13ab0b3 100644
85981--- a/include/linux/zlib.h
85982+++ b/include/linux/zlib.h
85983@@ -31,6 +31,7 @@
85984 #define _ZLIB_H
85985
85986 #include <linux/zconf.h>
85987+#include <linux/compiler.h>
85988
85989 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
85990 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
85991@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
85992
85993 /* basic functions */
85994
85995-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
85996+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
85997 /*
85998 Returns the number of bytes that needs to be allocated for a per-
85999 stream workspace with the specified parameters. A pointer to this
86000diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86001index eb76cfd..9fd0e7c 100644
86002--- a/include/media/v4l2-dev.h
86003+++ b/include/media/v4l2-dev.h
86004@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86005 int (*mmap) (struct file *, struct vm_area_struct *);
86006 int (*open) (struct file *);
86007 int (*release) (struct file *);
86008-};
86009+} __do_const;
86010
86011 /*
86012 * Newer version of video_device, handled by videodev2.c
86013diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86014index ffb69da..040393e 100644
86015--- a/include/media/v4l2-device.h
86016+++ b/include/media/v4l2-device.h
86017@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86018 this function returns 0. If the name ends with a digit (e.g. cx18),
86019 then the name will be set to cx18-0 since cx180 looks really odd. */
86020 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86021- atomic_t *instance);
86022+ atomic_unchecked_t *instance);
86023
86024 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86025 Since the parent disappears this ensures that v4l2_dev doesn't have an
86026diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86027index 2a25dec..bf6dd8a 100644
86028--- a/include/net/9p/transport.h
86029+++ b/include/net/9p/transport.h
86030@@ -62,7 +62,7 @@ struct p9_trans_module {
86031 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86032 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86033 char *, char *, int , int, int, int);
86034-};
86035+} __do_const;
86036
86037 void v9fs_register_trans(struct p9_trans_module *m);
86038 void v9fs_unregister_trans(struct p9_trans_module *m);
86039diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86040index a175ba4..196eb8242 100644
86041--- a/include/net/af_unix.h
86042+++ b/include/net/af_unix.h
86043@@ -36,7 +36,7 @@ struct unix_skb_parms {
86044 u32 secid; /* Security ID */
86045 #endif
86046 u32 consumed;
86047-};
86048+} __randomize_layout;
86049
86050 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86051 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86052diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86053index d1bb342..e12f7d2 100644
86054--- a/include/net/bluetooth/l2cap.h
86055+++ b/include/net/bluetooth/l2cap.h
86056@@ -608,7 +608,7 @@ struct l2cap_ops {
86057 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86058 unsigned long hdr_len,
86059 unsigned long len, int nb);
86060-};
86061+} __do_const;
86062
86063 struct l2cap_conn {
86064 struct hci_conn *hcon;
86065diff --git a/include/net/bonding.h b/include/net/bonding.h
86066index 983a94b..7aa9b16 100644
86067--- a/include/net/bonding.h
86068+++ b/include/net/bonding.h
86069@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
86070
86071 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86072 {
86073- atomic_long_inc(&dev->tx_dropped);
86074+ atomic_long_inc_unchecked(&dev->tx_dropped);
86075 dev_kfree_skb_any(skb);
86076 }
86077
86078diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86079index f2ae33d..c457cf0 100644
86080--- a/include/net/caif/cfctrl.h
86081+++ b/include/net/caif/cfctrl.h
86082@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86083 void (*radioset_rsp)(void);
86084 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86085 struct cflayer *client_layer);
86086-};
86087+} __no_const;
86088
86089 /* Link Setup Parameters for CAIF-Links. */
86090 struct cfctrl_link_param {
86091@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86092 struct cfctrl {
86093 struct cfsrvl serv;
86094 struct cfctrl_rsp res;
86095- atomic_t req_seq_no;
86096- atomic_t rsp_seq_no;
86097+ atomic_unchecked_t req_seq_no;
86098+ atomic_unchecked_t rsp_seq_no;
86099 struct list_head list;
86100 /* Protects from simultaneous access to first_req list */
86101 spinlock_t info_list_lock;
86102diff --git a/include/net/flow.h b/include/net/flow.h
86103index 8109a15..504466d 100644
86104--- a/include/net/flow.h
86105+++ b/include/net/flow.h
86106@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86107
86108 void flow_cache_flush(struct net *net);
86109 void flow_cache_flush_deferred(struct net *net);
86110-extern atomic_t flow_cache_genid;
86111+extern atomic_unchecked_t flow_cache_genid;
86112
86113 #endif
86114diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86115index 6c92415..3a352d8 100644
86116--- a/include/net/genetlink.h
86117+++ b/include/net/genetlink.h
86118@@ -130,7 +130,7 @@ struct genl_ops {
86119 u8 cmd;
86120 u8 internal_flags;
86121 u8 flags;
86122-};
86123+} __do_const;
86124
86125 int __genl_register_family(struct genl_family *family);
86126
86127diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86128index 734d9b5..48a9a4b 100644
86129--- a/include/net/gro_cells.h
86130+++ b/include/net/gro_cells.h
86131@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86132 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86133
86134 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86135- atomic_long_inc(&dev->rx_dropped);
86136+ atomic_long_inc_unchecked(&dev->rx_dropped);
86137 kfree_skb(skb);
86138 return;
86139 }
86140diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86141index 848e85c..051c7de 100644
86142--- a/include/net/inet_connection_sock.h
86143+++ b/include/net/inet_connection_sock.h
86144@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86145 int (*bind_conflict)(const struct sock *sk,
86146 const struct inet_bind_bucket *tb, bool relax);
86147 void (*mtu_reduced)(struct sock *sk);
86148-};
86149+} __do_const;
86150
86151 /** inet_connection_sock - INET connection oriented sock
86152 *
86153diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86154index 80479ab..0c3f647 100644
86155--- a/include/net/inetpeer.h
86156+++ b/include/net/inetpeer.h
86157@@ -47,7 +47,7 @@ struct inet_peer {
86158 */
86159 union {
86160 struct {
86161- atomic_t rid; /* Frag reception counter */
86162+ atomic_unchecked_t rid; /* Frag reception counter */
86163 };
86164 struct rcu_head rcu;
86165 struct inet_peer *gc_next;
86166diff --git a/include/net/ip.h b/include/net/ip.h
86167index 09cf5ae..ab62fcf 100644
86168--- a/include/net/ip.h
86169+++ b/include/net/ip.h
86170@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86171 }
86172 }
86173
86174-u32 ip_idents_reserve(u32 hash, int segs);
86175+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86176 void __ip_select_ident(struct iphdr *iph, int segs);
86177
86178 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86179diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86180index 09a819e..3ab9e14 100644
86181--- a/include/net/ip_fib.h
86182+++ b/include/net/ip_fib.h
86183@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86184
86185 #define FIB_RES_SADDR(net, res) \
86186 ((FIB_RES_NH(res).nh_saddr_genid == \
86187- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86188+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86189 FIB_RES_NH(res).nh_saddr : \
86190 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86191 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86192diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86193index 615b20b..fd4cbd8 100644
86194--- a/include/net/ip_vs.h
86195+++ b/include/net/ip_vs.h
86196@@ -534,7 +534,7 @@ struct ip_vs_conn {
86197 struct ip_vs_conn *control; /* Master control connection */
86198 atomic_t n_control; /* Number of controlled ones */
86199 struct ip_vs_dest *dest; /* real server */
86200- atomic_t in_pkts; /* incoming packet counter */
86201+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86202
86203 /* Packet transmitter for different forwarding methods. If it
86204 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86205@@ -682,7 +682,7 @@ struct ip_vs_dest {
86206 __be16 port; /* port number of the server */
86207 union nf_inet_addr addr; /* IP address of the server */
86208 volatile unsigned int flags; /* dest status flags */
86209- atomic_t conn_flags; /* flags to copy to conn */
86210+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86211 atomic_t weight; /* server weight */
86212
86213 atomic_t refcnt; /* reference counter */
86214@@ -928,11 +928,11 @@ struct netns_ipvs {
86215 /* ip_vs_lblc */
86216 int sysctl_lblc_expiration;
86217 struct ctl_table_header *lblc_ctl_header;
86218- struct ctl_table *lblc_ctl_table;
86219+ ctl_table_no_const *lblc_ctl_table;
86220 /* ip_vs_lblcr */
86221 int sysctl_lblcr_expiration;
86222 struct ctl_table_header *lblcr_ctl_header;
86223- struct ctl_table *lblcr_ctl_table;
86224+ ctl_table_no_const *lblcr_ctl_table;
86225 /* ip_vs_est */
86226 struct list_head est_list; /* estimator list */
86227 spinlock_t est_lock;
86228diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86229index 8d4f588..2e37ad2 100644
86230--- a/include/net/irda/ircomm_tty.h
86231+++ b/include/net/irda/ircomm_tty.h
86232@@ -33,6 +33,7 @@
86233 #include <linux/termios.h>
86234 #include <linux/timer.h>
86235 #include <linux/tty.h> /* struct tty_struct */
86236+#include <asm/local.h>
86237
86238 #include <net/irda/irias_object.h>
86239 #include <net/irda/ircomm_core.h>
86240diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86241index 714cc9a..ea05f3e 100644
86242--- a/include/net/iucv/af_iucv.h
86243+++ b/include/net/iucv/af_iucv.h
86244@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86245 struct iucv_sock_list {
86246 struct hlist_head head;
86247 rwlock_t lock;
86248- atomic_t autobind_name;
86249+ atomic_unchecked_t autobind_name;
86250 };
86251
86252 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86253diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86254index f3be818..bf46196 100644
86255--- a/include/net/llc_c_ac.h
86256+++ b/include/net/llc_c_ac.h
86257@@ -87,7 +87,7 @@
86258 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86259 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86260
86261-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86262+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86263
86264 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86265 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86266diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86267index 3948cf1..83b28c4 100644
86268--- a/include/net/llc_c_ev.h
86269+++ b/include/net/llc_c_ev.h
86270@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86271 return (struct llc_conn_state_ev *)skb->cb;
86272 }
86273
86274-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86275-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86276+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86277+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86278
86279 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86280 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86281diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86282index 48f3f89..0e92c50 100644
86283--- a/include/net/llc_c_st.h
86284+++ b/include/net/llc_c_st.h
86285@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86286 u8 next_state;
86287 const llc_conn_ev_qfyr_t *ev_qualifiers;
86288 const llc_conn_action_t *ev_actions;
86289-};
86290+} __do_const;
86291
86292 struct llc_conn_state {
86293 u8 current_state;
86294diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86295index a61b98c..aade1eb 100644
86296--- a/include/net/llc_s_ac.h
86297+++ b/include/net/llc_s_ac.h
86298@@ -23,7 +23,7 @@
86299 #define SAP_ACT_TEST_IND 9
86300
86301 /* All action functions must look like this */
86302-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86303+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86304
86305 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86306 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86307diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86308index c4359e2..76dbc4a 100644
86309--- a/include/net/llc_s_st.h
86310+++ b/include/net/llc_s_st.h
86311@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86312 llc_sap_ev_t ev;
86313 u8 next_state;
86314 const llc_sap_action_t *ev_actions;
86315-};
86316+} __do_const;
86317
86318 struct llc_sap_state {
86319 u8 curr_state;
86320diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86321index 29c7be8..746bd73 100644
86322--- a/include/net/mac80211.h
86323+++ b/include/net/mac80211.h
86324@@ -4869,7 +4869,7 @@ struct rate_control_ops {
86325 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86326
86327 u32 (*get_expected_throughput)(void *priv_sta);
86328-};
86329+} __do_const;
86330
86331 static inline int rate_supported(struct ieee80211_sta *sta,
86332 enum ieee80211_band band,
86333diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86334index 76f7084..8f36e39 100644
86335--- a/include/net/neighbour.h
86336+++ b/include/net/neighbour.h
86337@@ -163,7 +163,7 @@ struct neigh_ops {
86338 void (*error_report)(struct neighbour *, struct sk_buff *);
86339 int (*output)(struct neighbour *, struct sk_buff *);
86340 int (*connected_output)(struct neighbour *, struct sk_buff *);
86341-};
86342+} __do_const;
86343
86344 struct pneigh_entry {
86345 struct pneigh_entry *next;
86346@@ -217,7 +217,7 @@ struct neigh_table {
86347 struct neigh_statistics __percpu *stats;
86348 struct neigh_hash_table __rcu *nht;
86349 struct pneigh_entry **phash_buckets;
86350-};
86351+} __randomize_layout;
86352
86353 enum {
86354 NEIGH_ARP_TABLE = 0,
86355diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86356index 2e8756b8..0bd0083 100644
86357--- a/include/net/net_namespace.h
86358+++ b/include/net/net_namespace.h
86359@@ -130,8 +130,8 @@ struct net {
86360 struct netns_ipvs *ipvs;
86361 #endif
86362 struct sock *diag_nlsk;
86363- atomic_t fnhe_genid;
86364-};
86365+ atomic_unchecked_t fnhe_genid;
86366+} __randomize_layout;
86367
86368 #include <linux/seq_file_net.h>
86369
86370@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86371 #define __net_init __init
86372 #define __net_exit __exit_refok
86373 #define __net_initdata __initdata
86374+#ifdef CONSTIFY_PLUGIN
86375 #define __net_initconst __initconst
86376+#else
86377+#define __net_initconst __initdata
86378+#endif
86379 #endif
86380
86381 struct pernet_operations {
86382@@ -297,7 +301,7 @@ struct pernet_operations {
86383 void (*exit_batch)(struct list_head *net_exit_list);
86384 int *id;
86385 size_t size;
86386-};
86387+} __do_const;
86388
86389 /*
86390 * Use these carefully. If you implement a network device and it
86391@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86392
86393 static inline int rt_genid_ipv4(struct net *net)
86394 {
86395- return atomic_read(&net->ipv4.rt_genid);
86396+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86397 }
86398
86399 static inline void rt_genid_bump_ipv4(struct net *net)
86400 {
86401- atomic_inc(&net->ipv4.rt_genid);
86402+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86403 }
86404
86405 extern void (*__fib6_flush_trees)(struct net *net);
86406@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
86407
86408 static inline int fnhe_genid(struct net *net)
86409 {
86410- return atomic_read(&net->fnhe_genid);
86411+ return atomic_read_unchecked(&net->fnhe_genid);
86412 }
86413
86414 static inline void fnhe_genid_bump(struct net *net)
86415 {
86416- atomic_inc(&net->fnhe_genid);
86417+ atomic_inc_unchecked(&net->fnhe_genid);
86418 }
86419
86420 #endif /* __NET_NET_NAMESPACE_H */
86421diff --git a/include/net/netlink.h b/include/net/netlink.h
86422index 6415835..ab96d87 100644
86423--- a/include/net/netlink.h
86424+++ b/include/net/netlink.h
86425@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86426 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86427 {
86428 if (mark)
86429- skb_trim(skb, (unsigned char *) mark - skb->data);
86430+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86431 }
86432
86433 /**
86434diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86435index 29d6a94..235d3d84 100644
86436--- a/include/net/netns/conntrack.h
86437+++ b/include/net/netns/conntrack.h
86438@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86439 struct nf_proto_net {
86440 #ifdef CONFIG_SYSCTL
86441 struct ctl_table_header *ctl_table_header;
86442- struct ctl_table *ctl_table;
86443+ ctl_table_no_const *ctl_table;
86444 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86445 struct ctl_table_header *ctl_compat_header;
86446- struct ctl_table *ctl_compat_table;
86447+ ctl_table_no_const *ctl_compat_table;
86448 #endif
86449 #endif
86450 unsigned int users;
86451@@ -60,7 +60,7 @@ struct nf_ip_net {
86452 struct nf_icmp_net icmpv6;
86453 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86454 struct ctl_table_header *ctl_table_header;
86455- struct ctl_table *ctl_table;
86456+ ctl_table_no_const *ctl_table;
86457 #endif
86458 };
86459
86460diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86461index 0ffef1a..2ce1ceb 100644
86462--- a/include/net/netns/ipv4.h
86463+++ b/include/net/netns/ipv4.h
86464@@ -84,7 +84,7 @@ struct netns_ipv4 {
86465
86466 struct ping_group_range ping_group_range;
86467
86468- atomic_t dev_addr_genid;
86469+ atomic_unchecked_t dev_addr_genid;
86470
86471 #ifdef CONFIG_SYSCTL
86472 unsigned long *sysctl_local_reserved_ports;
86473@@ -98,6 +98,6 @@ struct netns_ipv4 {
86474 struct fib_rules_ops *mr_rules_ops;
86475 #endif
86476 #endif
86477- atomic_t rt_genid;
86478+ atomic_unchecked_t rt_genid;
86479 };
86480 #endif
86481diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86482index 69ae41f..4f94868 100644
86483--- a/include/net/netns/ipv6.h
86484+++ b/include/net/netns/ipv6.h
86485@@ -75,8 +75,8 @@ struct netns_ipv6 {
86486 struct fib_rules_ops *mr6_rules_ops;
86487 #endif
86488 #endif
86489- atomic_t dev_addr_genid;
86490- atomic_t fib6_sernum;
86491+ atomic_unchecked_t dev_addr_genid;
86492+ atomic_unchecked_t fib6_sernum;
86493 };
86494
86495 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86496diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86497index 730d82a..045f2c4 100644
86498--- a/include/net/netns/xfrm.h
86499+++ b/include/net/netns/xfrm.h
86500@@ -78,7 +78,7 @@ struct netns_xfrm {
86501
86502 /* flow cache part */
86503 struct flow_cache flow_cache_global;
86504- atomic_t flow_cache_genid;
86505+ atomic_unchecked_t flow_cache_genid;
86506 struct list_head flow_cache_gc_list;
86507 spinlock_t flow_cache_gc_lock;
86508 struct work_struct flow_cache_gc_work;
86509diff --git a/include/net/ping.h b/include/net/ping.h
86510index f074060..830fba0 100644
86511--- a/include/net/ping.h
86512+++ b/include/net/ping.h
86513@@ -54,7 +54,7 @@ struct ping_iter_state {
86514
86515 extern struct proto ping_prot;
86516 #if IS_ENABLED(CONFIG_IPV6)
86517-extern struct pingv6_ops pingv6_ops;
86518+extern struct pingv6_ops *pingv6_ops;
86519 #endif
86520
86521 struct pingfakehdr {
86522diff --git a/include/net/protocol.h b/include/net/protocol.h
86523index d6fcc1f..ca277058 100644
86524--- a/include/net/protocol.h
86525+++ b/include/net/protocol.h
86526@@ -49,7 +49,7 @@ struct net_protocol {
86527 * socket lookup?
86528 */
86529 icmp_strict_tag_validation:1;
86530-};
86531+} __do_const;
86532
86533 #if IS_ENABLED(CONFIG_IPV6)
86534 struct inet6_protocol {
86535@@ -62,7 +62,7 @@ struct inet6_protocol {
86536 u8 type, u8 code, int offset,
86537 __be32 info);
86538 unsigned int flags; /* INET6_PROTO_xxx */
86539-};
86540+} __do_const;
86541
86542 #define INET6_PROTO_NOPOLICY 0x1
86543 #define INET6_PROTO_FINAL 0x2
86544diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86545index e21b9f9..0191ef0 100644
86546--- a/include/net/rtnetlink.h
86547+++ b/include/net/rtnetlink.h
86548@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86549 int (*fill_slave_info)(struct sk_buff *skb,
86550 const struct net_device *dev,
86551 const struct net_device *slave_dev);
86552-};
86553+} __do_const;
86554
86555 int __rtnl_link_register(struct rtnl_link_ops *ops);
86556 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86557diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86558index 4a5b9a3..ca27d73 100644
86559--- a/include/net/sctp/checksum.h
86560+++ b/include/net/sctp/checksum.h
86561@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86562 unsigned int offset)
86563 {
86564 struct sctphdr *sh = sctp_hdr(skb);
86565- __le32 ret, old = sh->checksum;
86566- const struct skb_checksum_ops ops = {
86567+ __le32 ret, old = sh->checksum;
86568+ static const struct skb_checksum_ops ops = {
86569 .update = sctp_csum_update,
86570 .combine = sctp_csum_combine,
86571 };
86572diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86573index 487ef34..d457f98 100644
86574--- a/include/net/sctp/sm.h
86575+++ b/include/net/sctp/sm.h
86576@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86577 typedef struct {
86578 sctp_state_fn_t *fn;
86579 const char *name;
86580-} sctp_sm_table_entry_t;
86581+} __do_const sctp_sm_table_entry_t;
86582
86583 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86584 * currently in use.
86585@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86586 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86587
86588 /* Extern declarations for major data structures. */
86589-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86590+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86591
86592
86593 /* Get the size of a DATA chunk payload. */
86594diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86595index 2bb2fcf..d17c291 100644
86596--- a/include/net/sctp/structs.h
86597+++ b/include/net/sctp/structs.h
86598@@ -509,7 +509,7 @@ struct sctp_pf {
86599 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86600 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86601 struct sctp_af *af;
86602-};
86603+} __do_const;
86604
86605
86606 /* Structure to track chunk fragments that have been acked, but peer
86607diff --git a/include/net/sock.h b/include/net/sock.h
86608index 2210fec..2249ad0 100644
86609--- a/include/net/sock.h
86610+++ b/include/net/sock.h
86611@@ -362,7 +362,7 @@ struct sock {
86612 unsigned int sk_napi_id;
86613 unsigned int sk_ll_usec;
86614 #endif
86615- atomic_t sk_drops;
86616+ atomic_unchecked_t sk_drops;
86617 int sk_rcvbuf;
86618
86619 struct sk_filter __rcu *sk_filter;
86620@@ -1061,7 +1061,7 @@ struct proto {
86621 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86622 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86623 #endif
86624-};
86625+} __randomize_layout;
86626
86627 /*
86628 * Bits in struct cg_proto.flags
86629@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
86630 page_counter_uncharge(&prot->memory_allocated, amt);
86631 }
86632
86633-static inline long
86634+static inline long __intentional_overflow(-1)
86635 sk_memory_allocated(const struct sock *sk)
86636 {
86637 struct proto *prot = sk->sk_prot;
86638@@ -1385,7 +1385,7 @@ struct sock_iocb {
86639 struct scm_cookie *scm;
86640 struct msghdr *msg, async_msg;
86641 struct kiocb *kiocb;
86642-};
86643+} __randomize_layout;
86644
86645 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86646 {
86647@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86648 }
86649
86650 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86651- char __user *from, char *to,
86652+ char __user *from, unsigned char *to,
86653 int copy, int offset)
86654 {
86655 if (skb->ip_summed == CHECKSUM_NONE) {
86656@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86657 }
86658 }
86659
86660-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86661+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86662
86663 /**
86664 * sk_page_frag - return an appropriate page_frag
86665diff --git a/include/net/tcp.h b/include/net/tcp.h
86666index 9d9111e..349c847 100644
86667--- a/include/net/tcp.h
86668+++ b/include/net/tcp.h
86669@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
86670 void tcp_xmit_retransmit_queue(struct sock *);
86671 void tcp_simple_retransmit(struct sock *);
86672 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86673-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86674+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86675
86676 void tcp_send_probe0(struct sock *);
86677 void tcp_send_partial(struct sock *);
86678@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
86679 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
86680 */
86681 struct tcp_skb_cb {
86682- __u32 seq; /* Starting sequence number */
86683- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86684+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86685+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86686 union {
86687 /* Note : tcp_tw_isn is used in input path only
86688 * (isn chosen by tcp_timewait_state_process())
86689@@ -715,7 +715,7 @@ struct tcp_skb_cb {
86690
86691 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86692 /* 1 byte hole */
86693- __u32 ack_seq; /* Sequence number ACK'd */
86694+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86695 union {
86696 struct inet_skb_parm h4;
86697 #if IS_ENABLED(CONFIG_IPV6)
86698diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86699index dc4865e..152ee4c 100644
86700--- a/include/net/xfrm.h
86701+++ b/include/net/xfrm.h
86702@@ -285,7 +285,6 @@ struct xfrm_dst;
86703 struct xfrm_policy_afinfo {
86704 unsigned short family;
86705 struct dst_ops *dst_ops;
86706- void (*garbage_collect)(struct net *net);
86707 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86708 const xfrm_address_t *saddr,
86709 const xfrm_address_t *daddr);
86710@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86711 struct net_device *dev,
86712 const struct flowi *fl);
86713 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86714-};
86715+} __do_const;
86716
86717 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86718 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86719@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86720 int (*transport_finish)(struct sk_buff *skb,
86721 int async);
86722 void (*local_error)(struct sk_buff *skb, u32 mtu);
86723-};
86724+} __do_const;
86725
86726 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86727 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86728@@ -437,7 +436,7 @@ struct xfrm_mode {
86729 struct module *owner;
86730 unsigned int encap;
86731 int flags;
86732-};
86733+} __do_const;
86734
86735 /* Flags for xfrm_mode. */
86736 enum {
86737@@ -534,7 +533,7 @@ struct xfrm_policy {
86738 struct timer_list timer;
86739
86740 struct flow_cache_object flo;
86741- atomic_t genid;
86742+ atomic_unchecked_t genid;
86743 u32 priority;
86744 u32 index;
86745 struct xfrm_mark mark;
86746@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86747 }
86748
86749 void xfrm_garbage_collect(struct net *net);
86750+void xfrm_garbage_collect_deferred(struct net *net);
86751
86752 #else
86753
86754@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86755 static inline void xfrm_garbage_collect(struct net *net)
86756 {
86757 }
86758+static inline void xfrm_garbage_collect_deferred(struct net *net)
86759+{
86760+}
86761 #endif
86762
86763 static __inline__
86764diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86765index 1017e0b..227aa4d 100644
86766--- a/include/rdma/iw_cm.h
86767+++ b/include/rdma/iw_cm.h
86768@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86769 int backlog);
86770
86771 int (*destroy_listen)(struct iw_cm_id *cm_id);
86772-};
86773+} __no_const;
86774
86775 /**
86776 * iw_create_cm_id - Create an IW CM identifier.
86777diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86778index 93d14da..734b3d8 100644
86779--- a/include/scsi/libfc.h
86780+++ b/include/scsi/libfc.h
86781@@ -771,6 +771,7 @@ struct libfc_function_template {
86782 */
86783 void (*disc_stop_final) (struct fc_lport *);
86784 };
86785+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86786
86787 /**
86788 * struct fc_disc - Discovery context
86789@@ -875,7 +876,7 @@ struct fc_lport {
86790 struct fc_vport *vport;
86791
86792 /* Operational Information */
86793- struct libfc_function_template tt;
86794+ libfc_function_template_no_const tt;
86795 u8 link_up;
86796 u8 qfull;
86797 enum fc_lport_state state;
86798diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
86799index 3a4edd1..feb2e3e 100644
86800--- a/include/scsi/scsi_device.h
86801+++ b/include/scsi/scsi_device.h
86802@@ -185,9 +185,9 @@ struct scsi_device {
86803 unsigned int max_device_blocked; /* what device_blocked counts down from */
86804 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
86805
86806- atomic_t iorequest_cnt;
86807- atomic_t iodone_cnt;
86808- atomic_t ioerr_cnt;
86809+ atomic_unchecked_t iorequest_cnt;
86810+ atomic_unchecked_t iodone_cnt;
86811+ atomic_unchecked_t ioerr_cnt;
86812
86813 struct device sdev_gendev,
86814 sdev_dev;
86815diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
86816index 007a0bc..7188db8 100644
86817--- a/include/scsi/scsi_transport_fc.h
86818+++ b/include/scsi/scsi_transport_fc.h
86819@@ -756,7 +756,8 @@ struct fc_function_template {
86820 unsigned long show_host_system_hostname:1;
86821
86822 unsigned long disable_target_scan:1;
86823-};
86824+} __do_const;
86825+typedef struct fc_function_template __no_const fc_function_template_no_const;
86826
86827
86828 /**
86829diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
86830index 396e8f7..b037e89 100644
86831--- a/include/sound/compress_driver.h
86832+++ b/include/sound/compress_driver.h
86833@@ -129,7 +129,7 @@ struct snd_compr_ops {
86834 struct snd_compr_caps *caps);
86835 int (*get_codec_caps) (struct snd_compr_stream *stream,
86836 struct snd_compr_codec_caps *codec);
86837-};
86838+} __no_const;
86839
86840 /**
86841 * struct snd_compr: Compressed device
86842diff --git a/include/sound/soc.h b/include/sound/soc.h
86843index ac8b333..59c3692 100644
86844--- a/include/sound/soc.h
86845+++ b/include/sound/soc.h
86846@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
86847 enum snd_soc_dapm_type, int);
86848
86849 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
86850-};
86851+} __do_const;
86852
86853 /* SoC platform interface */
86854 struct snd_soc_platform_driver {
86855@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
86856 const struct snd_compr_ops *compr_ops;
86857
86858 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
86859-};
86860+} __do_const;
86861
86862 struct snd_soc_dai_link_component {
86863 const char *name;
86864diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
86865index 672150b..9d4bec4 100644
86866--- a/include/target/target_core_base.h
86867+++ b/include/target/target_core_base.h
86868@@ -767,7 +767,7 @@ struct se_device {
86869 atomic_long_t write_bytes;
86870 /* Active commands on this virtual SE device */
86871 atomic_t simple_cmds;
86872- atomic_t dev_ordered_id;
86873+ atomic_unchecked_t dev_ordered_id;
86874 atomic_t dev_ordered_sync;
86875 atomic_t dev_qf_count;
86876 int export_count;
86877diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
86878new file mode 100644
86879index 0000000..fb634b7
86880--- /dev/null
86881+++ b/include/trace/events/fs.h
86882@@ -0,0 +1,53 @@
86883+#undef TRACE_SYSTEM
86884+#define TRACE_SYSTEM fs
86885+
86886+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
86887+#define _TRACE_FS_H
86888+
86889+#include <linux/fs.h>
86890+#include <linux/tracepoint.h>
86891+
86892+TRACE_EVENT(do_sys_open,
86893+
86894+ TP_PROTO(const char *filename, int flags, int mode),
86895+
86896+ TP_ARGS(filename, flags, mode),
86897+
86898+ TP_STRUCT__entry(
86899+ __string( filename, filename )
86900+ __field( int, flags )
86901+ __field( int, mode )
86902+ ),
86903+
86904+ TP_fast_assign(
86905+ __assign_str(filename, filename);
86906+ __entry->flags = flags;
86907+ __entry->mode = mode;
86908+ ),
86909+
86910+ TP_printk("\"%s\" %x %o",
86911+ __get_str(filename), __entry->flags, __entry->mode)
86912+);
86913+
86914+TRACE_EVENT(open_exec,
86915+
86916+ TP_PROTO(const char *filename),
86917+
86918+ TP_ARGS(filename),
86919+
86920+ TP_STRUCT__entry(
86921+ __string( filename, filename )
86922+ ),
86923+
86924+ TP_fast_assign(
86925+ __assign_str(filename, filename);
86926+ ),
86927+
86928+ TP_printk("\"%s\"",
86929+ __get_str(filename))
86930+);
86931+
86932+#endif /* _TRACE_FS_H */
86933+
86934+/* This part must be outside protection */
86935+#include <trace/define_trace.h>
86936diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
86937index 3608beb..df39d8a 100644
86938--- a/include/trace/events/irq.h
86939+++ b/include/trace/events/irq.h
86940@@ -36,7 +36,7 @@ struct softirq_action;
86941 */
86942 TRACE_EVENT(irq_handler_entry,
86943
86944- TP_PROTO(int irq, struct irqaction *action),
86945+ TP_PROTO(int irq, const struct irqaction *action),
86946
86947 TP_ARGS(irq, action),
86948
86949@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
86950 */
86951 TRACE_EVENT(irq_handler_exit,
86952
86953- TP_PROTO(int irq, struct irqaction *action, int ret),
86954+ TP_PROTO(int irq, const struct irqaction *action, int ret),
86955
86956 TP_ARGS(irq, action, ret),
86957
86958diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
86959index 7caf44c..23c6f27 100644
86960--- a/include/uapi/linux/a.out.h
86961+++ b/include/uapi/linux/a.out.h
86962@@ -39,6 +39,14 @@ enum machine_type {
86963 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
86964 };
86965
86966+/* Constants for the N_FLAGS field */
86967+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
86968+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
86969+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
86970+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
86971+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
86972+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
86973+
86974 #if !defined (N_MAGIC)
86975 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
86976 #endif
86977diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
86978index 22b6ad3..aeba37e 100644
86979--- a/include/uapi/linux/bcache.h
86980+++ b/include/uapi/linux/bcache.h
86981@@ -5,6 +5,7 @@
86982 * Bcache on disk data structures
86983 */
86984
86985+#include <linux/compiler.h>
86986 #include <asm/types.h>
86987
86988 #define BITMASK(name, type, field, offset, size) \
86989@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
86990 /* Btree keys - all units are in sectors */
86991
86992 struct bkey {
86993- __u64 high;
86994- __u64 low;
86995+ __u64 high __intentional_overflow(-1);
86996+ __u64 low __intentional_overflow(-1);
86997 __u64 ptr[];
86998 };
86999
87000diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87001index d876736..ccce5c0 100644
87002--- a/include/uapi/linux/byteorder/little_endian.h
87003+++ b/include/uapi/linux/byteorder/little_endian.h
87004@@ -42,51 +42,51 @@
87005
87006 static inline __le64 __cpu_to_le64p(const __u64 *p)
87007 {
87008- return (__force __le64)*p;
87009+ return (__force const __le64)*p;
87010 }
87011-static inline __u64 __le64_to_cpup(const __le64 *p)
87012+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87013 {
87014- return (__force __u64)*p;
87015+ return (__force const __u64)*p;
87016 }
87017 static inline __le32 __cpu_to_le32p(const __u32 *p)
87018 {
87019- return (__force __le32)*p;
87020+ return (__force const __le32)*p;
87021 }
87022 static inline __u32 __le32_to_cpup(const __le32 *p)
87023 {
87024- return (__force __u32)*p;
87025+ return (__force const __u32)*p;
87026 }
87027 static inline __le16 __cpu_to_le16p(const __u16 *p)
87028 {
87029- return (__force __le16)*p;
87030+ return (__force const __le16)*p;
87031 }
87032 static inline __u16 __le16_to_cpup(const __le16 *p)
87033 {
87034- return (__force __u16)*p;
87035+ return (__force const __u16)*p;
87036 }
87037 static inline __be64 __cpu_to_be64p(const __u64 *p)
87038 {
87039- return (__force __be64)__swab64p(p);
87040+ return (__force const __be64)__swab64p(p);
87041 }
87042 static inline __u64 __be64_to_cpup(const __be64 *p)
87043 {
87044- return __swab64p((__u64 *)p);
87045+ return __swab64p((const __u64 *)p);
87046 }
87047 static inline __be32 __cpu_to_be32p(const __u32 *p)
87048 {
87049- return (__force __be32)__swab32p(p);
87050+ return (__force const __be32)__swab32p(p);
87051 }
87052-static inline __u32 __be32_to_cpup(const __be32 *p)
87053+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87054 {
87055- return __swab32p((__u32 *)p);
87056+ return __swab32p((const __u32 *)p);
87057 }
87058 static inline __be16 __cpu_to_be16p(const __u16 *p)
87059 {
87060- return (__force __be16)__swab16p(p);
87061+ return (__force const __be16)__swab16p(p);
87062 }
87063 static inline __u16 __be16_to_cpup(const __be16 *p)
87064 {
87065- return __swab16p((__u16 *)p);
87066+ return __swab16p((const __u16 *)p);
87067 }
87068 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87069 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87070diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87071index 71e1d0e..6cc9caf 100644
87072--- a/include/uapi/linux/elf.h
87073+++ b/include/uapi/linux/elf.h
87074@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87075 #define PT_GNU_EH_FRAME 0x6474e550
87076
87077 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87078+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87079+
87080+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87081+
87082+/* Constants for the e_flags field */
87083+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87084+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87085+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87086+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87087+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87088+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87089
87090 /*
87091 * Extended Numbering
87092@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87093 #define DT_DEBUG 21
87094 #define DT_TEXTREL 22
87095 #define DT_JMPREL 23
87096+#define DT_FLAGS 30
87097+ #define DF_TEXTREL 0x00000004
87098 #define DT_ENCODING 32
87099 #define OLD_DT_LOOS 0x60000000
87100 #define DT_LOOS 0x6000000d
87101@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87102 #define PF_W 0x2
87103 #define PF_X 0x1
87104
87105+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87106+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87107+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87108+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87109+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87110+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87111+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87112+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87113+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87114+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87115+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87116+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87117+
87118 typedef struct elf32_phdr{
87119 Elf32_Word p_type;
87120 Elf32_Off p_offset;
87121@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87122 #define EI_OSABI 7
87123 #define EI_PAD 8
87124
87125+#define EI_PAX 14
87126+
87127 #define ELFMAG0 0x7f /* EI_MAG */
87128 #define ELFMAG1 'E'
87129 #define ELFMAG2 'L'
87130diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87131index aa169c4..6a2771d 100644
87132--- a/include/uapi/linux/personality.h
87133+++ b/include/uapi/linux/personality.h
87134@@ -30,6 +30,7 @@ enum {
87135 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87136 ADDR_NO_RANDOMIZE | \
87137 ADDR_COMPAT_LAYOUT | \
87138+ ADDR_LIMIT_3GB | \
87139 MMAP_PAGE_ZERO)
87140
87141 /*
87142diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87143index 7530e74..e714828 100644
87144--- a/include/uapi/linux/screen_info.h
87145+++ b/include/uapi/linux/screen_info.h
87146@@ -43,7 +43,8 @@ struct screen_info {
87147 __u16 pages; /* 0x32 */
87148 __u16 vesa_attributes; /* 0x34 */
87149 __u32 capabilities; /* 0x36 */
87150- __u8 _reserved[6]; /* 0x3a */
87151+ __u16 vesapm_size; /* 0x3a */
87152+ __u8 _reserved[4]; /* 0x3c */
87153 } __attribute__((packed));
87154
87155 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87156diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87157index 0e011eb..82681b1 100644
87158--- a/include/uapi/linux/swab.h
87159+++ b/include/uapi/linux/swab.h
87160@@ -43,7 +43,7 @@
87161 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87162 */
87163
87164-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87165+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87166 {
87167 #ifdef __HAVE_BUILTIN_BSWAP16__
87168 return __builtin_bswap16(val);
87169@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87170 #endif
87171 }
87172
87173-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87174+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87175 {
87176 #ifdef __HAVE_BUILTIN_BSWAP32__
87177 return __builtin_bswap32(val);
87178@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87179 #endif
87180 }
87181
87182-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87183+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87184 {
87185 #ifdef __HAVE_BUILTIN_BSWAP64__
87186 return __builtin_bswap64(val);
87187diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87188index 1590c49..5eab462 100644
87189--- a/include/uapi/linux/xattr.h
87190+++ b/include/uapi/linux/xattr.h
87191@@ -73,5 +73,9 @@
87192 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87193 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87194
87195+/* User namespace */
87196+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87197+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87198+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87199
87200 #endif /* _UAPI_LINUX_XATTR_H */
87201diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87202index f9466fa..f4e2b81 100644
87203--- a/include/video/udlfb.h
87204+++ b/include/video/udlfb.h
87205@@ -53,10 +53,10 @@ struct dlfb_data {
87206 u32 pseudo_palette[256];
87207 int blank_mode; /*one of FB_BLANK_ */
87208 /* blit-only rendering path metrics, exposed through sysfs */
87209- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87210- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87211- atomic_t bytes_sent; /* to usb, after compression including overhead */
87212- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87213+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87214+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87215+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87216+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87217 };
87218
87219 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87220diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87221index 30f5362..8ed8ac9 100644
87222--- a/include/video/uvesafb.h
87223+++ b/include/video/uvesafb.h
87224@@ -122,6 +122,7 @@ struct uvesafb_par {
87225 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87226 u8 pmi_setpal; /* PMI for palette changes */
87227 u16 *pmi_base; /* protected mode interface location */
87228+ u8 *pmi_code; /* protected mode code location */
87229 void *pmi_start;
87230 void *pmi_pal;
87231 u8 *vbe_state_orig; /*
87232diff --git a/init/Kconfig b/init/Kconfig
87233index 9afb971..27d6fca 100644
87234--- a/init/Kconfig
87235+++ b/init/Kconfig
87236@@ -1129,6 +1129,7 @@ endif # CGROUPS
87237
87238 config CHECKPOINT_RESTORE
87239 bool "Checkpoint/restore support" if EXPERT
87240+ depends on !GRKERNSEC
87241 default n
87242 help
87243 Enables additional kernel features in a sake of checkpoint/restore.
87244@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
87245
87246 config COMPAT_BRK
87247 bool "Disable heap randomization"
87248- default y
87249+ default n
87250 help
87251 Randomizing heap placement makes heap exploits harder, but it
87252 also breaks ancient binaries (including anything libc5 based).
87253@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
87254 config STOP_MACHINE
87255 bool
87256 default y
87257- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87258+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87259 help
87260 Need stop_machine() primitive.
87261
87262diff --git a/init/Makefile b/init/Makefile
87263index 7bc47ee..6da2dc7 100644
87264--- a/init/Makefile
87265+++ b/init/Makefile
87266@@ -2,6 +2,9 @@
87267 # Makefile for the linux kernel.
87268 #
87269
87270+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87271+asflags-y := $(GCC_PLUGINS_AFLAGS)
87272+
87273 obj-y := main.o version.o mounts.o
87274 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87275 obj-y += noinitramfs.o
87276diff --git a/init/do_mounts.c b/init/do_mounts.c
87277index eb41008..f5dbbf9 100644
87278--- a/init/do_mounts.c
87279+++ b/init/do_mounts.c
87280@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87281 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87282 {
87283 struct super_block *s;
87284- int err = sys_mount(name, "/root", fs, flags, data);
87285+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87286 if (err)
87287 return err;
87288
87289- sys_chdir("/root");
87290+ sys_chdir((const char __force_user *)"/root");
87291 s = current->fs->pwd.dentry->d_sb;
87292 ROOT_DEV = s->s_dev;
87293 printk(KERN_INFO
87294@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87295 va_start(args, fmt);
87296 vsprintf(buf, fmt, args);
87297 va_end(args);
87298- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87299+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87300 if (fd >= 0) {
87301 sys_ioctl(fd, FDEJECT, 0);
87302 sys_close(fd);
87303 }
87304 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87305- fd = sys_open("/dev/console", O_RDWR, 0);
87306+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87307 if (fd >= 0) {
87308 sys_ioctl(fd, TCGETS, (long)&termios);
87309 termios.c_lflag &= ~ICANON;
87310 sys_ioctl(fd, TCSETSF, (long)&termios);
87311- sys_read(fd, &c, 1);
87312+ sys_read(fd, (char __user *)&c, 1);
87313 termios.c_lflag |= ICANON;
87314 sys_ioctl(fd, TCSETSF, (long)&termios);
87315 sys_close(fd);
87316@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87317 mount_root();
87318 out:
87319 devtmpfs_mount("dev");
87320- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87321- sys_chroot(".");
87322+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87323+ sys_chroot((const char __force_user *)".");
87324 }
87325
87326 static bool is_tmpfs;
87327diff --git a/init/do_mounts.h b/init/do_mounts.h
87328index f5b978a..69dbfe8 100644
87329--- a/init/do_mounts.h
87330+++ b/init/do_mounts.h
87331@@ -15,15 +15,15 @@ extern int root_mountflags;
87332
87333 static inline int create_dev(char *name, dev_t dev)
87334 {
87335- sys_unlink(name);
87336- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87337+ sys_unlink((char __force_user *)name);
87338+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87339 }
87340
87341 #if BITS_PER_LONG == 32
87342 static inline u32 bstat(char *name)
87343 {
87344 struct stat64 stat;
87345- if (sys_stat64(name, &stat) != 0)
87346+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87347 return 0;
87348 if (!S_ISBLK(stat.st_mode))
87349 return 0;
87350@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87351 static inline u32 bstat(char *name)
87352 {
87353 struct stat stat;
87354- if (sys_newstat(name, &stat) != 0)
87355+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87356 return 0;
87357 if (!S_ISBLK(stat.st_mode))
87358 return 0;
87359diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87360index 3e0878e..8a9d7a0 100644
87361--- a/init/do_mounts_initrd.c
87362+++ b/init/do_mounts_initrd.c
87363@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87364 {
87365 sys_unshare(CLONE_FS | CLONE_FILES);
87366 /* stdin/stdout/stderr for /linuxrc */
87367- sys_open("/dev/console", O_RDWR, 0);
87368+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87369 sys_dup(0);
87370 sys_dup(0);
87371 /* move initrd over / and chdir/chroot in initrd root */
87372- sys_chdir("/root");
87373- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87374- sys_chroot(".");
87375+ sys_chdir((const char __force_user *)"/root");
87376+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87377+ sys_chroot((const char __force_user *)".");
87378 sys_setsid();
87379 return 0;
87380 }
87381@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87382 create_dev("/dev/root.old", Root_RAM0);
87383 /* mount initrd on rootfs' /root */
87384 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87385- sys_mkdir("/old", 0700);
87386- sys_chdir("/old");
87387+ sys_mkdir((const char __force_user *)"/old", 0700);
87388+ sys_chdir((const char __force_user *)"/old");
87389
87390 /* try loading default modules from initrd */
87391 load_default_modules();
87392@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87393 current->flags &= ~PF_FREEZER_SKIP;
87394
87395 /* move initrd to rootfs' /old */
87396- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87397+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87398 /* switch root and cwd back to / of rootfs */
87399- sys_chroot("..");
87400+ sys_chroot((const char __force_user *)"..");
87401
87402 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87403- sys_chdir("/old");
87404+ sys_chdir((const char __force_user *)"/old");
87405 return;
87406 }
87407
87408- sys_chdir("/");
87409+ sys_chdir((const char __force_user *)"/");
87410 ROOT_DEV = new_decode_dev(real_root_dev);
87411 mount_root();
87412
87413 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87414- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87415+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87416 if (!error)
87417 printk("okay\n");
87418 else {
87419- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87420+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87421 if (error == -ENOENT)
87422 printk("/initrd does not exist. Ignored.\n");
87423 else
87424 printk("failed\n");
87425 printk(KERN_NOTICE "Unmounting old root\n");
87426- sys_umount("/old", MNT_DETACH);
87427+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87428 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87429 if (fd < 0) {
87430 error = fd;
87431@@ -127,11 +127,11 @@ int __init initrd_load(void)
87432 * mounted in the normal path.
87433 */
87434 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87435- sys_unlink("/initrd.image");
87436+ sys_unlink((const char __force_user *)"/initrd.image");
87437 handle_initrd();
87438 return 1;
87439 }
87440 }
87441- sys_unlink("/initrd.image");
87442+ sys_unlink((const char __force_user *)"/initrd.image");
87443 return 0;
87444 }
87445diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87446index 8cb6db5..d729f50 100644
87447--- a/init/do_mounts_md.c
87448+++ b/init/do_mounts_md.c
87449@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87450 partitioned ? "_d" : "", minor,
87451 md_setup_args[ent].device_names);
87452
87453- fd = sys_open(name, 0, 0);
87454+ fd = sys_open((char __force_user *)name, 0, 0);
87455 if (fd < 0) {
87456 printk(KERN_ERR "md: open failed - cannot start "
87457 "array %s\n", name);
87458@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87459 * array without it
87460 */
87461 sys_close(fd);
87462- fd = sys_open(name, 0, 0);
87463+ fd = sys_open((char __force_user *)name, 0, 0);
87464 sys_ioctl(fd, BLKRRPART, 0);
87465 }
87466 sys_close(fd);
87467@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87468
87469 wait_for_device_probe();
87470
87471- fd = sys_open("/dev/md0", 0, 0);
87472+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87473 if (fd >= 0) {
87474 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87475 sys_close(fd);
87476diff --git a/init/init_task.c b/init/init_task.c
87477index ba0a7f36..2bcf1d5 100644
87478--- a/init/init_task.c
87479+++ b/init/init_task.c
87480@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87481 * Initial thread structure. Alignment of this is handled by a special
87482 * linker map entry.
87483 */
87484+#ifdef CONFIG_X86
87485+union thread_union init_thread_union __init_task_data;
87486+#else
87487 union thread_union init_thread_union __init_task_data =
87488 { INIT_THREAD_INFO(init_task) };
87489+#endif
87490diff --git a/init/initramfs.c b/init/initramfs.c
87491index ad1bd77..dca2c1b 100644
87492--- a/init/initramfs.c
87493+++ b/init/initramfs.c
87494@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87495
87496 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87497 while (count) {
87498- ssize_t rv = sys_write(fd, p, count);
87499+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87500
87501 if (rv < 0) {
87502 if (rv == -EINTR || rv == -EAGAIN)
87503@@ -107,7 +107,7 @@ static void __init free_hash(void)
87504 }
87505 }
87506
87507-static long __init do_utime(char *filename, time_t mtime)
87508+static long __init do_utime(char __force_user *filename, time_t mtime)
87509 {
87510 struct timespec t[2];
87511
87512@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87513 struct dir_entry *de, *tmp;
87514 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87515 list_del(&de->list);
87516- do_utime(de->name, de->mtime);
87517+ do_utime((char __force_user *)de->name, de->mtime);
87518 kfree(de->name);
87519 kfree(de);
87520 }
87521@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87522 if (nlink >= 2) {
87523 char *old = find_link(major, minor, ino, mode, collected);
87524 if (old)
87525- return (sys_link(old, collected) < 0) ? -1 : 1;
87526+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87527 }
87528 return 0;
87529 }
87530@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
87531 {
87532 struct stat st;
87533
87534- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
87535+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
87536 if (S_ISDIR(st.st_mode))
87537- sys_rmdir(path);
87538+ sys_rmdir((char __force_user *)path);
87539 else
87540- sys_unlink(path);
87541+ sys_unlink((char __force_user *)path);
87542 }
87543 }
87544
87545@@ -338,7 +338,7 @@ static int __init do_name(void)
87546 int openflags = O_WRONLY|O_CREAT;
87547 if (ml != 1)
87548 openflags |= O_TRUNC;
87549- wfd = sys_open(collected, openflags, mode);
87550+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87551
87552 if (wfd >= 0) {
87553 sys_fchown(wfd, uid, gid);
87554@@ -350,17 +350,17 @@ static int __init do_name(void)
87555 }
87556 }
87557 } else if (S_ISDIR(mode)) {
87558- sys_mkdir(collected, mode);
87559- sys_chown(collected, uid, gid);
87560- sys_chmod(collected, mode);
87561+ sys_mkdir((char __force_user *)collected, mode);
87562+ sys_chown((char __force_user *)collected, uid, gid);
87563+ sys_chmod((char __force_user *)collected, mode);
87564 dir_add(collected, mtime);
87565 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87566 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87567 if (maybe_link() == 0) {
87568- sys_mknod(collected, mode, rdev);
87569- sys_chown(collected, uid, gid);
87570- sys_chmod(collected, mode);
87571- do_utime(collected, mtime);
87572+ sys_mknod((char __force_user *)collected, mode, rdev);
87573+ sys_chown((char __force_user *)collected, uid, gid);
87574+ sys_chmod((char __force_user *)collected, mode);
87575+ do_utime((char __force_user *)collected, mtime);
87576 }
87577 }
87578 return 0;
87579@@ -372,7 +372,7 @@ static int __init do_copy(void)
87580 if (xwrite(wfd, victim, body_len) != body_len)
87581 error("write error");
87582 sys_close(wfd);
87583- do_utime(vcollected, mtime);
87584+ do_utime((char __force_user *)vcollected, mtime);
87585 kfree(vcollected);
87586 eat(body_len);
87587 state = SkipIt;
87588@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87589 {
87590 collected[N_ALIGN(name_len) + body_len] = '\0';
87591 clean_path(collected, 0);
87592- sys_symlink(collected + N_ALIGN(name_len), collected);
87593- sys_lchown(collected, uid, gid);
87594- do_utime(collected, mtime);
87595+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87596+ sys_lchown((char __force_user *)collected, uid, gid);
87597+ do_utime((char __force_user *)collected, mtime);
87598 state = SkipIt;
87599 next_state = Reset;
87600 return 0;
87601diff --git a/init/main.c b/init/main.c
87602index 61b99376..85893612d 100644
87603--- a/init/main.c
87604+++ b/init/main.c
87605@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
87606 static inline void mark_rodata_ro(void) { }
87607 #endif
87608
87609+extern void grsecurity_init(void);
87610+
87611 /*
87612 * Debug helper: via this flag we know that we are in 'early bootup code'
87613 * where only the boot processor is running with IRQ disabled. This means
87614@@ -161,6 +163,75 @@ static int __init set_reset_devices(char *str)
87615
87616 __setup("reset_devices", set_reset_devices);
87617
87618+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87619+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87620+static int __init setup_grsec_proc_gid(char *str)
87621+{
87622+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87623+ return 1;
87624+}
87625+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87626+#endif
87627+
87628+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87629+unsigned long pax_user_shadow_base __read_only;
87630+EXPORT_SYMBOL(pax_user_shadow_base);
87631+extern char pax_enter_kernel_user[];
87632+extern char pax_exit_kernel_user[];
87633+#endif
87634+
87635+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87636+static int __init setup_pax_nouderef(char *str)
87637+{
87638+#ifdef CONFIG_X86_32
87639+ unsigned int cpu;
87640+ struct desc_struct *gdt;
87641+
87642+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87643+ gdt = get_cpu_gdt_table(cpu);
87644+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87645+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87646+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87647+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87648+ }
87649+ loadsegment(ds, __KERNEL_DS);
87650+ loadsegment(es, __KERNEL_DS);
87651+ loadsegment(ss, __KERNEL_DS);
87652+#else
87653+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87654+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87655+ clone_pgd_mask = ~(pgdval_t)0UL;
87656+ pax_user_shadow_base = 0UL;
87657+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87658+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87659+#endif
87660+
87661+ return 0;
87662+}
87663+early_param("pax_nouderef", setup_pax_nouderef);
87664+
87665+#ifdef CONFIG_X86_64
87666+static int __init setup_pax_weakuderef(char *str)
87667+{
87668+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87669+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87670+ return 1;
87671+}
87672+__setup("pax_weakuderef", setup_pax_weakuderef);
87673+#endif
87674+#endif
87675+
87676+#ifdef CONFIG_PAX_SOFTMODE
87677+int pax_softmode;
87678+
87679+static int __init setup_pax_softmode(char *str)
87680+{
87681+ get_option(&str, &pax_softmode);
87682+ return 1;
87683+}
87684+__setup("pax_softmode=", setup_pax_softmode);
87685+#endif
87686+
87687 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87688 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87689 static const char *panic_later, *panic_param;
87690@@ -735,7 +806,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87691 struct blacklist_entry *entry;
87692 char *fn_name;
87693
87694- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87695+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87696 if (!fn_name)
87697 return false;
87698
87699@@ -787,7 +858,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87700 {
87701 int count = preempt_count();
87702 int ret;
87703- char msgbuf[64];
87704+ const char *msg1 = "", *msg2 = "";
87705
87706 if (initcall_blacklisted(fn))
87707 return -EPERM;
87708@@ -797,18 +868,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87709 else
87710 ret = fn();
87711
87712- msgbuf[0] = 0;
87713-
87714 if (preempt_count() != count) {
87715- sprintf(msgbuf, "preemption imbalance ");
87716+ msg1 = " preemption imbalance";
87717 preempt_count_set(count);
87718 }
87719 if (irqs_disabled()) {
87720- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87721+ msg2 = " disabled interrupts";
87722 local_irq_enable();
87723 }
87724- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87725+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87726
87727+ add_latent_entropy();
87728 return ret;
87729 }
87730
87731@@ -914,8 +984,8 @@ static int run_init_process(const char *init_filename)
87732 {
87733 argv_init[0] = init_filename;
87734 return do_execve(getname_kernel(init_filename),
87735- (const char __user *const __user *)argv_init,
87736- (const char __user *const __user *)envp_init);
87737+ (const char __user *const __force_user *)argv_init,
87738+ (const char __user *const __force_user *)envp_init);
87739 }
87740
87741 static int try_to_run_init_process(const char *init_filename)
87742@@ -932,6 +1002,10 @@ static int try_to_run_init_process(const char *init_filename)
87743 return ret;
87744 }
87745
87746+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87747+extern int gr_init_ran;
87748+#endif
87749+
87750 static noinline void __init kernel_init_freeable(void);
87751
87752 static int __ref kernel_init(void *unused)
87753@@ -956,6 +1030,11 @@ static int __ref kernel_init(void *unused)
87754 ramdisk_execute_command, ret);
87755 }
87756
87757+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87758+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87759+ gr_init_ran = 1;
87760+#endif
87761+
87762 /*
87763 * We try each of these until one succeeds.
87764 *
87765@@ -1016,7 +1095,7 @@ static noinline void __init kernel_init_freeable(void)
87766 do_basic_setup();
87767
87768 /* Open the /dev/console on the rootfs, this should never fail */
87769- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
87770+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
87771 pr_err("Warning: unable to open an initial console.\n");
87772
87773 (void) sys_dup(0);
87774@@ -1029,11 +1108,13 @@ static noinline void __init kernel_init_freeable(void)
87775 if (!ramdisk_execute_command)
87776 ramdisk_execute_command = "/init";
87777
87778- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
87779+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
87780 ramdisk_execute_command = NULL;
87781 prepare_namespace();
87782 }
87783
87784+ grsecurity_init();
87785+
87786 /*
87787 * Ok, we have completed the initial bootup, and
87788 * we're essentially up and running. Get rid of the
87789diff --git a/ipc/compat.c b/ipc/compat.c
87790index 9b3c85f..1c4d897 100644
87791--- a/ipc/compat.c
87792+++ b/ipc/compat.c
87793@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
87794 COMPAT_SHMLBA);
87795 if (err < 0)
87796 return err;
87797- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
87798+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
87799 }
87800 case SHMDT:
87801 return sys_shmdt(compat_ptr(ptr));
87802diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
87803index 8ad93c2..efd80f8 100644
87804--- a/ipc/ipc_sysctl.c
87805+++ b/ipc/ipc_sysctl.c
87806@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
87807 static int proc_ipc_dointvec(struct ctl_table *table, int write,
87808 void __user *buffer, size_t *lenp, loff_t *ppos)
87809 {
87810- struct ctl_table ipc_table;
87811+ ctl_table_no_const ipc_table;
87812
87813 memcpy(&ipc_table, table, sizeof(ipc_table));
87814 ipc_table.data = get_ipc(table);
87815@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
87816 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
87817 void __user *buffer, size_t *lenp, loff_t *ppos)
87818 {
87819- struct ctl_table ipc_table;
87820+ ctl_table_no_const ipc_table;
87821
87822 memcpy(&ipc_table, table, sizeof(ipc_table));
87823 ipc_table.data = get_ipc(table);
87824@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
87825 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87826 void __user *buffer, size_t *lenp, loff_t *ppos)
87827 {
87828- struct ctl_table ipc_table;
87829+ ctl_table_no_const ipc_table;
87830 memcpy(&ipc_table, table, sizeof(ipc_table));
87831 ipc_table.data = get_ipc(table);
87832
87833@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87834 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
87835 void __user *buffer, size_t *lenp, loff_t *ppos)
87836 {
87837- struct ctl_table ipc_table;
87838+ ctl_table_no_const ipc_table;
87839 int dummy = 0;
87840
87841 memcpy(&ipc_table, table, sizeof(ipc_table));
87842diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
87843index 68d4e95..1477ded 100644
87844--- a/ipc/mq_sysctl.c
87845+++ b/ipc/mq_sysctl.c
87846@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
87847 static int proc_mq_dointvec(struct ctl_table *table, int write,
87848 void __user *buffer, size_t *lenp, loff_t *ppos)
87849 {
87850- struct ctl_table mq_table;
87851+ ctl_table_no_const mq_table;
87852 memcpy(&mq_table, table, sizeof(mq_table));
87853 mq_table.data = get_mq(table);
87854
87855@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
87856 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
87857 void __user *buffer, size_t *lenp, loff_t *ppos)
87858 {
87859- struct ctl_table mq_table;
87860+ ctl_table_no_const mq_table;
87861 memcpy(&mq_table, table, sizeof(mq_table));
87862 mq_table.data = get_mq(table);
87863
87864diff --git a/ipc/mqueue.c b/ipc/mqueue.c
87865index 7635a1c..7432cb6 100644
87866--- a/ipc/mqueue.c
87867+++ b/ipc/mqueue.c
87868@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
87869 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
87870 info->attr.mq_msgsize);
87871
87872+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
87873 spin_lock(&mq_lock);
87874 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
87875 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
87876diff --git a/ipc/shm.c b/ipc/shm.c
87877index 19633b4..d454904 100644
87878--- a/ipc/shm.c
87879+++ b/ipc/shm.c
87880@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
87881 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
87882 #endif
87883
87884+#ifdef CONFIG_GRKERNSEC
87885+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
87886+ const u64 shm_createtime, const kuid_t cuid,
87887+ const int shmid);
87888+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
87889+ const u64 shm_createtime);
87890+#endif
87891+
87892 void shm_init_ns(struct ipc_namespace *ns)
87893 {
87894 ns->shm_ctlmax = SHMMAX;
87895@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
87896 shp->shm_lprid = 0;
87897 shp->shm_atim = shp->shm_dtim = 0;
87898 shp->shm_ctim = get_seconds();
87899+#ifdef CONFIG_GRKERNSEC
87900+ shp->shm_createtime = ktime_get_ns();
87901+#endif
87902 shp->shm_segsz = size;
87903 shp->shm_nattch = 0;
87904 shp->shm_file = file;
87905@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87906 f_mode = FMODE_READ | FMODE_WRITE;
87907 }
87908 if (shmflg & SHM_EXEC) {
87909+
87910+#ifdef CONFIG_PAX_MPROTECT
87911+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
87912+ goto out;
87913+#endif
87914+
87915 prot |= PROT_EXEC;
87916 acc_mode |= S_IXUGO;
87917 }
87918@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87919 if (err)
87920 goto out_unlock;
87921
87922+#ifdef CONFIG_GRKERNSEC
87923+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
87924+ shp->shm_perm.cuid, shmid) ||
87925+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
87926+ err = -EACCES;
87927+ goto out_unlock;
87928+ }
87929+#endif
87930+
87931 ipc_lock_object(&shp->shm_perm);
87932
87933 /* check if shm_destroy() is tearing down shp */
87934@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87935 path = shp->shm_file->f_path;
87936 path_get(&path);
87937 shp->shm_nattch++;
87938+#ifdef CONFIG_GRKERNSEC
87939+ shp->shm_lapid = current->pid;
87940+#endif
87941 size = i_size_read(path.dentry->d_inode);
87942 ipc_unlock_object(&shp->shm_perm);
87943 rcu_read_unlock();
87944diff --git a/ipc/util.c b/ipc/util.c
87945index 106bed0..f851429 100644
87946--- a/ipc/util.c
87947+++ b/ipc/util.c
87948@@ -71,6 +71,8 @@ struct ipc_proc_iface {
87949 int (*show)(struct seq_file *, void *);
87950 };
87951
87952+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
87953+
87954 /**
87955 * ipc_init - initialise ipc subsystem
87956 *
87957@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
87958 granted_mode >>= 6;
87959 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
87960 granted_mode >>= 3;
87961+
87962+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
87963+ return -1;
87964+
87965 /* is there some bit set in requested_mode but not in granted_mode? */
87966 if ((requested_mode & ~granted_mode & 0007) &&
87967 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
87968diff --git a/kernel/audit.c b/kernel/audit.c
87969index 72ab759..757deba 100644
87970--- a/kernel/audit.c
87971+++ b/kernel/audit.c
87972@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
87973 3) suppressed due to audit_rate_limit
87974 4) suppressed due to audit_backlog_limit
87975 */
87976-static atomic_t audit_lost = ATOMIC_INIT(0);
87977+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
87978
87979 /* The netlink socket. */
87980 static struct sock *audit_sock;
87981@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
87982 unsigned long now;
87983 int print;
87984
87985- atomic_inc(&audit_lost);
87986+ atomic_inc_unchecked(&audit_lost);
87987
87988 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
87989
87990@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
87991 if (print) {
87992 if (printk_ratelimit())
87993 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
87994- atomic_read(&audit_lost),
87995+ atomic_read_unchecked(&audit_lost),
87996 audit_rate_limit,
87997 audit_backlog_limit);
87998 audit_panic(message);
87999@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88000 s.pid = audit_pid;
88001 s.rate_limit = audit_rate_limit;
88002 s.backlog_limit = audit_backlog_limit;
88003- s.lost = atomic_read(&audit_lost);
88004+ s.lost = atomic_read_unchecked(&audit_lost);
88005 s.backlog = skb_queue_len(&audit_skb_queue);
88006 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88007 s.backlog_wait_time = audit_backlog_wait_time;
88008diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88009index 072566d..1190489 100644
88010--- a/kernel/auditsc.c
88011+++ b/kernel/auditsc.c
88012@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88013 }
88014
88015 /* global counter which is incremented every time something logs in */
88016-static atomic_t session_id = ATOMIC_INIT(0);
88017+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88018
88019 static int audit_set_loginuid_perm(kuid_t loginuid)
88020 {
88021@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
88022
88023 /* are we setting or clearing? */
88024 if (uid_valid(loginuid))
88025- sessionid = (unsigned int)atomic_inc_return(&session_id);
88026+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88027
88028 task->sessionid = sessionid;
88029 task->loginuid = loginuid;
88030diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88031index a64e7a2..2e69448 100644
88032--- a/kernel/bpf/core.c
88033+++ b/kernel/bpf/core.c
88034@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88035 * random section of illegal instructions.
88036 */
88037 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88038- hdr = module_alloc(size);
88039+ hdr = module_alloc_exec(size);
88040 if (hdr == NULL)
88041 return NULL;
88042
88043 /* Fill space with illegal/arch-dep instructions. */
88044 bpf_fill_ill_insns(hdr, size);
88045
88046+ pax_open_kernel();
88047 hdr->pages = size / PAGE_SIZE;
88048+ pax_close_kernel();
88049+
88050 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88051 PAGE_SIZE - sizeof(*hdr));
88052 start = (prandom_u32() % hole) & ~(alignment - 1);
88053@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88054
88055 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88056 {
88057- module_memfree(hdr);
88058+ module_memfree_exec(hdr);
88059 }
88060 #endif /* CONFIG_BPF_JIT */
88061
88062diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88063index 536edc2..d28c85d 100644
88064--- a/kernel/bpf/syscall.c
88065+++ b/kernel/bpf/syscall.c
88066@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88067 int err;
88068
88069 /* the syscall is limited to root temporarily. This restriction will be
88070- * lifted when security audit is clean. Note that eBPF+tracing must have
88071- * this restriction, since it may pass kernel data to user space
88072+ * lifted by upstream when a half-assed security audit is clean. Note
88073+ * that eBPF+tracing must have this restriction, since it may pass
88074+ * kernel data to user space
88075 */
88076 if (!capable(CAP_SYS_ADMIN))
88077 return -EPERM;
88078+#ifdef CONFIG_GRKERNSEC
88079+ return -EPERM;
88080+#endif
88081
88082 if (!access_ok(VERIFY_READ, uattr, 1))
88083 return -EFAULT;
88084diff --git a/kernel/capability.c b/kernel/capability.c
88085index 989f5bf..d317ca0 100644
88086--- a/kernel/capability.c
88087+++ b/kernel/capability.c
88088@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88089 * before modification is attempted and the application
88090 * fails.
88091 */
88092+ if (tocopy > ARRAY_SIZE(kdata))
88093+ return -EFAULT;
88094+
88095 if (copy_to_user(dataptr, kdata, tocopy
88096 * sizeof(struct __user_cap_data_struct))) {
88097 return -EFAULT;
88098@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88099 int ret;
88100
88101 rcu_read_lock();
88102- ret = security_capable(__task_cred(t), ns, cap);
88103+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88104+ gr_task_is_capable(t, __task_cred(t), cap);
88105 rcu_read_unlock();
88106
88107- return (ret == 0);
88108+ return ret;
88109 }
88110
88111 /**
88112@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88113 int ret;
88114
88115 rcu_read_lock();
88116- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88117+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88118 rcu_read_unlock();
88119
88120- return (ret == 0);
88121+ return ret;
88122 }
88123
88124 /**
88125@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88126 BUG();
88127 }
88128
88129- if (security_capable(current_cred(), ns, cap) == 0) {
88130+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88131 current->flags |= PF_SUPERPRIV;
88132 return true;
88133 }
88134@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88135 }
88136 EXPORT_SYMBOL(ns_capable);
88137
88138+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88139+{
88140+ if (unlikely(!cap_valid(cap))) {
88141+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88142+ BUG();
88143+ }
88144+
88145+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88146+ current->flags |= PF_SUPERPRIV;
88147+ return true;
88148+ }
88149+ return false;
88150+}
88151+EXPORT_SYMBOL(ns_capable_nolog);
88152+
88153 /**
88154 * file_ns_capable - Determine if the file's opener had a capability in effect
88155 * @file: The file we want to check
88156@@ -427,6 +446,12 @@ bool capable(int cap)
88157 }
88158 EXPORT_SYMBOL(capable);
88159
88160+bool capable_nolog(int cap)
88161+{
88162+ return ns_capable_nolog(&init_user_ns, cap);
88163+}
88164+EXPORT_SYMBOL(capable_nolog);
88165+
88166 /**
88167 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88168 * @inode: The inode in question
88169@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88170 kgid_has_mapping(ns, inode->i_gid);
88171 }
88172 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88173+
88174+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88175+{
88176+ struct user_namespace *ns = current_user_ns();
88177+
88178+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88179+ kgid_has_mapping(ns, inode->i_gid);
88180+}
88181+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
88182diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88183index 04cfe8a..adadcc0 100644
88184--- a/kernel/cgroup.c
88185+++ b/kernel/cgroup.c
88186@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
88187 if (!pathbuf || !agentbuf)
88188 goto out;
88189
88190+ if (agentbuf[0] == '\0')
88191+ goto out;
88192+
88193 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88194 if (!path)
88195 goto out;
88196@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88197 struct task_struct *task;
88198 int count = 0;
88199
88200- seq_printf(seq, "css_set %p\n", cset);
88201+ seq_printf(seq, "css_set %pK\n", cset);
88202
88203 list_for_each_entry(task, &cset->tasks, cg_list) {
88204 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88205diff --git a/kernel/compat.c b/kernel/compat.c
88206index ebb3c36..1df606e 100644
88207--- a/kernel/compat.c
88208+++ b/kernel/compat.c
88209@@ -13,6 +13,7 @@
88210
88211 #include <linux/linkage.h>
88212 #include <linux/compat.h>
88213+#include <linux/module.h>
88214 #include <linux/errno.h>
88215 #include <linux/time.h>
88216 #include <linux/signal.h>
88217@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88218 mm_segment_t oldfs;
88219 long ret;
88220
88221- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88222+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88223 oldfs = get_fs();
88224 set_fs(KERNEL_DS);
88225 ret = hrtimer_nanosleep_restart(restart);
88226@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88227 oldfs = get_fs();
88228 set_fs(KERNEL_DS);
88229 ret = hrtimer_nanosleep(&tu,
88230- rmtp ? (struct timespec __user *)&rmt : NULL,
88231+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88232 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88233 set_fs(oldfs);
88234
88235@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88236 mm_segment_t old_fs = get_fs();
88237
88238 set_fs(KERNEL_DS);
88239- ret = sys_sigpending((old_sigset_t __user *) &s);
88240+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88241 set_fs(old_fs);
88242 if (ret == 0)
88243 ret = put_user(s, set);
88244@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88245 mm_segment_t old_fs = get_fs();
88246
88247 set_fs(KERNEL_DS);
88248- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88249+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88250 set_fs(old_fs);
88251
88252 if (!ret) {
88253@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88254 set_fs (KERNEL_DS);
88255 ret = sys_wait4(pid,
88256 (stat_addr ?
88257- (unsigned int __user *) &status : NULL),
88258- options, (struct rusage __user *) &r);
88259+ (unsigned int __force_user *) &status : NULL),
88260+ options, (struct rusage __force_user *) &r);
88261 set_fs (old_fs);
88262
88263 if (ret > 0) {
88264@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88265 memset(&info, 0, sizeof(info));
88266
88267 set_fs(KERNEL_DS);
88268- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88269- uru ? (struct rusage __user *)&ru : NULL);
88270+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88271+ uru ? (struct rusage __force_user *)&ru : NULL);
88272 set_fs(old_fs);
88273
88274 if ((ret < 0) || (info.si_signo == 0))
88275@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88276 oldfs = get_fs();
88277 set_fs(KERNEL_DS);
88278 err = sys_timer_settime(timer_id, flags,
88279- (struct itimerspec __user *) &newts,
88280- (struct itimerspec __user *) &oldts);
88281+ (struct itimerspec __force_user *) &newts,
88282+ (struct itimerspec __force_user *) &oldts);
88283 set_fs(oldfs);
88284 if (!err && old && put_compat_itimerspec(old, &oldts))
88285 return -EFAULT;
88286@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88287 oldfs = get_fs();
88288 set_fs(KERNEL_DS);
88289 err = sys_timer_gettime(timer_id,
88290- (struct itimerspec __user *) &ts);
88291+ (struct itimerspec __force_user *) &ts);
88292 set_fs(oldfs);
88293 if (!err && put_compat_itimerspec(setting, &ts))
88294 return -EFAULT;
88295@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88296 oldfs = get_fs();
88297 set_fs(KERNEL_DS);
88298 err = sys_clock_settime(which_clock,
88299- (struct timespec __user *) &ts);
88300+ (struct timespec __force_user *) &ts);
88301 set_fs(oldfs);
88302 return err;
88303 }
88304@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88305 oldfs = get_fs();
88306 set_fs(KERNEL_DS);
88307 err = sys_clock_gettime(which_clock,
88308- (struct timespec __user *) &ts);
88309+ (struct timespec __force_user *) &ts);
88310 set_fs(oldfs);
88311 if (!err && compat_put_timespec(&ts, tp))
88312 return -EFAULT;
88313@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88314
88315 oldfs = get_fs();
88316 set_fs(KERNEL_DS);
88317- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88318+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88319 set_fs(oldfs);
88320
88321 err = compat_put_timex(utp, &txc);
88322@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88323 oldfs = get_fs();
88324 set_fs(KERNEL_DS);
88325 err = sys_clock_getres(which_clock,
88326- (struct timespec __user *) &ts);
88327+ (struct timespec __force_user *) &ts);
88328 set_fs(oldfs);
88329 if (!err && tp && compat_put_timespec(&ts, tp))
88330 return -EFAULT;
88331@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88332 struct timespec tu;
88333 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88334
88335- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88336+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88337 oldfs = get_fs();
88338 set_fs(KERNEL_DS);
88339 err = clock_nanosleep_restart(restart);
88340@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88341 oldfs = get_fs();
88342 set_fs(KERNEL_DS);
88343 err = sys_clock_nanosleep(which_clock, flags,
88344- (struct timespec __user *) &in,
88345- (struct timespec __user *) &out);
88346+ (struct timespec __force_user *) &in,
88347+ (struct timespec __force_user *) &out);
88348 set_fs(oldfs);
88349
88350 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88351@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88352 mm_segment_t old_fs = get_fs();
88353
88354 set_fs(KERNEL_DS);
88355- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88356+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88357 set_fs(old_fs);
88358 if (compat_put_timespec(&t, interval))
88359 return -EFAULT;
88360diff --git a/kernel/configs.c b/kernel/configs.c
88361index c18b1f1..b9a0132 100644
88362--- a/kernel/configs.c
88363+++ b/kernel/configs.c
88364@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88365 struct proc_dir_entry *entry;
88366
88367 /* create the current config file */
88368+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88369+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88370+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88371+ &ikconfig_file_ops);
88372+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88373+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88374+ &ikconfig_file_ops);
88375+#endif
88376+#else
88377 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88378 &ikconfig_file_ops);
88379+#endif
88380+
88381 if (!entry)
88382 return -ENOMEM;
88383
88384diff --git a/kernel/cred.c b/kernel/cred.c
88385index e0573a4..26c0fd3 100644
88386--- a/kernel/cred.c
88387+++ b/kernel/cred.c
88388@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88389 validate_creds(cred);
88390 alter_cred_subscribers(cred, -1);
88391 put_cred(cred);
88392+
88393+#ifdef CONFIG_GRKERNSEC_SETXID
88394+ cred = (struct cred *) tsk->delayed_cred;
88395+ if (cred != NULL) {
88396+ tsk->delayed_cred = NULL;
88397+ validate_creds(cred);
88398+ alter_cred_subscribers(cred, -1);
88399+ put_cred(cred);
88400+ }
88401+#endif
88402 }
88403
88404 /**
88405@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88406 * Always returns 0 thus allowing this function to be tail-called at the end
88407 * of, say, sys_setgid().
88408 */
88409-int commit_creds(struct cred *new)
88410+static int __commit_creds(struct cred *new)
88411 {
88412 struct task_struct *task = current;
88413 const struct cred *old = task->real_cred;
88414@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88415
88416 get_cred(new); /* we will require a ref for the subj creds too */
88417
88418+ gr_set_role_label(task, new->uid, new->gid);
88419+
88420 /* dumpability changes */
88421 if (!uid_eq(old->euid, new->euid) ||
88422 !gid_eq(old->egid, new->egid) ||
88423@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88424 put_cred(old);
88425 return 0;
88426 }
88427+#ifdef CONFIG_GRKERNSEC_SETXID
88428+extern int set_user(struct cred *new);
88429+
88430+void gr_delayed_cred_worker(void)
88431+{
88432+ const struct cred *new = current->delayed_cred;
88433+ struct cred *ncred;
88434+
88435+ current->delayed_cred = NULL;
88436+
88437+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88438+ // from doing get_cred on it when queueing this
88439+ put_cred(new);
88440+ return;
88441+ } else if (new == NULL)
88442+ return;
88443+
88444+ ncred = prepare_creds();
88445+ if (!ncred)
88446+ goto die;
88447+ // uids
88448+ ncred->uid = new->uid;
88449+ ncred->euid = new->euid;
88450+ ncred->suid = new->suid;
88451+ ncred->fsuid = new->fsuid;
88452+ // gids
88453+ ncred->gid = new->gid;
88454+ ncred->egid = new->egid;
88455+ ncred->sgid = new->sgid;
88456+ ncred->fsgid = new->fsgid;
88457+ // groups
88458+ set_groups(ncred, new->group_info);
88459+ // caps
88460+ ncred->securebits = new->securebits;
88461+ ncred->cap_inheritable = new->cap_inheritable;
88462+ ncred->cap_permitted = new->cap_permitted;
88463+ ncred->cap_effective = new->cap_effective;
88464+ ncred->cap_bset = new->cap_bset;
88465+
88466+ if (set_user(ncred)) {
88467+ abort_creds(ncred);
88468+ goto die;
88469+ }
88470+
88471+ // from doing get_cred on it when queueing this
88472+ put_cred(new);
88473+
88474+ __commit_creds(ncred);
88475+ return;
88476+die:
88477+ // from doing get_cred on it when queueing this
88478+ put_cred(new);
88479+ do_group_exit(SIGKILL);
88480+}
88481+#endif
88482+
88483+int commit_creds(struct cred *new)
88484+{
88485+#ifdef CONFIG_GRKERNSEC_SETXID
88486+ int ret;
88487+ int schedule_it = 0;
88488+ struct task_struct *t;
88489+ unsigned oldsecurebits = current_cred()->securebits;
88490+
88491+ /* we won't get called with tasklist_lock held for writing
88492+ and interrupts disabled as the cred struct in that case is
88493+ init_cred
88494+ */
88495+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88496+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88497+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88498+ schedule_it = 1;
88499+ }
88500+ ret = __commit_creds(new);
88501+ if (schedule_it) {
88502+ rcu_read_lock();
88503+ read_lock(&tasklist_lock);
88504+ for (t = next_thread(current); t != current;
88505+ t = next_thread(t)) {
88506+ /* we'll check if the thread has uid 0 in
88507+ * the delayed worker routine
88508+ */
88509+ if (task_securebits(t) == oldsecurebits &&
88510+ t->delayed_cred == NULL) {
88511+ t->delayed_cred = get_cred(new);
88512+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88513+ set_tsk_need_resched(t);
88514+ }
88515+ }
88516+ read_unlock(&tasklist_lock);
88517+ rcu_read_unlock();
88518+ }
88519+
88520+ return ret;
88521+#else
88522+ return __commit_creds(new);
88523+#endif
88524+}
88525+
88526 EXPORT_SYMBOL(commit_creds);
88527
88528 /**
88529diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88530index ac5c0f9..4b1c6c2 100644
88531--- a/kernel/debug/debug_core.c
88532+++ b/kernel/debug/debug_core.c
88533@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88534 */
88535 static atomic_t masters_in_kgdb;
88536 static atomic_t slaves_in_kgdb;
88537-static atomic_t kgdb_break_tasklet_var;
88538+static atomic_unchecked_t kgdb_break_tasklet_var;
88539 atomic_t kgdb_setting_breakpoint;
88540
88541 struct task_struct *kgdb_usethread;
88542@@ -137,7 +137,7 @@ int kgdb_single_step;
88543 static pid_t kgdb_sstep_pid;
88544
88545 /* to keep track of the CPU which is doing the single stepping*/
88546-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88547+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88548
88549 /*
88550 * If you are debugging a problem where roundup (the collection of
88551@@ -552,7 +552,7 @@ return_normal:
88552 * kernel will only try for the value of sstep_tries before
88553 * giving up and continuing on.
88554 */
88555- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88556+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88557 (kgdb_info[cpu].task &&
88558 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88559 atomic_set(&kgdb_active, -1);
88560@@ -654,8 +654,8 @@ cpu_master_loop:
88561 }
88562
88563 kgdb_restore:
88564- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88565- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88566+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88567+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88568 if (kgdb_info[sstep_cpu].task)
88569 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88570 else
88571@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
88572 static void kgdb_tasklet_bpt(unsigned long ing)
88573 {
88574 kgdb_breakpoint();
88575- atomic_set(&kgdb_break_tasklet_var, 0);
88576+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88577 }
88578
88579 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88580
88581 void kgdb_schedule_breakpoint(void)
88582 {
88583- if (atomic_read(&kgdb_break_tasklet_var) ||
88584+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88585 atomic_read(&kgdb_active) != -1 ||
88586 atomic_read(&kgdb_setting_breakpoint))
88587 return;
88588- atomic_inc(&kgdb_break_tasklet_var);
88589+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88590 tasklet_schedule(&kgdb_tasklet_breakpoint);
88591 }
88592 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
88593diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88594index 60f6bb8..104bb07 100644
88595--- a/kernel/debug/kdb/kdb_main.c
88596+++ b/kernel/debug/kdb/kdb_main.c
88597@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
88598 continue;
88599
88600 kdb_printf("%-20s%8u 0x%p ", mod->name,
88601- mod->core_size, (void *)mod);
88602+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88603 #ifdef CONFIG_MODULE_UNLOAD
88604 kdb_printf("%4d ", module_refcount(mod));
88605 #endif
88606@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
88607 kdb_printf(" (Loading)");
88608 else
88609 kdb_printf(" (Live)");
88610- kdb_printf(" 0x%p", mod->module_core);
88611+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88612
88613 #ifdef CONFIG_MODULE_UNLOAD
88614 {
88615diff --git a/kernel/events/core.c b/kernel/events/core.c
88616index 19efcf133..7c05c93 100644
88617--- a/kernel/events/core.c
88618+++ b/kernel/events/core.c
88619@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
88620 * 0 - disallow raw tracepoint access for unpriv
88621 * 1 - disallow cpu events for unpriv
88622 * 2 - disallow kernel profiling for unpriv
88623+ * 3 - disallow all unpriv perf event use
88624 */
88625-int sysctl_perf_event_paranoid __read_mostly = 1;
88626+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88627+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88628+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88629+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88630+#else
88631+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88632+#endif
88633
88634 /* Minimum for 512 kiB + 1 user control page */
88635 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88636@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
88637
88638 tmp *= sysctl_perf_cpu_time_max_percent;
88639 do_div(tmp, 100);
88640- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88641+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88642 }
88643
88644 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88645@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88646 }
88647 }
88648
88649-static atomic64_t perf_event_id;
88650+static atomic64_unchecked_t perf_event_id;
88651
88652 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88653 enum event_type_t event_type);
88654@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
88655
88656 static inline u64 perf_event_count(struct perf_event *event)
88657 {
88658- return local64_read(&event->count) + atomic64_read(&event->child_count);
88659+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88660 }
88661
88662 static u64 perf_event_read(struct perf_event *event)
88663@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88664 mutex_lock(&event->child_mutex);
88665 total += perf_event_read(event);
88666 *enabled += event->total_time_enabled +
88667- atomic64_read(&event->child_total_time_enabled);
88668+ atomic64_read_unchecked(&event->child_total_time_enabled);
88669 *running += event->total_time_running +
88670- atomic64_read(&event->child_total_time_running);
88671+ atomic64_read_unchecked(&event->child_total_time_running);
88672
88673 list_for_each_entry(child, &event->child_list, child_list) {
88674 total += perf_event_read(child);
88675@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
88676 userpg->offset -= local64_read(&event->hw.prev_count);
88677
88678 userpg->time_enabled = enabled +
88679- atomic64_read(&event->child_total_time_enabled);
88680+ atomic64_read_unchecked(&event->child_total_time_enabled);
88681
88682 userpg->time_running = running +
88683- atomic64_read(&event->child_total_time_running);
88684+ atomic64_read_unchecked(&event->child_total_time_running);
88685
88686 arch_perf_update_userpage(userpg, now);
88687
88688@@ -4568,7 +4575,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88689
88690 /* Data. */
88691 sp = perf_user_stack_pointer(regs);
88692- rem = __output_copy_user(handle, (void *) sp, dump_size);
88693+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88694 dyn_size = dump_size - rem;
88695
88696 perf_output_skip(handle, rem);
88697@@ -4659,11 +4666,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88698 values[n++] = perf_event_count(event);
88699 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88700 values[n++] = enabled +
88701- atomic64_read(&event->child_total_time_enabled);
88702+ atomic64_read_unchecked(&event->child_total_time_enabled);
88703 }
88704 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
88705 values[n++] = running +
88706- atomic64_read(&event->child_total_time_running);
88707+ atomic64_read_unchecked(&event->child_total_time_running);
88708 }
88709 if (read_format & PERF_FORMAT_ID)
88710 values[n++] = primary_event_id(event);
88711@@ -6994,7 +7001,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
88712 event->parent = parent_event;
88713
88714 event->ns = get_pid_ns(task_active_pid_ns(current));
88715- event->id = atomic64_inc_return(&perf_event_id);
88716+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
88717
88718 event->state = PERF_EVENT_STATE_INACTIVE;
88719
88720@@ -7275,6 +7282,11 @@ SYSCALL_DEFINE5(perf_event_open,
88721 if (flags & ~PERF_FLAG_ALL)
88722 return -EINVAL;
88723
88724+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88725+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
88726+ return -EACCES;
88727+#endif
88728+
88729 err = perf_copy_attr(attr_uptr, &attr);
88730 if (err)
88731 return err;
88732@@ -7642,10 +7654,10 @@ static void sync_child_event(struct perf_event *child_event,
88733 /*
88734 * Add back the child's count to the parent's count:
88735 */
88736- atomic64_add(child_val, &parent_event->child_count);
88737- atomic64_add(child_event->total_time_enabled,
88738+ atomic64_add_unchecked(child_val, &parent_event->child_count);
88739+ atomic64_add_unchecked(child_event->total_time_enabled,
88740 &parent_event->child_total_time_enabled);
88741- atomic64_add(child_event->total_time_running,
88742+ atomic64_add_unchecked(child_event->total_time_running,
88743 &parent_event->child_total_time_running);
88744
88745 /*
88746diff --git a/kernel/events/internal.h b/kernel/events/internal.h
88747index 569b2187..19940d9 100644
88748--- a/kernel/events/internal.h
88749+++ b/kernel/events/internal.h
88750@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
88751 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
88752 }
88753
88754-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
88755+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
88756 static inline unsigned long \
88757 func_name(struct perf_output_handle *handle, \
88758- const void *buf, unsigned long len) \
88759+ const void user *buf, unsigned long len) \
88760 { \
88761 unsigned long size, written; \
88762 \
88763@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
88764 return 0;
88765 }
88766
88767-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
88768+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
88769
88770 static inline unsigned long
88771 memcpy_skip(void *dst, const void *src, unsigned long n)
88772@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
88773 return 0;
88774 }
88775
88776-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
88777+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
88778
88779 #ifndef arch_perf_out_copy_user
88780 #define arch_perf_out_copy_user arch_perf_out_copy_user
88781@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
88782 }
88783 #endif
88784
88785-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
88786+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
88787
88788 /* Callchain handling */
88789 extern struct perf_callchain_entry *
88790diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
88791index cb346f2..e4dc317 100644
88792--- a/kernel/events/uprobes.c
88793+++ b/kernel/events/uprobes.c
88794@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
88795 {
88796 struct page *page;
88797 uprobe_opcode_t opcode;
88798- int result;
88799+ long result;
88800
88801 pagefault_disable();
88802 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
88803diff --git a/kernel/exit.c b/kernel/exit.c
88804index 6806c55..a5fb128 100644
88805--- a/kernel/exit.c
88806+++ b/kernel/exit.c
88807@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
88808 struct task_struct *leader;
88809 int zap_leader;
88810 repeat:
88811+#ifdef CONFIG_NET
88812+ gr_del_task_from_ip_table(p);
88813+#endif
88814+
88815 /* don't need to get the RCU readlock here - the process is dead and
88816 * can't be modifying its own credentials. But shut RCU-lockdep up */
88817 rcu_read_lock();
88818@@ -655,6 +659,8 @@ void do_exit(long code)
88819 int group_dead;
88820 TASKS_RCU(int tasks_rcu_i);
88821
88822+ set_fs(USER_DS);
88823+
88824 profile_task_exit(tsk);
88825
88826 WARN_ON(blk_needs_flush_plug(tsk));
88827@@ -671,7 +677,6 @@ void do_exit(long code)
88828 * mm_release()->clear_child_tid() from writing to a user-controlled
88829 * kernel address.
88830 */
88831- set_fs(USER_DS);
88832
88833 ptrace_event(PTRACE_EVENT_EXIT, code);
88834
88835@@ -729,6 +734,9 @@ void do_exit(long code)
88836 tsk->exit_code = code;
88837 taskstats_exit(tsk, group_dead);
88838
88839+ gr_acl_handle_psacct(tsk, code);
88840+ gr_acl_handle_exit();
88841+
88842 exit_mm(tsk);
88843
88844 if (group_dead)
88845@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
88846 * Take down every thread in the group. This is called by fatal signals
88847 * as well as by sys_exit_group (below).
88848 */
88849-void
88850+__noreturn void
88851 do_group_exit(int exit_code)
88852 {
88853 struct signal_struct *sig = current->signal;
88854diff --git a/kernel/fork.c b/kernel/fork.c
88855index 4dc2dda..651add0 100644
88856--- a/kernel/fork.c
88857+++ b/kernel/fork.c
88858@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
88859 void thread_info_cache_init(void)
88860 {
88861 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
88862- THREAD_SIZE, 0, NULL);
88863+ THREAD_SIZE, SLAB_USERCOPY, NULL);
88864 BUG_ON(thread_info_cache == NULL);
88865 }
88866 # endif
88867 #endif
88868
88869+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88870+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88871+ int node, void **lowmem_stack)
88872+{
88873+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
88874+ void *ret = NULL;
88875+ unsigned int i;
88876+
88877+ *lowmem_stack = alloc_thread_info_node(tsk, node);
88878+ if (*lowmem_stack == NULL)
88879+ goto out;
88880+
88881+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
88882+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
88883+
88884+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
88885+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
88886+ if (ret == NULL) {
88887+ free_thread_info(*lowmem_stack);
88888+ *lowmem_stack = NULL;
88889+ }
88890+
88891+out:
88892+ return ret;
88893+}
88894+
88895+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
88896+{
88897+ unmap_process_stacks(tsk);
88898+}
88899+#else
88900+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88901+ int node, void **lowmem_stack)
88902+{
88903+ return alloc_thread_info_node(tsk, node);
88904+}
88905+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
88906+{
88907+ free_thread_info(ti);
88908+}
88909+#endif
88910+
88911 /* SLAB cache for signal_struct structures (tsk->signal) */
88912 static struct kmem_cache *signal_cachep;
88913
88914@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
88915 /* SLAB cache for mm_struct structures (tsk->mm) */
88916 static struct kmem_cache *mm_cachep;
88917
88918-static void account_kernel_stack(struct thread_info *ti, int account)
88919+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
88920 {
88921+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88922+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
88923+#else
88924 struct zone *zone = page_zone(virt_to_page(ti));
88925+#endif
88926
88927 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
88928 }
88929
88930 void free_task(struct task_struct *tsk)
88931 {
88932- account_kernel_stack(tsk->stack, -1);
88933+ account_kernel_stack(tsk, tsk->stack, -1);
88934 arch_release_thread_info(tsk->stack);
88935- free_thread_info(tsk->stack);
88936+ gr_free_thread_info(tsk, tsk->stack);
88937 rt_mutex_debug_task_free(tsk);
88938 ftrace_graph_exit_task(tsk);
88939 put_seccomp_filter(tsk);
88940@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88941 {
88942 struct task_struct *tsk;
88943 struct thread_info *ti;
88944+ void *lowmem_stack;
88945 int node = tsk_fork_get_node(orig);
88946 int err;
88947
88948@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88949 if (!tsk)
88950 return NULL;
88951
88952- ti = alloc_thread_info_node(tsk, node);
88953+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
88954 if (!ti)
88955 goto free_tsk;
88956
88957@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88958 goto free_ti;
88959
88960 tsk->stack = ti;
88961+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88962+ tsk->lowmem_stack = lowmem_stack;
88963+#endif
88964 #ifdef CONFIG_SECCOMP
88965 /*
88966 * We must handle setting up seccomp filters once we're under
88967@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88968 set_task_stack_end_magic(tsk);
88969
88970 #ifdef CONFIG_CC_STACKPROTECTOR
88971- tsk->stack_canary = get_random_int();
88972+ tsk->stack_canary = pax_get_random_long();
88973 #endif
88974
88975 /*
88976@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88977 tsk->splice_pipe = NULL;
88978 tsk->task_frag.page = NULL;
88979
88980- account_kernel_stack(ti, 1);
88981+ account_kernel_stack(tsk, ti, 1);
88982
88983 return tsk;
88984
88985 free_ti:
88986- free_thread_info(ti);
88987+ gr_free_thread_info(tsk, ti);
88988 free_tsk:
88989 free_task_struct(tsk);
88990 return NULL;
88991 }
88992
88993 #ifdef CONFIG_MMU
88994-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
88995+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
88996+{
88997+ struct vm_area_struct *tmp;
88998+ unsigned long charge;
88999+ struct file *file;
89000+ int retval;
89001+
89002+ charge = 0;
89003+ if (mpnt->vm_flags & VM_ACCOUNT) {
89004+ unsigned long len = vma_pages(mpnt);
89005+
89006+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89007+ goto fail_nomem;
89008+ charge = len;
89009+ }
89010+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89011+ if (!tmp)
89012+ goto fail_nomem;
89013+ *tmp = *mpnt;
89014+ tmp->vm_mm = mm;
89015+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89016+ retval = vma_dup_policy(mpnt, tmp);
89017+ if (retval)
89018+ goto fail_nomem_policy;
89019+ if (anon_vma_fork(tmp, mpnt))
89020+ goto fail_nomem_anon_vma_fork;
89021+ tmp->vm_flags &= ~VM_LOCKED;
89022+ tmp->vm_next = tmp->vm_prev = NULL;
89023+ tmp->vm_mirror = NULL;
89024+ file = tmp->vm_file;
89025+ if (file) {
89026+ struct inode *inode = file_inode(file);
89027+ struct address_space *mapping = file->f_mapping;
89028+
89029+ get_file(file);
89030+ if (tmp->vm_flags & VM_DENYWRITE)
89031+ atomic_dec(&inode->i_writecount);
89032+ i_mmap_lock_write(mapping);
89033+ if (tmp->vm_flags & VM_SHARED)
89034+ atomic_inc(&mapping->i_mmap_writable);
89035+ flush_dcache_mmap_lock(mapping);
89036+ /* insert tmp into the share list, just after mpnt */
89037+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89038+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89039+ else
89040+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89041+ flush_dcache_mmap_unlock(mapping);
89042+ i_mmap_unlock_write(mapping);
89043+ }
89044+
89045+ /*
89046+ * Clear hugetlb-related page reserves for children. This only
89047+ * affects MAP_PRIVATE mappings. Faults generated by the child
89048+ * are not guaranteed to succeed, even if read-only
89049+ */
89050+ if (is_vm_hugetlb_page(tmp))
89051+ reset_vma_resv_huge_pages(tmp);
89052+
89053+ return tmp;
89054+
89055+fail_nomem_anon_vma_fork:
89056+ mpol_put(vma_policy(tmp));
89057+fail_nomem_policy:
89058+ kmem_cache_free(vm_area_cachep, tmp);
89059+fail_nomem:
89060+ vm_unacct_memory(charge);
89061+ return NULL;
89062+}
89063+
89064+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89065 {
89066 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89067 struct rb_node **rb_link, *rb_parent;
89068 int retval;
89069- unsigned long charge;
89070
89071 uprobe_start_dup_mmap();
89072 down_write(&oldmm->mmap_sem);
89073@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89074
89075 prev = NULL;
89076 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89077- struct file *file;
89078-
89079 if (mpnt->vm_flags & VM_DONTCOPY) {
89080 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89081 -vma_pages(mpnt));
89082 continue;
89083 }
89084- charge = 0;
89085- if (mpnt->vm_flags & VM_ACCOUNT) {
89086- unsigned long len = vma_pages(mpnt);
89087-
89088- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89089- goto fail_nomem;
89090- charge = len;
89091- }
89092- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89093- if (!tmp)
89094- goto fail_nomem;
89095- *tmp = *mpnt;
89096- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89097- retval = vma_dup_policy(mpnt, tmp);
89098- if (retval)
89099- goto fail_nomem_policy;
89100- tmp->vm_mm = mm;
89101- if (anon_vma_fork(tmp, mpnt))
89102- goto fail_nomem_anon_vma_fork;
89103- tmp->vm_flags &= ~VM_LOCKED;
89104- tmp->vm_next = tmp->vm_prev = NULL;
89105- file = tmp->vm_file;
89106- if (file) {
89107- struct inode *inode = file_inode(file);
89108- struct address_space *mapping = file->f_mapping;
89109-
89110- get_file(file);
89111- if (tmp->vm_flags & VM_DENYWRITE)
89112- atomic_dec(&inode->i_writecount);
89113- i_mmap_lock_write(mapping);
89114- if (tmp->vm_flags & VM_SHARED)
89115- atomic_inc(&mapping->i_mmap_writable);
89116- flush_dcache_mmap_lock(mapping);
89117- /* insert tmp into the share list, just after mpnt */
89118- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89119- vma_nonlinear_insert(tmp,
89120- &mapping->i_mmap_nonlinear);
89121- else
89122- vma_interval_tree_insert_after(tmp, mpnt,
89123- &mapping->i_mmap);
89124- flush_dcache_mmap_unlock(mapping);
89125- i_mmap_unlock_write(mapping);
89126+ tmp = dup_vma(mm, oldmm, mpnt);
89127+ if (!tmp) {
89128+ retval = -ENOMEM;
89129+ goto out;
89130 }
89131
89132 /*
89133@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89134 if (retval)
89135 goto out;
89136 }
89137+
89138+#ifdef CONFIG_PAX_SEGMEXEC
89139+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89140+ struct vm_area_struct *mpnt_m;
89141+
89142+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89143+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89144+
89145+ if (!mpnt->vm_mirror)
89146+ continue;
89147+
89148+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89149+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89150+ mpnt->vm_mirror = mpnt_m;
89151+ } else {
89152+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89153+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89154+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89155+ mpnt->vm_mirror->vm_mirror = mpnt;
89156+ }
89157+ }
89158+ BUG_ON(mpnt_m);
89159+ }
89160+#endif
89161+
89162 /* a new mm has just been created */
89163 arch_dup_mmap(oldmm, mm);
89164 retval = 0;
89165@@ -486,14 +589,6 @@ out:
89166 up_write(&oldmm->mmap_sem);
89167 uprobe_end_dup_mmap();
89168 return retval;
89169-fail_nomem_anon_vma_fork:
89170- mpol_put(vma_policy(tmp));
89171-fail_nomem_policy:
89172- kmem_cache_free(vm_area_cachep, tmp);
89173-fail_nomem:
89174- retval = -ENOMEM;
89175- vm_unacct_memory(charge);
89176- goto out;
89177 }
89178
89179 static inline int mm_alloc_pgd(struct mm_struct *mm)
89180@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89181 return ERR_PTR(err);
89182
89183 mm = get_task_mm(task);
89184- if (mm && mm != current->mm &&
89185- !ptrace_may_access(task, mode)) {
89186+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89187+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89188 mmput(mm);
89189 mm = ERR_PTR(-EACCES);
89190 }
89191@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89192 spin_unlock(&fs->lock);
89193 return -EAGAIN;
89194 }
89195- fs->users++;
89196+ atomic_inc(&fs->users);
89197 spin_unlock(&fs->lock);
89198 return 0;
89199 }
89200 tsk->fs = copy_fs_struct(fs);
89201 if (!tsk->fs)
89202 return -ENOMEM;
89203+ /* Carry through gr_chroot_dentry and is_chrooted instead
89204+ of recomputing it here. Already copied when the task struct
89205+ is duplicated. This allows pivot_root to not be treated as
89206+ a chroot
89207+ */
89208+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89209+
89210 return 0;
89211 }
89212
89213@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89214 * parts of the process environment (as per the clone
89215 * flags). The actual kick-off is left to the caller.
89216 */
89217-static struct task_struct *copy_process(unsigned long clone_flags,
89218+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89219 unsigned long stack_start,
89220 unsigned long stack_size,
89221 int __user *child_tidptr,
89222@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89223 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89224 #endif
89225 retval = -EAGAIN;
89226+
89227+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89228+
89229 if (atomic_read(&p->real_cred->user->processes) >=
89230 task_rlimit(p, RLIMIT_NPROC)) {
89231 if (p->real_cred->user != INIT_USER &&
89232@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89233 goto bad_fork_free_pid;
89234 }
89235
89236+ /* synchronizes with gr_set_acls()
89237+ we need to call this past the point of no return for fork()
89238+ */
89239+ gr_copy_label(p);
89240+
89241 if (likely(p->pid)) {
89242 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89243
89244@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
89245 bad_fork_free:
89246 free_task(p);
89247 fork_out:
89248+ gr_log_forkfail(retval);
89249+
89250 return ERR_PTR(retval);
89251 }
89252
89253@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
89254
89255 p = copy_process(clone_flags, stack_start, stack_size,
89256 child_tidptr, NULL, trace);
89257+ add_latent_entropy();
89258 /*
89259 * Do this prior waking up the new thread - the thread pointer
89260 * might get invalid after that point, if the thread exits quickly.
89261@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
89262 if (clone_flags & CLONE_PARENT_SETTID)
89263 put_user(nr, parent_tidptr);
89264
89265+ gr_handle_brute_check();
89266+
89267 if (clone_flags & CLONE_VFORK) {
89268 p->vfork_done = &vfork;
89269 init_completion(&vfork);
89270@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
89271 mm_cachep = kmem_cache_create("mm_struct",
89272 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89273 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89274- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89275+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89276 mmap_init();
89277 nsproxy_cache_init();
89278 }
89279@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89280 return 0;
89281
89282 /* don't need lock here; in the worst case we'll do useless copy */
89283- if (fs->users == 1)
89284+ if (atomic_read(&fs->users) == 1)
89285 return 0;
89286
89287 *new_fsp = copy_fs_struct(fs);
89288@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89289 fs = current->fs;
89290 spin_lock(&fs->lock);
89291 current->fs = new_fs;
89292- if (--fs->users)
89293+ gr_set_chroot_entries(current, &current->fs->root);
89294+ if (atomic_dec_return(&fs->users))
89295 new_fs = NULL;
89296 else
89297 new_fs = fs;
89298diff --git a/kernel/futex.c b/kernel/futex.c
89299index 63678b5..512f9af 100644
89300--- a/kernel/futex.c
89301+++ b/kernel/futex.c
89302@@ -201,7 +201,7 @@ struct futex_pi_state {
89303 atomic_t refcount;
89304
89305 union futex_key key;
89306-};
89307+} __randomize_layout;
89308
89309 /**
89310 * struct futex_q - The hashed futex queue entry, one per waiting task
89311@@ -235,7 +235,7 @@ struct futex_q {
89312 struct rt_mutex_waiter *rt_waiter;
89313 union futex_key *requeue_pi_key;
89314 u32 bitset;
89315-};
89316+} __randomize_layout;
89317
89318 static const struct futex_q futex_q_init = {
89319 /* list gets initialized in queue_me()*/
89320@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89321 struct page *page, *page_head;
89322 int err, ro = 0;
89323
89324+#ifdef CONFIG_PAX_SEGMEXEC
89325+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89326+ return -EFAULT;
89327+#endif
89328+
89329 /*
89330 * The futex address must be "naturally" aligned.
89331 */
89332@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89333
89334 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89335 {
89336- int ret;
89337+ unsigned long ret;
89338
89339 pagefault_disable();
89340 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89341@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
89342 {
89343 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89344 u32 curval;
89345+ mm_segment_t oldfs;
89346
89347 /*
89348 * This will fail and we want it. Some arch implementations do
89349@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
89350 * implementation, the non-functional ones will return
89351 * -ENOSYS.
89352 */
89353+ oldfs = get_fs();
89354+ set_fs(USER_DS);
89355 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89356 futex_cmpxchg_enabled = 1;
89357+ set_fs(oldfs);
89358 #endif
89359 }
89360
89361diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89362index 55c8c93..9ba7ad6 100644
89363--- a/kernel/futex_compat.c
89364+++ b/kernel/futex_compat.c
89365@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89366 return 0;
89367 }
89368
89369-static void __user *futex_uaddr(struct robust_list __user *entry,
89370+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89371 compat_long_t futex_offset)
89372 {
89373 compat_uptr_t base = ptr_to_compat(entry);
89374diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89375index b358a80..fc25240 100644
89376--- a/kernel/gcov/base.c
89377+++ b/kernel/gcov/base.c
89378@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89379 }
89380
89381 #ifdef CONFIG_MODULES
89382-static inline int within(void *addr, void *start, unsigned long size)
89383-{
89384- return ((addr >= start) && (addr < start + size));
89385-}
89386-
89387 /* Update list and generate events when modules are unloaded. */
89388 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89389 void *data)
89390@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89391
89392 /* Remove entries located in module from linked list. */
89393 while ((info = gcov_info_next(info))) {
89394- if (within(info, mod->module_core, mod->core_size)) {
89395+ if (within_module_core_rw((unsigned long)info, mod)) {
89396 gcov_info_unlink(prev, info);
89397 if (gcov_events_enabled)
89398 gcov_event(GCOV_REMOVE, info);
89399diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
89400index 8069237..fe712d0 100644
89401--- a/kernel/irq/manage.c
89402+++ b/kernel/irq/manage.c
89403@@ -871,7 +871,7 @@ static int irq_thread(void *data)
89404
89405 action_ret = handler_fn(desc, action);
89406 if (action_ret == IRQ_HANDLED)
89407- atomic_inc(&desc->threads_handled);
89408+ atomic_inc_unchecked(&desc->threads_handled);
89409
89410 wake_threads_waitq(desc);
89411 }
89412diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
89413index e2514b0..de3dfe0 100644
89414--- a/kernel/irq/spurious.c
89415+++ b/kernel/irq/spurious.c
89416@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
89417 * count. We just care about the count being
89418 * different than the one we saw before.
89419 */
89420- handled = atomic_read(&desc->threads_handled);
89421+ handled = atomic_read_unchecked(&desc->threads_handled);
89422 handled |= SPURIOUS_DEFERRED;
89423 if (handled != desc->threads_handled_last) {
89424 action_ret = IRQ_HANDLED;
89425diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89426index 9019f15..9a3c42e 100644
89427--- a/kernel/jump_label.c
89428+++ b/kernel/jump_label.c
89429@@ -14,6 +14,7 @@
89430 #include <linux/err.h>
89431 #include <linux/static_key.h>
89432 #include <linux/jump_label_ratelimit.h>
89433+#include <linux/mm.h>
89434
89435 #ifdef HAVE_JUMP_LABEL
89436
89437@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89438
89439 size = (((unsigned long)stop - (unsigned long)start)
89440 / sizeof(struct jump_entry));
89441+ pax_open_kernel();
89442 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89443+ pax_close_kernel();
89444 }
89445
89446 static void jump_label_update(struct static_key *key, int enable);
89447@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89448 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89449 struct jump_entry *iter;
89450
89451+ pax_open_kernel();
89452 for (iter = iter_start; iter < iter_stop; iter++) {
89453 if (within_module_init(iter->code, mod))
89454 iter->code = 0;
89455 }
89456+ pax_close_kernel();
89457 }
89458
89459 static int
89460diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89461index 5c5987f..bc502b0 100644
89462--- a/kernel/kallsyms.c
89463+++ b/kernel/kallsyms.c
89464@@ -11,6 +11,9 @@
89465 * Changed the compression method from stem compression to "table lookup"
89466 * compression (see scripts/kallsyms.c for a more complete description)
89467 */
89468+#ifdef CONFIG_GRKERNSEC_HIDESYM
89469+#define __INCLUDED_BY_HIDESYM 1
89470+#endif
89471 #include <linux/kallsyms.h>
89472 #include <linux/module.h>
89473 #include <linux/init.h>
89474@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89475
89476 static inline int is_kernel_inittext(unsigned long addr)
89477 {
89478+ if (system_state != SYSTEM_BOOTING)
89479+ return 0;
89480+
89481 if (addr >= (unsigned long)_sinittext
89482 && addr <= (unsigned long)_einittext)
89483 return 1;
89484 return 0;
89485 }
89486
89487+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89488+#ifdef CONFIG_MODULES
89489+static inline int is_module_text(unsigned long addr)
89490+{
89491+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89492+ return 1;
89493+
89494+ addr = ktla_ktva(addr);
89495+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89496+}
89497+#else
89498+static inline int is_module_text(unsigned long addr)
89499+{
89500+ return 0;
89501+}
89502+#endif
89503+#endif
89504+
89505 static inline int is_kernel_text(unsigned long addr)
89506 {
89507 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89508@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89509
89510 static inline int is_kernel(unsigned long addr)
89511 {
89512+
89513+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89514+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89515+ return 1;
89516+
89517+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89518+#else
89519 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89520+#endif
89521+
89522 return 1;
89523 return in_gate_area_no_mm(addr);
89524 }
89525
89526 static int is_ksym_addr(unsigned long addr)
89527 {
89528+
89529+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89530+ if (is_module_text(addr))
89531+ return 0;
89532+#endif
89533+
89534 if (all_var)
89535 return is_kernel(addr);
89536
89537@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89538
89539 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89540 {
89541- iter->name[0] = '\0';
89542 iter->nameoff = get_symbol_offset(new_pos);
89543 iter->pos = new_pos;
89544 }
89545@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89546 {
89547 struct kallsym_iter *iter = m->private;
89548
89549+#ifdef CONFIG_GRKERNSEC_HIDESYM
89550+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89551+ return 0;
89552+#endif
89553+
89554 /* Some debugging symbols have no name. Ignore them. */
89555 if (!iter->name[0])
89556 return 0;
89557@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89558 */
89559 type = iter->exported ? toupper(iter->type) :
89560 tolower(iter->type);
89561+
89562 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89563 type, iter->name, iter->module_name);
89564 } else
89565diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89566index 0aa69ea..a7fcafb 100644
89567--- a/kernel/kcmp.c
89568+++ b/kernel/kcmp.c
89569@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89570 struct task_struct *task1, *task2;
89571 int ret;
89572
89573+#ifdef CONFIG_GRKERNSEC
89574+ return -ENOSYS;
89575+#endif
89576+
89577 rcu_read_lock();
89578
89579 /*
89580diff --git a/kernel/kexec.c b/kernel/kexec.c
89581index 9a8a01a..3c35dd6 100644
89582--- a/kernel/kexec.c
89583+++ b/kernel/kexec.c
89584@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89585 compat_ulong_t, flags)
89586 {
89587 struct compat_kexec_segment in;
89588- struct kexec_segment out, __user *ksegments;
89589+ struct kexec_segment out;
89590+ struct kexec_segment __user *ksegments;
89591 unsigned long i, result;
89592
89593 /* Don't allow clients that don't understand the native
89594diff --git a/kernel/kmod.c b/kernel/kmod.c
89595index 2777f40..6cf5e70 100644
89596--- a/kernel/kmod.c
89597+++ b/kernel/kmod.c
89598@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89599 kfree(info->argv);
89600 }
89601
89602-static int call_modprobe(char *module_name, int wait)
89603+static int call_modprobe(char *module_name, char *module_param, int wait)
89604 {
89605 struct subprocess_info *info;
89606 static char *envp[] = {
89607@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
89608 NULL
89609 };
89610
89611- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89612+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89613 if (!argv)
89614 goto out;
89615
89616@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
89617 argv[1] = "-q";
89618 argv[2] = "--";
89619 argv[3] = module_name; /* check free_modprobe_argv() */
89620- argv[4] = NULL;
89621+ argv[4] = module_param;
89622+ argv[5] = NULL;
89623
89624 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89625 NULL, free_modprobe_argv, NULL);
89626@@ -122,9 +123,8 @@ out:
89627 * If module auto-loading support is disabled then this function
89628 * becomes a no-operation.
89629 */
89630-int __request_module(bool wait, const char *fmt, ...)
89631+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89632 {
89633- va_list args;
89634 char module_name[MODULE_NAME_LEN];
89635 unsigned int max_modprobes;
89636 int ret;
89637@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
89638 if (!modprobe_path[0])
89639 return 0;
89640
89641- va_start(args, fmt);
89642- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89643- va_end(args);
89644+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89645 if (ret >= MODULE_NAME_LEN)
89646 return -ENAMETOOLONG;
89647
89648@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
89649 if (ret)
89650 return ret;
89651
89652+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89653+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89654+ /* hack to workaround consolekit/udisks stupidity */
89655+ read_lock(&tasklist_lock);
89656+ if (!strcmp(current->comm, "mount") &&
89657+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89658+ read_unlock(&tasklist_lock);
89659+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89660+ return -EPERM;
89661+ }
89662+ read_unlock(&tasklist_lock);
89663+ }
89664+#endif
89665+
89666 /* If modprobe needs a service that is in a module, we get a recursive
89667 * loop. Limit the number of running kmod threads to max_threads/2 or
89668 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89669@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
89670
89671 trace_module_request(module_name, wait, _RET_IP_);
89672
89673- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89674+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89675
89676 atomic_dec(&kmod_concurrent);
89677 return ret;
89678 }
89679+
89680+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89681+{
89682+ va_list args;
89683+ int ret;
89684+
89685+ va_start(args, fmt);
89686+ ret = ____request_module(wait, module_param, fmt, args);
89687+ va_end(args);
89688+
89689+ return ret;
89690+}
89691+
89692+int __request_module(bool wait, const char *fmt, ...)
89693+{
89694+ va_list args;
89695+ int ret;
89696+
89697+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89698+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89699+ char module_param[MODULE_NAME_LEN];
89700+
89701+ memset(module_param, 0, sizeof(module_param));
89702+
89703+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89704+
89705+ va_start(args, fmt);
89706+ ret = ____request_module(wait, module_param, fmt, args);
89707+ va_end(args);
89708+
89709+ return ret;
89710+ }
89711+#endif
89712+
89713+ va_start(args, fmt);
89714+ ret = ____request_module(wait, NULL, fmt, args);
89715+ va_end(args);
89716+
89717+ return ret;
89718+}
89719+
89720 EXPORT_SYMBOL(__request_module);
89721 #endif /* CONFIG_MODULES */
89722
89723 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
89724 {
89725+#ifdef CONFIG_GRKERNSEC
89726+ kfree(info->path);
89727+ info->path = info->origpath;
89728+#endif
89729 if (info->cleanup)
89730 (*info->cleanup)(info);
89731 kfree(info);
89732@@ -232,6 +289,20 @@ static int ____call_usermodehelper(void *data)
89733 */
89734 set_user_nice(current, 0);
89735
89736+#ifdef CONFIG_GRKERNSEC
89737+ /* this is race-free as far as userland is concerned as we copied
89738+ out the path to be used prior to this point and are now operating
89739+ on that copy
89740+ */
89741+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
89742+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
89743+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
89744+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
89745+ retval = -EPERM;
89746+ goto out;
89747+ }
89748+#endif
89749+
89750 retval = -ENOMEM;
89751 new = prepare_kernel_cred(current);
89752 if (!new)
89753@@ -254,8 +325,8 @@ static int ____call_usermodehelper(void *data)
89754 commit_creds(new);
89755
89756 retval = do_execve(getname_kernel(sub_info->path),
89757- (const char __user *const __user *)sub_info->argv,
89758- (const char __user *const __user *)sub_info->envp);
89759+ (const char __user *const __force_user *)sub_info->argv,
89760+ (const char __user *const __force_user *)sub_info->envp);
89761 out:
89762 sub_info->retval = retval;
89763 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
89764@@ -288,7 +359,7 @@ static int wait_for_helper(void *data)
89765 *
89766 * Thus the __user pointer cast is valid here.
89767 */
89768- sys_wait4(pid, (int __user *)&ret, 0, NULL);
89769+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
89770
89771 /*
89772 * If ret is 0, either ____call_usermodehelper failed and the
89773@@ -510,7 +581,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
89774 goto out;
89775
89776 INIT_WORK(&sub_info->work, __call_usermodehelper);
89777+#ifdef CONFIG_GRKERNSEC
89778+ sub_info->origpath = path;
89779+ sub_info->path = kstrdup(path, gfp_mask);
89780+#else
89781 sub_info->path = path;
89782+#endif
89783 sub_info->argv = argv;
89784 sub_info->envp = envp;
89785
89786@@ -612,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper);
89787 static int proc_cap_handler(struct ctl_table *table, int write,
89788 void __user *buffer, size_t *lenp, loff_t *ppos)
89789 {
89790- struct ctl_table t;
89791+ ctl_table_no_const t;
89792 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
89793 kernel_cap_t new_cap;
89794 int err, i;
89795diff --git a/kernel/kprobes.c b/kernel/kprobes.c
89796index ee61992..62142b1 100644
89797--- a/kernel/kprobes.c
89798+++ b/kernel/kprobes.c
89799@@ -31,6 +31,9 @@
89800 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
89801 * <prasanna@in.ibm.com> added function-return probes.
89802 */
89803+#ifdef CONFIG_GRKERNSEC_HIDESYM
89804+#define __INCLUDED_BY_HIDESYM 1
89805+#endif
89806 #include <linux/kprobes.h>
89807 #include <linux/hash.h>
89808 #include <linux/init.h>
89809@@ -122,12 +125,12 @@ enum kprobe_slot_state {
89810
89811 static void *alloc_insn_page(void)
89812 {
89813- return module_alloc(PAGE_SIZE);
89814+ return module_alloc_exec(PAGE_SIZE);
89815 }
89816
89817 static void free_insn_page(void *page)
89818 {
89819- module_memfree(page);
89820+ module_memfree_exec(page);
89821 }
89822
89823 struct kprobe_insn_cache kprobe_insn_slots = {
89824@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
89825 kprobe_type = "k";
89826
89827 if (sym)
89828- seq_printf(pi, "%p %s %s+0x%x %s ",
89829+ seq_printf(pi, "%pK %s %s+0x%x %s ",
89830 p->addr, kprobe_type, sym, offset,
89831 (modname ? modname : " "));
89832 else
89833- seq_printf(pi, "%p %s %p ",
89834+ seq_printf(pi, "%pK %s %pK ",
89835 p->addr, kprobe_type, p->addr);
89836
89837 if (!pp)
89838diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
89839index 6683cce..daf8999 100644
89840--- a/kernel/ksysfs.c
89841+++ b/kernel/ksysfs.c
89842@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
89843 {
89844 if (count+1 > UEVENT_HELPER_PATH_LEN)
89845 return -ENOENT;
89846+ if (!capable(CAP_SYS_ADMIN))
89847+ return -EPERM;
89848 memcpy(uevent_helper, buf, count);
89849 uevent_helper[count] = '\0';
89850 if (count && uevent_helper[count-1] == '\n')
89851@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
89852 return count;
89853 }
89854
89855-static struct bin_attribute notes_attr = {
89856+static bin_attribute_no_const notes_attr __read_only = {
89857 .attr = {
89858 .name = "notes",
89859 .mode = S_IRUGO,
89860diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
89861index 88d0d44..e9ce0ee 100644
89862--- a/kernel/locking/lockdep.c
89863+++ b/kernel/locking/lockdep.c
89864@@ -599,6 +599,10 @@ static int static_obj(void *obj)
89865 end = (unsigned long) &_end,
89866 addr = (unsigned long) obj;
89867
89868+#ifdef CONFIG_PAX_KERNEXEC
89869+ start = ktla_ktva(start);
89870+#endif
89871+
89872 /*
89873 * static variable?
89874 */
89875@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
89876 if (!static_obj(lock->key)) {
89877 debug_locks_off();
89878 printk("INFO: trying to register non-static key.\n");
89879+ printk("lock:%pS key:%pS.\n", lock, lock->key);
89880 printk("the code is fine but needs lockdep annotation.\n");
89881 printk("turning off the locking correctness validator.\n");
89882 dump_stack();
89883@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
89884 if (!class)
89885 return 0;
89886 }
89887- atomic_inc((atomic_t *)&class->ops);
89888+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
89889 if (very_verbose(class)) {
89890 printk("\nacquire class [%p] %s", class->key, class->name);
89891 if (class->name_version > 1)
89892diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
89893index ef43ac4..2720dfa 100644
89894--- a/kernel/locking/lockdep_proc.c
89895+++ b/kernel/locking/lockdep_proc.c
89896@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
89897 return 0;
89898 }
89899
89900- seq_printf(m, "%p", class->key);
89901+ seq_printf(m, "%pK", class->key);
89902 #ifdef CONFIG_DEBUG_LOCKDEP
89903 seq_printf(m, " OPS:%8ld", class->ops);
89904 #endif
89905@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
89906
89907 list_for_each_entry(entry, &class->locks_after, entry) {
89908 if (entry->distance == 1) {
89909- seq_printf(m, " -> [%p] ", entry->class->key);
89910+ seq_printf(m, " -> [%pK] ", entry->class->key);
89911 print_name(m, entry->class);
89912 seq_puts(m, "\n");
89913 }
89914@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
89915 if (!class->key)
89916 continue;
89917
89918- seq_printf(m, "[%p] ", class->key);
89919+ seq_printf(m, "[%pK] ", class->key);
89920 print_name(m, class);
89921 seq_puts(m, "\n");
89922 }
89923@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
89924 if (!i)
89925 seq_line(m, '-', 40-namelen, namelen);
89926
89927- snprintf(ip, sizeof(ip), "[<%p>]",
89928+ snprintf(ip, sizeof(ip), "[<%pK>]",
89929 (void *)class->contention_point[i]);
89930 seq_printf(m, "%40s %14lu %29s %pS\n",
89931 name, stats->contention_point[i],
89932@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
89933 if (!i)
89934 seq_line(m, '-', 40-namelen, namelen);
89935
89936- snprintf(ip, sizeof(ip), "[<%p>]",
89937+ snprintf(ip, sizeof(ip), "[<%pK>]",
89938 (void *)class->contending_point[i]);
89939 seq_printf(m, "%40s %14lu %29s %pS\n",
89940 name, stats->contending_point[i],
89941diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
89942index 9887a90..0cd2b1d 100644
89943--- a/kernel/locking/mcs_spinlock.c
89944+++ b/kernel/locking/mcs_spinlock.c
89945@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
89946
89947 prev = decode_cpu(old);
89948 node->prev = prev;
89949- ACCESS_ONCE(prev->next) = node;
89950+ ACCESS_ONCE_RW(prev->next) = node;
89951
89952 /*
89953 * Normally @prev is untouchable after the above store; because at that
89954@@ -172,8 +172,8 @@ unqueue:
89955 * it will wait in Step-A.
89956 */
89957
89958- ACCESS_ONCE(next->prev) = prev;
89959- ACCESS_ONCE(prev->next) = next;
89960+ ACCESS_ONCE_RW(next->prev) = prev;
89961+ ACCESS_ONCE_RW(prev->next) = next;
89962
89963 return false;
89964 }
89965@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
89966 node = this_cpu_ptr(&osq_node);
89967 next = xchg(&node->next, NULL);
89968 if (next) {
89969- ACCESS_ONCE(next->locked) = 1;
89970+ ACCESS_ONCE_RW(next->locked) = 1;
89971 return;
89972 }
89973
89974 next = osq_wait_next(lock, node, NULL);
89975 if (next)
89976- ACCESS_ONCE(next->locked) = 1;
89977+ ACCESS_ONCE_RW(next->locked) = 1;
89978 }
89979
89980 #endif
89981diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
89982index 4d60986..5d351c1 100644
89983--- a/kernel/locking/mcs_spinlock.h
89984+++ b/kernel/locking/mcs_spinlock.h
89985@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
89986 */
89987 return;
89988 }
89989- ACCESS_ONCE(prev->next) = node;
89990+ ACCESS_ONCE_RW(prev->next) = node;
89991
89992 /* Wait until the lock holder passes the lock down. */
89993 arch_mcs_spin_lock_contended(&node->locked);
89994diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
89995index 3ef3736..9c951fa 100644
89996--- a/kernel/locking/mutex-debug.c
89997+++ b/kernel/locking/mutex-debug.c
89998@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
89999 }
90000
90001 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90002- struct thread_info *ti)
90003+ struct task_struct *task)
90004 {
90005 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90006
90007 /* Mark the current thread as blocked on the lock: */
90008- ti->task->blocked_on = waiter;
90009+ task->blocked_on = waiter;
90010 }
90011
90012 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90013- struct thread_info *ti)
90014+ struct task_struct *task)
90015 {
90016 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90017- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90018- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90019- ti->task->blocked_on = NULL;
90020+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90021+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90022+ task->blocked_on = NULL;
90023
90024 list_del_init(&waiter->list);
90025 waiter->task = NULL;
90026diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90027index 0799fd3..d06ae3b 100644
90028--- a/kernel/locking/mutex-debug.h
90029+++ b/kernel/locking/mutex-debug.h
90030@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90031 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90032 extern void debug_mutex_add_waiter(struct mutex *lock,
90033 struct mutex_waiter *waiter,
90034- struct thread_info *ti);
90035+ struct task_struct *task);
90036 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90037- struct thread_info *ti);
90038+ struct task_struct *task);
90039 extern void debug_mutex_unlock(struct mutex *lock);
90040 extern void debug_mutex_init(struct mutex *lock, const char *name,
90041 struct lock_class_key *key);
90042diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90043index 4541951..39fe90a 100644
90044--- a/kernel/locking/mutex.c
90045+++ b/kernel/locking/mutex.c
90046@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90047 goto skip_wait;
90048
90049 debug_mutex_lock_common(lock, &waiter);
90050- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90051+ debug_mutex_add_waiter(lock, &waiter, task);
90052
90053 /* add waiting tasks to the end of the waitqueue (FIFO): */
90054 list_add_tail(&waiter.list, &lock->wait_list);
90055@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90056 schedule_preempt_disabled();
90057 spin_lock_mutex(&lock->wait_lock, flags);
90058 }
90059- mutex_remove_waiter(lock, &waiter, current_thread_info());
90060+ mutex_remove_waiter(lock, &waiter, task);
90061 /* set it to 0 if there are no waiters left: */
90062 if (likely(list_empty(&lock->wait_list)))
90063 atomic_set(&lock->count, 0);
90064@@ -606,7 +606,7 @@ skip_wait:
90065 return 0;
90066
90067 err:
90068- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90069+ mutex_remove_waiter(lock, &waiter, task);
90070 spin_unlock_mutex(&lock->wait_lock, flags);
90071 debug_mutex_free_waiter(&waiter);
90072 mutex_release(&lock->dep_map, 1, ip);
90073diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90074index 1d96dd0..994ff19 100644
90075--- a/kernel/locking/rtmutex-tester.c
90076+++ b/kernel/locking/rtmutex-tester.c
90077@@ -22,7 +22,7 @@
90078 #define MAX_RT_TEST_MUTEXES 8
90079
90080 static spinlock_t rttest_lock;
90081-static atomic_t rttest_event;
90082+static atomic_unchecked_t rttest_event;
90083
90084 struct test_thread_data {
90085 int opcode;
90086@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90087
90088 case RTTEST_LOCKCONT:
90089 td->mutexes[td->opdata] = 1;
90090- td->event = atomic_add_return(1, &rttest_event);
90091+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90092 return 0;
90093
90094 case RTTEST_RESET:
90095@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90096 return 0;
90097
90098 case RTTEST_RESETEVENT:
90099- atomic_set(&rttest_event, 0);
90100+ atomic_set_unchecked(&rttest_event, 0);
90101 return 0;
90102
90103 default:
90104@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90105 return ret;
90106
90107 td->mutexes[id] = 1;
90108- td->event = atomic_add_return(1, &rttest_event);
90109+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90110 rt_mutex_lock(&mutexes[id]);
90111- td->event = atomic_add_return(1, &rttest_event);
90112+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90113 td->mutexes[id] = 4;
90114 return 0;
90115
90116@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90117 return ret;
90118
90119 td->mutexes[id] = 1;
90120- td->event = atomic_add_return(1, &rttest_event);
90121+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90122 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90123- td->event = atomic_add_return(1, &rttest_event);
90124+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90125 td->mutexes[id] = ret ? 0 : 4;
90126 return ret ? -EINTR : 0;
90127
90128@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90129 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90130 return ret;
90131
90132- td->event = atomic_add_return(1, &rttest_event);
90133+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90134 rt_mutex_unlock(&mutexes[id]);
90135- td->event = atomic_add_return(1, &rttest_event);
90136+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90137 td->mutexes[id] = 0;
90138 return 0;
90139
90140@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90141 break;
90142
90143 td->mutexes[dat] = 2;
90144- td->event = atomic_add_return(1, &rttest_event);
90145+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90146 break;
90147
90148 default:
90149@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90150 return;
90151
90152 td->mutexes[dat] = 3;
90153- td->event = atomic_add_return(1, &rttest_event);
90154+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90155 break;
90156
90157 case RTTEST_LOCKNOWAIT:
90158@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90159 return;
90160
90161 td->mutexes[dat] = 1;
90162- td->event = atomic_add_return(1, &rttest_event);
90163+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90164 return;
90165
90166 default:
90167diff --git a/kernel/module.c b/kernel/module.c
90168index d856e96..b82225c 100644
90169--- a/kernel/module.c
90170+++ b/kernel/module.c
90171@@ -59,6 +59,7 @@
90172 #include <linux/jump_label.h>
90173 #include <linux/pfn.h>
90174 #include <linux/bsearch.h>
90175+#include <linux/grsecurity.h>
90176 #include <uapi/linux/module.h>
90177 #include "module-internal.h"
90178
90179@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90180
90181 /* Bounds of module allocation, for speeding __module_address.
90182 * Protected by module_mutex. */
90183-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90184+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90185+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90186
90187 int register_module_notifier(struct notifier_block *nb)
90188 {
90189@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90190 return true;
90191
90192 list_for_each_entry_rcu(mod, &modules, list) {
90193- struct symsearch arr[] = {
90194+ struct symsearch modarr[] = {
90195 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90196 NOT_GPL_ONLY, false },
90197 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90198@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90199 if (mod->state == MODULE_STATE_UNFORMED)
90200 continue;
90201
90202- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90203+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90204 return true;
90205 }
90206 return false;
90207@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90208 if (!pcpusec->sh_size)
90209 return 0;
90210
90211- if (align > PAGE_SIZE) {
90212+ if (align-1 >= PAGE_SIZE) {
90213 pr_warn("%s: per-cpu alignment %li > %li\n",
90214 mod->name, align, PAGE_SIZE);
90215 align = PAGE_SIZE;
90216@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90217 static ssize_t show_coresize(struct module_attribute *mattr,
90218 struct module_kobject *mk, char *buffer)
90219 {
90220- return sprintf(buffer, "%u\n", mk->mod->core_size);
90221+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90222 }
90223
90224 static struct module_attribute modinfo_coresize =
90225@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90226 static ssize_t show_initsize(struct module_attribute *mattr,
90227 struct module_kobject *mk, char *buffer)
90228 {
90229- return sprintf(buffer, "%u\n", mk->mod->init_size);
90230+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90231 }
90232
90233 static struct module_attribute modinfo_initsize =
90234@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90235 goto bad_version;
90236 }
90237
90238+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90239+ /*
90240+ * avoid potentially printing jibberish on attempted load
90241+ * of a module randomized with a different seed
90242+ */
90243+ pr_warn("no symbol version for %s\n", symname);
90244+#else
90245 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90246+#endif
90247 return 0;
90248
90249 bad_version:
90250+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90251+ /*
90252+ * avoid potentially printing jibberish on attempted load
90253+ * of a module randomized with a different seed
90254+ */
90255+ pr_warn("attempted module disagrees about version of symbol %s\n",
90256+ symname);
90257+#else
90258 pr_warn("%s: disagrees about version of symbol %s\n",
90259 mod->name, symname);
90260+#endif
90261 return 0;
90262 }
90263
90264@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
90265 */
90266 #ifdef CONFIG_SYSFS
90267
90268-#ifdef CONFIG_KALLSYMS
90269+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90270 static inline bool sect_empty(const Elf_Shdr *sect)
90271 {
90272 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90273@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90274 {
90275 unsigned int notes, loaded, i;
90276 struct module_notes_attrs *notes_attrs;
90277- struct bin_attribute *nattr;
90278+ bin_attribute_no_const *nattr;
90279
90280 /* failed to create section attributes, so can't create notes */
90281 if (!mod->sect_attrs)
90282@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
90283 static int module_add_modinfo_attrs(struct module *mod)
90284 {
90285 struct module_attribute *attr;
90286- struct module_attribute *temp_attr;
90287+ module_attribute_no_const *temp_attr;
90288 int error = 0;
90289 int i;
90290
90291@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
90292
90293 static void unset_module_core_ro_nx(struct module *mod)
90294 {
90295- set_page_attributes(mod->module_core + mod->core_text_size,
90296- mod->module_core + mod->core_size,
90297+ set_page_attributes(mod->module_core_rw,
90298+ mod->module_core_rw + mod->core_size_rw,
90299 set_memory_x);
90300- set_page_attributes(mod->module_core,
90301- mod->module_core + mod->core_ro_size,
90302+ set_page_attributes(mod->module_core_rx,
90303+ mod->module_core_rx + mod->core_size_rx,
90304 set_memory_rw);
90305 }
90306
90307 static void unset_module_init_ro_nx(struct module *mod)
90308 {
90309- set_page_attributes(mod->module_init + mod->init_text_size,
90310- mod->module_init + mod->init_size,
90311+ set_page_attributes(mod->module_init_rw,
90312+ mod->module_init_rw + mod->init_size_rw,
90313 set_memory_x);
90314- set_page_attributes(mod->module_init,
90315- mod->module_init + mod->init_ro_size,
90316+ set_page_attributes(mod->module_init_rx,
90317+ mod->module_init_rx + mod->init_size_rx,
90318 set_memory_rw);
90319 }
90320
90321@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
90322 list_for_each_entry_rcu(mod, &modules, list) {
90323 if (mod->state == MODULE_STATE_UNFORMED)
90324 continue;
90325- if ((mod->module_core) && (mod->core_text_size)) {
90326- set_page_attributes(mod->module_core,
90327- mod->module_core + mod->core_text_size,
90328+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90329+ set_page_attributes(mod->module_core_rx,
90330+ mod->module_core_rx + mod->core_size_rx,
90331 set_memory_rw);
90332 }
90333- if ((mod->module_init) && (mod->init_text_size)) {
90334- set_page_attributes(mod->module_init,
90335- mod->module_init + mod->init_text_size,
90336+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90337+ set_page_attributes(mod->module_init_rx,
90338+ mod->module_init_rx + mod->init_size_rx,
90339 set_memory_rw);
90340 }
90341 }
90342@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
90343 list_for_each_entry_rcu(mod, &modules, list) {
90344 if (mod->state == MODULE_STATE_UNFORMED)
90345 continue;
90346- if ((mod->module_core) && (mod->core_text_size)) {
90347- set_page_attributes(mod->module_core,
90348- mod->module_core + mod->core_text_size,
90349+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90350+ set_page_attributes(mod->module_core_rx,
90351+ mod->module_core_rx + mod->core_size_rx,
90352 set_memory_ro);
90353 }
90354- if ((mod->module_init) && (mod->init_text_size)) {
90355- set_page_attributes(mod->module_init,
90356- mod->module_init + mod->init_text_size,
90357+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90358+ set_page_attributes(mod->module_init_rx,
90359+ mod->module_init_rx + mod->init_size_rx,
90360 set_memory_ro);
90361 }
90362 }
90363@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
90364 #else
90365 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
90366 static void unset_module_core_ro_nx(struct module *mod) { }
90367-static void unset_module_init_ro_nx(struct module *mod) { }
90368+static void unset_module_init_ro_nx(struct module *mod)
90369+{
90370+
90371+#ifdef CONFIG_PAX_KERNEXEC
90372+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90373+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90374+#endif
90375+
90376+}
90377 #endif
90378
90379 void __weak module_memfree(void *module_region)
90380@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
90381 /* This may be NULL, but that's OK */
90382 unset_module_init_ro_nx(mod);
90383 module_arch_freeing_init(mod);
90384- module_memfree(mod->module_init);
90385+ module_memfree(mod->module_init_rw);
90386+ module_memfree_exec(mod->module_init_rx);
90387 kfree(mod->args);
90388 percpu_modfree(mod);
90389
90390 /* Free lock-classes: */
90391- lockdep_free_key_range(mod->module_core, mod->core_size);
90392+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90393+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90394
90395 /* Finally, free the core (containing the module structure) */
90396 unset_module_core_ro_nx(mod);
90397- module_memfree(mod->module_core);
90398+ module_memfree_exec(mod->module_core_rx);
90399+ module_memfree(mod->module_core_rw);
90400
90401 #ifdef CONFIG_MPU
90402 update_protections(current->mm);
90403@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90404 int ret = 0;
90405 const struct kernel_symbol *ksym;
90406
90407+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90408+ int is_fs_load = 0;
90409+ int register_filesystem_found = 0;
90410+ char *p;
90411+
90412+ p = strstr(mod->args, "grsec_modharden_fs");
90413+ if (p) {
90414+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90415+ /* copy \0 as well */
90416+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90417+ is_fs_load = 1;
90418+ }
90419+#endif
90420+
90421 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90422 const char *name = info->strtab + sym[i].st_name;
90423
90424+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90425+ /* it's a real shame this will never get ripped and copied
90426+ upstream! ;(
90427+ */
90428+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90429+ register_filesystem_found = 1;
90430+#endif
90431+
90432 switch (sym[i].st_shndx) {
90433 case SHN_COMMON:
90434 /* Ignore common symbols */
90435@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90436 ksym = resolve_symbol_wait(mod, info, name);
90437 /* Ok if resolved. */
90438 if (ksym && !IS_ERR(ksym)) {
90439+ pax_open_kernel();
90440 sym[i].st_value = ksym->value;
90441+ pax_close_kernel();
90442 break;
90443 }
90444
90445@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90446 secbase = (unsigned long)mod_percpu(mod);
90447 else
90448 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90449+ pax_open_kernel();
90450 sym[i].st_value += secbase;
90451+ pax_close_kernel();
90452 break;
90453 }
90454 }
90455
90456+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90457+ if (is_fs_load && !register_filesystem_found) {
90458+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
90459+ ret = -EPERM;
90460+ }
90461+#endif
90462+
90463 return ret;
90464 }
90465
90466@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90467 || s->sh_entsize != ~0UL
90468 || strstarts(sname, ".init"))
90469 continue;
90470- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90471+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90472+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90473+ else
90474+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90475 pr_debug("\t%s\n", sname);
90476 }
90477- switch (m) {
90478- case 0: /* executable */
90479- mod->core_size = debug_align(mod->core_size);
90480- mod->core_text_size = mod->core_size;
90481- break;
90482- case 1: /* RO: text and ro-data */
90483- mod->core_size = debug_align(mod->core_size);
90484- mod->core_ro_size = mod->core_size;
90485- break;
90486- case 3: /* whole core */
90487- mod->core_size = debug_align(mod->core_size);
90488- break;
90489- }
90490 }
90491
90492 pr_debug("Init section allocation order:\n");
90493@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90494 || s->sh_entsize != ~0UL
90495 || !strstarts(sname, ".init"))
90496 continue;
90497- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90498- | INIT_OFFSET_MASK);
90499+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90500+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90501+ else
90502+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90503+ s->sh_entsize |= INIT_OFFSET_MASK;
90504 pr_debug("\t%s\n", sname);
90505 }
90506- switch (m) {
90507- case 0: /* executable */
90508- mod->init_size = debug_align(mod->init_size);
90509- mod->init_text_size = mod->init_size;
90510- break;
90511- case 1: /* RO: text and ro-data */
90512- mod->init_size = debug_align(mod->init_size);
90513- mod->init_ro_size = mod->init_size;
90514- break;
90515- case 3: /* whole init */
90516- mod->init_size = debug_align(mod->init_size);
90517- break;
90518- }
90519 }
90520 }
90521
90522@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90523
90524 /* Put symbol section at end of init part of module. */
90525 symsect->sh_flags |= SHF_ALLOC;
90526- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90527+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90528 info->index.sym) | INIT_OFFSET_MASK;
90529 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90530
90531@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90532 }
90533
90534 /* Append room for core symbols at end of core part. */
90535- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90536- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90537- mod->core_size += strtab_size;
90538+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90539+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90540+ mod->core_size_rx += strtab_size;
90541
90542 /* Put string table section at end of init part of module. */
90543 strsect->sh_flags |= SHF_ALLOC;
90544- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90545+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90546 info->index.str) | INIT_OFFSET_MASK;
90547 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90548 }
90549@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90550 /* Make sure we get permanent strtab: don't use info->strtab. */
90551 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90552
90553+ pax_open_kernel();
90554+
90555 /* Set types up while we still have access to sections. */
90556 for (i = 0; i < mod->num_symtab; i++)
90557 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90558
90559- mod->core_symtab = dst = mod->module_core + info->symoffs;
90560- mod->core_strtab = s = mod->module_core + info->stroffs;
90561+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90562+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90563 src = mod->symtab;
90564 for (ndst = i = 0; i < mod->num_symtab; i++) {
90565 if (i == 0 ||
90566@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90567 }
90568 }
90569 mod->core_num_syms = ndst;
90570+
90571+ pax_close_kernel();
90572 }
90573 #else
90574 static inline void layout_symtab(struct module *mod, struct load_info *info)
90575@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
90576 return vmalloc_exec(size);
90577 }
90578
90579-static void *module_alloc_update_bounds(unsigned long size)
90580+static void *module_alloc_update_bounds_rw(unsigned long size)
90581 {
90582 void *ret = module_alloc(size);
90583
90584 if (ret) {
90585 mutex_lock(&module_mutex);
90586 /* Update module bounds. */
90587- if ((unsigned long)ret < module_addr_min)
90588- module_addr_min = (unsigned long)ret;
90589- if ((unsigned long)ret + size > module_addr_max)
90590- module_addr_max = (unsigned long)ret + size;
90591+ if ((unsigned long)ret < module_addr_min_rw)
90592+ module_addr_min_rw = (unsigned long)ret;
90593+ if ((unsigned long)ret + size > module_addr_max_rw)
90594+ module_addr_max_rw = (unsigned long)ret + size;
90595+ mutex_unlock(&module_mutex);
90596+ }
90597+ return ret;
90598+}
90599+
90600+static void *module_alloc_update_bounds_rx(unsigned long size)
90601+{
90602+ void *ret = module_alloc_exec(size);
90603+
90604+ if (ret) {
90605+ mutex_lock(&module_mutex);
90606+ /* Update module bounds. */
90607+ if ((unsigned long)ret < module_addr_min_rx)
90608+ module_addr_min_rx = (unsigned long)ret;
90609+ if ((unsigned long)ret + size > module_addr_max_rx)
90610+ module_addr_max_rx = (unsigned long)ret + size;
90611 mutex_unlock(&module_mutex);
90612 }
90613 return ret;
90614@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90615 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90616
90617 if (info->index.sym == 0) {
90618+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90619+ /*
90620+ * avoid potentially printing jibberish on attempted load
90621+ * of a module randomized with a different seed
90622+ */
90623+ pr_warn("module has no symbols (stripped?)\n");
90624+#else
90625 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90626+#endif
90627 return ERR_PTR(-ENOEXEC);
90628 }
90629
90630@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90631 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90632 {
90633 const char *modmagic = get_modinfo(info, "vermagic");
90634+ const char *license = get_modinfo(info, "license");
90635 int err;
90636
90637+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90638+ if (!license || !license_is_gpl_compatible(license))
90639+ return -ENOEXEC;
90640+#endif
90641+
90642 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90643 modmagic = NULL;
90644
90645@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90646 }
90647
90648 /* Set up license info based on the info section */
90649- set_license(mod, get_modinfo(info, "license"));
90650+ set_license(mod, license);
90651
90652 return 0;
90653 }
90654@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
90655 void *ptr;
90656
90657 /* Do the allocs. */
90658- ptr = module_alloc_update_bounds(mod->core_size);
90659+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90660 /*
90661 * The pointer to this block is stored in the module structure
90662 * which is inside the block. Just mark it as not being a
90663@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
90664 if (!ptr)
90665 return -ENOMEM;
90666
90667- memset(ptr, 0, mod->core_size);
90668- mod->module_core = ptr;
90669+ memset(ptr, 0, mod->core_size_rw);
90670+ mod->module_core_rw = ptr;
90671
90672- if (mod->init_size) {
90673- ptr = module_alloc_update_bounds(mod->init_size);
90674+ if (mod->init_size_rw) {
90675+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90676 /*
90677 * The pointer to this block is stored in the module structure
90678 * which is inside the block. This block doesn't need to be
90679@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
90680 */
90681 kmemleak_ignore(ptr);
90682 if (!ptr) {
90683- module_memfree(mod->module_core);
90684+ module_memfree(mod->module_core_rw);
90685 return -ENOMEM;
90686 }
90687- memset(ptr, 0, mod->init_size);
90688- mod->module_init = ptr;
90689+ memset(ptr, 0, mod->init_size_rw);
90690+ mod->module_init_rw = ptr;
90691 } else
90692- mod->module_init = NULL;
90693+ mod->module_init_rw = NULL;
90694+
90695+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90696+ kmemleak_not_leak(ptr);
90697+ if (!ptr) {
90698+ if (mod->module_init_rw)
90699+ module_memfree(mod->module_init_rw);
90700+ module_memfree(mod->module_core_rw);
90701+ return -ENOMEM;
90702+ }
90703+
90704+ pax_open_kernel();
90705+ memset(ptr, 0, mod->core_size_rx);
90706+ pax_close_kernel();
90707+ mod->module_core_rx = ptr;
90708+
90709+ if (mod->init_size_rx) {
90710+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90711+ kmemleak_ignore(ptr);
90712+ if (!ptr && mod->init_size_rx) {
90713+ module_memfree_exec(mod->module_core_rx);
90714+ if (mod->module_init_rw)
90715+ module_memfree(mod->module_init_rw);
90716+ module_memfree(mod->module_core_rw);
90717+ return -ENOMEM;
90718+ }
90719+
90720+ pax_open_kernel();
90721+ memset(ptr, 0, mod->init_size_rx);
90722+ pax_close_kernel();
90723+ mod->module_init_rx = ptr;
90724+ } else
90725+ mod->module_init_rx = NULL;
90726
90727 /* Transfer each section which specifies SHF_ALLOC */
90728 pr_debug("final section addresses:\n");
90729@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
90730 if (!(shdr->sh_flags & SHF_ALLOC))
90731 continue;
90732
90733- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90734- dest = mod->module_init
90735- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90736- else
90737- dest = mod->module_core + shdr->sh_entsize;
90738+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90739+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90740+ dest = mod->module_init_rw
90741+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90742+ else
90743+ dest = mod->module_init_rx
90744+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90745+ } else {
90746+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90747+ dest = mod->module_core_rw + shdr->sh_entsize;
90748+ else
90749+ dest = mod->module_core_rx + shdr->sh_entsize;
90750+ }
90751+
90752+ if (shdr->sh_type != SHT_NOBITS) {
90753+
90754+#ifdef CONFIG_PAX_KERNEXEC
90755+#ifdef CONFIG_X86_64
90756+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
90757+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
90758+#endif
90759+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
90760+ pax_open_kernel();
90761+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90762+ pax_close_kernel();
90763+ } else
90764+#endif
90765
90766- if (shdr->sh_type != SHT_NOBITS)
90767 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90768+ }
90769 /* Update sh_addr to point to copy in image. */
90770- shdr->sh_addr = (unsigned long)dest;
90771+
90772+#ifdef CONFIG_PAX_KERNEXEC
90773+ if (shdr->sh_flags & SHF_EXECINSTR)
90774+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
90775+ else
90776+#endif
90777+
90778+ shdr->sh_addr = (unsigned long)dest;
90779 pr_debug("\t0x%lx %s\n",
90780 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
90781 }
90782@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
90783 * Do it before processing of module parameters, so the module
90784 * can provide parameter accessor functions of its own.
90785 */
90786- if (mod->module_init)
90787- flush_icache_range((unsigned long)mod->module_init,
90788- (unsigned long)mod->module_init
90789- + mod->init_size);
90790- flush_icache_range((unsigned long)mod->module_core,
90791- (unsigned long)mod->module_core + mod->core_size);
90792+ if (mod->module_init_rx)
90793+ flush_icache_range((unsigned long)mod->module_init_rx,
90794+ (unsigned long)mod->module_init_rx
90795+ + mod->init_size_rx);
90796+ flush_icache_range((unsigned long)mod->module_core_rx,
90797+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
90798
90799 set_fs(old_fs);
90800 }
90801@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
90802 {
90803 percpu_modfree(mod);
90804 module_arch_freeing_init(mod);
90805- module_memfree(mod->module_init);
90806- module_memfree(mod->module_core);
90807+ module_memfree_exec(mod->module_init_rx);
90808+ module_memfree_exec(mod->module_core_rx);
90809+ module_memfree(mod->module_init_rw);
90810+ module_memfree(mod->module_core_rw);
90811 }
90812
90813 int __weak module_finalize(const Elf_Ehdr *hdr,
90814@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
90815 static int post_relocation(struct module *mod, const struct load_info *info)
90816 {
90817 /* Sort exception table now relocations are done. */
90818+ pax_open_kernel();
90819 sort_extable(mod->extable, mod->extable + mod->num_exentries);
90820+ pax_close_kernel();
90821
90822 /* Copy relocated percpu area over. */
90823 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
90824@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
90825 /* For freeing module_init on success, in case kallsyms traversing */
90826 struct mod_initfree {
90827 struct rcu_head rcu;
90828- void *module_init;
90829+ void *module_init_rw;
90830+ void *module_init_rx;
90831 };
90832
90833 static void do_free_init(struct rcu_head *head)
90834 {
90835 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
90836- module_memfree(m->module_init);
90837+ module_memfree(m->module_init_rw);
90838+ module_memfree_exec(m->module_init_rx);
90839 kfree(m);
90840 }
90841
90842@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
90843 ret = -ENOMEM;
90844 goto fail;
90845 }
90846- freeinit->module_init = mod->module_init;
90847+ freeinit->module_init_rw = mod->module_init_rw;
90848+ freeinit->module_init_rx = mod->module_init_rx;
90849
90850 /*
90851 * We want to find out whether @mod uses async during init. Clear
90852@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
90853 #endif
90854 unset_module_init_ro_nx(mod);
90855 module_arch_freeing_init(mod);
90856- mod->module_init = NULL;
90857- mod->init_size = 0;
90858- mod->init_ro_size = 0;
90859- mod->init_text_size = 0;
90860+ mod->module_init_rw = NULL;
90861+ mod->module_init_rx = NULL;
90862+ mod->init_size_rw = 0;
90863+ mod->init_size_rx = 0;
90864 /*
90865 * We want to free module_init, but be aware that kallsyms may be
90866 * walking this with preempt disabled. In all the failure paths,
90867@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
90868 module_bug_finalize(info->hdr, info->sechdrs, mod);
90869
90870 /* Set RO and NX regions for core */
90871- set_section_ro_nx(mod->module_core,
90872- mod->core_text_size,
90873- mod->core_ro_size,
90874- mod->core_size);
90875+ set_section_ro_nx(mod->module_core_rx,
90876+ mod->core_size_rx,
90877+ mod->core_size_rx,
90878+ mod->core_size_rx);
90879
90880 /* Set RO and NX regions for init */
90881- set_section_ro_nx(mod->module_init,
90882- mod->init_text_size,
90883- mod->init_ro_size,
90884- mod->init_size);
90885+ set_section_ro_nx(mod->module_init_rx,
90886+ mod->init_size_rx,
90887+ mod->init_size_rx,
90888+ mod->init_size_rx);
90889
90890 /* Mark state as coming so strong_try_module_get() ignores us,
90891 * but kallsyms etc. can see us. */
90892@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
90893 if (err)
90894 goto free_unload;
90895
90896+ /* Now copy in args */
90897+ mod->args = strndup_user(uargs, ~0UL >> 1);
90898+ if (IS_ERR(mod->args)) {
90899+ err = PTR_ERR(mod->args);
90900+ goto free_unload;
90901+ }
90902+
90903 /* Set up MODINFO_ATTR fields */
90904 setup_modinfo(mod, info);
90905
90906+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90907+ {
90908+ char *p, *p2;
90909+
90910+ if (strstr(mod->args, "grsec_modharden_netdev")) {
90911+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
90912+ err = -EPERM;
90913+ goto free_modinfo;
90914+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
90915+ p += sizeof("grsec_modharden_normal") - 1;
90916+ p2 = strstr(p, "_");
90917+ if (p2) {
90918+ *p2 = '\0';
90919+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
90920+ *p2 = '_';
90921+ }
90922+ err = -EPERM;
90923+ goto free_modinfo;
90924+ }
90925+ }
90926+#endif
90927+
90928 /* Fix up syms, so that st_value is a pointer to location. */
90929 err = simplify_symbols(mod, info);
90930 if (err < 0)
90931@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
90932
90933 flush_module_icache(mod);
90934
90935- /* Now copy in args */
90936- mod->args = strndup_user(uargs, ~0UL >> 1);
90937- if (IS_ERR(mod->args)) {
90938- err = PTR_ERR(mod->args);
90939- goto free_arch_cleanup;
90940- }
90941-
90942 dynamic_debug_setup(info->debug, info->num_debug);
90943
90944 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
90945@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
90946 ddebug_cleanup:
90947 dynamic_debug_remove(info->debug);
90948 synchronize_sched();
90949- kfree(mod->args);
90950- free_arch_cleanup:
90951 module_arch_cleanup(mod);
90952 free_modinfo:
90953 free_modinfo(mod);
90954+ kfree(mod->args);
90955 free_unload:
90956 module_unload_free(mod);
90957 unlink_mod:
90958@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
90959 unsigned long nextval;
90960
90961 /* At worse, next value is at end of module */
90962- if (within_module_init(addr, mod))
90963- nextval = (unsigned long)mod->module_init+mod->init_text_size;
90964+ if (within_module_init_rx(addr, mod))
90965+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
90966+ else if (within_module_init_rw(addr, mod))
90967+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
90968+ else if (within_module_core_rx(addr, mod))
90969+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
90970+ else if (within_module_core_rw(addr, mod))
90971+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
90972 else
90973- nextval = (unsigned long)mod->module_core+mod->core_text_size;
90974+ return NULL;
90975
90976 /* Scan for closest preceding symbol, and next symbol. (ELF
90977 starts real symbols at 1). */
90978@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
90979 return 0;
90980
90981 seq_printf(m, "%s %u",
90982- mod->name, mod->init_size + mod->core_size);
90983+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
90984 print_unload_info(m, mod);
90985
90986 /* Informative for users. */
90987@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
90988 mod->state == MODULE_STATE_COMING ? "Loading" :
90989 "Live");
90990 /* Used by oprofile and other similar tools. */
90991- seq_printf(m, " 0x%pK", mod->module_core);
90992+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
90993
90994 /* Taints info */
90995 if (mod->taints)
90996@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
90997
90998 static int __init proc_modules_init(void)
90999 {
91000+#ifndef CONFIG_GRKERNSEC_HIDESYM
91001+#ifdef CONFIG_GRKERNSEC_PROC_USER
91002+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91003+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91004+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91005+#else
91006 proc_create("modules", 0, NULL, &proc_modules_operations);
91007+#endif
91008+#else
91009+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91010+#endif
91011 return 0;
91012 }
91013 module_init(proc_modules_init);
91014@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
91015 {
91016 struct module *mod;
91017
91018- if (addr < module_addr_min || addr > module_addr_max)
91019+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91020+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91021 return NULL;
91022
91023 list_for_each_entry_rcu(mod, &modules, list) {
91024@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
91025 */
91026 struct module *__module_text_address(unsigned long addr)
91027 {
91028- struct module *mod = __module_address(addr);
91029+ struct module *mod;
91030+
91031+#ifdef CONFIG_X86_32
91032+ addr = ktla_ktva(addr);
91033+#endif
91034+
91035+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91036+ return NULL;
91037+
91038+ mod = __module_address(addr);
91039+
91040 if (mod) {
91041 /* Make sure it's within the text section. */
91042- if (!within(addr, mod->module_init, mod->init_text_size)
91043- && !within(addr, mod->module_core, mod->core_text_size))
91044+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91045 mod = NULL;
91046 }
91047 return mod;
91048diff --git a/kernel/notifier.c b/kernel/notifier.c
91049index 4803da6..1c5eea6 100644
91050--- a/kernel/notifier.c
91051+++ b/kernel/notifier.c
91052@@ -5,6 +5,7 @@
91053 #include <linux/rcupdate.h>
91054 #include <linux/vmalloc.h>
91055 #include <linux/reboot.h>
91056+#include <linux/mm.h>
91057
91058 /*
91059 * Notifier list for kernel code which wants to be called
91060@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91061 while ((*nl) != NULL) {
91062 if (n->priority > (*nl)->priority)
91063 break;
91064- nl = &((*nl)->next);
91065+ nl = (struct notifier_block **)&((*nl)->next);
91066 }
91067- n->next = *nl;
91068+ pax_open_kernel();
91069+ *(const void **)&n->next = *nl;
91070 rcu_assign_pointer(*nl, n);
91071+ pax_close_kernel();
91072 return 0;
91073 }
91074
91075@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91076 return 0;
91077 if (n->priority > (*nl)->priority)
91078 break;
91079- nl = &((*nl)->next);
91080+ nl = (struct notifier_block **)&((*nl)->next);
91081 }
91082- n->next = *nl;
91083+ pax_open_kernel();
91084+ *(const void **)&n->next = *nl;
91085 rcu_assign_pointer(*nl, n);
91086+ pax_close_kernel();
91087 return 0;
91088 }
91089
91090@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91091 {
91092 while ((*nl) != NULL) {
91093 if ((*nl) == n) {
91094+ pax_open_kernel();
91095 rcu_assign_pointer(*nl, n->next);
91096+ pax_close_kernel();
91097 return 0;
91098 }
91099- nl = &((*nl)->next);
91100+ nl = (struct notifier_block **)&((*nl)->next);
91101 }
91102 return -ENOENT;
91103 }
91104diff --git a/kernel/padata.c b/kernel/padata.c
91105index 161402f..598814c 100644
91106--- a/kernel/padata.c
91107+++ b/kernel/padata.c
91108@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91109 * seq_nr mod. number of cpus in use.
91110 */
91111
91112- seq_nr = atomic_inc_return(&pd->seq_nr);
91113+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91114 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91115
91116 return padata_index_to_cpu(pd, cpu_index);
91117@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91118 padata_init_pqueues(pd);
91119 padata_init_squeues(pd);
91120 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91121- atomic_set(&pd->seq_nr, -1);
91122+ atomic_set_unchecked(&pd->seq_nr, -1);
91123 atomic_set(&pd->reorder_objects, 0);
91124 atomic_set(&pd->refcnt, 0);
91125 pd->pinst = pinst;
91126diff --git a/kernel/panic.c b/kernel/panic.c
91127index 4d8d6f9..97b9b9c 100644
91128--- a/kernel/panic.c
91129+++ b/kernel/panic.c
91130@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91131 /*
91132 * Stop ourself in panic -- architecture code may override this
91133 */
91134-void __weak panic_smp_self_stop(void)
91135+void __weak __noreturn panic_smp_self_stop(void)
91136 {
91137 while (1)
91138 cpu_relax();
91139@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91140 disable_trace_on_warning();
91141
91142 pr_warn("------------[ cut here ]------------\n");
91143- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91144+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91145 raw_smp_processor_id(), current->pid, file, line, caller);
91146
91147 if (args)
91148@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91149 */
91150 __visible void __stack_chk_fail(void)
91151 {
91152- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91153+ dump_stack();
91154+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91155 __builtin_return_address(0));
91156 }
91157 EXPORT_SYMBOL(__stack_chk_fail);
91158diff --git a/kernel/pid.c b/kernel/pid.c
91159index cd36a5e..11f185d 100644
91160--- a/kernel/pid.c
91161+++ b/kernel/pid.c
91162@@ -33,6 +33,7 @@
91163 #include <linux/rculist.h>
91164 #include <linux/bootmem.h>
91165 #include <linux/hash.h>
91166+#include <linux/security.h>
91167 #include <linux/pid_namespace.h>
91168 #include <linux/init_task.h>
91169 #include <linux/syscalls.h>
91170@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91171
91172 int pid_max = PID_MAX_DEFAULT;
91173
91174-#define RESERVED_PIDS 300
91175+#define RESERVED_PIDS 500
91176
91177 int pid_max_min = RESERVED_PIDS + 1;
91178 int pid_max_max = PID_MAX_LIMIT;
91179@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91180 */
91181 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91182 {
91183+ struct task_struct *task;
91184+
91185 rcu_lockdep_assert(rcu_read_lock_held(),
91186 "find_task_by_pid_ns() needs rcu_read_lock()"
91187 " protection");
91188- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91189+
91190+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91191+
91192+ if (gr_pid_is_chrooted(task))
91193+ return NULL;
91194+
91195+ return task;
91196 }
91197
91198 struct task_struct *find_task_by_vpid(pid_t vnr)
91199@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91200 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91201 }
91202
91203+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91204+{
91205+ rcu_lockdep_assert(rcu_read_lock_held(),
91206+ "find_task_by_pid_ns() needs rcu_read_lock()"
91207+ " protection");
91208+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91209+}
91210+
91211 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91212 {
91213 struct pid *pid;
91214diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91215index a65ba13..f600dbb 100644
91216--- a/kernel/pid_namespace.c
91217+++ b/kernel/pid_namespace.c
91218@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91219 void __user *buffer, size_t *lenp, loff_t *ppos)
91220 {
91221 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91222- struct ctl_table tmp = *table;
91223+ ctl_table_no_const tmp = *table;
91224
91225 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91226 return -EPERM;
91227diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91228index 48b28d3..c63ccaf 100644
91229--- a/kernel/power/Kconfig
91230+++ b/kernel/power/Kconfig
91231@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91232 config HIBERNATION
91233 bool "Hibernation (aka 'suspend to disk')"
91234 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91235+ depends on !GRKERNSEC_KMEM
91236+ depends on !PAX_MEMORY_SANITIZE
91237 select HIBERNATE_CALLBACKS
91238 select LZO_COMPRESS
91239 select LZO_DECOMPRESS
91240diff --git a/kernel/power/process.c b/kernel/power/process.c
91241index 5a6ec86..3a8c884 100644
91242--- a/kernel/power/process.c
91243+++ b/kernel/power/process.c
91244@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91245 unsigned int elapsed_msecs;
91246 bool wakeup = false;
91247 int sleep_usecs = USEC_PER_MSEC;
91248+ bool timedout = false;
91249
91250 do_gettimeofday(&start);
91251
91252@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91253
91254 while (true) {
91255 todo = 0;
91256+ if (time_after(jiffies, end_time))
91257+ timedout = true;
91258 read_lock(&tasklist_lock);
91259 for_each_process_thread(g, p) {
91260 if (p == current || !freeze_task(p))
91261 continue;
91262
91263- if (!freezer_should_skip(p))
91264+ if (!freezer_should_skip(p)) {
91265 todo++;
91266+ if (timedout) {
91267+ printk(KERN_ERR "Task refusing to freeze:\n");
91268+ sched_show_task(p);
91269+ }
91270+ }
91271 }
91272 read_unlock(&tasklist_lock);
91273
91274@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91275 todo += wq_busy;
91276 }
91277
91278- if (!todo || time_after(jiffies, end_time))
91279+ if (!todo || timedout)
91280 break;
91281
91282 if (pm_wakeup_pending()) {
91283diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
91284index cbd69d8..2ca4a8b 100644
91285--- a/kernel/printk/console_cmdline.h
91286+++ b/kernel/printk/console_cmdline.h
91287@@ -3,7 +3,7 @@
91288
91289 struct console_cmdline
91290 {
91291- char name[8]; /* Name of the driver */
91292+ char name[16]; /* Name of the driver */
91293 int index; /* Minor dev. to use */
91294 char *options; /* Options for the driver */
91295 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
91296diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91297index fae29e3..7df1786 100644
91298--- a/kernel/printk/printk.c
91299+++ b/kernel/printk/printk.c
91300@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91301 if (from_file && type != SYSLOG_ACTION_OPEN)
91302 return 0;
91303
91304+#ifdef CONFIG_GRKERNSEC_DMESG
91305+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91306+ return -EPERM;
91307+#endif
91308+
91309 if (syslog_action_restricted(type)) {
91310 if (capable(CAP_SYSLOG))
91311 return 0;
91312@@ -2464,6 +2469,7 @@ void register_console(struct console *newcon)
91313 for (i = 0, c = console_cmdline;
91314 i < MAX_CMDLINECONSOLES && c->name[0];
91315 i++, c++) {
91316+ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
91317 if (strcmp(c->name, newcon->name) != 0)
91318 continue;
91319 if (newcon->index >= 0 &&
91320diff --git a/kernel/profile.c b/kernel/profile.c
91321index 54bf5ba..df6e0a2 100644
91322--- a/kernel/profile.c
91323+++ b/kernel/profile.c
91324@@ -37,7 +37,7 @@ struct profile_hit {
91325 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91326 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91327
91328-static atomic_t *prof_buffer;
91329+static atomic_unchecked_t *prof_buffer;
91330 static unsigned long prof_len, prof_shift;
91331
91332 int prof_on __read_mostly;
91333@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91334 hits[i].pc = 0;
91335 continue;
91336 }
91337- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91338+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91339 hits[i].hits = hits[i].pc = 0;
91340 }
91341 }
91342@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91343 * Add the current hit(s) and flush the write-queue out
91344 * to the global buffer:
91345 */
91346- atomic_add(nr_hits, &prof_buffer[pc]);
91347+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91348 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91349- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91350+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91351 hits[i].pc = hits[i].hits = 0;
91352 }
91353 out:
91354@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91355 {
91356 unsigned long pc;
91357 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91358- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91359+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91360 }
91361 #endif /* !CONFIG_SMP */
91362
91363@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91364 return -EFAULT;
91365 buf++; p++; count--; read++;
91366 }
91367- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91368+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91369 if (copy_to_user(buf, (void *)pnt, count))
91370 return -EFAULT;
91371 read += count;
91372@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91373 }
91374 #endif
91375 profile_discard_flip_buffers();
91376- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91377+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91378 return count;
91379 }
91380
91381diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91382index 1eb9d90..d40d21e 100644
91383--- a/kernel/ptrace.c
91384+++ b/kernel/ptrace.c
91385@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91386 if (seize)
91387 flags |= PT_SEIZED;
91388 rcu_read_lock();
91389- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91390+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91391 flags |= PT_PTRACE_CAP;
91392 rcu_read_unlock();
91393 task->ptrace = flags;
91394@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91395 break;
91396 return -EIO;
91397 }
91398- if (copy_to_user(dst, buf, retval))
91399+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91400 return -EFAULT;
91401 copied += retval;
91402 src += retval;
91403@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
91404 bool seized = child->ptrace & PT_SEIZED;
91405 int ret = -EIO;
91406 siginfo_t siginfo, *si;
91407- void __user *datavp = (void __user *) data;
91408+ void __user *datavp = (__force void __user *) data;
91409 unsigned long __user *datalp = datavp;
91410 unsigned long flags;
91411
91412@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91413 goto out;
91414 }
91415
91416+ if (gr_handle_ptrace(child, request)) {
91417+ ret = -EPERM;
91418+ goto out_put_task_struct;
91419+ }
91420+
91421 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91422 ret = ptrace_attach(child, request, addr, data);
91423 /*
91424 * Some architectures need to do book-keeping after
91425 * a ptrace attach.
91426 */
91427- if (!ret)
91428+ if (!ret) {
91429 arch_ptrace_attach(child);
91430+ gr_audit_ptrace(child);
91431+ }
91432 goto out_put_task_struct;
91433 }
91434
91435@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91436 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91437 if (copied != sizeof(tmp))
91438 return -EIO;
91439- return put_user(tmp, (unsigned long __user *)data);
91440+ return put_user(tmp, (__force unsigned long __user *)data);
91441 }
91442
91443 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91444@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91445 }
91446
91447 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91448- compat_long_t, addr, compat_long_t, data)
91449+ compat_ulong_t, addr, compat_ulong_t, data)
91450 {
91451 struct task_struct *child;
91452 long ret;
91453@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91454 goto out;
91455 }
91456
91457+ if (gr_handle_ptrace(child, request)) {
91458+ ret = -EPERM;
91459+ goto out_put_task_struct;
91460+ }
91461+
91462 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91463 ret = ptrace_attach(child, request, addr, data);
91464 /*
91465 * Some architectures need to do book-keeping after
91466 * a ptrace attach.
91467 */
91468- if (!ret)
91469+ if (!ret) {
91470 arch_ptrace_attach(child);
91471+ gr_audit_ptrace(child);
91472+ }
91473 goto out_put_task_struct;
91474 }
91475
91476diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91477index 4d559ba..053da37 100644
91478--- a/kernel/rcu/rcutorture.c
91479+++ b/kernel/rcu/rcutorture.c
91480@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91481 rcu_torture_count) = { 0 };
91482 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91483 rcu_torture_batch) = { 0 };
91484-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91485-static atomic_t n_rcu_torture_alloc;
91486-static atomic_t n_rcu_torture_alloc_fail;
91487-static atomic_t n_rcu_torture_free;
91488-static atomic_t n_rcu_torture_mberror;
91489-static atomic_t n_rcu_torture_error;
91490+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91491+static atomic_unchecked_t n_rcu_torture_alloc;
91492+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91493+static atomic_unchecked_t n_rcu_torture_free;
91494+static atomic_unchecked_t n_rcu_torture_mberror;
91495+static atomic_unchecked_t n_rcu_torture_error;
91496 static long n_rcu_torture_barrier_error;
91497 static long n_rcu_torture_boost_ktrerror;
91498 static long n_rcu_torture_boost_rterror;
91499@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
91500 static long n_rcu_torture_timers;
91501 static long n_barrier_attempts;
91502 static long n_barrier_successes;
91503-static atomic_long_t n_cbfloods;
91504+static atomic_long_unchecked_t n_cbfloods;
91505 static struct list_head rcu_torture_removed;
91506
91507 static int rcu_torture_writer_state;
91508@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
91509
91510 spin_lock_bh(&rcu_torture_lock);
91511 if (list_empty(&rcu_torture_freelist)) {
91512- atomic_inc(&n_rcu_torture_alloc_fail);
91513+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91514 spin_unlock_bh(&rcu_torture_lock);
91515 return NULL;
91516 }
91517- atomic_inc(&n_rcu_torture_alloc);
91518+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91519 p = rcu_torture_freelist.next;
91520 list_del_init(p);
91521 spin_unlock_bh(&rcu_torture_lock);
91522@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
91523 static void
91524 rcu_torture_free(struct rcu_torture *p)
91525 {
91526- atomic_inc(&n_rcu_torture_free);
91527+ atomic_inc_unchecked(&n_rcu_torture_free);
91528 spin_lock_bh(&rcu_torture_lock);
91529 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91530 spin_unlock_bh(&rcu_torture_lock);
91531@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91532 i = rp->rtort_pipe_count;
91533 if (i > RCU_TORTURE_PIPE_LEN)
91534 i = RCU_TORTURE_PIPE_LEN;
91535- atomic_inc(&rcu_torture_wcount[i]);
91536+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91537 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91538 rp->rtort_mbtest = 0;
91539 return true;
91540@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
91541 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
91542 do {
91543 schedule_timeout_interruptible(cbflood_inter_holdoff);
91544- atomic_long_inc(&n_cbfloods);
91545+ atomic_long_inc_unchecked(&n_cbfloods);
91546 WARN_ON(signal_pending(current));
91547 for (i = 0; i < cbflood_n_burst; i++) {
91548 for (j = 0; j < cbflood_n_per_burst; j++) {
91549@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
91550 i = old_rp->rtort_pipe_count;
91551 if (i > RCU_TORTURE_PIPE_LEN)
91552 i = RCU_TORTURE_PIPE_LEN;
91553- atomic_inc(&rcu_torture_wcount[i]);
91554+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91555 old_rp->rtort_pipe_count++;
91556 switch (synctype[torture_random(&rand) % nsynctypes]) {
91557 case RTWS_DEF_FREE:
91558@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
91559 return;
91560 }
91561 if (p->rtort_mbtest == 0)
91562- atomic_inc(&n_rcu_torture_mberror);
91563+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91564 spin_lock(&rand_lock);
91565 cur_ops->read_delay(&rand);
91566 n_rcu_torture_timers++;
91567@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
91568 continue;
91569 }
91570 if (p->rtort_mbtest == 0)
91571- atomic_inc(&n_rcu_torture_mberror);
91572+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91573 cur_ops->read_delay(&rand);
91574 preempt_disable();
91575 pipe_count = p->rtort_pipe_count;
91576@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
91577 rcu_torture_current,
91578 rcu_torture_current_version,
91579 list_empty(&rcu_torture_freelist),
91580- atomic_read(&n_rcu_torture_alloc),
91581- atomic_read(&n_rcu_torture_alloc_fail),
91582- atomic_read(&n_rcu_torture_free));
91583+ atomic_read_unchecked(&n_rcu_torture_alloc),
91584+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91585+ atomic_read_unchecked(&n_rcu_torture_free));
91586 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
91587- atomic_read(&n_rcu_torture_mberror),
91588+ atomic_read_unchecked(&n_rcu_torture_mberror),
91589 n_rcu_torture_boost_ktrerror,
91590 n_rcu_torture_boost_rterror);
91591 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
91592@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
91593 n_barrier_successes,
91594 n_barrier_attempts,
91595 n_rcu_torture_barrier_error);
91596- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
91597+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
91598
91599 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91600- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91601+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91602 n_rcu_torture_barrier_error != 0 ||
91603 n_rcu_torture_boost_ktrerror != 0 ||
91604 n_rcu_torture_boost_rterror != 0 ||
91605 n_rcu_torture_boost_failure != 0 ||
91606 i > 1) {
91607 pr_cont("%s", "!!! ");
91608- atomic_inc(&n_rcu_torture_error);
91609+ atomic_inc_unchecked(&n_rcu_torture_error);
91610 WARN_ON_ONCE(1);
91611 }
91612 pr_cont("Reader Pipe: ");
91613@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
91614 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91615 pr_cont("Free-Block Circulation: ");
91616 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91617- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
91618+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
91619 }
91620 pr_cont("\n");
91621
91622@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
91623
91624 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91625
91626- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91627+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91628 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91629 else if (torture_onoff_failures())
91630 rcu_torture_print_module_parms(cur_ops,
91631@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
91632
91633 rcu_torture_current = NULL;
91634 rcu_torture_current_version = 0;
91635- atomic_set(&n_rcu_torture_alloc, 0);
91636- atomic_set(&n_rcu_torture_alloc_fail, 0);
91637- atomic_set(&n_rcu_torture_free, 0);
91638- atomic_set(&n_rcu_torture_mberror, 0);
91639- atomic_set(&n_rcu_torture_error, 0);
91640+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91641+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91642+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91643+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91644+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91645 n_rcu_torture_barrier_error = 0;
91646 n_rcu_torture_boost_ktrerror = 0;
91647 n_rcu_torture_boost_rterror = 0;
91648 n_rcu_torture_boost_failure = 0;
91649 n_rcu_torture_boosts = 0;
91650 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91651- atomic_set(&rcu_torture_wcount[i], 0);
91652+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91653 for_each_possible_cpu(cpu) {
91654 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91655 per_cpu(rcu_torture_count, cpu)[i] = 0;
91656diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91657index 0db5649..e6ec167 100644
91658--- a/kernel/rcu/tiny.c
91659+++ b/kernel/rcu/tiny.c
91660@@ -42,7 +42,7 @@
91661 /* Forward declarations for tiny_plugin.h. */
91662 struct rcu_ctrlblk;
91663 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91664-static void rcu_process_callbacks(struct softirq_action *unused);
91665+static void rcu_process_callbacks(void);
91666 static void __call_rcu(struct rcu_head *head,
91667 void (*func)(struct rcu_head *rcu),
91668 struct rcu_ctrlblk *rcp);
91669@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91670 false));
91671 }
91672
91673-static void rcu_process_callbacks(struct softirq_action *unused)
91674+static __latent_entropy void rcu_process_callbacks(void)
91675 {
91676 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91677 __rcu_process_callbacks(&rcu_bh_ctrlblk);
91678diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91679index 858c565..7efd915 100644
91680--- a/kernel/rcu/tiny_plugin.h
91681+++ b/kernel/rcu/tiny_plugin.h
91682@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91683 dump_stack();
91684 }
91685 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91686- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91687+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91688 3 * rcu_jiffies_till_stall_check() + 3;
91689 else if (ULONG_CMP_GE(j, js))
91690- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91691+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91692 }
91693
91694 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91695 {
91696 rcp->ticks_this_gp = 0;
91697 rcp->gp_start = jiffies;
91698- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91699+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91700 }
91701
91702 static void check_cpu_stalls(void)
91703diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91704index 7680fc2..b8e9161 100644
91705--- a/kernel/rcu/tree.c
91706+++ b/kernel/rcu/tree.c
91707@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
91708 */
91709 rdtp = this_cpu_ptr(&rcu_dynticks);
91710 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91711- atomic_add(2, &rdtp->dynticks); /* QS. */
91712+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91713 smp_mb__after_atomic(); /* Later stuff after QS. */
91714 break;
91715 }
91716@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
91717 rcu_prepare_for_idle();
91718 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91719 smp_mb__before_atomic(); /* See above. */
91720- atomic_inc(&rdtp->dynticks);
91721+ atomic_inc_unchecked(&rdtp->dynticks);
91722 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91723- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91724+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91725 rcu_dynticks_task_enter();
91726
91727 /*
91728@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
91729
91730 rcu_dynticks_task_exit();
91731 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91732- atomic_inc(&rdtp->dynticks);
91733+ atomic_inc_unchecked(&rdtp->dynticks);
91734 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91735 smp_mb__after_atomic(); /* See above. */
91736- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91737+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91738 rcu_cleanup_after_idle();
91739 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91740 if (!user && !is_idle_task(current)) {
91741@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
91742 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91743
91744 if (rdtp->dynticks_nmi_nesting == 0 &&
91745- (atomic_read(&rdtp->dynticks) & 0x1))
91746+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91747 return;
91748 rdtp->dynticks_nmi_nesting++;
91749 smp_mb__before_atomic(); /* Force delay from prior write. */
91750- atomic_inc(&rdtp->dynticks);
91751+ atomic_inc_unchecked(&rdtp->dynticks);
91752 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91753 smp_mb__after_atomic(); /* See above. */
91754- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91755+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91756 }
91757
91758 /**
91759@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
91760 return;
91761 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91762 smp_mb__before_atomic(); /* See above. */
91763- atomic_inc(&rdtp->dynticks);
91764+ atomic_inc_unchecked(&rdtp->dynticks);
91765 smp_mb__after_atomic(); /* Force delay to next write. */
91766- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91767+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91768 }
91769
91770 /**
91771@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
91772 */
91773 bool notrace __rcu_is_watching(void)
91774 {
91775- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91776+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91777 }
91778
91779 /**
91780@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91781 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91782 bool *isidle, unsigned long *maxj)
91783 {
91784- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91785+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91786 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91787 if ((rdp->dynticks_snap & 0x1) == 0) {
91788 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91789@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91790 int *rcrmp;
91791 unsigned int snap;
91792
91793- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91794+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91795 snap = (unsigned int)rdp->dynticks_snap;
91796
91797 /*
91798@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91799 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91800 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91801 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91802- ACCESS_ONCE(rdp->cond_resched_completed) =
91803+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
91804 ACCESS_ONCE(rdp->mynode->completed);
91805 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
91806- ACCESS_ONCE(*rcrmp) =
91807+ ACCESS_ONCE_RW(*rcrmp) =
91808 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
91809 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
91810 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
91811@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
91812 rsp->gp_start = j;
91813 smp_wmb(); /* Record start time before stall time. */
91814 j1 = rcu_jiffies_till_stall_check();
91815- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
91816+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
91817 rsp->jiffies_resched = j + j1 / 2;
91818 }
91819
91820@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
91821 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91822 return;
91823 }
91824- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91825+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91826 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91827
91828 /*
91829@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
91830
91831 raw_spin_lock_irqsave(&rnp->lock, flags);
91832 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
91833- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
91834+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
91835 3 * rcu_jiffies_till_stall_check() + 3;
91836 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91837
91838@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
91839 struct rcu_state *rsp;
91840
91841 for_each_rcu_flavor(rsp)
91842- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91843+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91844 }
91845
91846 /*
91847@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
91848 raw_spin_unlock_irq(&rnp->lock);
91849 return 0;
91850 }
91851- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91852+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91853
91854 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
91855 /*
91856@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
91857 rdp = this_cpu_ptr(rsp->rda);
91858 rcu_preempt_check_blocked_tasks(rnp);
91859 rnp->qsmask = rnp->qsmaskinit;
91860- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
91861+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
91862 WARN_ON_ONCE(rnp->completed != rsp->completed);
91863- ACCESS_ONCE(rnp->completed) = rsp->completed;
91864+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
91865 if (rnp == rdp->mynode)
91866 (void)__note_gp_changes(rsp, rnp, rdp);
91867 rcu_preempt_boost_start_gp(rnp);
91868@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
91869 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
91870 raw_spin_lock_irq(&rnp->lock);
91871 smp_mb__after_unlock_lock();
91872- ACCESS_ONCE(rsp->gp_flags) =
91873+ ACCESS_ONCE_RW(rsp->gp_flags) =
91874 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
91875 raw_spin_unlock_irq(&rnp->lock);
91876 }
91877@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91878 rcu_for_each_node_breadth_first(rsp, rnp) {
91879 raw_spin_lock_irq(&rnp->lock);
91880 smp_mb__after_unlock_lock();
91881- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
91882+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
91883 rdp = this_cpu_ptr(rsp->rda);
91884 if (rnp == rdp->mynode)
91885 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
91886@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91887 rcu_nocb_gp_set(rnp, nocb);
91888
91889 /* Declare grace period done. */
91890- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
91891+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
91892 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
91893 rsp->fqs_state = RCU_GP_IDLE;
91894 rdp = this_cpu_ptr(rsp->rda);
91895 /* Advance CBs to reduce false positives below. */
91896 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
91897 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
91898- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91899+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91900 trace_rcu_grace_period(rsp->name,
91901 ACCESS_ONCE(rsp->gpnum),
91902 TPS("newreq"));
91903@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
91904 */
91905 return false;
91906 }
91907- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91908+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91909 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
91910 TPS("newreq"));
91911
91912@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
91913 rsp->qlen += rdp->qlen;
91914 rdp->n_cbs_orphaned += rdp->qlen;
91915 rdp->qlen_lazy = 0;
91916- ACCESS_ONCE(rdp->qlen) = 0;
91917+ ACCESS_ONCE_RW(rdp->qlen) = 0;
91918 }
91919
91920 /*
91921@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
91922 }
91923 smp_mb(); /* List handling before counting for rcu_barrier(). */
91924 rdp->qlen_lazy -= count_lazy;
91925- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
91926+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
91927 rdp->n_cbs_invoked += count;
91928
91929 /* Reinstate batch limit if we have worked down the excess. */
91930@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
91931 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
91932 return; /* Someone beat us to it. */
91933 }
91934- ACCESS_ONCE(rsp->gp_flags) =
91935+ ACCESS_ONCE_RW(rsp->gp_flags) =
91936 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
91937 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
91938 rcu_gp_kthread_wake(rsp);
91939@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
91940 /*
91941 * Do RCU core processing for the current CPU.
91942 */
91943-static void rcu_process_callbacks(struct softirq_action *unused)
91944+static void rcu_process_callbacks(void)
91945 {
91946 struct rcu_state *rsp;
91947
91948@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
91949 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
91950 if (debug_rcu_head_queue(head)) {
91951 /* Probable double call_rcu(), so leak the callback. */
91952- ACCESS_ONCE(head->func) = rcu_leak_callback;
91953+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
91954 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
91955 return;
91956 }
91957@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
91958 local_irq_restore(flags);
91959 return;
91960 }
91961- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
91962+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
91963 if (lazy)
91964 rdp->qlen_lazy++;
91965 else
91966@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
91967 * counter wrap on a 32-bit system. Quite a few more CPUs would of
91968 * course be required on a 64-bit system.
91969 */
91970- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
91971+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
91972 (ulong)atomic_long_read(&rsp->expedited_done) +
91973 ULONG_MAX / 8)) {
91974 synchronize_sched();
91975- atomic_long_inc(&rsp->expedited_wrap);
91976+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
91977 return;
91978 }
91979
91980@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
91981 * Take a ticket. Note that atomic_inc_return() implies a
91982 * full memory barrier.
91983 */
91984- snap = atomic_long_inc_return(&rsp->expedited_start);
91985+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
91986 firstsnap = snap;
91987 if (!try_get_online_cpus()) {
91988 /* CPU hotplug operation in flight, fall back to normal GP. */
91989 wait_rcu_gp(call_rcu_sched);
91990- atomic_long_inc(&rsp->expedited_normal);
91991+ atomic_long_inc_unchecked(&rsp->expedited_normal);
91992 return;
91993 }
91994 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
91995@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
91996 for_each_cpu(cpu, cm) {
91997 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
91998
91999- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92000+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92001 cpumask_clear_cpu(cpu, cm);
92002 }
92003 if (cpumask_weight(cm) == 0)
92004@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
92005 synchronize_sched_expedited_cpu_stop,
92006 NULL) == -EAGAIN) {
92007 put_online_cpus();
92008- atomic_long_inc(&rsp->expedited_tryfail);
92009+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92010
92011 /* Check to see if someone else did our work for us. */
92012 s = atomic_long_read(&rsp->expedited_done);
92013 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92014 /* ensure test happens before caller kfree */
92015 smp_mb__before_atomic(); /* ^^^ */
92016- atomic_long_inc(&rsp->expedited_workdone1);
92017+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92018 free_cpumask_var(cm);
92019 return;
92020 }
92021@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
92022 udelay(trycount * num_online_cpus());
92023 } else {
92024 wait_rcu_gp(call_rcu_sched);
92025- atomic_long_inc(&rsp->expedited_normal);
92026+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92027 free_cpumask_var(cm);
92028 return;
92029 }
92030@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
92031 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92032 /* ensure test happens before caller kfree */
92033 smp_mb__before_atomic(); /* ^^^ */
92034- atomic_long_inc(&rsp->expedited_workdone2);
92035+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92036 free_cpumask_var(cm);
92037 return;
92038 }
92039@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
92040 if (!try_get_online_cpus()) {
92041 /* CPU hotplug operation in flight, use normal GP. */
92042 wait_rcu_gp(call_rcu_sched);
92043- atomic_long_inc(&rsp->expedited_normal);
92044+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92045 free_cpumask_var(cm);
92046 return;
92047 }
92048- snap = atomic_long_read(&rsp->expedited_start);
92049+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92050 smp_mb(); /* ensure read is before try_stop_cpus(). */
92051 }
92052- atomic_long_inc(&rsp->expedited_stoppedcpus);
92053+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92054
92055 all_cpus_idle:
92056 free_cpumask_var(cm);
92057@@ -3072,16 +3072,16 @@ all_cpus_idle:
92058 * than we did already did their update.
92059 */
92060 do {
92061- atomic_long_inc(&rsp->expedited_done_tries);
92062+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92063 s = atomic_long_read(&rsp->expedited_done);
92064 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92065 /* ensure test happens before caller kfree */
92066 smp_mb__before_atomic(); /* ^^^ */
92067- atomic_long_inc(&rsp->expedited_done_lost);
92068+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92069 break;
92070 }
92071 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92072- atomic_long_inc(&rsp->expedited_done_exit);
92073+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92074
92075 put_online_cpus();
92076 }
92077@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92078 * ACCESS_ONCE() to prevent the compiler from speculating
92079 * the increment to precede the early-exit check.
92080 */
92081- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92082+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92083 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92084 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92085 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92086@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92087
92088 /* Increment ->n_barrier_done to prevent duplicate work. */
92089 smp_mb(); /* Keep increment after above mechanism. */
92090- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92091+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92092 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92093 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92094 smp_mb(); /* Keep increment before caller's subsequent code. */
92095@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92096 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92097 init_callback_list(rdp);
92098 rdp->qlen_lazy = 0;
92099- ACCESS_ONCE(rdp->qlen) = 0;
92100+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92101 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92102 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92103- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92104+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92105 rdp->cpu = cpu;
92106 rdp->rsp = rsp;
92107 rcu_boot_init_nocb_percpu_data(rdp);
92108@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92109 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92110 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92111 rcu_sysidle_init_percpu_data(rdp->dynticks);
92112- atomic_set(&rdp->dynticks->dynticks,
92113- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92114+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92115+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92116 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92117
92118 /* Add CPU to rcu_node bitmasks. */
92119diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92120index 8e7b184..9c55768 100644
92121--- a/kernel/rcu/tree.h
92122+++ b/kernel/rcu/tree.h
92123@@ -87,11 +87,11 @@ struct rcu_dynticks {
92124 long long dynticks_nesting; /* Track irq/process nesting level. */
92125 /* Process level is worth LLONG_MAX/2. */
92126 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92127- atomic_t dynticks; /* Even value for idle, else odd. */
92128+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92129 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92130 long long dynticks_idle_nesting;
92131 /* irq/process nesting level from idle. */
92132- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92133+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92134 /* "Idle" excludes userspace execution. */
92135 unsigned long dynticks_idle_jiffies;
92136 /* End of last non-NMI non-idle period. */
92137@@ -466,17 +466,17 @@ struct rcu_state {
92138 /* _rcu_barrier(). */
92139 /* End of fields guarded by barrier_mutex. */
92140
92141- atomic_long_t expedited_start; /* Starting ticket. */
92142- atomic_long_t expedited_done; /* Done ticket. */
92143- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92144- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92145- atomic_long_t expedited_workdone1; /* # done by others #1. */
92146- atomic_long_t expedited_workdone2; /* # done by others #2. */
92147- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92148- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92149- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92150- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92151- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92152+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92153+ atomic_long_t expedited_done; /* Done ticket. */
92154+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92155+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92156+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92157+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92158+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92159+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92160+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92161+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92162+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92163
92164 unsigned long jiffies_force_qs; /* Time at which to invoke */
92165 /* force_quiescent_state(). */
92166diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92167index 3ec85cb..3687925 100644
92168--- a/kernel/rcu/tree_plugin.h
92169+++ b/kernel/rcu/tree_plugin.h
92170@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92171 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92172 {
92173 return !rcu_preempted_readers_exp(rnp) &&
92174- ACCESS_ONCE(rnp->expmask) == 0;
92175+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92176 }
92177
92178 /*
92179@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
92180
92181 /* Clean up and exit. */
92182 smp_mb(); /* ensure expedited GP seen before counter increment. */
92183- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92184+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92185 sync_rcu_preempt_exp_count + 1;
92186 unlock_mb_ret:
92187 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92188@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92189 free_cpumask_var(cm);
92190 }
92191
92192-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92193+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92194 .store = &rcu_cpu_kthread_task,
92195 .thread_should_run = rcu_cpu_kthread_should_run,
92196 .thread_fn = rcu_cpu_kthread,
92197@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92198 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92199 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92200 cpu, ticks_value, ticks_title,
92201- atomic_read(&rdtp->dynticks) & 0xfff,
92202+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92203 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92204 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92205 fast_no_hz);
92206@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92207 return;
92208 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92209 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92210- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92211+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92212 wake_up(&rdp_leader->nocb_wq);
92213 }
92214 }
92215@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92216
92217 /* Enqueue the callback on the nocb list and update counts. */
92218 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92219- ACCESS_ONCE(*old_rhpp) = rhp;
92220+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92221 atomic_long_add(rhcount, &rdp->nocb_q_count);
92222 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92223 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92224@@ -2286,7 +2286,7 @@ wait_again:
92225 continue; /* No CBs here, try next follower. */
92226
92227 /* Move callbacks to wait-for-GP list, which is empty. */
92228- ACCESS_ONCE(rdp->nocb_head) = NULL;
92229+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92230 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92231 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92232 rdp->nocb_gp_count_lazy =
92233@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
92234 list = ACCESS_ONCE(rdp->nocb_follower_head);
92235 BUG_ON(!list);
92236 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92237- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92238+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92239 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92240 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92241 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92242@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
92243 list = next;
92244 }
92245 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92246- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92247- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
92248+ ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92249+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
92250 rdp->nocb_p_count_lazy - cl;
92251 rdp->n_nocbs_invoked += c;
92252 }
92253@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92254 if (!rcu_nocb_need_deferred_wakeup(rdp))
92255 return;
92256 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92257- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92258+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92259 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92260 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92261 }
92262@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92263 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92264 "rcuo%c/%d", rsp->abbr, cpu);
92265 BUG_ON(IS_ERR(t));
92266- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
92267+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
92268 }
92269
92270 /*
92271@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
92272
92273 /* Record start of fully idle period. */
92274 j = jiffies;
92275- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92276+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92277 smp_mb__before_atomic();
92278- atomic_inc(&rdtp->dynticks_idle);
92279+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92280 smp_mb__after_atomic();
92281- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92282+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92283 }
92284
92285 /*
92286@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
92287
92288 /* Record end of idle period. */
92289 smp_mb__before_atomic();
92290- atomic_inc(&rdtp->dynticks_idle);
92291+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92292 smp_mb__after_atomic();
92293- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92294+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92295
92296 /*
92297 * If we are the timekeeping CPU, we are permitted to be non-idle
92298@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92299 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92300
92301 /* Pick up current idle and NMI-nesting counter and check. */
92302- cur = atomic_read(&rdtp->dynticks_idle);
92303+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92304 if (cur & 0x1) {
92305 *isidle = false; /* We are not idle! */
92306 return;
92307@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
92308 case RCU_SYSIDLE_NOT:
92309
92310 /* First time all are idle, so note a short idle period. */
92311- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92312+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92313 break;
92314
92315 case RCU_SYSIDLE_SHORT:
92316@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
92317 {
92318 smp_mb();
92319 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92320- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92321+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92322 }
92323
92324 /*
92325@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92326 smp_mb(); /* grace period precedes setting inuse. */
92327
92328 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92329- ACCESS_ONCE(rshp->inuse) = 0;
92330+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92331 }
92332
92333 /*
92334@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
92335 static void rcu_dynticks_task_enter(void)
92336 {
92337 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92338- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
92339+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
92340 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92341 }
92342
92343@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
92344 static void rcu_dynticks_task_exit(void)
92345 {
92346 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92347- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
92348+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
92349 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92350 }
92351diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92352index 5cdc62e..cc52e88 100644
92353--- a/kernel/rcu/tree_trace.c
92354+++ b/kernel/rcu/tree_trace.c
92355@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92356 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92357 rdp->passed_quiesce, rdp->qs_pending);
92358 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92359- atomic_read(&rdp->dynticks->dynticks),
92360+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92361 rdp->dynticks->dynticks_nesting,
92362 rdp->dynticks->dynticks_nmi_nesting,
92363 rdp->dynticks_fqs);
92364@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92365 struct rcu_state *rsp = (struct rcu_state *)m->private;
92366
92367 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92368- atomic_long_read(&rsp->expedited_start),
92369+ atomic_long_read_unchecked(&rsp->expedited_start),
92370 atomic_long_read(&rsp->expedited_done),
92371- atomic_long_read(&rsp->expedited_wrap),
92372- atomic_long_read(&rsp->expedited_tryfail),
92373- atomic_long_read(&rsp->expedited_workdone1),
92374- atomic_long_read(&rsp->expedited_workdone2),
92375- atomic_long_read(&rsp->expedited_normal),
92376- atomic_long_read(&rsp->expedited_stoppedcpus),
92377- atomic_long_read(&rsp->expedited_done_tries),
92378- atomic_long_read(&rsp->expedited_done_lost),
92379- atomic_long_read(&rsp->expedited_done_exit));
92380+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92381+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92382+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92383+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92384+ atomic_long_read_unchecked(&rsp->expedited_normal),
92385+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92386+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92387+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92388+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92389 return 0;
92390 }
92391
92392diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92393index e0d31a3..f4dafe3 100644
92394--- a/kernel/rcu/update.c
92395+++ b/kernel/rcu/update.c
92396@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
92397 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92398 */
92399 if (till_stall_check < 3) {
92400- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92401+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92402 till_stall_check = 3;
92403 } else if (till_stall_check > 300) {
92404- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92405+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92406 till_stall_check = 300;
92407 }
92408 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92409@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
92410 !ACCESS_ONCE(t->on_rq) ||
92411 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
92412 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
92413- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
92414+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
92415 list_del_init(&t->rcu_tasks_holdout_list);
92416 put_task_struct(t);
92417 return;
92418@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
92419 !is_idle_task(t)) {
92420 get_task_struct(t);
92421 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
92422- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
92423+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
92424 list_add(&t->rcu_tasks_holdout_list,
92425 &rcu_tasks_holdouts);
92426 }
92427@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
92428 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
92429 BUG_ON(IS_ERR(t));
92430 smp_mb(); /* Ensure others see full kthread. */
92431- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
92432+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
92433 mutex_unlock(&rcu_tasks_kthread_mutex);
92434 }
92435
92436diff --git a/kernel/resource.c b/kernel/resource.c
92437index 0bcebff..e7cd5b2 100644
92438--- a/kernel/resource.c
92439+++ b/kernel/resource.c
92440@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92441
92442 static int __init ioresources_init(void)
92443 {
92444+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92445+#ifdef CONFIG_GRKERNSEC_PROC_USER
92446+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92447+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92448+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92449+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92450+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92451+#endif
92452+#else
92453 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92454 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92455+#endif
92456 return 0;
92457 }
92458 __initcall(ioresources_init);
92459diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92460index eae160d..c9aa22e 100644
92461--- a/kernel/sched/auto_group.c
92462+++ b/kernel/sched/auto_group.c
92463@@ -11,7 +11,7 @@
92464
92465 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92466 static struct autogroup autogroup_default;
92467-static atomic_t autogroup_seq_nr;
92468+static atomic_unchecked_t autogroup_seq_nr;
92469
92470 void __init autogroup_init(struct task_struct *init_task)
92471 {
92472@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92473
92474 kref_init(&ag->kref);
92475 init_rwsem(&ag->lock);
92476- ag->id = atomic_inc_return(&autogroup_seq_nr);
92477+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92478 ag->tg = tg;
92479 #ifdef CONFIG_RT_GROUP_SCHED
92480 /*
92481diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92482index 607f852..486bc87 100644
92483--- a/kernel/sched/completion.c
92484+++ b/kernel/sched/completion.c
92485@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92486 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92487 * or number of jiffies left till timeout) if completed.
92488 */
92489-long __sched
92490+long __sched __intentional_overflow(-1)
92491 wait_for_completion_interruptible_timeout(struct completion *x,
92492 unsigned long timeout)
92493 {
92494@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92495 *
92496 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92497 */
92498-int __sched wait_for_completion_killable(struct completion *x)
92499+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92500 {
92501 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92502 if (t == -ERESTARTSYS)
92503@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92504 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92505 * or number of jiffies left till timeout) if completed.
92506 */
92507-long __sched
92508+long __sched __intentional_overflow(-1)
92509 wait_for_completion_killable_timeout(struct completion *x,
92510 unsigned long timeout)
92511 {
92512diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92513index 44dfc8b..56d160d 100644
92514--- a/kernel/sched/core.c
92515+++ b/kernel/sched/core.c
92516@@ -1902,7 +1902,7 @@ void set_numabalancing_state(bool enabled)
92517 int sysctl_numa_balancing(struct ctl_table *table, int write,
92518 void __user *buffer, size_t *lenp, loff_t *ppos)
92519 {
92520- struct ctl_table t;
92521+ ctl_table_no_const t;
92522 int err;
92523 int state = numabalancing_enabled;
92524
92525@@ -2352,8 +2352,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92526 next->active_mm = oldmm;
92527 atomic_inc(&oldmm->mm_count);
92528 enter_lazy_tlb(oldmm, next);
92529- } else
92530+ } else {
92531 switch_mm(oldmm, mm, next);
92532+ populate_stack();
92533+ }
92534
92535 if (!prev->mm) {
92536 prev->active_mm = NULL;
92537@@ -3152,6 +3154,8 @@ int can_nice(const struct task_struct *p, const int nice)
92538 /* convert nice value [19,-20] to rlimit style value [1,40] */
92539 int nice_rlim = nice_to_rlimit(nice);
92540
92541+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92542+
92543 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92544 capable(CAP_SYS_NICE));
92545 }
92546@@ -3178,7 +3182,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92547 nice = task_nice(current) + increment;
92548
92549 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92550- if (increment < 0 && !can_nice(current, nice))
92551+ if (increment < 0 && (!can_nice(current, nice) ||
92552+ gr_handle_chroot_nice()))
92553 return -EPERM;
92554
92555 retval = security_task_setnice(current, nice);
92556@@ -3473,6 +3478,7 @@ recheck:
92557 if (policy != p->policy && !rlim_rtprio)
92558 return -EPERM;
92559
92560+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92561 /* can't increase priority */
92562 if (attr->sched_priority > p->rt_priority &&
92563 attr->sched_priority > rlim_rtprio)
92564@@ -4973,6 +4979,7 @@ void idle_task_exit(void)
92565
92566 if (mm != &init_mm) {
92567 switch_mm(mm, &init_mm, current);
92568+ populate_stack();
92569 finish_arch_post_lock_switch();
92570 }
92571 mmdrop(mm);
92572@@ -5068,7 +5075,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92573
92574 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92575
92576-static struct ctl_table sd_ctl_dir[] = {
92577+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92578 {
92579 .procname = "sched_domain",
92580 .mode = 0555,
92581@@ -5085,17 +5092,17 @@ static struct ctl_table sd_ctl_root[] = {
92582 {}
92583 };
92584
92585-static struct ctl_table *sd_alloc_ctl_entry(int n)
92586+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92587 {
92588- struct ctl_table *entry =
92589+ ctl_table_no_const *entry =
92590 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92591
92592 return entry;
92593 }
92594
92595-static void sd_free_ctl_entry(struct ctl_table **tablep)
92596+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92597 {
92598- struct ctl_table *entry;
92599+ ctl_table_no_const *entry;
92600
92601 /*
92602 * In the intermediate directories, both the child directory and
92603@@ -5103,22 +5110,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92604 * will always be set. In the lowest directory the names are
92605 * static strings and all have proc handlers.
92606 */
92607- for (entry = *tablep; entry->mode; entry++) {
92608- if (entry->child)
92609- sd_free_ctl_entry(&entry->child);
92610+ for (entry = tablep; entry->mode; entry++) {
92611+ if (entry->child) {
92612+ sd_free_ctl_entry(entry->child);
92613+ pax_open_kernel();
92614+ entry->child = NULL;
92615+ pax_close_kernel();
92616+ }
92617 if (entry->proc_handler == NULL)
92618 kfree(entry->procname);
92619 }
92620
92621- kfree(*tablep);
92622- *tablep = NULL;
92623+ kfree(tablep);
92624 }
92625
92626 static int min_load_idx = 0;
92627 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92628
92629 static void
92630-set_table_entry(struct ctl_table *entry,
92631+set_table_entry(ctl_table_no_const *entry,
92632 const char *procname, void *data, int maxlen,
92633 umode_t mode, proc_handler *proc_handler,
92634 bool load_idx)
92635@@ -5138,7 +5148,7 @@ set_table_entry(struct ctl_table *entry,
92636 static struct ctl_table *
92637 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92638 {
92639- struct ctl_table *table = sd_alloc_ctl_entry(14);
92640+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92641
92642 if (table == NULL)
92643 return NULL;
92644@@ -5176,9 +5186,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92645 return table;
92646 }
92647
92648-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92649+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92650 {
92651- struct ctl_table *entry, *table;
92652+ ctl_table_no_const *entry, *table;
92653 struct sched_domain *sd;
92654 int domain_num = 0, i;
92655 char buf[32];
92656@@ -5205,11 +5215,13 @@ static struct ctl_table_header *sd_sysctl_header;
92657 static void register_sched_domain_sysctl(void)
92658 {
92659 int i, cpu_num = num_possible_cpus();
92660- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92661+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92662 char buf[32];
92663
92664 WARN_ON(sd_ctl_dir[0].child);
92665+ pax_open_kernel();
92666 sd_ctl_dir[0].child = entry;
92667+ pax_close_kernel();
92668
92669 if (entry == NULL)
92670 return;
92671@@ -5232,8 +5244,12 @@ static void unregister_sched_domain_sysctl(void)
92672 if (sd_sysctl_header)
92673 unregister_sysctl_table(sd_sysctl_header);
92674 sd_sysctl_header = NULL;
92675- if (sd_ctl_dir[0].child)
92676- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92677+ if (sd_ctl_dir[0].child) {
92678+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92679+ pax_open_kernel();
92680+ sd_ctl_dir[0].child = NULL;
92681+ pax_close_kernel();
92682+ }
92683 }
92684 #else
92685 static void register_sched_domain_sysctl(void)
92686diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
92687index fe331fc..29d620e 100644
92688--- a/kernel/sched/fair.c
92689+++ b/kernel/sched/fair.c
92690@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92691
92692 static void reset_ptenuma_scan(struct task_struct *p)
92693 {
92694- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92695+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92696 p->mm->numa_scan_offset = 0;
92697 }
92698
92699@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92700 * run_rebalance_domains is triggered when needed from the scheduler tick.
92701 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92702 */
92703-static void run_rebalance_domains(struct softirq_action *h)
92704+static __latent_entropy void run_rebalance_domains(void)
92705 {
92706 struct rq *this_rq = this_rq();
92707 enum cpu_idle_type idle = this_rq->idle_balance ?
92708diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92709index 9a2a45c..bb91ace 100644
92710--- a/kernel/sched/sched.h
92711+++ b/kernel/sched/sched.h
92712@@ -1182,7 +1182,7 @@ struct sched_class {
92713 #ifdef CONFIG_FAIR_GROUP_SCHED
92714 void (*task_move_group) (struct task_struct *p, int on_rq);
92715 #endif
92716-};
92717+} __do_const;
92718
92719 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92720 {
92721diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92722index 4ef9687..4f44028 100644
92723--- a/kernel/seccomp.c
92724+++ b/kernel/seccomp.c
92725@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
92726
92727 switch (action) {
92728 case SECCOMP_RET_ERRNO:
92729- /* Set the low-order 16-bits as a errno. */
92730+ /* Set low-order bits as an errno, capped at MAX_ERRNO. */
92731+ if (data > MAX_ERRNO)
92732+ data = MAX_ERRNO;
92733 syscall_set_return_value(current, task_pt_regs(current),
92734 -data, 0);
92735 goto skip;
92736diff --git a/kernel/signal.c b/kernel/signal.c
92737index 16a30529..25ad033 100644
92738--- a/kernel/signal.c
92739+++ b/kernel/signal.c
92740@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92741
92742 int print_fatal_signals __read_mostly;
92743
92744-static void __user *sig_handler(struct task_struct *t, int sig)
92745+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92746 {
92747 return t->sighand->action[sig - 1].sa.sa_handler;
92748 }
92749
92750-static int sig_handler_ignored(void __user *handler, int sig)
92751+static int sig_handler_ignored(__sighandler_t handler, int sig)
92752 {
92753 /* Is it explicitly or implicitly ignored? */
92754 return handler == SIG_IGN ||
92755@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92756
92757 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92758 {
92759- void __user *handler;
92760+ __sighandler_t handler;
92761
92762 handler = sig_handler(t, sig);
92763
92764@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92765 atomic_inc(&user->sigpending);
92766 rcu_read_unlock();
92767
92768+ if (!override_rlimit)
92769+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92770+
92771 if (override_rlimit ||
92772 atomic_read(&user->sigpending) <=
92773 task_rlimit(t, RLIMIT_SIGPENDING)) {
92774@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92775
92776 int unhandled_signal(struct task_struct *tsk, int sig)
92777 {
92778- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92779+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92780 if (is_global_init(tsk))
92781 return 1;
92782 if (handler != SIG_IGN && handler != SIG_DFL)
92783@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92784 }
92785 }
92786
92787+ /* allow glibc communication via tgkill to other threads in our
92788+ thread group */
92789+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92790+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92791+ && gr_handle_signal(t, sig))
92792+ return -EPERM;
92793+
92794 return security_task_kill(t, info, sig, 0);
92795 }
92796
92797@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92798 return send_signal(sig, info, p, 1);
92799 }
92800
92801-static int
92802+int
92803 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92804 {
92805 return send_signal(sig, info, t, 0);
92806@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92807 unsigned long int flags;
92808 int ret, blocked, ignored;
92809 struct k_sigaction *action;
92810+ int is_unhandled = 0;
92811
92812 spin_lock_irqsave(&t->sighand->siglock, flags);
92813 action = &t->sighand->action[sig-1];
92814@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92815 }
92816 if (action->sa.sa_handler == SIG_DFL)
92817 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92818+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92819+ is_unhandled = 1;
92820 ret = specific_send_sig_info(sig, info, t);
92821 spin_unlock_irqrestore(&t->sighand->siglock, flags);
92822
92823+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
92824+ normal operation */
92825+ if (is_unhandled) {
92826+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
92827+ gr_handle_crash(t, sig);
92828+ }
92829+
92830 return ret;
92831 }
92832
92833@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92834 ret = check_kill_permission(sig, info, p);
92835 rcu_read_unlock();
92836
92837- if (!ret && sig)
92838+ if (!ret && sig) {
92839 ret = do_send_sig_info(sig, info, p, true);
92840+ if (!ret)
92841+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
92842+ }
92843
92844 return ret;
92845 }
92846@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
92847 int error = -ESRCH;
92848
92849 rcu_read_lock();
92850- p = find_task_by_vpid(pid);
92851+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
92852+ /* allow glibc communication via tgkill to other threads in our
92853+ thread group */
92854+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
92855+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
92856+ p = find_task_by_vpid_unrestricted(pid);
92857+ else
92858+#endif
92859+ p = find_task_by_vpid(pid);
92860 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
92861 error = check_kill_permission(sig, info, p);
92862 /*
92863@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
92864 }
92865 seg = get_fs();
92866 set_fs(KERNEL_DS);
92867- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
92868- (stack_t __force __user *) &uoss,
92869+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
92870+ (stack_t __force_user *) &uoss,
92871 compat_user_stack_pointer());
92872 set_fs(seg);
92873 if (ret >= 0 && uoss_ptr) {
92874diff --git a/kernel/smpboot.c b/kernel/smpboot.c
92875index 40190f2..8861d40 100644
92876--- a/kernel/smpboot.c
92877+++ b/kernel/smpboot.c
92878@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
92879 }
92880 smpboot_unpark_thread(plug_thread, cpu);
92881 }
92882- list_add(&plug_thread->list, &hotplug_threads);
92883+ pax_list_add(&plug_thread->list, &hotplug_threads);
92884 out:
92885 mutex_unlock(&smpboot_threads_lock);
92886 put_online_cpus();
92887@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
92888 {
92889 get_online_cpus();
92890 mutex_lock(&smpboot_threads_lock);
92891- list_del(&plug_thread->list);
92892+ pax_list_del(&plug_thread->list);
92893 smpboot_destroy_threads(plug_thread);
92894 mutex_unlock(&smpboot_threads_lock);
92895 put_online_cpus();
92896diff --git a/kernel/softirq.c b/kernel/softirq.c
92897index c497fcd..e8f90a9 100644
92898--- a/kernel/softirq.c
92899+++ b/kernel/softirq.c
92900@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
92901 EXPORT_SYMBOL(irq_stat);
92902 #endif
92903
92904-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
92905+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
92906
92907 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
92908
92909@@ -266,7 +266,7 @@ restart:
92910 kstat_incr_softirqs_this_cpu(vec_nr);
92911
92912 trace_softirq_entry(vec_nr);
92913- h->action(h);
92914+ h->action();
92915 trace_softirq_exit(vec_nr);
92916 if (unlikely(prev_count != preempt_count())) {
92917 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
92918@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
92919 or_softirq_pending(1UL << nr);
92920 }
92921
92922-void open_softirq(int nr, void (*action)(struct softirq_action *))
92923+void __init open_softirq(int nr, void (*action)(void))
92924 {
92925 softirq_vec[nr].action = action;
92926 }
92927@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
92928 }
92929 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
92930
92931-static void tasklet_action(struct softirq_action *a)
92932+static void tasklet_action(void)
92933 {
92934 struct tasklet_struct *list;
92935
92936@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
92937 }
92938 }
92939
92940-static void tasklet_hi_action(struct softirq_action *a)
92941+static __latent_entropy void tasklet_hi_action(void)
92942 {
92943 struct tasklet_struct *list;
92944
92945@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
92946 .notifier_call = cpu_callback
92947 };
92948
92949-static struct smp_hotplug_thread softirq_threads = {
92950+static struct smp_hotplug_thread softirq_threads __read_only = {
92951 .store = &ksoftirqd,
92952 .thread_should_run = ksoftirqd_should_run,
92953 .thread_fn = run_ksoftirqd,
92954diff --git a/kernel/sys.c b/kernel/sys.c
92955index ea9c881..2194af5 100644
92956--- a/kernel/sys.c
92957+++ b/kernel/sys.c
92958@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
92959 error = -EACCES;
92960 goto out;
92961 }
92962+
92963+ if (gr_handle_chroot_setpriority(p, niceval)) {
92964+ error = -EACCES;
92965+ goto out;
92966+ }
92967+
92968 no_nice = security_task_setnice(p, niceval);
92969 if (no_nice) {
92970 error = no_nice;
92971@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
92972 goto error;
92973 }
92974
92975+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
92976+ goto error;
92977+
92978+ if (!gid_eq(new->gid, old->gid)) {
92979+ /* make sure we generate a learn log for what will
92980+ end up being a role transition after a full-learning
92981+ policy is generated
92982+ CAP_SETGID is required to perform a transition
92983+ we may not log a CAP_SETGID check above, e.g.
92984+ in the case where new rgid = old egid
92985+ */
92986+ gr_learn_cap(current, new, CAP_SETGID);
92987+ }
92988+
92989 if (rgid != (gid_t) -1 ||
92990 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
92991 new->sgid = new->egid;
92992@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
92993 old = current_cred();
92994
92995 retval = -EPERM;
92996+
92997+ if (gr_check_group_change(kgid, kgid, kgid))
92998+ goto error;
92999+
93000 if (ns_capable(old->user_ns, CAP_SETGID))
93001 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93002 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93003@@ -411,7 +435,7 @@ error:
93004 /*
93005 * change the user struct in a credentials set to match the new UID
93006 */
93007-static int set_user(struct cred *new)
93008+int set_user(struct cred *new)
93009 {
93010 struct user_struct *new_user;
93011
93012@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93013 goto error;
93014 }
93015
93016+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93017+ goto error;
93018+
93019 if (!uid_eq(new->uid, old->uid)) {
93020+ /* make sure we generate a learn log for what will
93021+ end up being a role transition after a full-learning
93022+ policy is generated
93023+ CAP_SETUID is required to perform a transition
93024+ we may not log a CAP_SETUID check above, e.g.
93025+ in the case where new ruid = old euid
93026+ */
93027+ gr_learn_cap(current, new, CAP_SETUID);
93028 retval = set_user(new);
93029 if (retval < 0)
93030 goto error;
93031@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93032 old = current_cred();
93033
93034 retval = -EPERM;
93035+
93036+ if (gr_check_crash_uid(kuid))
93037+ goto error;
93038+ if (gr_check_user_change(kuid, kuid, kuid))
93039+ goto error;
93040+
93041 if (ns_capable(old->user_ns, CAP_SETUID)) {
93042 new->suid = new->uid = kuid;
93043 if (!uid_eq(kuid, old->uid)) {
93044@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93045 goto error;
93046 }
93047
93048+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93049+ goto error;
93050+
93051 if (ruid != (uid_t) -1) {
93052 new->uid = kruid;
93053 if (!uid_eq(kruid, old->uid)) {
93054@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93055 goto error;
93056 }
93057
93058+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93059+ goto error;
93060+
93061 if (rgid != (gid_t) -1)
93062 new->gid = krgid;
93063 if (egid != (gid_t) -1)
93064@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93065 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93066 ns_capable(old->user_ns, CAP_SETUID)) {
93067 if (!uid_eq(kuid, old->fsuid)) {
93068+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93069+ goto error;
93070+
93071 new->fsuid = kuid;
93072 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93073 goto change_okay;
93074 }
93075 }
93076
93077+error:
93078 abort_creds(new);
93079 return old_fsuid;
93080
93081@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93082 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93083 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93084 ns_capable(old->user_ns, CAP_SETGID)) {
93085+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93086+ goto error;
93087+
93088 if (!gid_eq(kgid, old->fsgid)) {
93089 new->fsgid = kgid;
93090 goto change_okay;
93091 }
93092 }
93093
93094+error:
93095 abort_creds(new);
93096 return old_fsgid;
93097
93098@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93099 return -EFAULT;
93100
93101 down_read(&uts_sem);
93102- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93103+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93104 __OLD_UTS_LEN);
93105 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93106- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93107+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93108 __OLD_UTS_LEN);
93109 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93110- error |= __copy_to_user(&name->release, &utsname()->release,
93111+ error |= __copy_to_user(name->release, &utsname()->release,
93112 __OLD_UTS_LEN);
93113 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93114- error |= __copy_to_user(&name->version, &utsname()->version,
93115+ error |= __copy_to_user(name->version, &utsname()->version,
93116 __OLD_UTS_LEN);
93117 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93118- error |= __copy_to_user(&name->machine, &utsname()->machine,
93119+ error |= __copy_to_user(name->machine, &utsname()->machine,
93120 __OLD_UTS_LEN);
93121 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93122 up_read(&uts_sem);
93123@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93124 */
93125 new_rlim->rlim_cur = 1;
93126 }
93127+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93128+ is changed to a lower value. Since tasks can be created by the same
93129+ user in between this limit change and an execve by this task, force
93130+ a recheck only for this task by setting PF_NPROC_EXCEEDED
93131+ */
93132+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93133+ tsk->flags |= PF_NPROC_EXCEEDED;
93134 }
93135 if (!retval) {
93136 if (old_rlim)
93137diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93138index 88ea2d6..88acc77 100644
93139--- a/kernel/sysctl.c
93140+++ b/kernel/sysctl.c
93141@@ -94,7 +94,6 @@
93142
93143
93144 #if defined(CONFIG_SYSCTL)
93145-
93146 /* External variables not in a header file. */
93147 extern int max_threads;
93148 extern int suid_dumpable;
93149@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93150
93151 /* Constants used for minimum and maximum */
93152 #ifdef CONFIG_LOCKUP_DETECTOR
93153-static int sixty = 60;
93154+static int sixty __read_only = 60;
93155 #endif
93156
93157-static int __maybe_unused neg_one = -1;
93158+static int __maybe_unused neg_one __read_only = -1;
93159
93160-static int zero;
93161-static int __maybe_unused one = 1;
93162-static int __maybe_unused two = 2;
93163-static int __maybe_unused four = 4;
93164-static unsigned long one_ul = 1;
93165-static int one_hundred = 100;
93166+static int zero __read_only = 0;
93167+static int __maybe_unused one __read_only = 1;
93168+static int __maybe_unused two __read_only = 2;
93169+static int __maybe_unused three __read_only = 3;
93170+static int __maybe_unused four __read_only = 4;
93171+static unsigned long one_ul __read_only = 1;
93172+static int one_hundred __read_only = 100;
93173 #ifdef CONFIG_PRINTK
93174-static int ten_thousand = 10000;
93175+static int ten_thousand __read_only = 10000;
93176 #endif
93177
93178 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93179@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93180 void __user *buffer, size_t *lenp, loff_t *ppos);
93181 #endif
93182
93183-#ifdef CONFIG_PRINTK
93184 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93185 void __user *buffer, size_t *lenp, loff_t *ppos);
93186-#endif
93187
93188 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93189 void __user *buffer, size_t *lenp, loff_t *ppos);
93190@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93191
93192 #endif
93193
93194+extern struct ctl_table grsecurity_table[];
93195+
93196 static struct ctl_table kern_table[];
93197 static struct ctl_table vm_table[];
93198 static struct ctl_table fs_table[];
93199@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93200 int sysctl_legacy_va_layout;
93201 #endif
93202
93203+#ifdef CONFIG_PAX_SOFTMODE
93204+static struct ctl_table pax_table[] = {
93205+ {
93206+ .procname = "softmode",
93207+ .data = &pax_softmode,
93208+ .maxlen = sizeof(unsigned int),
93209+ .mode = 0600,
93210+ .proc_handler = &proc_dointvec,
93211+ },
93212+
93213+ { }
93214+};
93215+#endif
93216+
93217 /* The default sysctl tables: */
93218
93219 static struct ctl_table sysctl_base_table[] = {
93220@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93221 #endif
93222
93223 static struct ctl_table kern_table[] = {
93224+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93225+ {
93226+ .procname = "grsecurity",
93227+ .mode = 0500,
93228+ .child = grsecurity_table,
93229+ },
93230+#endif
93231+
93232+#ifdef CONFIG_PAX_SOFTMODE
93233+ {
93234+ .procname = "pax",
93235+ .mode = 0500,
93236+ .child = pax_table,
93237+ },
93238+#endif
93239+
93240 {
93241 .procname = "sched_child_runs_first",
93242 .data = &sysctl_sched_child_runs_first,
93243@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93244 .data = &modprobe_path,
93245 .maxlen = KMOD_PATH_LEN,
93246 .mode = 0644,
93247- .proc_handler = proc_dostring,
93248+ .proc_handler = proc_dostring_modpriv,
93249 },
93250 {
93251 .procname = "modules_disabled",
93252@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93253 .extra1 = &zero,
93254 .extra2 = &one,
93255 },
93256+#endif
93257 {
93258 .procname = "kptr_restrict",
93259 .data = &kptr_restrict,
93260 .maxlen = sizeof(int),
93261 .mode = 0644,
93262 .proc_handler = proc_dointvec_minmax_sysadmin,
93263+#ifdef CONFIG_GRKERNSEC_HIDESYM
93264+ .extra1 = &two,
93265+#else
93266 .extra1 = &zero,
93267+#endif
93268 .extra2 = &two,
93269 },
93270-#endif
93271 {
93272 .procname = "ngroups_max",
93273 .data = &ngroups_max,
93274@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93275 */
93276 {
93277 .procname = "perf_event_paranoid",
93278- .data = &sysctl_perf_event_paranoid,
93279- .maxlen = sizeof(sysctl_perf_event_paranoid),
93280+ .data = &sysctl_perf_event_legitimately_concerned,
93281+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93282 .mode = 0644,
93283- .proc_handler = proc_dointvec,
93284+ /* go ahead, be a hero */
93285+ .proc_handler = proc_dointvec_minmax_sysadmin,
93286+ .extra1 = &neg_one,
93287+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93288+ .extra2 = &three,
93289+#else
93290+ .extra2 = &two,
93291+#endif
93292 },
93293 {
93294 .procname = "perf_event_mlock_kb",
93295@@ -1340,6 +1381,13 @@ static struct ctl_table vm_table[] = {
93296 .proc_handler = proc_dointvec_minmax,
93297 .extra1 = &zero,
93298 },
93299+ {
93300+ .procname = "heap_stack_gap",
93301+ .data = &sysctl_heap_stack_gap,
93302+ .maxlen = sizeof(sysctl_heap_stack_gap),
93303+ .mode = 0644,
93304+ .proc_handler = proc_doulongvec_minmax,
93305+ },
93306 #else
93307 {
93308 .procname = "nr_trim_pages",
93309@@ -1822,6 +1870,16 @@ int proc_dostring(struct ctl_table *table, int write,
93310 (char __user *)buffer, lenp, ppos);
93311 }
93312
93313+int proc_dostring_modpriv(struct ctl_table *table, int write,
93314+ void __user *buffer, size_t *lenp, loff_t *ppos)
93315+{
93316+ if (write && !capable(CAP_SYS_MODULE))
93317+ return -EPERM;
93318+
93319+ return _proc_do_string(table->data, table->maxlen, write,
93320+ buffer, lenp, ppos);
93321+}
93322+
93323 static size_t proc_skip_spaces(char **buf)
93324 {
93325 size_t ret;
93326@@ -1927,6 +1985,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93327 len = strlen(tmp);
93328 if (len > *size)
93329 len = *size;
93330+ if (len > sizeof(tmp))
93331+ len = sizeof(tmp);
93332 if (copy_to_user(*buf, tmp, len))
93333 return -EFAULT;
93334 *size -= len;
93335@@ -2104,7 +2164,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93336 static int proc_taint(struct ctl_table *table, int write,
93337 void __user *buffer, size_t *lenp, loff_t *ppos)
93338 {
93339- struct ctl_table t;
93340+ ctl_table_no_const t;
93341 unsigned long tmptaint = get_taint();
93342 int err;
93343
93344@@ -2132,7 +2192,6 @@ static int proc_taint(struct ctl_table *table, int write,
93345 return err;
93346 }
93347
93348-#ifdef CONFIG_PRINTK
93349 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93350 void __user *buffer, size_t *lenp, loff_t *ppos)
93351 {
93352@@ -2141,7 +2200,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93353
93354 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93355 }
93356-#endif
93357
93358 struct do_proc_dointvec_minmax_conv_param {
93359 int *min;
93360@@ -2701,6 +2759,12 @@ int proc_dostring(struct ctl_table *table, int write,
93361 return -ENOSYS;
93362 }
93363
93364+int proc_dostring_modpriv(struct ctl_table *table, int write,
93365+ void __user *buffer, size_t *lenp, loff_t *ppos)
93366+{
93367+ return -ENOSYS;
93368+}
93369+
93370 int proc_dointvec(struct ctl_table *table, int write,
93371 void __user *buffer, size_t *lenp, loff_t *ppos)
93372 {
93373@@ -2757,5 +2821,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93374 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93375 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93376 EXPORT_SYMBOL(proc_dostring);
93377+EXPORT_SYMBOL(proc_dostring_modpriv);
93378 EXPORT_SYMBOL(proc_doulongvec_minmax);
93379 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
93380diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93381index 670fff8..a247812 100644
93382--- a/kernel/taskstats.c
93383+++ b/kernel/taskstats.c
93384@@ -28,9 +28,12 @@
93385 #include <linux/fs.h>
93386 #include <linux/file.h>
93387 #include <linux/pid_namespace.h>
93388+#include <linux/grsecurity.h>
93389 #include <net/genetlink.h>
93390 #include <linux/atomic.h>
93391
93392+extern int gr_is_taskstats_denied(int pid);
93393+
93394 /*
93395 * Maximum length of a cpumask that can be specified in
93396 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93397@@ -576,6 +579,9 @@ err:
93398
93399 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93400 {
93401+ if (gr_is_taskstats_denied(current->pid))
93402+ return -EACCES;
93403+
93404 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93405 return cmd_attr_register_cpumask(info);
93406 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93407diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93408index a7077d3..dd48a49 100644
93409--- a/kernel/time/alarmtimer.c
93410+++ b/kernel/time/alarmtimer.c
93411@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93412 struct platform_device *pdev;
93413 int error = 0;
93414 int i;
93415- struct k_clock alarm_clock = {
93416+ static struct k_clock alarm_clock = {
93417 .clock_getres = alarm_clock_getres,
93418 .clock_get = alarm_clock_get,
93419 .timer_create = alarm_timer_create,
93420diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93421index d8c724c..6b331a4 100644
93422--- a/kernel/time/hrtimer.c
93423+++ b/kernel/time/hrtimer.c
93424@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93425 local_irq_restore(flags);
93426 }
93427
93428-static void run_hrtimer_softirq(struct softirq_action *h)
93429+static __latent_entropy void run_hrtimer_softirq(void)
93430 {
93431 hrtimer_peek_ahead_timers();
93432 }
93433diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93434index a16b678..8c5bd9d 100644
93435--- a/kernel/time/posix-cpu-timers.c
93436+++ b/kernel/time/posix-cpu-timers.c
93437@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
93438
93439 static __init int init_posix_cpu_timers(void)
93440 {
93441- struct k_clock process = {
93442+ static struct k_clock process = {
93443 .clock_getres = process_cpu_clock_getres,
93444 .clock_get = process_cpu_clock_get,
93445 .timer_create = process_cpu_timer_create,
93446 .nsleep = process_cpu_nsleep,
93447 .nsleep_restart = process_cpu_nsleep_restart,
93448 };
93449- struct k_clock thread = {
93450+ static struct k_clock thread = {
93451 .clock_getres = thread_cpu_clock_getres,
93452 .clock_get = thread_cpu_clock_get,
93453 .timer_create = thread_cpu_timer_create,
93454diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93455index 31ea01f..7fc61ef 100644
93456--- a/kernel/time/posix-timers.c
93457+++ b/kernel/time/posix-timers.c
93458@@ -43,6 +43,7 @@
93459 #include <linux/hash.h>
93460 #include <linux/posix-clock.h>
93461 #include <linux/posix-timers.h>
93462+#include <linux/grsecurity.h>
93463 #include <linux/syscalls.h>
93464 #include <linux/wait.h>
93465 #include <linux/workqueue.h>
93466@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93467 * which we beg off on and pass to do_sys_settimeofday().
93468 */
93469
93470-static struct k_clock posix_clocks[MAX_CLOCKS];
93471+static struct k_clock *posix_clocks[MAX_CLOCKS];
93472
93473 /*
93474 * These ones are defined below.
93475@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93476 */
93477 static __init int init_posix_timers(void)
93478 {
93479- struct k_clock clock_realtime = {
93480+ static struct k_clock clock_realtime = {
93481 .clock_getres = hrtimer_get_res,
93482 .clock_get = posix_clock_realtime_get,
93483 .clock_set = posix_clock_realtime_set,
93484@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93485 .timer_get = common_timer_get,
93486 .timer_del = common_timer_del,
93487 };
93488- struct k_clock clock_monotonic = {
93489+ static struct k_clock clock_monotonic = {
93490 .clock_getres = hrtimer_get_res,
93491 .clock_get = posix_ktime_get_ts,
93492 .nsleep = common_nsleep,
93493@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93494 .timer_get = common_timer_get,
93495 .timer_del = common_timer_del,
93496 };
93497- struct k_clock clock_monotonic_raw = {
93498+ static struct k_clock clock_monotonic_raw = {
93499 .clock_getres = hrtimer_get_res,
93500 .clock_get = posix_get_monotonic_raw,
93501 };
93502- struct k_clock clock_realtime_coarse = {
93503+ static struct k_clock clock_realtime_coarse = {
93504 .clock_getres = posix_get_coarse_res,
93505 .clock_get = posix_get_realtime_coarse,
93506 };
93507- struct k_clock clock_monotonic_coarse = {
93508+ static struct k_clock clock_monotonic_coarse = {
93509 .clock_getres = posix_get_coarse_res,
93510 .clock_get = posix_get_monotonic_coarse,
93511 };
93512- struct k_clock clock_tai = {
93513+ static struct k_clock clock_tai = {
93514 .clock_getres = hrtimer_get_res,
93515 .clock_get = posix_get_tai,
93516 .nsleep = common_nsleep,
93517@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93518 .timer_get = common_timer_get,
93519 .timer_del = common_timer_del,
93520 };
93521- struct k_clock clock_boottime = {
93522+ static struct k_clock clock_boottime = {
93523 .clock_getres = hrtimer_get_res,
93524 .clock_get = posix_get_boottime,
93525 .nsleep = common_nsleep,
93526@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93527 return;
93528 }
93529
93530- posix_clocks[clock_id] = *new_clock;
93531+ posix_clocks[clock_id] = new_clock;
93532 }
93533 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93534
93535@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93536 return (id & CLOCKFD_MASK) == CLOCKFD ?
93537 &clock_posix_dynamic : &clock_posix_cpu;
93538
93539- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93540+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93541 return NULL;
93542- return &posix_clocks[id];
93543+ return posix_clocks[id];
93544 }
93545
93546 static int common_timer_create(struct k_itimer *new_timer)
93547@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93548 struct k_clock *kc = clockid_to_kclock(which_clock);
93549 struct k_itimer *new_timer;
93550 int error, new_timer_id;
93551- sigevent_t event;
93552+ sigevent_t event = { };
93553 int it_id_set = IT_ID_NOT_SET;
93554
93555 if (!kc)
93556@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93557 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93558 return -EFAULT;
93559
93560+ /* only the CLOCK_REALTIME clock can be set, all other clocks
93561+ have their clock_set fptr set to a nosettime dummy function
93562+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93563+ call common_clock_set, which calls do_sys_settimeofday, which
93564+ we hook
93565+ */
93566+
93567 return kc->clock_set(which_clock, &new_tp);
93568 }
93569
93570diff --git a/kernel/time/time.c b/kernel/time/time.c
93571index 2c85b77..6530536 100644
93572--- a/kernel/time/time.c
93573+++ b/kernel/time/time.c
93574@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93575 return error;
93576
93577 if (tz) {
93578+ /* we log in do_settimeofday called below, so don't log twice
93579+ */
93580+ if (!tv)
93581+ gr_log_timechange();
93582+
93583 sys_tz = *tz;
93584 update_vsyscall_tz();
93585 if (firsttime) {
93586diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93587index 6a93185..288c331 100644
93588--- a/kernel/time/timekeeping.c
93589+++ b/kernel/time/timekeeping.c
93590@@ -15,6 +15,7 @@
93591 #include <linux/init.h>
93592 #include <linux/mm.h>
93593 #include <linux/sched.h>
93594+#include <linux/grsecurity.h>
93595 #include <linux/syscore_ops.h>
93596 #include <linux/clocksource.h>
93597 #include <linux/jiffies.h>
93598@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
93599 if (!timespec64_valid_strict(ts))
93600 return -EINVAL;
93601
93602+ gr_log_timechange();
93603+
93604 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93605 write_seqcount_begin(&tk_core.seq);
93606
93607diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93608index 2d3f5c5..7ed7dc5 100644
93609--- a/kernel/time/timer.c
93610+++ b/kernel/time/timer.c
93611@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
93612 /*
93613 * This function runs timers and the timer-tq in bottom half context.
93614 */
93615-static void run_timer_softirq(struct softirq_action *h)
93616+static __latent_entropy void run_timer_softirq(void)
93617 {
93618 struct tvec_base *base = __this_cpu_read(tvec_bases);
93619
93620@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
93621 *
93622 * In all cases the return value is guaranteed to be non-negative.
93623 */
93624-signed long __sched schedule_timeout(signed long timeout)
93625+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93626 {
93627 struct timer_list timer;
93628 unsigned long expire;
93629diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93630index 61ed862..3b52c65 100644
93631--- a/kernel/time/timer_list.c
93632+++ b/kernel/time/timer_list.c
93633@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93634
93635 static void print_name_offset(struct seq_file *m, void *sym)
93636 {
93637+#ifdef CONFIG_GRKERNSEC_HIDESYM
93638+ SEQ_printf(m, "<%p>", NULL);
93639+#else
93640 char symname[KSYM_NAME_LEN];
93641
93642 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93643 SEQ_printf(m, "<%pK>", sym);
93644 else
93645 SEQ_printf(m, "%s", symname);
93646+#endif
93647 }
93648
93649 static void
93650@@ -119,7 +123,11 @@ next_one:
93651 static void
93652 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93653 {
93654+#ifdef CONFIG_GRKERNSEC_HIDESYM
93655+ SEQ_printf(m, " .base: %p\n", NULL);
93656+#else
93657 SEQ_printf(m, " .base: %pK\n", base);
93658+#endif
93659 SEQ_printf(m, " .index: %d\n",
93660 base->index);
93661 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93662@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93663 {
93664 struct proc_dir_entry *pe;
93665
93666+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93667+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93668+#else
93669 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93670+#endif
93671 if (!pe)
93672 return -ENOMEM;
93673 return 0;
93674diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93675index 1fb08f2..ca4bb1e 100644
93676--- a/kernel/time/timer_stats.c
93677+++ b/kernel/time/timer_stats.c
93678@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93679 static unsigned long nr_entries;
93680 static struct entry entries[MAX_ENTRIES];
93681
93682-static atomic_t overflow_count;
93683+static atomic_unchecked_t overflow_count;
93684
93685 /*
93686 * The entries are in a hash-table, for fast lookup:
93687@@ -140,7 +140,7 @@ static void reset_entries(void)
93688 nr_entries = 0;
93689 memset(entries, 0, sizeof(entries));
93690 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93691- atomic_set(&overflow_count, 0);
93692+ atomic_set_unchecked(&overflow_count, 0);
93693 }
93694
93695 static struct entry *alloc_entry(void)
93696@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93697 if (likely(entry))
93698 entry->count++;
93699 else
93700- atomic_inc(&overflow_count);
93701+ atomic_inc_unchecked(&overflow_count);
93702
93703 out_unlock:
93704 raw_spin_unlock_irqrestore(lock, flags);
93705@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93706
93707 static void print_name_offset(struct seq_file *m, unsigned long addr)
93708 {
93709+#ifdef CONFIG_GRKERNSEC_HIDESYM
93710+ seq_printf(m, "<%p>", NULL);
93711+#else
93712 char symname[KSYM_NAME_LEN];
93713
93714 if (lookup_symbol_name(addr, symname) < 0)
93715- seq_printf(m, "<%p>", (void *)addr);
93716+ seq_printf(m, "<%pK>", (void *)addr);
93717 else
93718 seq_printf(m, "%s", symname);
93719+#endif
93720 }
93721
93722 static int tstats_show(struct seq_file *m, void *v)
93723@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93724
93725 seq_puts(m, "Timer Stats Version: v0.3\n");
93726 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93727- if (atomic_read(&overflow_count))
93728- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93729+ if (atomic_read_unchecked(&overflow_count))
93730+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93731 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93732
93733 for (i = 0; i < nr_entries; i++) {
93734@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93735 {
93736 struct proc_dir_entry *pe;
93737
93738+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93739+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93740+#else
93741 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93742+#endif
93743 if (!pe)
93744 return -ENOMEM;
93745 return 0;
93746diff --git a/kernel/torture.c b/kernel/torture.c
93747index dd70993..0bf694b 100644
93748--- a/kernel/torture.c
93749+++ b/kernel/torture.c
93750@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93751 mutex_lock(&fullstop_mutex);
93752 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93753 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93754- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93755+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93756 } else {
93757 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93758 }
93759@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
93760 if (!torture_must_stop()) {
93761 if (stutter > 1) {
93762 schedule_timeout_interruptible(stutter - 1);
93763- ACCESS_ONCE(stutter_pause_test) = 2;
93764+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93765 }
93766 schedule_timeout_interruptible(1);
93767- ACCESS_ONCE(stutter_pause_test) = 1;
93768+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93769 }
93770 if (!torture_must_stop())
93771 schedule_timeout_interruptible(stutter);
93772- ACCESS_ONCE(stutter_pause_test) = 0;
93773+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93774 torture_shutdown_absorb("torture_stutter");
93775 } while (!torture_must_stop());
93776 torture_kthread_stopping("torture_stutter");
93777@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
93778 schedule_timeout_uninterruptible(10);
93779 return true;
93780 }
93781- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93782+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93783 mutex_unlock(&fullstop_mutex);
93784 torture_shutdown_cleanup();
93785 torture_shuffle_cleanup();
93786diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93787index 483cecf..ac46091 100644
93788--- a/kernel/trace/blktrace.c
93789+++ b/kernel/trace/blktrace.c
93790@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93791 struct blk_trace *bt = filp->private_data;
93792 char buf[16];
93793
93794- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93795+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93796
93797 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93798 }
93799@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93800 return 1;
93801
93802 bt = buf->chan->private_data;
93803- atomic_inc(&bt->dropped);
93804+ atomic_inc_unchecked(&bt->dropped);
93805 return 0;
93806 }
93807
93808@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93809
93810 bt->dir = dir;
93811 bt->dev = dev;
93812- atomic_set(&bt->dropped, 0);
93813+ atomic_set_unchecked(&bt->dropped, 0);
93814 INIT_LIST_HEAD(&bt->running_list);
93815
93816 ret = -EIO;
93817diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93818index 224e768..8303c84 100644
93819--- a/kernel/trace/ftrace.c
93820+++ b/kernel/trace/ftrace.c
93821@@ -2372,12 +2372,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
93822 if (unlikely(ftrace_disabled))
93823 return 0;
93824
93825+ ret = ftrace_arch_code_modify_prepare();
93826+ FTRACE_WARN_ON(ret);
93827+ if (ret)
93828+ return 0;
93829+
93830 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
93831+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
93832 if (ret) {
93833 ftrace_bug(ret, rec);
93834- return 0;
93835 }
93836- return 1;
93837+ return ret ? 0 : 1;
93838 }
93839
93840 /*
93841@@ -4754,8 +4759,10 @@ static int ftrace_process_locs(struct module *mod,
93842 if (!count)
93843 return 0;
93844
93845+ pax_open_kernel();
93846 sort(start, count, sizeof(*start),
93847 ftrace_cmp_ips, ftrace_swap_ips);
93848+ pax_close_kernel();
93849
93850 start_pg = ftrace_allocate_pages(count);
93851 if (!start_pg)
93852@@ -5633,7 +5640,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
93853
93854 if (t->ret_stack == NULL) {
93855 atomic_set(&t->tracing_graph_pause, 0);
93856- atomic_set(&t->trace_overrun, 0);
93857+ atomic_set_unchecked(&t->trace_overrun, 0);
93858 t->curr_ret_stack = -1;
93859 /* Make sure the tasks see the -1 first: */
93860 smp_wmb();
93861@@ -5856,7 +5863,7 @@ static void
93862 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
93863 {
93864 atomic_set(&t->tracing_graph_pause, 0);
93865- atomic_set(&t->trace_overrun, 0);
93866+ atomic_set_unchecked(&t->trace_overrun, 0);
93867 t->ftrace_timestamp = 0;
93868 /* make curr_ret_stack visible before we add the ret_stack */
93869 smp_wmb();
93870diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
93871index d2e151c..b68c835 100644
93872--- a/kernel/trace/ring_buffer.c
93873+++ b/kernel/trace/ring_buffer.c
93874@@ -350,9 +350,9 @@ struct buffer_data_page {
93875 */
93876 struct buffer_page {
93877 struct list_head list; /* list of buffer pages */
93878- local_t write; /* index for next write */
93879+ local_unchecked_t write; /* index for next write */
93880 unsigned read; /* index for next read */
93881- local_t entries; /* entries on this page */
93882+ local_unchecked_t entries; /* entries on this page */
93883 unsigned long real_end; /* real end of data */
93884 struct buffer_data_page *page; /* Actual data page */
93885 };
93886@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
93887 unsigned long last_overrun;
93888 local_t entries_bytes;
93889 local_t entries;
93890- local_t overrun;
93891- local_t commit_overrun;
93892+ local_unchecked_t overrun;
93893+ local_unchecked_t commit_overrun;
93894 local_t dropped_events;
93895 local_t committing;
93896 local_t commits;
93897@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93898 *
93899 * We add a counter to the write field to denote this.
93900 */
93901- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
93902- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
93903+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
93904+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
93905
93906 /*
93907 * Just make sure we have seen our old_write and synchronize
93908@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93909 * cmpxchg to only update if an interrupt did not already
93910 * do it for us. If the cmpxchg fails, we don't care.
93911 */
93912- (void)local_cmpxchg(&next_page->write, old_write, val);
93913- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
93914+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
93915+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
93916
93917 /*
93918 * No need to worry about races with clearing out the commit.
93919@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
93920
93921 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
93922 {
93923- return local_read(&bpage->entries) & RB_WRITE_MASK;
93924+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
93925 }
93926
93927 static inline unsigned long rb_page_write(struct buffer_page *bpage)
93928 {
93929- return local_read(&bpage->write) & RB_WRITE_MASK;
93930+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
93931 }
93932
93933 static int
93934@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
93935 * bytes consumed in ring buffer from here.
93936 * Increment overrun to account for the lost events.
93937 */
93938- local_add(page_entries, &cpu_buffer->overrun);
93939+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
93940 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
93941 }
93942
93943@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
93944 * it is our responsibility to update
93945 * the counters.
93946 */
93947- local_add(entries, &cpu_buffer->overrun);
93948+ local_add_unchecked(entries, &cpu_buffer->overrun);
93949 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
93950
93951 /*
93952@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93953 if (tail == BUF_PAGE_SIZE)
93954 tail_page->real_end = 0;
93955
93956- local_sub(length, &tail_page->write);
93957+ local_sub_unchecked(length, &tail_page->write);
93958 return;
93959 }
93960
93961@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93962 rb_event_set_padding(event);
93963
93964 /* Set the write back to the previous setting */
93965- local_sub(length, &tail_page->write);
93966+ local_sub_unchecked(length, &tail_page->write);
93967 return;
93968 }
93969
93970@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93971
93972 /* Set write to end of buffer */
93973 length = (tail + length) - BUF_PAGE_SIZE;
93974- local_sub(length, &tail_page->write);
93975+ local_sub_unchecked(length, &tail_page->write);
93976 }
93977
93978 /*
93979@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
93980 * about it.
93981 */
93982 if (unlikely(next_page == commit_page)) {
93983- local_inc(&cpu_buffer->commit_overrun);
93984+ local_inc_unchecked(&cpu_buffer->commit_overrun);
93985 goto out_reset;
93986 }
93987
93988@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
93989 cpu_buffer->tail_page) &&
93990 (cpu_buffer->commit_page ==
93991 cpu_buffer->reader_page))) {
93992- local_inc(&cpu_buffer->commit_overrun);
93993+ local_inc_unchecked(&cpu_buffer->commit_overrun);
93994 goto out_reset;
93995 }
93996 }
93997@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
93998 length += RB_LEN_TIME_EXTEND;
93999
94000 tail_page = cpu_buffer->tail_page;
94001- write = local_add_return(length, &tail_page->write);
94002+ write = local_add_return_unchecked(length, &tail_page->write);
94003
94004 /* set write to only the index of the write */
94005 write &= RB_WRITE_MASK;
94006@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94007 kmemcheck_annotate_bitfield(event, bitfield);
94008 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94009
94010- local_inc(&tail_page->entries);
94011+ local_inc_unchecked(&tail_page->entries);
94012
94013 /*
94014 * If this is the first commit on the page, then update
94015@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94016
94017 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94018 unsigned long write_mask =
94019- local_read(&bpage->write) & ~RB_WRITE_MASK;
94020+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94021 unsigned long event_length = rb_event_length(event);
94022 /*
94023 * This is on the tail page. It is possible that
94024@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94025 */
94026 old_index += write_mask;
94027 new_index += write_mask;
94028- index = local_cmpxchg(&bpage->write, old_index, new_index);
94029+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94030 if (index == old_index) {
94031 /* update counters */
94032 local_sub(event_length, &cpu_buffer->entries_bytes);
94033@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94034
94035 /* Do the likely case first */
94036 if (likely(bpage->page == (void *)addr)) {
94037- local_dec(&bpage->entries);
94038+ local_dec_unchecked(&bpage->entries);
94039 return;
94040 }
94041
94042@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94043 start = bpage;
94044 do {
94045 if (bpage->page == (void *)addr) {
94046- local_dec(&bpage->entries);
94047+ local_dec_unchecked(&bpage->entries);
94048 return;
94049 }
94050 rb_inc_page(cpu_buffer, &bpage);
94051@@ -3200,7 +3200,7 @@ static inline unsigned long
94052 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94053 {
94054 return local_read(&cpu_buffer->entries) -
94055- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94056+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94057 }
94058
94059 /**
94060@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94061 return 0;
94062
94063 cpu_buffer = buffer->buffers[cpu];
94064- ret = local_read(&cpu_buffer->overrun);
94065+ ret = local_read_unchecked(&cpu_buffer->overrun);
94066
94067 return ret;
94068 }
94069@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94070 return 0;
94071
94072 cpu_buffer = buffer->buffers[cpu];
94073- ret = local_read(&cpu_buffer->commit_overrun);
94074+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94075
94076 return ret;
94077 }
94078@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94079 /* if you care about this being correct, lock the buffer */
94080 for_each_buffer_cpu(buffer, cpu) {
94081 cpu_buffer = buffer->buffers[cpu];
94082- overruns += local_read(&cpu_buffer->overrun);
94083+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94084 }
94085
94086 return overruns;
94087@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94088 /*
94089 * Reset the reader page to size zero.
94090 */
94091- local_set(&cpu_buffer->reader_page->write, 0);
94092- local_set(&cpu_buffer->reader_page->entries, 0);
94093+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94094+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94095 local_set(&cpu_buffer->reader_page->page->commit, 0);
94096 cpu_buffer->reader_page->real_end = 0;
94097
94098@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94099 * want to compare with the last_overrun.
94100 */
94101 smp_mb();
94102- overwrite = local_read(&(cpu_buffer->overrun));
94103+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94104
94105 /*
94106 * Here's the tricky part.
94107@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94108
94109 cpu_buffer->head_page
94110 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94111- local_set(&cpu_buffer->head_page->write, 0);
94112- local_set(&cpu_buffer->head_page->entries, 0);
94113+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94114+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94115 local_set(&cpu_buffer->head_page->page->commit, 0);
94116
94117 cpu_buffer->head_page->read = 0;
94118@@ -4186,14 +4186,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94119
94120 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94121 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94122- local_set(&cpu_buffer->reader_page->write, 0);
94123- local_set(&cpu_buffer->reader_page->entries, 0);
94124+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94125+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94126 local_set(&cpu_buffer->reader_page->page->commit, 0);
94127 cpu_buffer->reader_page->read = 0;
94128
94129 local_set(&cpu_buffer->entries_bytes, 0);
94130- local_set(&cpu_buffer->overrun, 0);
94131- local_set(&cpu_buffer->commit_overrun, 0);
94132+ local_set_unchecked(&cpu_buffer->overrun, 0);
94133+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94134 local_set(&cpu_buffer->dropped_events, 0);
94135 local_set(&cpu_buffer->entries, 0);
94136 local_set(&cpu_buffer->committing, 0);
94137@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94138 rb_init_page(bpage);
94139 bpage = reader->page;
94140 reader->page = *data_page;
94141- local_set(&reader->write, 0);
94142- local_set(&reader->entries, 0);
94143+ local_set_unchecked(&reader->write, 0);
94144+ local_set_unchecked(&reader->entries, 0);
94145 reader->read = 0;
94146 *data_page = bpage;
94147
94148diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94149index 361a827..6a319a3 100644
94150--- a/kernel/trace/trace.c
94151+++ b/kernel/trace/trace.c
94152@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94153 return 0;
94154 }
94155
94156-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94157+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94158 {
94159 /* do nothing if flag is already set */
94160 if (!!(trace_flags & mask) == !!enabled)
94161diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94162index 8de48ba..3e5b4fa 100644
94163--- a/kernel/trace/trace.h
94164+++ b/kernel/trace/trace.h
94165@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94166 void trace_printk_init_buffers(void);
94167 void trace_printk_start_comm(void);
94168 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94169-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94170+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94171
94172 /*
94173 * Normal trace_printk() and friends allocates special buffers
94174diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94175index 57b67b1..66082a9 100644
94176--- a/kernel/trace/trace_clock.c
94177+++ b/kernel/trace/trace_clock.c
94178@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94179 return now;
94180 }
94181
94182-static atomic64_t trace_counter;
94183+static atomic64_unchecked_t trace_counter;
94184
94185 /*
94186 * trace_clock_counter(): simply an atomic counter.
94187@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94188 */
94189 u64 notrace trace_clock_counter(void)
94190 {
94191- return atomic64_add_return(1, &trace_counter);
94192+ return atomic64_inc_return_unchecked(&trace_counter);
94193 }
94194diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94195index b03a0ea..2df3168 100644
94196--- a/kernel/trace/trace_events.c
94197+++ b/kernel/trace/trace_events.c
94198@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94199 return 0;
94200 }
94201
94202-struct ftrace_module_file_ops;
94203 static void __add_event_to_tracers(struct ftrace_event_call *call);
94204
94205 /* Add an additional event_call dynamically */
94206diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94207index ba47600..d0e47fa 100644
94208--- a/kernel/trace/trace_functions_graph.c
94209+++ b/kernel/trace/trace_functions_graph.c
94210@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94211
94212 /* The return trace stack is full */
94213 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94214- atomic_inc(&current->trace_overrun);
94215+ atomic_inc_unchecked(&current->trace_overrun);
94216 return -EBUSY;
94217 }
94218
94219@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
94220 *ret = current->ret_stack[index].ret;
94221 trace->func = current->ret_stack[index].func;
94222 trace->calltime = current->ret_stack[index].calltime;
94223- trace->overrun = atomic_read(&current->trace_overrun);
94224+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
94225 trace->depth = index;
94226 }
94227
94228diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94229index 7a9ba62..2e0e4a1 100644
94230--- a/kernel/trace/trace_mmiotrace.c
94231+++ b/kernel/trace/trace_mmiotrace.c
94232@@ -24,7 +24,7 @@ struct header_iter {
94233 static struct trace_array *mmio_trace_array;
94234 static bool overrun_detected;
94235 static unsigned long prev_overruns;
94236-static atomic_t dropped_count;
94237+static atomic_unchecked_t dropped_count;
94238
94239 static void mmio_reset_data(struct trace_array *tr)
94240 {
94241@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
94242
94243 static unsigned long count_overruns(struct trace_iterator *iter)
94244 {
94245- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94246+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94247 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94248
94249 if (over > prev_overruns)
94250@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94251 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94252 sizeof(*entry), 0, pc);
94253 if (!event) {
94254- atomic_inc(&dropped_count);
94255+ atomic_inc_unchecked(&dropped_count);
94256 return;
94257 }
94258 entry = ring_buffer_event_data(event);
94259@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94260 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94261 sizeof(*entry), 0, pc);
94262 if (!event) {
94263- atomic_inc(&dropped_count);
94264+ atomic_inc_unchecked(&dropped_count);
94265 return;
94266 }
94267 entry = ring_buffer_event_data(event);
94268diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94269index b77b9a6..82f19bd 100644
94270--- a/kernel/trace/trace_output.c
94271+++ b/kernel/trace/trace_output.c
94272@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
94273 goto out;
94274 }
94275
94276+ pax_open_kernel();
94277 if (event->funcs->trace == NULL)
94278- event->funcs->trace = trace_nop_print;
94279+ *(void **)&event->funcs->trace = trace_nop_print;
94280 if (event->funcs->raw == NULL)
94281- event->funcs->raw = trace_nop_print;
94282+ *(void **)&event->funcs->raw = trace_nop_print;
94283 if (event->funcs->hex == NULL)
94284- event->funcs->hex = trace_nop_print;
94285+ *(void **)&event->funcs->hex = trace_nop_print;
94286 if (event->funcs->binary == NULL)
94287- event->funcs->binary = trace_nop_print;
94288+ *(void **)&event->funcs->binary = trace_nop_print;
94289+ pax_close_kernel();
94290
94291 key = event->type & (EVENT_HASHSIZE - 1);
94292
94293diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94294index f8b45d8..70ff6c8 100644
94295--- a/kernel/trace/trace_seq.c
94296+++ b/kernel/trace/trace_seq.c
94297@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94298 return 0;
94299 }
94300
94301- seq_buf_path(&s->seq, path, "\n");
94302+ seq_buf_path(&s->seq, path, "\n\\");
94303
94304 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
94305 s->seq.len = save_len;
94306diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94307index 16eddb3..758b308 100644
94308--- a/kernel/trace/trace_stack.c
94309+++ b/kernel/trace/trace_stack.c
94310@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94311 return;
94312
94313 /* we do not handle interrupt stacks yet */
94314- if (!object_is_on_stack(stack))
94315+ if (!object_starts_on_stack(stack))
94316 return;
94317
94318 local_irq_save(flags);
94319diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94320index c6ee36f..78513f3 100644
94321--- a/kernel/trace/trace_syscalls.c
94322+++ b/kernel/trace/trace_syscalls.c
94323@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94324 int num;
94325
94326 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94327+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94328+ return -EINVAL;
94329
94330 mutex_lock(&syscall_trace_lock);
94331 if (!sys_perf_refcount_enter)
94332@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94333 int num;
94334
94335 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94336+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94337+ return;
94338
94339 mutex_lock(&syscall_trace_lock);
94340 sys_perf_refcount_enter--;
94341@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94342 int num;
94343
94344 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94345+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94346+ return -EINVAL;
94347
94348 mutex_lock(&syscall_trace_lock);
94349 if (!sys_perf_refcount_exit)
94350@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94351 int num;
94352
94353 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94354+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94355+ return;
94356
94357 mutex_lock(&syscall_trace_lock);
94358 sys_perf_refcount_exit--;
94359diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94360index 4109f83..fe1f830 100644
94361--- a/kernel/user_namespace.c
94362+++ b/kernel/user_namespace.c
94363@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
94364 !kgid_has_mapping(parent_ns, group))
94365 return -EPERM;
94366
94367+#ifdef CONFIG_GRKERNSEC
94368+ /*
94369+ * This doesn't really inspire confidence:
94370+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94371+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94372+ * Increases kernel attack surface in areas developers
94373+ * previously cared little about ("low importance due
94374+ * to requiring "root" capability")
94375+ * To be removed when this code receives *proper* review
94376+ */
94377+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94378+ !capable(CAP_SETGID))
94379+ return -EPERM;
94380+#endif
94381+
94382 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94383 if (!ns)
94384 return -ENOMEM;
94385@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
94386 if (atomic_read(&current->mm->mm_users) > 1)
94387 return -EINVAL;
94388
94389- if (current->fs->users != 1)
94390+ if (atomic_read(&current->fs->users) != 1)
94391 return -EINVAL;
94392
94393 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
94394diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94395index c8eac43..4b5f08f 100644
94396--- a/kernel/utsname_sysctl.c
94397+++ b/kernel/utsname_sysctl.c
94398@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94399 static int proc_do_uts_string(struct ctl_table *table, int write,
94400 void __user *buffer, size_t *lenp, loff_t *ppos)
94401 {
94402- struct ctl_table uts_table;
94403+ ctl_table_no_const uts_table;
94404 int r;
94405 memcpy(&uts_table, table, sizeof(uts_table));
94406 uts_table.data = get_uts(table, write);
94407diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94408index 70bf118..4be3c37 100644
94409--- a/kernel/watchdog.c
94410+++ b/kernel/watchdog.c
94411@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94412 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94413 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94414
94415-static struct smp_hotplug_thread watchdog_threads = {
94416+static struct smp_hotplug_thread watchdog_threads __read_only = {
94417 .store = &softlockup_watchdog,
94418 .thread_should_run = watchdog_should_run,
94419 .thread_fn = watchdog,
94420diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94421index beeeac9..65cbfb3 100644
94422--- a/kernel/workqueue.c
94423+++ b/kernel/workqueue.c
94424@@ -4517,7 +4517,7 @@ static void rebind_workers(struct worker_pool *pool)
94425 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94426 worker_flags |= WORKER_REBOUND;
94427 worker_flags &= ~WORKER_UNBOUND;
94428- ACCESS_ONCE(worker->flags) = worker_flags;
94429+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94430 }
94431
94432 spin_unlock_irq(&pool->lock);
94433diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94434index 5f2ce61..85a0b1b 100644
94435--- a/lib/Kconfig.debug
94436+++ b/lib/Kconfig.debug
94437@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
94438
94439 config DEBUG_WW_MUTEX_SLOWPATH
94440 bool "Wait/wound mutex debugging: Slowpath testing"
94441- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94442+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94443 select DEBUG_LOCK_ALLOC
94444 select DEBUG_SPINLOCK
94445 select DEBUG_MUTEXES
94446@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94447
94448 config DEBUG_LOCK_ALLOC
94449 bool "Lock debugging: detect incorrect freeing of live locks"
94450- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94451+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94452 select DEBUG_SPINLOCK
94453 select DEBUG_MUTEXES
94454 select LOCKDEP
94455@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
94456
94457 config PROVE_LOCKING
94458 bool "Lock debugging: prove locking correctness"
94459- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94460+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94461 select LOCKDEP
94462 select DEBUG_SPINLOCK
94463 select DEBUG_MUTEXES
94464@@ -992,7 +992,7 @@ config LOCKDEP
94465
94466 config LOCK_STAT
94467 bool "Lock usage statistics"
94468- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94469+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94470 select LOCKDEP
94471 select DEBUG_SPINLOCK
94472 select DEBUG_MUTEXES
94473@@ -1453,6 +1453,7 @@ config LATENCYTOP
94474 depends on DEBUG_KERNEL
94475 depends on STACKTRACE_SUPPORT
94476 depends on PROC_FS
94477+ depends on !GRKERNSEC_HIDESYM
94478 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94479 select KALLSYMS
94480 select KALLSYMS_ALL
94481@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94482 config DEBUG_STRICT_USER_COPY_CHECKS
94483 bool "Strict user copy size checks"
94484 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94485- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94486+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94487 help
94488 Enabling this option turns a certain set of sanity checks for user
94489 copy operations into compile time failures.
94490@@ -1597,7 +1598,7 @@ endmenu # runtime tests
94491
94492 config PROVIDE_OHCI1394_DMA_INIT
94493 bool "Remote debugging over FireWire early on boot"
94494- depends on PCI && X86
94495+ depends on PCI && X86 && !GRKERNSEC
94496 help
94497 If you want to debug problems which hang or crash the kernel early
94498 on boot and the crashing machine has a FireWire port, you can use
94499diff --git a/lib/Makefile b/lib/Makefile
94500index 3c3b30b..ca29102 100644
94501--- a/lib/Makefile
94502+++ b/lib/Makefile
94503@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94504 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94505 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94506 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94507-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94508+obj-y += list_debug.o
94509 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94510
94511 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94512diff --git a/lib/average.c b/lib/average.c
94513index 114d1be..ab0350c 100644
94514--- a/lib/average.c
94515+++ b/lib/average.c
94516@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94517 {
94518 unsigned long internal = ACCESS_ONCE(avg->internal);
94519
94520- ACCESS_ONCE(avg->internal) = internal ?
94521+ ACCESS_ONCE_RW(avg->internal) = internal ?
94522 (((internal << avg->weight) - internal) +
94523 (val << avg->factor)) >> avg->weight :
94524 (val << avg->factor);
94525diff --git a/lib/bitmap.c b/lib/bitmap.c
94526index 324ea9e..46b1ae2 100644
94527--- a/lib/bitmap.c
94528+++ b/lib/bitmap.c
94529@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
94530 }
94531 EXPORT_SYMBOL(__bitmap_subset);
94532
94533-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94534+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94535 {
94536 unsigned int k, lim = bits/BITS_PER_LONG;
94537 int w = 0;
94538@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94539 {
94540 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94541 u32 chunk;
94542- const char __user __force *ubuf = (const char __user __force *)buf;
94543+ const char __user *ubuf = (const char __force_user *)buf;
94544
94545 bitmap_zero(maskp, nmaskbits);
94546
94547@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
94548 {
94549 if (!access_ok(VERIFY_READ, ubuf, ulen))
94550 return -EFAULT;
94551- return __bitmap_parse((const char __force *)ubuf,
94552+ return __bitmap_parse((const char __force_kernel *)ubuf,
94553 ulen, 1, maskp, nmaskbits);
94554
94555 }
94556@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94557 {
94558 unsigned a, b;
94559 int c, old_c, totaldigits;
94560- const char __user __force *ubuf = (const char __user __force *)buf;
94561+ const char __user *ubuf = (const char __force_user *)buf;
94562 int exp_digit, in_range;
94563
94564 totaldigits = c = 0;
94565@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94566 {
94567 if (!access_ok(VERIFY_READ, ubuf, ulen))
94568 return -EFAULT;
94569- return __bitmap_parselist((const char __force *)ubuf,
94570+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94571 ulen, 1, maskp, nmaskbits);
94572 }
94573 EXPORT_SYMBOL(bitmap_parselist_user);
94574diff --git a/lib/bug.c b/lib/bug.c
94575index 0c3bd95..5a615a1 100644
94576--- a/lib/bug.c
94577+++ b/lib/bug.c
94578@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94579 return BUG_TRAP_TYPE_NONE;
94580
94581 bug = find_bug(bugaddr);
94582+ if (!bug)
94583+ return BUG_TRAP_TYPE_NONE;
94584
94585 file = NULL;
94586 line = 0;
94587diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94588index 547f7f9..a6d4ba0 100644
94589--- a/lib/debugobjects.c
94590+++ b/lib/debugobjects.c
94591@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94592 if (limit > 4)
94593 return;
94594
94595- is_on_stack = object_is_on_stack(addr);
94596+ is_on_stack = object_starts_on_stack(addr);
94597 if (is_on_stack == onstack)
94598 return;
94599
94600diff --git a/lib/div64.c b/lib/div64.c
94601index 4382ad7..08aa558 100644
94602--- a/lib/div64.c
94603+++ b/lib/div64.c
94604@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94605 EXPORT_SYMBOL(__div64_32);
94606
94607 #ifndef div_s64_rem
94608-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94609+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94610 {
94611 u64 quotient;
94612
94613@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94614 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94615 */
94616 #ifndef div64_u64
94617-u64 div64_u64(u64 dividend, u64 divisor)
94618+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94619 {
94620 u32 high = divisor >> 32;
94621 u64 quot;
94622diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94623index 9722bd2..0d826f4 100644
94624--- a/lib/dma-debug.c
94625+++ b/lib/dma-debug.c
94626@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94627
94628 void dma_debug_add_bus(struct bus_type *bus)
94629 {
94630- struct notifier_block *nb;
94631+ notifier_block_no_const *nb;
94632
94633 if (dma_debug_disabled())
94634 return;
94635@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94636
94637 static void check_for_stack(struct device *dev, void *addr)
94638 {
94639- if (object_is_on_stack(addr))
94640+ if (object_starts_on_stack(addr))
94641 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
94642 "stack [addr=%p]\n", addr);
94643 }
94644diff --git a/lib/inflate.c b/lib/inflate.c
94645index 013a761..c28f3fc 100644
94646--- a/lib/inflate.c
94647+++ b/lib/inflate.c
94648@@ -269,7 +269,7 @@ static void free(void *where)
94649 malloc_ptr = free_mem_ptr;
94650 }
94651 #else
94652-#define malloc(a) kmalloc(a, GFP_KERNEL)
94653+#define malloc(a) kmalloc((a), GFP_KERNEL)
94654 #define free(a) kfree(a)
94655 #endif
94656
94657diff --git a/lib/ioremap.c b/lib/ioremap.c
94658index 0c9216c..863bd89 100644
94659--- a/lib/ioremap.c
94660+++ b/lib/ioremap.c
94661@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94662 unsigned long next;
94663
94664 phys_addr -= addr;
94665- pmd = pmd_alloc(&init_mm, pud, addr);
94666+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94667 if (!pmd)
94668 return -ENOMEM;
94669 do {
94670@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94671 unsigned long next;
94672
94673 phys_addr -= addr;
94674- pud = pud_alloc(&init_mm, pgd, addr);
94675+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94676 if (!pud)
94677 return -ENOMEM;
94678 do {
94679diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94680index bd2bea9..6b3c95e 100644
94681--- a/lib/is_single_threaded.c
94682+++ b/lib/is_single_threaded.c
94683@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94684 struct task_struct *p, *t;
94685 bool ret;
94686
94687+ if (!mm)
94688+ return true;
94689+
94690 if (atomic_read(&task->signal->live) != 1)
94691 return false;
94692
94693diff --git a/lib/kobject.c b/lib/kobject.c
94694index 03d4ab3..46f6374 100644
94695--- a/lib/kobject.c
94696+++ b/lib/kobject.c
94697@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94698
94699
94700 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94701-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94702+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94703
94704-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94705+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94706 {
94707 enum kobj_ns_type type = ops->type;
94708 int error;
94709diff --git a/lib/list_debug.c b/lib/list_debug.c
94710index c24c2f7..f0296f4 100644
94711--- a/lib/list_debug.c
94712+++ b/lib/list_debug.c
94713@@ -11,7 +11,9 @@
94714 #include <linux/bug.h>
94715 #include <linux/kernel.h>
94716 #include <linux/rculist.h>
94717+#include <linux/mm.h>
94718
94719+#ifdef CONFIG_DEBUG_LIST
94720 /*
94721 * Insert a new entry between two known consecutive entries.
94722 *
94723@@ -19,21 +21,40 @@
94724 * the prev/next entries already!
94725 */
94726
94727+static bool __list_add_debug(struct list_head *new,
94728+ struct list_head *prev,
94729+ struct list_head *next)
94730+{
94731+ if (unlikely(next->prev != prev)) {
94732+ printk(KERN_ERR "list_add corruption. next->prev should be "
94733+ "prev (%p), but was %p. (next=%p).\n",
94734+ prev, next->prev, next);
94735+ BUG();
94736+ return false;
94737+ }
94738+ if (unlikely(prev->next != next)) {
94739+ printk(KERN_ERR "list_add corruption. prev->next should be "
94740+ "next (%p), but was %p. (prev=%p).\n",
94741+ next, prev->next, prev);
94742+ BUG();
94743+ return false;
94744+ }
94745+ if (unlikely(new == prev || new == next)) {
94746+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94747+ new, prev, next);
94748+ BUG();
94749+ return false;
94750+ }
94751+ return true;
94752+}
94753+
94754 void __list_add(struct list_head *new,
94755- struct list_head *prev,
94756- struct list_head *next)
94757+ struct list_head *prev,
94758+ struct list_head *next)
94759 {
94760- WARN(next->prev != prev,
94761- "list_add corruption. next->prev should be "
94762- "prev (%p), but was %p. (next=%p).\n",
94763- prev, next->prev, next);
94764- WARN(prev->next != next,
94765- "list_add corruption. prev->next should be "
94766- "next (%p), but was %p. (prev=%p).\n",
94767- next, prev->next, prev);
94768- WARN(new == prev || new == next,
94769- "list_add double add: new=%p, prev=%p, next=%p.\n",
94770- new, prev, next);
94771+ if (!__list_add_debug(new, prev, next))
94772+ return;
94773+
94774 next->prev = new;
94775 new->next = next;
94776 new->prev = prev;
94777@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
94778 }
94779 EXPORT_SYMBOL(__list_add);
94780
94781-void __list_del_entry(struct list_head *entry)
94782+static bool __list_del_entry_debug(struct list_head *entry)
94783 {
94784 struct list_head *prev, *next;
94785
94786 prev = entry->prev;
94787 next = entry->next;
94788
94789- if (WARN(next == LIST_POISON1,
94790- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94791- entry, LIST_POISON1) ||
94792- WARN(prev == LIST_POISON2,
94793- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94794- entry, LIST_POISON2) ||
94795- WARN(prev->next != entry,
94796- "list_del corruption. prev->next should be %p, "
94797- "but was %p\n", entry, prev->next) ||
94798- WARN(next->prev != entry,
94799- "list_del corruption. next->prev should be %p, "
94800- "but was %p\n", entry, next->prev))
94801+ if (unlikely(next == LIST_POISON1)) {
94802+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94803+ entry, LIST_POISON1);
94804+ BUG();
94805+ return false;
94806+ }
94807+ if (unlikely(prev == LIST_POISON2)) {
94808+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94809+ entry, LIST_POISON2);
94810+ BUG();
94811+ return false;
94812+ }
94813+ if (unlikely(entry->prev->next != entry)) {
94814+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
94815+ "but was %p\n", entry, prev->next);
94816+ BUG();
94817+ return false;
94818+ }
94819+ if (unlikely(entry->next->prev != entry)) {
94820+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
94821+ "but was %p\n", entry, next->prev);
94822+ BUG();
94823+ return false;
94824+ }
94825+ return true;
94826+}
94827+
94828+void __list_del_entry(struct list_head *entry)
94829+{
94830+ if (!__list_del_entry_debug(entry))
94831 return;
94832
94833- __list_del(prev, next);
94834+ __list_del(entry->prev, entry->next);
94835 }
94836 EXPORT_SYMBOL(__list_del_entry);
94837
94838@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
94839 void __list_add_rcu(struct list_head *new,
94840 struct list_head *prev, struct list_head *next)
94841 {
94842- WARN(next->prev != prev,
94843- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
94844- prev, next->prev, next);
94845- WARN(prev->next != next,
94846- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
94847- next, prev->next, prev);
94848+ if (!__list_add_debug(new, prev, next))
94849+ return;
94850+
94851 new->next = next;
94852 new->prev = prev;
94853 rcu_assign_pointer(list_next_rcu(prev), new);
94854 next->prev = new;
94855 }
94856 EXPORT_SYMBOL(__list_add_rcu);
94857+#endif
94858+
94859+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
94860+{
94861+#ifdef CONFIG_DEBUG_LIST
94862+ if (!__list_add_debug(new, prev, next))
94863+ return;
94864+#endif
94865+
94866+ pax_open_kernel();
94867+ next->prev = new;
94868+ new->next = next;
94869+ new->prev = prev;
94870+ prev->next = new;
94871+ pax_close_kernel();
94872+}
94873+EXPORT_SYMBOL(__pax_list_add);
94874+
94875+void pax_list_del(struct list_head *entry)
94876+{
94877+#ifdef CONFIG_DEBUG_LIST
94878+ if (!__list_del_entry_debug(entry))
94879+ return;
94880+#endif
94881+
94882+ pax_open_kernel();
94883+ __list_del(entry->prev, entry->next);
94884+ entry->next = LIST_POISON1;
94885+ entry->prev = LIST_POISON2;
94886+ pax_close_kernel();
94887+}
94888+EXPORT_SYMBOL(pax_list_del);
94889+
94890+void pax_list_del_init(struct list_head *entry)
94891+{
94892+ pax_open_kernel();
94893+ __list_del(entry->prev, entry->next);
94894+ INIT_LIST_HEAD(entry);
94895+ pax_close_kernel();
94896+}
94897+EXPORT_SYMBOL(pax_list_del_init);
94898+
94899+void __pax_list_add_rcu(struct list_head *new,
94900+ struct list_head *prev, struct list_head *next)
94901+{
94902+#ifdef CONFIG_DEBUG_LIST
94903+ if (!__list_add_debug(new, prev, next))
94904+ return;
94905+#endif
94906+
94907+ pax_open_kernel();
94908+ new->next = next;
94909+ new->prev = prev;
94910+ rcu_assign_pointer(list_next_rcu(prev), new);
94911+ next->prev = new;
94912+ pax_close_kernel();
94913+}
94914+EXPORT_SYMBOL(__pax_list_add_rcu);
94915+
94916+void pax_list_del_rcu(struct list_head *entry)
94917+{
94918+#ifdef CONFIG_DEBUG_LIST
94919+ if (!__list_del_entry_debug(entry))
94920+ return;
94921+#endif
94922+
94923+ pax_open_kernel();
94924+ __list_del(entry->prev, entry->next);
94925+ entry->next = LIST_POISON1;
94926+ entry->prev = LIST_POISON2;
94927+ pax_close_kernel();
94928+}
94929+EXPORT_SYMBOL(pax_list_del_rcu);
94930diff --git a/lib/lockref.c b/lib/lockref.c
94931index d2233de..fa1a2f6 100644
94932--- a/lib/lockref.c
94933+++ b/lib/lockref.c
94934@@ -48,13 +48,13 @@
94935 void lockref_get(struct lockref *lockref)
94936 {
94937 CMPXCHG_LOOP(
94938- new.count++;
94939+ __lockref_inc(&new);
94940 ,
94941 return;
94942 );
94943
94944 spin_lock(&lockref->lock);
94945- lockref->count++;
94946+ __lockref_inc(lockref);
94947 spin_unlock(&lockref->lock);
94948 }
94949 EXPORT_SYMBOL(lockref_get);
94950@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
94951 int retval;
94952
94953 CMPXCHG_LOOP(
94954- new.count++;
94955+ __lockref_inc(&new);
94956 if (!old.count)
94957 return 0;
94958 ,
94959@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
94960 spin_lock(&lockref->lock);
94961 retval = 0;
94962 if (lockref->count) {
94963- lockref->count++;
94964+ __lockref_inc(lockref);
94965 retval = 1;
94966 }
94967 spin_unlock(&lockref->lock);
94968@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
94969 int lockref_get_or_lock(struct lockref *lockref)
94970 {
94971 CMPXCHG_LOOP(
94972- new.count++;
94973+ __lockref_inc(&new);
94974 if (!old.count)
94975 break;
94976 ,
94977@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
94978 spin_lock(&lockref->lock);
94979 if (!lockref->count)
94980 return 0;
94981- lockref->count++;
94982+ __lockref_inc(lockref);
94983 spin_unlock(&lockref->lock);
94984 return 1;
94985 }
94986@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
94987 int lockref_put_or_lock(struct lockref *lockref)
94988 {
94989 CMPXCHG_LOOP(
94990- new.count--;
94991+ __lockref_dec(&new);
94992 if (old.count <= 1)
94993 break;
94994 ,
94995@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
94996 spin_lock(&lockref->lock);
94997 if (lockref->count <= 1)
94998 return 0;
94999- lockref->count--;
95000+ __lockref_dec(lockref);
95001 spin_unlock(&lockref->lock);
95002 return 1;
95003 }
95004@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95005 int retval;
95006
95007 CMPXCHG_LOOP(
95008- new.count++;
95009+ __lockref_inc(&new);
95010 if ((int)old.count < 0)
95011 return 0;
95012 ,
95013@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95014 spin_lock(&lockref->lock);
95015 retval = 0;
95016 if ((int) lockref->count >= 0) {
95017- lockref->count++;
95018+ __lockref_inc(lockref);
95019 retval = 1;
95020 }
95021 spin_unlock(&lockref->lock);
95022diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95023index 6111bcb..02e816b 100644
95024--- a/lib/percpu-refcount.c
95025+++ b/lib/percpu-refcount.c
95026@@ -31,7 +31,7 @@
95027 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95028 */
95029
95030-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95031+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95032
95033 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95034
95035diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95036index 3291a8e..346a91e 100644
95037--- a/lib/radix-tree.c
95038+++ b/lib/radix-tree.c
95039@@ -67,7 +67,7 @@ struct radix_tree_preload {
95040 int nr;
95041 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95042 };
95043-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95044+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95045
95046 static inline void *ptr_to_indirect(void *ptr)
95047 {
95048diff --git a/lib/random32.c b/lib/random32.c
95049index 0bee183..526f12f 100644
95050--- a/lib/random32.c
95051+++ b/lib/random32.c
95052@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95053 }
95054 #endif
95055
95056-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95057+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95058
95059 /**
95060 * prandom_u32_state - seeded pseudo-random number generator.
95061diff --git a/lib/rbtree.c b/lib/rbtree.c
95062index c16c81a..4dcbda1 100644
95063--- a/lib/rbtree.c
95064+++ b/lib/rbtree.c
95065@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95066 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95067
95068 static const struct rb_augment_callbacks dummy_callbacks = {
95069- dummy_propagate, dummy_copy, dummy_rotate
95070+ .propagate = dummy_propagate,
95071+ .copy = dummy_copy,
95072+ .rotate = dummy_rotate
95073 };
95074
95075 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95076diff --git a/lib/show_mem.c b/lib/show_mem.c
95077index 7de89f4..00d70b7 100644
95078--- a/lib/show_mem.c
95079+++ b/lib/show_mem.c
95080@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
95081 quicklist_total_size());
95082 #endif
95083 #ifdef CONFIG_MEMORY_FAILURE
95084- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95085+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95086 #endif
95087 }
95088diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95089index bb2b201..46abaf9 100644
95090--- a/lib/strncpy_from_user.c
95091+++ b/lib/strncpy_from_user.c
95092@@ -21,7 +21,7 @@
95093 */
95094 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95095 {
95096- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95097+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95098 long res = 0;
95099
95100 /*
95101diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95102index a28df52..3d55877 100644
95103--- a/lib/strnlen_user.c
95104+++ b/lib/strnlen_user.c
95105@@ -26,7 +26,7 @@
95106 */
95107 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95108 {
95109- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95110+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95111 long align, res = 0;
95112 unsigned long c;
95113
95114diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95115index 4abda07..b9d3765 100644
95116--- a/lib/swiotlb.c
95117+++ b/lib/swiotlb.c
95118@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95119
95120 void
95121 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95122- dma_addr_t dev_addr)
95123+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95124 {
95125 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95126
95127diff --git a/lib/usercopy.c b/lib/usercopy.c
95128index 4f5b1dd..7cab418 100644
95129--- a/lib/usercopy.c
95130+++ b/lib/usercopy.c
95131@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95132 WARN(1, "Buffer overflow detected!\n");
95133 }
95134 EXPORT_SYMBOL(copy_from_user_overflow);
95135+
95136+void copy_to_user_overflow(void)
95137+{
95138+ WARN(1, "Buffer overflow detected!\n");
95139+}
95140+EXPORT_SYMBOL(copy_to_user_overflow);
95141diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95142index ec337f6..8484eb2 100644
95143--- a/lib/vsprintf.c
95144+++ b/lib/vsprintf.c
95145@@ -16,6 +16,9 @@
95146 * - scnprintf and vscnprintf
95147 */
95148
95149+#ifdef CONFIG_GRKERNSEC_HIDESYM
95150+#define __INCLUDED_BY_HIDESYM 1
95151+#endif
95152 #include <stdarg.h>
95153 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95154 #include <linux/types.h>
95155@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95156 #ifdef CONFIG_KALLSYMS
95157 if (*fmt == 'B')
95158 sprint_backtrace(sym, value);
95159- else if (*fmt != 'f' && *fmt != 's')
95160+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95161 sprint_symbol(sym, value);
95162 else
95163 sprint_symbol_no_offset(sym, value);
95164@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
95165 return number(buf, end, num, spec);
95166 }
95167
95168+#ifdef CONFIG_GRKERNSEC_HIDESYM
95169+int kptr_restrict __read_mostly = 2;
95170+#else
95171 int kptr_restrict __read_mostly;
95172+#endif
95173
95174 /*
95175 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95176@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
95177 *
95178 * - 'F' For symbolic function descriptor pointers with offset
95179 * - 'f' For simple symbolic function names without offset
95180+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95181 * - 'S' For symbolic direct pointers with offset
95182 * - 's' For symbolic direct pointers without offset
95183+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95184 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95185 * - 'B' For backtraced symbolic direct pointers with offset
95186 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95187@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95188
95189 if (!ptr && *fmt != 'K') {
95190 /*
95191- * Print (null) with the same width as a pointer so it makes
95192+ * Print (nil) with the same width as a pointer so it makes
95193 * tabular output look nice.
95194 */
95195 if (spec.field_width == -1)
95196 spec.field_width = default_width;
95197- return string(buf, end, "(null)", spec);
95198+ return string(buf, end, "(nil)", spec);
95199 }
95200
95201 switch (*fmt) {
95202@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95203 /* Fallthrough */
95204 case 'S':
95205 case 's':
95206+#ifdef CONFIG_GRKERNSEC_HIDESYM
95207+ break;
95208+#else
95209+ return symbol_string(buf, end, ptr, spec, fmt);
95210+#endif
95211+ case 'X':
95212+ ptr = dereference_function_descriptor(ptr);
95213+ case 'A':
95214 case 'B':
95215 return symbol_string(buf, end, ptr, spec, fmt);
95216 case 'R':
95217@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95218 va_end(va);
95219 return buf;
95220 }
95221+ case 'P':
95222+ break;
95223 case 'K':
95224 /*
95225 * %pK cannot be used in IRQ context because its test
95226@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95227 ((const struct file *)ptr)->f_path.dentry,
95228 spec, fmt);
95229 }
95230+
95231+#ifdef CONFIG_GRKERNSEC_HIDESYM
95232+ /* 'P' = approved pointers to copy to userland,
95233+ as in the /proc/kallsyms case, as we make it display nothing
95234+ for non-root users, and the real contents for root users
95235+ 'X' = approved simple symbols
95236+ Also ignore 'K' pointers, since we force their NULLing for non-root users
95237+ above
95238+ */
95239+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95240+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95241+ dump_stack();
95242+ ptr = NULL;
95243+ }
95244+#endif
95245+
95246 spec.flags |= SMALL;
95247 if (spec.field_width == -1) {
95248 spec.field_width = default_width;
95249@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95250 typeof(type) value; \
95251 if (sizeof(type) == 8) { \
95252 args = PTR_ALIGN(args, sizeof(u32)); \
95253- *(u32 *)&value = *(u32 *)args; \
95254- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95255+ *(u32 *)&value = *(const u32 *)args; \
95256+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95257 } else { \
95258 args = PTR_ALIGN(args, sizeof(type)); \
95259- value = *(typeof(type) *)args; \
95260+ value = *(const typeof(type) *)args; \
95261 } \
95262 args += sizeof(type); \
95263 value; \
95264@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95265 case FORMAT_TYPE_STR: {
95266 const char *str_arg = args;
95267 args += strlen(str_arg) + 1;
95268- str = string(str, end, (char *)str_arg, spec);
95269+ str = string(str, end, str_arg, spec);
95270 break;
95271 }
95272
95273diff --git a/localversion-grsec b/localversion-grsec
95274new file mode 100644
95275index 0000000..7cd6065
95276--- /dev/null
95277+++ b/localversion-grsec
95278@@ -0,0 +1 @@
95279+-grsec
95280diff --git a/mm/Kconfig b/mm/Kconfig
95281index 1d1ae6b..0f05885 100644
95282--- a/mm/Kconfig
95283+++ b/mm/Kconfig
95284@@ -341,10 +341,11 @@ config KSM
95285 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95286
95287 config DEFAULT_MMAP_MIN_ADDR
95288- int "Low address space to protect from user allocation"
95289+ int "Low address space to protect from user allocation"
95290 depends on MMU
95291- default 4096
95292- help
95293+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95294+ default 65536
95295+ help
95296 This is the portion of low virtual memory which should be protected
95297 from userspace allocation. Keeping a user from writing to low pages
95298 can help reduce the impact of kernel NULL pointer bugs.
95299@@ -375,7 +376,7 @@ config MEMORY_FAILURE
95300
95301 config HWPOISON_INJECT
95302 tristate "HWPoison pages injector"
95303- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95304+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95305 select PROC_PAGE_MONITOR
95306
95307 config NOMMU_INITIAL_TRIM_EXCESS
95308diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95309index 0ae0df5..82ac56b 100644
95310--- a/mm/backing-dev.c
95311+++ b/mm/backing-dev.c
95312@@ -12,7 +12,7 @@
95313 #include <linux/device.h>
95314 #include <trace/events/writeback.h>
95315
95316-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95317+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95318
95319 struct backing_dev_info default_backing_dev_info = {
95320 .name = "default",
95321@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95322 return err;
95323
95324 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95325- atomic_long_inc_return(&bdi_seq));
95326+ atomic_long_inc_return_unchecked(&bdi_seq));
95327 if (err) {
95328 bdi_destroy(bdi);
95329 return err;
95330diff --git a/mm/filemap.c b/mm/filemap.c
95331index 673e458..7192013 100644
95332--- a/mm/filemap.c
95333+++ b/mm/filemap.c
95334@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95335 struct address_space *mapping = file->f_mapping;
95336
95337 if (!mapping->a_ops->readpage)
95338- return -ENOEXEC;
95339+ return -ENODEV;
95340 file_accessed(file);
95341 vma->vm_ops = &generic_file_vm_ops;
95342 return 0;
95343@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95344 *pos = i_size_read(inode);
95345
95346 if (limit != RLIM_INFINITY) {
95347+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95348 if (*pos >= limit) {
95349 send_sig(SIGXFSZ, current, 0);
95350 return -EFBIG;
95351diff --git a/mm/fremap.c b/mm/fremap.c
95352index 2805d71..8b56e7d 100644
95353--- a/mm/fremap.c
95354+++ b/mm/fremap.c
95355@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95356 retry:
95357 vma = find_vma(mm, start);
95358
95359+#ifdef CONFIG_PAX_SEGMEXEC
95360+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95361+ goto out;
95362+#endif
95363+
95364 /*
95365 * Make sure the vma is shared, that it supports prefaulting,
95366 * and that the remapped range is valid and fully within
95367diff --git a/mm/gup.c b/mm/gup.c
95368index 9b2afbf..647297c 100644
95369--- a/mm/gup.c
95370+++ b/mm/gup.c
95371@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95372 unsigned int fault_flags = 0;
95373 int ret;
95374
95375- /* For mlock, just skip the stack guard page. */
95376- if ((*flags & FOLL_MLOCK) &&
95377- (stack_guard_page_start(vma, address) ||
95378- stack_guard_page_end(vma, address + PAGE_SIZE)))
95379- return -ENOENT;
95380 if (*flags & FOLL_WRITE)
95381 fault_flags |= FAULT_FLAG_WRITE;
95382 if (nonblocking)
95383@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95384 if (!(gup_flags & FOLL_FORCE))
95385 gup_flags |= FOLL_NUMA;
95386
95387- do {
95388+ while (nr_pages) {
95389 struct page *page;
95390 unsigned int foll_flags = gup_flags;
95391 unsigned int page_increm;
95392
95393 /* first iteration or cross vma bound */
95394 if (!vma || start >= vma->vm_end) {
95395- vma = find_extend_vma(mm, start);
95396+ vma = find_vma(mm, start);
95397 if (!vma && in_gate_area(mm, start)) {
95398 int ret;
95399 ret = get_gate_page(mm, start & PAGE_MASK,
95400@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95401 goto next_page;
95402 }
95403
95404- if (!vma || check_vma_flags(vma, gup_flags))
95405+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95406 return i ? : -EFAULT;
95407 if (is_vm_hugetlb_page(vma)) {
95408 i = follow_hugetlb_page(mm, vma, pages, vmas,
95409@@ -518,7 +513,7 @@ next_page:
95410 i += page_increm;
95411 start += page_increm * PAGE_SIZE;
95412 nr_pages -= page_increm;
95413- } while (nr_pages);
95414+ }
95415 return i;
95416 }
95417 EXPORT_SYMBOL(__get_user_pages);
95418diff --git a/mm/highmem.c b/mm/highmem.c
95419index 123bcd3..0de52ba 100644
95420--- a/mm/highmem.c
95421+++ b/mm/highmem.c
95422@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95423 * So no dangers, even with speculative execution.
95424 */
95425 page = pte_page(pkmap_page_table[i]);
95426+ pax_open_kernel();
95427 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95428-
95429+ pax_close_kernel();
95430 set_page_address(page, NULL);
95431 need_flush = 1;
95432 }
95433@@ -259,9 +260,11 @@ start:
95434 }
95435 }
95436 vaddr = PKMAP_ADDR(last_pkmap_nr);
95437+
95438+ pax_open_kernel();
95439 set_pte_at(&init_mm, vaddr,
95440 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95441-
95442+ pax_close_kernel();
95443 pkmap_count[last_pkmap_nr] = 1;
95444 set_page_address(page, (void *)vaddr);
95445
95446diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95447index 267e419..394bed9 100644
95448--- a/mm/hugetlb.c
95449+++ b/mm/hugetlb.c
95450@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95451 struct ctl_table *table, int write,
95452 void __user *buffer, size_t *length, loff_t *ppos)
95453 {
95454+ ctl_table_no_const t;
95455 struct hstate *h = &default_hstate;
95456 unsigned long tmp = h->max_huge_pages;
95457 int ret;
95458@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95459 if (!hugepages_supported())
95460 return -ENOTSUPP;
95461
95462- table->data = &tmp;
95463- table->maxlen = sizeof(unsigned long);
95464- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95465+ t = *table;
95466+ t.data = &tmp;
95467+ t.maxlen = sizeof(unsigned long);
95468+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95469 if (ret)
95470 goto out;
95471
95472@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95473 struct hstate *h = &default_hstate;
95474 unsigned long tmp;
95475 int ret;
95476+ ctl_table_no_const hugetlb_table;
95477
95478 if (!hugepages_supported())
95479 return -ENOTSUPP;
95480@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95481 if (write && hstate_is_gigantic(h))
95482 return -EINVAL;
95483
95484- table->data = &tmp;
95485- table->maxlen = sizeof(unsigned long);
95486- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95487+ hugetlb_table = *table;
95488+ hugetlb_table.data = &tmp;
95489+ hugetlb_table.maxlen = sizeof(unsigned long);
95490+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95491 if (ret)
95492 goto out;
95493
95494@@ -2798,6 +2802,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95495 i_mmap_unlock_write(mapping);
95496 }
95497
95498+#ifdef CONFIG_PAX_SEGMEXEC
95499+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95500+{
95501+ struct mm_struct *mm = vma->vm_mm;
95502+ struct vm_area_struct *vma_m;
95503+ unsigned long address_m;
95504+ pte_t *ptep_m;
95505+
95506+ vma_m = pax_find_mirror_vma(vma);
95507+ if (!vma_m)
95508+ return;
95509+
95510+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95511+ address_m = address + SEGMEXEC_TASK_SIZE;
95512+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95513+ get_page(page_m);
95514+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95515+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95516+}
95517+#endif
95518+
95519 /*
95520 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95521 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95522@@ -2910,6 +2935,11 @@ retry_avoidcopy:
95523 make_huge_pte(vma, new_page, 1));
95524 page_remove_rmap(old_page);
95525 hugepage_add_new_anon_rmap(new_page, vma, address);
95526+
95527+#ifdef CONFIG_PAX_SEGMEXEC
95528+ pax_mirror_huge_pte(vma, address, new_page);
95529+#endif
95530+
95531 /* Make the old page be freed below */
95532 new_page = old_page;
95533 }
95534@@ -3070,6 +3100,10 @@ retry:
95535 && (vma->vm_flags & VM_SHARED)));
95536 set_huge_pte_at(mm, address, ptep, new_pte);
95537
95538+#ifdef CONFIG_PAX_SEGMEXEC
95539+ pax_mirror_huge_pte(vma, address, page);
95540+#endif
95541+
95542 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95543 /* Optimization, do the COW without a second fault */
95544 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95545@@ -3137,6 +3171,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95546 struct address_space *mapping;
95547 int need_wait_lock = 0;
95548
95549+#ifdef CONFIG_PAX_SEGMEXEC
95550+ struct vm_area_struct *vma_m;
95551+#endif
95552+
95553 address &= huge_page_mask(h);
95554
95555 ptep = huge_pte_offset(mm, address);
95556@@ -3150,6 +3188,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95557 VM_FAULT_SET_HINDEX(hstate_index(h));
95558 }
95559
95560+#ifdef CONFIG_PAX_SEGMEXEC
95561+ vma_m = pax_find_mirror_vma(vma);
95562+ if (vma_m) {
95563+ unsigned long address_m;
95564+
95565+ if (vma->vm_start > vma_m->vm_start) {
95566+ address_m = address;
95567+ address -= SEGMEXEC_TASK_SIZE;
95568+ vma = vma_m;
95569+ h = hstate_vma(vma);
95570+ } else
95571+ address_m = address + SEGMEXEC_TASK_SIZE;
95572+
95573+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95574+ return VM_FAULT_OOM;
95575+ address_m &= HPAGE_MASK;
95576+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95577+ }
95578+#endif
95579+
95580 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95581 if (!ptep)
95582 return VM_FAULT_OOM;
95583diff --git a/mm/internal.h b/mm/internal.h
95584index efad241..57ae4ca 100644
95585--- a/mm/internal.h
95586+++ b/mm/internal.h
95587@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95588
95589 extern int __isolate_free_page(struct page *page, unsigned int order);
95590 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95591+extern void free_compound_page(struct page *page);
95592 extern void prep_compound_page(struct page *page, unsigned long order);
95593 #ifdef CONFIG_MEMORY_FAILURE
95594 extern bool is_free_buddy_page(struct page *page);
95595@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
95596
95597 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95598 unsigned long, unsigned long,
95599- unsigned long, unsigned long);
95600+ unsigned long, unsigned long) __intentional_overflow(-1);
95601
95602 extern void set_pageblock_order(void);
95603 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95604diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95605index 3cda50c..032ba634 100644
95606--- a/mm/kmemleak.c
95607+++ b/mm/kmemleak.c
95608@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95609
95610 for (i = 0; i < object->trace_len; i++) {
95611 void *ptr = (void *)object->trace[i];
95612- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95613+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95614 }
95615 }
95616
95617@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95618 return -ENOMEM;
95619 }
95620
95621- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95622+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95623 &kmemleak_fops);
95624 if (!dentry)
95625 pr_warning("Failed to create the debugfs kmemleak file\n");
95626diff --git a/mm/maccess.c b/mm/maccess.c
95627index d53adf9..03a24bf 100644
95628--- a/mm/maccess.c
95629+++ b/mm/maccess.c
95630@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95631 set_fs(KERNEL_DS);
95632 pagefault_disable();
95633 ret = __copy_from_user_inatomic(dst,
95634- (__force const void __user *)src, size);
95635+ (const void __force_user *)src, size);
95636 pagefault_enable();
95637 set_fs(old_fs);
95638
95639@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95640
95641 set_fs(KERNEL_DS);
95642 pagefault_disable();
95643- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95644+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95645 pagefault_enable();
95646 set_fs(old_fs);
95647
95648diff --git a/mm/madvise.c b/mm/madvise.c
95649index a271adc..831d82f 100644
95650--- a/mm/madvise.c
95651+++ b/mm/madvise.c
95652@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95653 pgoff_t pgoff;
95654 unsigned long new_flags = vma->vm_flags;
95655
95656+#ifdef CONFIG_PAX_SEGMEXEC
95657+ struct vm_area_struct *vma_m;
95658+#endif
95659+
95660 switch (behavior) {
95661 case MADV_NORMAL:
95662 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95663@@ -126,6 +130,13 @@ success:
95664 /*
95665 * vm_flags is protected by the mmap_sem held in write mode.
95666 */
95667+
95668+#ifdef CONFIG_PAX_SEGMEXEC
95669+ vma_m = pax_find_mirror_vma(vma);
95670+ if (vma_m)
95671+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95672+#endif
95673+
95674 vma->vm_flags = new_flags;
95675
95676 out:
95677@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95678 struct vm_area_struct **prev,
95679 unsigned long start, unsigned long end)
95680 {
95681+
95682+#ifdef CONFIG_PAX_SEGMEXEC
95683+ struct vm_area_struct *vma_m;
95684+#endif
95685+
95686 *prev = vma;
95687 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95688 return -EINVAL;
95689@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95690 zap_page_range(vma, start, end - start, &details);
95691 } else
95692 zap_page_range(vma, start, end - start, NULL);
95693+
95694+#ifdef CONFIG_PAX_SEGMEXEC
95695+ vma_m = pax_find_mirror_vma(vma);
95696+ if (vma_m) {
95697+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95698+ struct zap_details details = {
95699+ .nonlinear_vma = vma_m,
95700+ .last_index = ULONG_MAX,
95701+ };
95702+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95703+ } else
95704+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95705+ }
95706+#endif
95707+
95708 return 0;
95709 }
95710
95711@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95712 if (end < start)
95713 return error;
95714
95715+#ifdef CONFIG_PAX_SEGMEXEC
95716+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95717+ if (end > SEGMEXEC_TASK_SIZE)
95718+ return error;
95719+ } else
95720+#endif
95721+
95722+ if (end > TASK_SIZE)
95723+ return error;
95724+
95725 error = 0;
95726 if (end == start)
95727 return error;
95728diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95729index 20c29dd..22bd8e2 100644
95730--- a/mm/memory-failure.c
95731+++ b/mm/memory-failure.c
95732@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95733
95734 int sysctl_memory_failure_recovery __read_mostly = 1;
95735
95736-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95737+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95738
95739 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
95740
95741@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
95742 pfn, t->comm, t->pid);
95743 si.si_signo = SIGBUS;
95744 si.si_errno = 0;
95745- si.si_addr = (void *)addr;
95746+ si.si_addr = (void __user *)addr;
95747 #ifdef __ARCH_SI_TRAPNO
95748 si.si_trapno = trapno;
95749 #endif
95750@@ -786,7 +786,7 @@ static struct page_state {
95751 unsigned long res;
95752 char *msg;
95753 int (*action)(struct page *p, unsigned long pfn);
95754-} error_states[] = {
95755+} __do_const error_states[] = {
95756 { reserved, reserved, "reserved kernel", me_kernel },
95757 /*
95758 * free pages are specially detected outside this table:
95759@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95760 nr_pages = 1 << compound_order(hpage);
95761 else /* normal page or thp */
95762 nr_pages = 1;
95763- atomic_long_add(nr_pages, &num_poisoned_pages);
95764+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
95765
95766 /*
95767 * We need/can do nothing about count=0 pages.
95768@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95769 if (PageHWPoison(hpage)) {
95770 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
95771 || (p != hpage && TestSetPageHWPoison(hpage))) {
95772- atomic_long_sub(nr_pages, &num_poisoned_pages);
95773+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95774 unlock_page(hpage);
95775 return 0;
95776 }
95777@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95778 */
95779 if (!PageHWPoison(p)) {
95780 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
95781- atomic_long_sub(nr_pages, &num_poisoned_pages);
95782+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95783 put_page(hpage);
95784 res = 0;
95785 goto out;
95786 }
95787 if (hwpoison_filter(p)) {
95788 if (TestClearPageHWPoison(p))
95789- atomic_long_sub(nr_pages, &num_poisoned_pages);
95790+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95791 unlock_page(hpage);
95792 put_page(hpage);
95793 return 0;
95794@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
95795 return 0;
95796 }
95797 if (TestClearPageHWPoison(p))
95798- atomic_long_dec(&num_poisoned_pages);
95799+ atomic_long_dec_unchecked(&num_poisoned_pages);
95800 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
95801 return 0;
95802 }
95803@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
95804 */
95805 if (TestClearPageHWPoison(page)) {
95806 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
95807- atomic_long_sub(nr_pages, &num_poisoned_pages);
95808+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95809 freeit = 1;
95810 if (PageHuge(page))
95811 clear_page_hwpoison_huge_page(page);
95812@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
95813 if (PageHuge(page)) {
95814 set_page_hwpoison_huge_page(hpage);
95815 dequeue_hwpoisoned_huge_page(hpage);
95816- atomic_long_add(1 << compound_order(hpage),
95817+ atomic_long_add_unchecked(1 << compound_order(hpage),
95818 &num_poisoned_pages);
95819 } else {
95820 SetPageHWPoison(page);
95821- atomic_long_inc(&num_poisoned_pages);
95822+ atomic_long_inc_unchecked(&num_poisoned_pages);
95823 }
95824 }
95825 return ret;
95826@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
95827 put_page(page);
95828 pr_info("soft_offline: %#lx: invalidated\n", pfn);
95829 SetPageHWPoison(page);
95830- atomic_long_inc(&num_poisoned_pages);
95831+ atomic_long_inc_unchecked(&num_poisoned_pages);
95832 return 0;
95833 }
95834
95835@@ -1659,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags)
95836 if (!is_free_buddy_page(page))
95837 pr_info("soft offline: %#lx: page leaked\n",
95838 pfn);
95839- atomic_long_inc(&num_poisoned_pages);
95840+ atomic_long_inc_unchecked(&num_poisoned_pages);
95841 }
95842 } else {
95843 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
95844@@ -1729,11 +1729,11 @@ int soft_offline_page(struct page *page, int flags)
95845 if (PageHuge(page)) {
95846 set_page_hwpoison_huge_page(hpage);
95847 dequeue_hwpoisoned_huge_page(hpage);
95848- atomic_long_add(1 << compound_order(hpage),
95849+ atomic_long_add_unchecked(1 << compound_order(hpage),
95850 &num_poisoned_pages);
95851 } else {
95852 SetPageHWPoison(page);
95853- atomic_long_inc(&num_poisoned_pages);
95854+ atomic_long_inc_unchecked(&num_poisoned_pages);
95855 }
95856 }
95857 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
95858diff --git a/mm/memory.c b/mm/memory.c
95859index 6aa7822..3c76005 100644
95860--- a/mm/memory.c
95861+++ b/mm/memory.c
95862@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95863 free_pte_range(tlb, pmd, addr);
95864 } while (pmd++, addr = next, addr != end);
95865
95866+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
95867 start &= PUD_MASK;
95868 if (start < floor)
95869 return;
95870@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95871 pmd = pmd_offset(pud, start);
95872 pud_clear(pud);
95873 pmd_free_tlb(tlb, pmd, start);
95874+#endif
95875+
95876 }
95877
95878 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95879@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95880 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
95881 } while (pud++, addr = next, addr != end);
95882
95883+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
95884 start &= PGDIR_MASK;
95885 if (start < floor)
95886 return;
95887@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95888 pud = pud_offset(pgd, start);
95889 pgd_clear(pgd);
95890 pud_free_tlb(tlb, pud, start);
95891+#endif
95892+
95893 }
95894
95895 /*
95896@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
95897 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
95898 */
95899 if (vma->vm_ops)
95900- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
95901+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
95902 vma->vm_ops->fault);
95903 if (vma->vm_file)
95904- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
95905+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
95906 vma->vm_file->f_op->mmap);
95907 dump_stack();
95908 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
95909@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
95910 page_add_file_rmap(page);
95911 set_pte_at(mm, addr, pte, mk_pte(page, prot));
95912
95913+#ifdef CONFIG_PAX_SEGMEXEC
95914+ pax_mirror_file_pte(vma, addr, page, ptl);
95915+#endif
95916+
95917 retval = 0;
95918 pte_unmap_unlock(pte, ptl);
95919 return retval;
95920@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
95921 if (!page_count(page))
95922 return -EINVAL;
95923 if (!(vma->vm_flags & VM_MIXEDMAP)) {
95924+
95925+#ifdef CONFIG_PAX_SEGMEXEC
95926+ struct vm_area_struct *vma_m;
95927+#endif
95928+
95929 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
95930 BUG_ON(vma->vm_flags & VM_PFNMAP);
95931 vma->vm_flags |= VM_MIXEDMAP;
95932+
95933+#ifdef CONFIG_PAX_SEGMEXEC
95934+ vma_m = pax_find_mirror_vma(vma);
95935+ if (vma_m)
95936+ vma_m->vm_flags |= VM_MIXEDMAP;
95937+#endif
95938+
95939 }
95940 return insert_page(vma, addr, page, vma->vm_page_prot);
95941 }
95942@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
95943 unsigned long pfn)
95944 {
95945 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
95946+ BUG_ON(vma->vm_mirror);
95947
95948 if (addr < vma->vm_start || addr >= vma->vm_end)
95949 return -EFAULT;
95950@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
95951
95952 BUG_ON(pud_huge(*pud));
95953
95954- pmd = pmd_alloc(mm, pud, addr);
95955+ pmd = (mm == &init_mm) ?
95956+ pmd_alloc_kernel(mm, pud, addr) :
95957+ pmd_alloc(mm, pud, addr);
95958 if (!pmd)
95959 return -ENOMEM;
95960 do {
95961@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
95962 unsigned long next;
95963 int err;
95964
95965- pud = pud_alloc(mm, pgd, addr);
95966+ pud = (mm == &init_mm) ?
95967+ pud_alloc_kernel(mm, pgd, addr) :
95968+ pud_alloc(mm, pgd, addr);
95969 if (!pud)
95970 return -ENOMEM;
95971 do {
95972@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
95973 return ret;
95974 }
95975
95976+#ifdef CONFIG_PAX_SEGMEXEC
95977+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
95978+{
95979+ struct mm_struct *mm = vma->vm_mm;
95980+ spinlock_t *ptl;
95981+ pte_t *pte, entry;
95982+
95983+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
95984+ entry = *pte;
95985+ if (!pte_present(entry)) {
95986+ if (!pte_none(entry)) {
95987+ BUG_ON(pte_file(entry));
95988+ free_swap_and_cache(pte_to_swp_entry(entry));
95989+ pte_clear_not_present_full(mm, address, pte, 0);
95990+ }
95991+ } else {
95992+ struct page *page;
95993+
95994+ flush_cache_page(vma, address, pte_pfn(entry));
95995+ entry = ptep_clear_flush(vma, address, pte);
95996+ BUG_ON(pte_dirty(entry));
95997+ page = vm_normal_page(vma, address, entry);
95998+ if (page) {
95999+ update_hiwater_rss(mm);
96000+ if (PageAnon(page))
96001+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96002+ else
96003+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96004+ page_remove_rmap(page);
96005+ page_cache_release(page);
96006+ }
96007+ }
96008+ pte_unmap_unlock(pte, ptl);
96009+}
96010+
96011+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96012+ *
96013+ * the ptl of the lower mapped page is held on entry and is not released on exit
96014+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96015+ */
96016+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96017+{
96018+ struct mm_struct *mm = vma->vm_mm;
96019+ unsigned long address_m;
96020+ spinlock_t *ptl_m;
96021+ struct vm_area_struct *vma_m;
96022+ pmd_t *pmd_m;
96023+ pte_t *pte_m, entry_m;
96024+
96025+ BUG_ON(!page_m || !PageAnon(page_m));
96026+
96027+ vma_m = pax_find_mirror_vma(vma);
96028+ if (!vma_m)
96029+ return;
96030+
96031+ BUG_ON(!PageLocked(page_m));
96032+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96033+ address_m = address + SEGMEXEC_TASK_SIZE;
96034+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96035+ pte_m = pte_offset_map(pmd_m, address_m);
96036+ ptl_m = pte_lockptr(mm, pmd_m);
96037+ if (ptl != ptl_m) {
96038+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96039+ if (!pte_none(*pte_m))
96040+ goto out;
96041+ }
96042+
96043+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96044+ page_cache_get(page_m);
96045+ page_add_anon_rmap(page_m, vma_m, address_m);
96046+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96047+ set_pte_at(mm, address_m, pte_m, entry_m);
96048+ update_mmu_cache(vma_m, address_m, pte_m);
96049+out:
96050+ if (ptl != ptl_m)
96051+ spin_unlock(ptl_m);
96052+ pte_unmap(pte_m);
96053+ unlock_page(page_m);
96054+}
96055+
96056+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96057+{
96058+ struct mm_struct *mm = vma->vm_mm;
96059+ unsigned long address_m;
96060+ spinlock_t *ptl_m;
96061+ struct vm_area_struct *vma_m;
96062+ pmd_t *pmd_m;
96063+ pte_t *pte_m, entry_m;
96064+
96065+ BUG_ON(!page_m || PageAnon(page_m));
96066+
96067+ vma_m = pax_find_mirror_vma(vma);
96068+ if (!vma_m)
96069+ return;
96070+
96071+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96072+ address_m = address + SEGMEXEC_TASK_SIZE;
96073+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96074+ pte_m = pte_offset_map(pmd_m, address_m);
96075+ ptl_m = pte_lockptr(mm, pmd_m);
96076+ if (ptl != ptl_m) {
96077+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96078+ if (!pte_none(*pte_m))
96079+ goto out;
96080+ }
96081+
96082+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96083+ page_cache_get(page_m);
96084+ page_add_file_rmap(page_m);
96085+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96086+ set_pte_at(mm, address_m, pte_m, entry_m);
96087+ update_mmu_cache(vma_m, address_m, pte_m);
96088+out:
96089+ if (ptl != ptl_m)
96090+ spin_unlock(ptl_m);
96091+ pte_unmap(pte_m);
96092+}
96093+
96094+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96095+{
96096+ struct mm_struct *mm = vma->vm_mm;
96097+ unsigned long address_m;
96098+ spinlock_t *ptl_m;
96099+ struct vm_area_struct *vma_m;
96100+ pmd_t *pmd_m;
96101+ pte_t *pte_m, entry_m;
96102+
96103+ vma_m = pax_find_mirror_vma(vma);
96104+ if (!vma_m)
96105+ return;
96106+
96107+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96108+ address_m = address + SEGMEXEC_TASK_SIZE;
96109+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96110+ pte_m = pte_offset_map(pmd_m, address_m);
96111+ ptl_m = pte_lockptr(mm, pmd_m);
96112+ if (ptl != ptl_m) {
96113+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96114+ if (!pte_none(*pte_m))
96115+ goto out;
96116+ }
96117+
96118+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96119+ set_pte_at(mm, address_m, pte_m, entry_m);
96120+out:
96121+ if (ptl != ptl_m)
96122+ spin_unlock(ptl_m);
96123+ pte_unmap(pte_m);
96124+}
96125+
96126+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96127+{
96128+ struct page *page_m;
96129+ pte_t entry;
96130+
96131+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96132+ goto out;
96133+
96134+ entry = *pte;
96135+ page_m = vm_normal_page(vma, address, entry);
96136+ if (!page_m)
96137+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96138+ else if (PageAnon(page_m)) {
96139+ if (pax_find_mirror_vma(vma)) {
96140+ pte_unmap_unlock(pte, ptl);
96141+ lock_page(page_m);
96142+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96143+ if (pte_same(entry, *pte))
96144+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96145+ else
96146+ unlock_page(page_m);
96147+ }
96148+ } else
96149+ pax_mirror_file_pte(vma, address, page_m, ptl);
96150+
96151+out:
96152+ pte_unmap_unlock(pte, ptl);
96153+}
96154+#endif
96155+
96156 /*
96157 * This routine handles present pages, when users try to write
96158 * to a shared page. It is done by copying the page to a new address
96159@@ -2212,6 +2419,12 @@ gotten:
96160 */
96161 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96162 if (likely(pte_same(*page_table, orig_pte))) {
96163+
96164+#ifdef CONFIG_PAX_SEGMEXEC
96165+ if (pax_find_mirror_vma(vma))
96166+ BUG_ON(!trylock_page(new_page));
96167+#endif
96168+
96169 if (old_page) {
96170 if (!PageAnon(old_page)) {
96171 dec_mm_counter_fast(mm, MM_FILEPAGES);
96172@@ -2265,6 +2478,10 @@ gotten:
96173 page_remove_rmap(old_page);
96174 }
96175
96176+#ifdef CONFIG_PAX_SEGMEXEC
96177+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96178+#endif
96179+
96180 /* Free the old page.. */
96181 new_page = old_page;
96182 ret |= VM_FAULT_WRITE;
96183@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96184 swap_free(entry);
96185 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96186 try_to_free_swap(page);
96187+
96188+#ifdef CONFIG_PAX_SEGMEXEC
96189+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96190+#endif
96191+
96192 unlock_page(page);
96193 if (page != swapcache) {
96194 /*
96195@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96196
96197 /* No need to invalidate - it was non-present before */
96198 update_mmu_cache(vma, address, page_table);
96199+
96200+#ifdef CONFIG_PAX_SEGMEXEC
96201+ pax_mirror_anon_pte(vma, address, page, ptl);
96202+#endif
96203+
96204 unlock:
96205 pte_unmap_unlock(page_table, ptl);
96206 out:
96207@@ -2581,40 +2808,6 @@ out_release:
96208 }
96209
96210 /*
96211- * This is like a special single-page "expand_{down|up}wards()",
96212- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96213- * doesn't hit another vma.
96214- */
96215-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96216-{
96217- address &= PAGE_MASK;
96218- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96219- struct vm_area_struct *prev = vma->vm_prev;
96220-
96221- /*
96222- * Is there a mapping abutting this one below?
96223- *
96224- * That's only ok if it's the same stack mapping
96225- * that has gotten split..
96226- */
96227- if (prev && prev->vm_end == address)
96228- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96229-
96230- return expand_downwards(vma, address - PAGE_SIZE);
96231- }
96232- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96233- struct vm_area_struct *next = vma->vm_next;
96234-
96235- /* As VM_GROWSDOWN but s/below/above/ */
96236- if (next && next->vm_start == address + PAGE_SIZE)
96237- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96238-
96239- return expand_upwards(vma, address + PAGE_SIZE);
96240- }
96241- return 0;
96242-}
96243-
96244-/*
96245 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96246 * but allow concurrent faults), and pte mapped but not yet locked.
96247 * We return with mmap_sem still held, but pte unmapped and unlocked.
96248@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96249 unsigned int flags)
96250 {
96251 struct mem_cgroup *memcg;
96252- struct page *page;
96253+ struct page *page = NULL;
96254 spinlock_t *ptl;
96255 pte_t entry;
96256
96257- pte_unmap(page_table);
96258-
96259- /* Check if we need to add a guard page to the stack */
96260- if (check_stack_guard_page(vma, address) < 0)
96261- return VM_FAULT_SIGSEGV;
96262-
96263- /* Use the zero-page for reads */
96264 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
96265 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96266 vma->vm_page_prot));
96267- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96268+ ptl = pte_lockptr(mm, pmd);
96269+ spin_lock(ptl);
96270 if (!pte_none(*page_table))
96271 goto unlock;
96272 goto setpte;
96273 }
96274
96275 /* Allocate our own private page. */
96276+ pte_unmap(page_table);
96277+
96278 if (unlikely(anon_vma_prepare(vma)))
96279 goto oom;
96280 page = alloc_zeroed_user_highpage_movable(vma, address);
96281@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96282 if (!pte_none(*page_table))
96283 goto release;
96284
96285+#ifdef CONFIG_PAX_SEGMEXEC
96286+ if (pax_find_mirror_vma(vma))
96287+ BUG_ON(!trylock_page(page));
96288+#endif
96289+
96290 inc_mm_counter_fast(mm, MM_ANONPAGES);
96291 page_add_new_anon_rmap(page, vma, address);
96292 mem_cgroup_commit_charge(page, memcg, false);
96293@@ -2677,6 +2871,12 @@ setpte:
96294
96295 /* No need to invalidate - it was non-present before */
96296 update_mmu_cache(vma, address, page_table);
96297+
96298+#ifdef CONFIG_PAX_SEGMEXEC
96299+ if (page)
96300+ pax_mirror_anon_pte(vma, address, page, ptl);
96301+#endif
96302+
96303 unlock:
96304 pte_unmap_unlock(page_table, ptl);
96305 return 0;
96306@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96307 return ret;
96308 }
96309 do_set_pte(vma, address, fault_page, pte, false, false);
96310+
96311+#ifdef CONFIG_PAX_SEGMEXEC
96312+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96313+#endif
96314+
96315 unlock_page(fault_page);
96316 unlock_out:
96317 pte_unmap_unlock(pte, ptl);
96318@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96319 page_cache_release(fault_page);
96320 goto uncharge_out;
96321 }
96322+
96323+#ifdef CONFIG_PAX_SEGMEXEC
96324+ if (pax_find_mirror_vma(vma))
96325+ BUG_ON(!trylock_page(new_page));
96326+#endif
96327+
96328 do_set_pte(vma, address, new_page, pte, true, true);
96329+
96330+#ifdef CONFIG_PAX_SEGMEXEC
96331+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96332+#endif
96333+
96334 mem_cgroup_commit_charge(new_page, memcg, false);
96335 lru_cache_add_active_or_unevictable(new_page, vma);
96336 pte_unmap_unlock(pte, ptl);
96337@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96338 return ret;
96339 }
96340 do_set_pte(vma, address, fault_page, pte, true, false);
96341+
96342+#ifdef CONFIG_PAX_SEGMEXEC
96343+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96344+#endif
96345+
96346 pte_unmap_unlock(pte, ptl);
96347
96348 if (set_page_dirty(fault_page))
96349@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96350 if (flags & FAULT_FLAG_WRITE)
96351 flush_tlb_fix_spurious_fault(vma, address);
96352 }
96353+
96354+#ifdef CONFIG_PAX_SEGMEXEC
96355+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96356+ return 0;
96357+#endif
96358+
96359 unlock:
96360 pte_unmap_unlock(pte, ptl);
96361 return 0;
96362@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96363 pmd_t *pmd;
96364 pte_t *pte;
96365
96366+#ifdef CONFIG_PAX_SEGMEXEC
96367+ struct vm_area_struct *vma_m;
96368+#endif
96369+
96370 if (unlikely(is_vm_hugetlb_page(vma)))
96371 return hugetlb_fault(mm, vma, address, flags);
96372
96373+#ifdef CONFIG_PAX_SEGMEXEC
96374+ vma_m = pax_find_mirror_vma(vma);
96375+ if (vma_m) {
96376+ unsigned long address_m;
96377+ pgd_t *pgd_m;
96378+ pud_t *pud_m;
96379+ pmd_t *pmd_m;
96380+
96381+ if (vma->vm_start > vma_m->vm_start) {
96382+ address_m = address;
96383+ address -= SEGMEXEC_TASK_SIZE;
96384+ vma = vma_m;
96385+ } else
96386+ address_m = address + SEGMEXEC_TASK_SIZE;
96387+
96388+ pgd_m = pgd_offset(mm, address_m);
96389+ pud_m = pud_alloc(mm, pgd_m, address_m);
96390+ if (!pud_m)
96391+ return VM_FAULT_OOM;
96392+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96393+ if (!pmd_m)
96394+ return VM_FAULT_OOM;
96395+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96396+ return VM_FAULT_OOM;
96397+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96398+ }
96399+#endif
96400+
96401 pgd = pgd_offset(mm, address);
96402 pud = pud_alloc(mm, pgd, address);
96403 if (!pud)
96404@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96405 spin_unlock(&mm->page_table_lock);
96406 return 0;
96407 }
96408+
96409+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96410+{
96411+ pud_t *new = pud_alloc_one(mm, address);
96412+ if (!new)
96413+ return -ENOMEM;
96414+
96415+ smp_wmb(); /* See comment in __pte_alloc */
96416+
96417+ spin_lock(&mm->page_table_lock);
96418+ if (pgd_present(*pgd)) /* Another has populated it */
96419+ pud_free(mm, new);
96420+ else
96421+ pgd_populate_kernel(mm, pgd, new);
96422+ spin_unlock(&mm->page_table_lock);
96423+ return 0;
96424+}
96425 #endif /* __PAGETABLE_PUD_FOLDED */
96426
96427 #ifndef __PAGETABLE_PMD_FOLDED
96428@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96429 spin_unlock(&mm->page_table_lock);
96430 return 0;
96431 }
96432+
96433+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96434+{
96435+ pmd_t *new = pmd_alloc_one(mm, address);
96436+ if (!new)
96437+ return -ENOMEM;
96438+
96439+ smp_wmb(); /* See comment in __pte_alloc */
96440+
96441+ spin_lock(&mm->page_table_lock);
96442+#ifndef __ARCH_HAS_4LEVEL_HACK
96443+ if (pud_present(*pud)) /* Another has populated it */
96444+ pmd_free(mm, new);
96445+ else
96446+ pud_populate_kernel(mm, pud, new);
96447+#else
96448+ if (pgd_present(*pud)) /* Another has populated it */
96449+ pmd_free(mm, new);
96450+ else
96451+ pgd_populate_kernel(mm, pud, new);
96452+#endif /* __ARCH_HAS_4LEVEL_HACK */
96453+ spin_unlock(&mm->page_table_lock);
96454+ return 0;
96455+}
96456 #endif /* __PAGETABLE_PMD_FOLDED */
96457
96458 static int __follow_pte(struct mm_struct *mm, unsigned long address,
96459@@ -3550,8 +3850,8 @@ out:
96460 return ret;
96461 }
96462
96463-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96464- void *buf, int len, int write)
96465+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96466+ void *buf, size_t len, int write)
96467 {
96468 resource_size_t phys_addr;
96469 unsigned long prot = 0;
96470@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96471 * Access another process' address space as given in mm. If non-NULL, use the
96472 * given task for page fault accounting.
96473 */
96474-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96475- unsigned long addr, void *buf, int len, int write)
96476+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96477+ unsigned long addr, void *buf, size_t len, int write)
96478 {
96479 struct vm_area_struct *vma;
96480 void *old_buf = buf;
96481@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96482 down_read(&mm->mmap_sem);
96483 /* ignore errors, just check how much was successfully transferred */
96484 while (len) {
96485- int bytes, ret, offset;
96486+ ssize_t bytes, ret, offset;
96487 void *maddr;
96488 struct page *page = NULL;
96489
96490@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96491 *
96492 * The caller must hold a reference on @mm.
96493 */
96494-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96495- void *buf, int len, int write)
96496+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96497+ void *buf, size_t len, int write)
96498 {
96499 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96500 }
96501@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96502 * Source/target buffer must be kernel space,
96503 * Do not walk the page table directly, use get_user_pages
96504 */
96505-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96506- void *buf, int len, int write)
96507+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96508+ void *buf, size_t len, int write)
96509 {
96510 struct mm_struct *mm;
96511- int ret;
96512+ ssize_t ret;
96513
96514 mm = get_task_mm(tsk);
96515 if (!mm)
96516diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96517index 0e0961b..c9143b9 100644
96518--- a/mm/mempolicy.c
96519+++ b/mm/mempolicy.c
96520@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96521 unsigned long vmstart;
96522 unsigned long vmend;
96523
96524+#ifdef CONFIG_PAX_SEGMEXEC
96525+ struct vm_area_struct *vma_m;
96526+#endif
96527+
96528 vma = find_vma(mm, start);
96529 if (!vma || vma->vm_start > start)
96530 return -EFAULT;
96531@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96532 err = vma_replace_policy(vma, new_pol);
96533 if (err)
96534 goto out;
96535+
96536+#ifdef CONFIG_PAX_SEGMEXEC
96537+ vma_m = pax_find_mirror_vma(vma);
96538+ if (vma_m) {
96539+ err = vma_replace_policy(vma_m, new_pol);
96540+ if (err)
96541+ goto out;
96542+ }
96543+#endif
96544+
96545 }
96546
96547 out:
96548@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96549
96550 if (end < start)
96551 return -EINVAL;
96552+
96553+#ifdef CONFIG_PAX_SEGMEXEC
96554+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96555+ if (end > SEGMEXEC_TASK_SIZE)
96556+ return -EINVAL;
96557+ } else
96558+#endif
96559+
96560+ if (end > TASK_SIZE)
96561+ return -EINVAL;
96562+
96563 if (end == start)
96564 return 0;
96565
96566@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96567 */
96568 tcred = __task_cred(task);
96569 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96570- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96571- !capable(CAP_SYS_NICE)) {
96572+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96573 rcu_read_unlock();
96574 err = -EPERM;
96575 goto out_put;
96576@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96577 goto out;
96578 }
96579
96580+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96581+ if (mm != current->mm &&
96582+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96583+ mmput(mm);
96584+ err = -EPERM;
96585+ goto out;
96586+ }
96587+#endif
96588+
96589 err = do_migrate_pages(mm, old, new,
96590 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96591
96592diff --git a/mm/migrate.c b/mm/migrate.c
96593index 344cdf6..07399500 100644
96594--- a/mm/migrate.c
96595+++ b/mm/migrate.c
96596@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96597 */
96598 tcred = __task_cred(task);
96599 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96600- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96601- !capable(CAP_SYS_NICE)) {
96602+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96603 rcu_read_unlock();
96604 err = -EPERM;
96605 goto out;
96606diff --git a/mm/mlock.c b/mm/mlock.c
96607index 73cf098..ab547c7 100644
96608--- a/mm/mlock.c
96609+++ b/mm/mlock.c
96610@@ -14,6 +14,7 @@
96611 #include <linux/pagevec.h>
96612 #include <linux/mempolicy.h>
96613 #include <linux/syscalls.h>
96614+#include <linux/security.h>
96615 #include <linux/sched.h>
96616 #include <linux/export.h>
96617 #include <linux/rmap.h>
96618@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96619 {
96620 unsigned long nstart, end, tmp;
96621 struct vm_area_struct * vma, * prev;
96622- int error;
96623+ int error = 0;
96624
96625 VM_BUG_ON(start & ~PAGE_MASK);
96626 VM_BUG_ON(len != PAGE_ALIGN(len));
96627@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96628 return -EINVAL;
96629 if (end == start)
96630 return 0;
96631+ if (end > TASK_SIZE)
96632+ return -EINVAL;
96633+
96634 vma = find_vma(current->mm, start);
96635 if (!vma || vma->vm_start > start)
96636 return -ENOMEM;
96637@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96638 for (nstart = start ; ; ) {
96639 vm_flags_t newflags;
96640
96641+#ifdef CONFIG_PAX_SEGMEXEC
96642+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96643+ break;
96644+#endif
96645+
96646 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96647
96648 newflags = vma->vm_flags & ~VM_LOCKED;
96649@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96650 locked += current->mm->locked_vm;
96651
96652 /* check against resource limits */
96653+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96654 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96655 error = do_mlock(start, len, 1);
96656
96657@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96658 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96659 vm_flags_t newflags;
96660
96661+#ifdef CONFIG_PAX_SEGMEXEC
96662+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96663+ break;
96664+#endif
96665+
96666 newflags = vma->vm_flags & ~VM_LOCKED;
96667 if (flags & MCL_CURRENT)
96668 newflags |= VM_LOCKED;
96669@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96670 lock_limit >>= PAGE_SHIFT;
96671
96672 ret = -ENOMEM;
96673+
96674+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96675+
96676 down_write(&current->mm->mmap_sem);
96677-
96678 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96679 capable(CAP_IPC_LOCK))
96680 ret = do_mlockall(flags);
96681diff --git a/mm/mmap.c b/mm/mmap.c
96682index e5cc3ca..bb9333f 100644
96683--- a/mm/mmap.c
96684+++ b/mm/mmap.c
96685@@ -41,6 +41,7 @@
96686 #include <linux/notifier.h>
96687 #include <linux/memory.h>
96688 #include <linux/printk.h>
96689+#include <linux/random.h>
96690
96691 #include <asm/uaccess.h>
96692 #include <asm/cacheflush.h>
96693@@ -57,6 +58,16 @@
96694 #define arch_rebalance_pgtables(addr, len) (addr)
96695 #endif
96696
96697+static inline void verify_mm_writelocked(struct mm_struct *mm)
96698+{
96699+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96700+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96701+ up_read(&mm->mmap_sem);
96702+ BUG();
96703+ }
96704+#endif
96705+}
96706+
96707 static void unmap_region(struct mm_struct *mm,
96708 struct vm_area_struct *vma, struct vm_area_struct *prev,
96709 unsigned long start, unsigned long end);
96710@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96711 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96712 *
96713 */
96714-pgprot_t protection_map[16] = {
96715+pgprot_t protection_map[16] __read_only = {
96716 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96717 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96718 };
96719
96720-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96721+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96722 {
96723- return __pgprot(pgprot_val(protection_map[vm_flags &
96724+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96725 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96726 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96727+
96728+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96729+ if (!(__supported_pte_mask & _PAGE_NX) &&
96730+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96731+ (vm_flags & (VM_READ | VM_WRITE)))
96732+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96733+#endif
96734+
96735+ return prot;
96736 }
96737 EXPORT_SYMBOL(vm_get_page_prot);
96738
96739@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
96740 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96741 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
96742 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
96743+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96744 /*
96745 * Make sure vm_committed_as in one cacheline and not cacheline shared with
96746 * other variables. It can be updated by several CPUs frequently.
96747@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96748 struct vm_area_struct *next = vma->vm_next;
96749
96750 might_sleep();
96751+ BUG_ON(vma->vm_mirror);
96752 if (vma->vm_ops && vma->vm_ops->close)
96753 vma->vm_ops->close(vma);
96754 if (vma->vm_file)
96755@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
96756
96757 SYSCALL_DEFINE1(brk, unsigned long, brk)
96758 {
96759+ unsigned long rlim;
96760 unsigned long retval;
96761 unsigned long newbrk, oldbrk;
96762 struct mm_struct *mm = current->mm;
96763@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96764 * segment grow beyond its set limit the in case where the limit is
96765 * not page aligned -Ram Gupta
96766 */
96767- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
96768+ rlim = rlimit(RLIMIT_DATA);
96769+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96770+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
96771+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
96772+ rlim = 4096 * PAGE_SIZE;
96773+#endif
96774+ if (check_data_rlimit(rlim, brk, mm->start_brk,
96775 mm->end_data, mm->start_data))
96776 goto out;
96777
96778@@ -978,6 +1007,12 @@ static int
96779 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96780 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96781 {
96782+
96783+#ifdef CONFIG_PAX_SEGMEXEC
96784+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96785+ return 0;
96786+#endif
96787+
96788 if (is_mergeable_vma(vma, file, vm_flags) &&
96789 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96790 if (vma->vm_pgoff == vm_pgoff)
96791@@ -997,6 +1032,12 @@ static int
96792 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96793 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96794 {
96795+
96796+#ifdef CONFIG_PAX_SEGMEXEC
96797+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96798+ return 0;
96799+#endif
96800+
96801 if (is_mergeable_vma(vma, file, vm_flags) &&
96802 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96803 pgoff_t vm_pglen;
96804@@ -1046,6 +1087,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96805 struct vm_area_struct *area, *next;
96806 int err;
96807
96808+#ifdef CONFIG_PAX_SEGMEXEC
96809+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96810+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96811+
96812+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96813+#endif
96814+
96815 /*
96816 * We later require that vma->vm_flags == vm_flags,
96817 * so this tests vma->vm_flags & VM_SPECIAL, too.
96818@@ -1061,6 +1109,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96819 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96820 next = next->vm_next;
96821
96822+#ifdef CONFIG_PAX_SEGMEXEC
96823+ if (prev)
96824+ prev_m = pax_find_mirror_vma(prev);
96825+ if (area)
96826+ area_m = pax_find_mirror_vma(area);
96827+ if (next)
96828+ next_m = pax_find_mirror_vma(next);
96829+#endif
96830+
96831 /*
96832 * Can it merge with the predecessor?
96833 */
96834@@ -1080,9 +1137,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96835 /* cases 1, 6 */
96836 err = vma_adjust(prev, prev->vm_start,
96837 next->vm_end, prev->vm_pgoff, NULL);
96838- } else /* cases 2, 5, 7 */
96839+
96840+#ifdef CONFIG_PAX_SEGMEXEC
96841+ if (!err && prev_m)
96842+ err = vma_adjust(prev_m, prev_m->vm_start,
96843+ next_m->vm_end, prev_m->vm_pgoff, NULL);
96844+#endif
96845+
96846+ } else { /* cases 2, 5, 7 */
96847 err = vma_adjust(prev, prev->vm_start,
96848 end, prev->vm_pgoff, NULL);
96849+
96850+#ifdef CONFIG_PAX_SEGMEXEC
96851+ if (!err && prev_m)
96852+ err = vma_adjust(prev_m, prev_m->vm_start,
96853+ end_m, prev_m->vm_pgoff, NULL);
96854+#endif
96855+
96856+ }
96857 if (err)
96858 return NULL;
96859 khugepaged_enter_vma_merge(prev, vm_flags);
96860@@ -1096,12 +1168,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96861 mpol_equal(policy, vma_policy(next)) &&
96862 can_vma_merge_before(next, vm_flags,
96863 anon_vma, file, pgoff+pglen)) {
96864- if (prev && addr < prev->vm_end) /* case 4 */
96865+ if (prev && addr < prev->vm_end) { /* case 4 */
96866 err = vma_adjust(prev, prev->vm_start,
96867 addr, prev->vm_pgoff, NULL);
96868- else /* cases 3, 8 */
96869+
96870+#ifdef CONFIG_PAX_SEGMEXEC
96871+ if (!err && prev_m)
96872+ err = vma_adjust(prev_m, prev_m->vm_start,
96873+ addr_m, prev_m->vm_pgoff, NULL);
96874+#endif
96875+
96876+ } else { /* cases 3, 8 */
96877 err = vma_adjust(area, addr, next->vm_end,
96878 next->vm_pgoff - pglen, NULL);
96879+
96880+#ifdef CONFIG_PAX_SEGMEXEC
96881+ if (!err && area_m)
96882+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
96883+ next_m->vm_pgoff - pglen, NULL);
96884+#endif
96885+
96886+ }
96887 if (err)
96888 return NULL;
96889 khugepaged_enter_vma_merge(area, vm_flags);
96890@@ -1210,8 +1297,10 @@ none:
96891 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96892 struct file *file, long pages)
96893 {
96894- const unsigned long stack_flags
96895- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
96896+
96897+#ifdef CONFIG_PAX_RANDMMAP
96898+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
96899+#endif
96900
96901 mm->total_vm += pages;
96902
96903@@ -1219,7 +1308,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96904 mm->shared_vm += pages;
96905 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
96906 mm->exec_vm += pages;
96907- } else if (flags & stack_flags)
96908+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
96909 mm->stack_vm += pages;
96910 }
96911 #endif /* CONFIG_PROC_FS */
96912@@ -1249,6 +1338,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
96913 locked += mm->locked_vm;
96914 lock_limit = rlimit(RLIMIT_MEMLOCK);
96915 lock_limit >>= PAGE_SHIFT;
96916+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
96917 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
96918 return -EAGAIN;
96919 }
96920@@ -1275,7 +1365,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96921 * (the exception is when the underlying filesystem is noexec
96922 * mounted, in which case we dont add PROT_EXEC.)
96923 */
96924- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
96925+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
96926 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
96927 prot |= PROT_EXEC;
96928
96929@@ -1301,7 +1391,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96930 /* Obtain the address to map to. we verify (or select) it and ensure
96931 * that it represents a valid section of the address space.
96932 */
96933- addr = get_unmapped_area(file, addr, len, pgoff, flags);
96934+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
96935 if (addr & ~PAGE_MASK)
96936 return addr;
96937
96938@@ -1312,6 +1402,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96939 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
96940 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
96941
96942+#ifdef CONFIG_PAX_MPROTECT
96943+ if (mm->pax_flags & MF_PAX_MPROTECT) {
96944+
96945+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
96946+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
96947+ mm->binfmt->handle_mmap)
96948+ mm->binfmt->handle_mmap(file);
96949+#endif
96950+
96951+#ifndef CONFIG_PAX_MPROTECT_COMPAT
96952+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
96953+ gr_log_rwxmmap(file);
96954+
96955+#ifdef CONFIG_PAX_EMUPLT
96956+ vm_flags &= ~VM_EXEC;
96957+#else
96958+ return -EPERM;
96959+#endif
96960+
96961+ }
96962+
96963+ if (!(vm_flags & VM_EXEC))
96964+ vm_flags &= ~VM_MAYEXEC;
96965+#else
96966+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
96967+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
96968+#endif
96969+ else
96970+ vm_flags &= ~VM_MAYWRITE;
96971+ }
96972+#endif
96973+
96974+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96975+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
96976+ vm_flags &= ~VM_PAGEEXEC;
96977+#endif
96978+
96979 if (flags & MAP_LOCKED)
96980 if (!can_do_mlock())
96981 return -EPERM;
96982@@ -1399,6 +1526,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96983 vm_flags |= VM_NORESERVE;
96984 }
96985
96986+ if (!gr_acl_handle_mmap(file, prot))
96987+ return -EACCES;
96988+
96989 addr = mmap_region(file, addr, len, vm_flags, pgoff);
96990 if (!IS_ERR_VALUE(addr) &&
96991 ((vm_flags & VM_LOCKED) ||
96992@@ -1492,7 +1622,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
96993 vm_flags_t vm_flags = vma->vm_flags;
96994
96995 /* If it was private or non-writable, the write bit is already clear */
96996- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
96997+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
96998 return 0;
96999
97000 /* The backer wishes to know when pages are first written to? */
97001@@ -1543,7 +1673,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97002 struct rb_node **rb_link, *rb_parent;
97003 unsigned long charged = 0;
97004
97005+#ifdef CONFIG_PAX_SEGMEXEC
97006+ struct vm_area_struct *vma_m = NULL;
97007+#endif
97008+
97009+ /*
97010+ * mm->mmap_sem is required to protect against another thread
97011+ * changing the mappings in case we sleep.
97012+ */
97013+ verify_mm_writelocked(mm);
97014+
97015 /* Check against address space limit. */
97016+
97017+#ifdef CONFIG_PAX_RANDMMAP
97018+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97019+#endif
97020+
97021 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97022 unsigned long nr_pages;
97023
97024@@ -1562,11 +1707,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97025
97026 /* Clear old maps */
97027 error = -ENOMEM;
97028-munmap_back:
97029 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97030 if (do_munmap(mm, addr, len))
97031 return -ENOMEM;
97032- goto munmap_back;
97033+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97034 }
97035
97036 /*
97037@@ -1597,6 +1741,16 @@ munmap_back:
97038 goto unacct_error;
97039 }
97040
97041+#ifdef CONFIG_PAX_SEGMEXEC
97042+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97043+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97044+ if (!vma_m) {
97045+ error = -ENOMEM;
97046+ goto free_vma;
97047+ }
97048+ }
97049+#endif
97050+
97051 vma->vm_mm = mm;
97052 vma->vm_start = addr;
97053 vma->vm_end = addr + len;
97054@@ -1627,6 +1781,13 @@ munmap_back:
97055 if (error)
97056 goto unmap_and_free_vma;
97057
97058+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97059+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97060+ vma->vm_flags |= VM_PAGEEXEC;
97061+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97062+ }
97063+#endif
97064+
97065 /* Can addr have changed??
97066 *
97067 * Answer: Yes, several device drivers can do it in their
97068@@ -1645,6 +1806,12 @@ munmap_back:
97069 }
97070
97071 vma_link(mm, vma, prev, rb_link, rb_parent);
97072+
97073+#ifdef CONFIG_PAX_SEGMEXEC
97074+ if (vma_m)
97075+ BUG_ON(pax_mirror_vma(vma_m, vma));
97076+#endif
97077+
97078 /* Once vma denies write, undo our temporary denial count */
97079 if (file) {
97080 if (vm_flags & VM_SHARED)
97081@@ -1657,6 +1824,7 @@ out:
97082 perf_event_mmap(vma);
97083
97084 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97085+ track_exec_limit(mm, addr, addr + len, vm_flags);
97086 if (vm_flags & VM_LOCKED) {
97087 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97088 vma == get_gate_vma(current->mm)))
97089@@ -1694,6 +1862,12 @@ allow_write_and_free_vma:
97090 if (vm_flags & VM_DENYWRITE)
97091 allow_write_access(file);
97092 free_vma:
97093+
97094+#ifdef CONFIG_PAX_SEGMEXEC
97095+ if (vma_m)
97096+ kmem_cache_free(vm_area_cachep, vma_m);
97097+#endif
97098+
97099 kmem_cache_free(vm_area_cachep, vma);
97100 unacct_error:
97101 if (charged)
97102@@ -1701,7 +1875,63 @@ unacct_error:
97103 return error;
97104 }
97105
97106-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97107+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97108+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97109+{
97110+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97111+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97112+
97113+ return 0;
97114+}
97115+#endif
97116+
97117+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97118+{
97119+ if (!vma) {
97120+#ifdef CONFIG_STACK_GROWSUP
97121+ if (addr > sysctl_heap_stack_gap)
97122+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97123+ else
97124+ vma = find_vma(current->mm, 0);
97125+ if (vma && (vma->vm_flags & VM_GROWSUP))
97126+ return false;
97127+#endif
97128+ return true;
97129+ }
97130+
97131+ if (addr + len > vma->vm_start)
97132+ return false;
97133+
97134+ if (vma->vm_flags & VM_GROWSDOWN)
97135+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97136+#ifdef CONFIG_STACK_GROWSUP
97137+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97138+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97139+#endif
97140+ else if (offset)
97141+ return offset <= vma->vm_start - addr - len;
97142+
97143+ return true;
97144+}
97145+
97146+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97147+{
97148+ if (vma->vm_start < len)
97149+ return -ENOMEM;
97150+
97151+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97152+ if (offset <= vma->vm_start - len)
97153+ return vma->vm_start - len - offset;
97154+ else
97155+ return -ENOMEM;
97156+ }
97157+
97158+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97159+ return vma->vm_start - len - sysctl_heap_stack_gap;
97160+ return -ENOMEM;
97161+}
97162+
97163+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97164 {
97165 /*
97166 * We implement the search by looking for an rbtree node that
97167@@ -1749,11 +1979,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97168 }
97169 }
97170
97171- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97172+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97173 check_current:
97174 /* Check if current node has a suitable gap */
97175 if (gap_start > high_limit)
97176 return -ENOMEM;
97177+
97178+ if (gap_end - gap_start > info->threadstack_offset)
97179+ gap_start += info->threadstack_offset;
97180+ else
97181+ gap_start = gap_end;
97182+
97183+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97184+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97185+ gap_start += sysctl_heap_stack_gap;
97186+ else
97187+ gap_start = gap_end;
97188+ }
97189+ if (vma->vm_flags & VM_GROWSDOWN) {
97190+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97191+ gap_end -= sysctl_heap_stack_gap;
97192+ else
97193+ gap_end = gap_start;
97194+ }
97195 if (gap_end >= low_limit && gap_end - gap_start >= length)
97196 goto found;
97197
97198@@ -1803,7 +2051,7 @@ found:
97199 return gap_start;
97200 }
97201
97202-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97203+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97204 {
97205 struct mm_struct *mm = current->mm;
97206 struct vm_area_struct *vma;
97207@@ -1857,6 +2105,24 @@ check_current:
97208 gap_end = vma->vm_start;
97209 if (gap_end < low_limit)
97210 return -ENOMEM;
97211+
97212+ if (gap_end - gap_start > info->threadstack_offset)
97213+ gap_end -= info->threadstack_offset;
97214+ else
97215+ gap_end = gap_start;
97216+
97217+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97218+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97219+ gap_start += sysctl_heap_stack_gap;
97220+ else
97221+ gap_start = gap_end;
97222+ }
97223+ if (vma->vm_flags & VM_GROWSDOWN) {
97224+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97225+ gap_end -= sysctl_heap_stack_gap;
97226+ else
97227+ gap_end = gap_start;
97228+ }
97229 if (gap_start <= high_limit && gap_end - gap_start >= length)
97230 goto found;
97231
97232@@ -1920,6 +2186,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97233 struct mm_struct *mm = current->mm;
97234 struct vm_area_struct *vma;
97235 struct vm_unmapped_area_info info;
97236+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97237
97238 if (len > TASK_SIZE - mmap_min_addr)
97239 return -ENOMEM;
97240@@ -1927,11 +2194,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97241 if (flags & MAP_FIXED)
97242 return addr;
97243
97244+#ifdef CONFIG_PAX_RANDMMAP
97245+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97246+#endif
97247+
97248 if (addr) {
97249 addr = PAGE_ALIGN(addr);
97250 vma = find_vma(mm, addr);
97251 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97252- (!vma || addr + len <= vma->vm_start))
97253+ check_heap_stack_gap(vma, addr, len, offset))
97254 return addr;
97255 }
97256
97257@@ -1940,6 +2211,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97258 info.low_limit = mm->mmap_base;
97259 info.high_limit = TASK_SIZE;
97260 info.align_mask = 0;
97261+ info.threadstack_offset = offset;
97262 return vm_unmapped_area(&info);
97263 }
97264 #endif
97265@@ -1958,6 +2230,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97266 struct mm_struct *mm = current->mm;
97267 unsigned long addr = addr0;
97268 struct vm_unmapped_area_info info;
97269+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97270
97271 /* requested length too big for entire address space */
97272 if (len > TASK_SIZE - mmap_min_addr)
97273@@ -1966,12 +2239,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97274 if (flags & MAP_FIXED)
97275 return addr;
97276
97277+#ifdef CONFIG_PAX_RANDMMAP
97278+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97279+#endif
97280+
97281 /* requesting a specific address */
97282 if (addr) {
97283 addr = PAGE_ALIGN(addr);
97284 vma = find_vma(mm, addr);
97285 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97286- (!vma || addr + len <= vma->vm_start))
97287+ check_heap_stack_gap(vma, addr, len, offset))
97288 return addr;
97289 }
97290
97291@@ -1980,6 +2257,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97292 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97293 info.high_limit = mm->mmap_base;
97294 info.align_mask = 0;
97295+ info.threadstack_offset = offset;
97296 addr = vm_unmapped_area(&info);
97297
97298 /*
97299@@ -1992,6 +2270,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97300 VM_BUG_ON(addr != -ENOMEM);
97301 info.flags = 0;
97302 info.low_limit = TASK_UNMAPPED_BASE;
97303+
97304+#ifdef CONFIG_PAX_RANDMMAP
97305+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97306+ info.low_limit += mm->delta_mmap;
97307+#endif
97308+
97309 info.high_limit = TASK_SIZE;
97310 addr = vm_unmapped_area(&info);
97311 }
97312@@ -2092,6 +2376,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97313 return vma;
97314 }
97315
97316+#ifdef CONFIG_PAX_SEGMEXEC
97317+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97318+{
97319+ struct vm_area_struct *vma_m;
97320+
97321+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97322+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97323+ BUG_ON(vma->vm_mirror);
97324+ return NULL;
97325+ }
97326+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97327+ vma_m = vma->vm_mirror;
97328+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97329+ BUG_ON(vma->vm_file != vma_m->vm_file);
97330+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97331+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97332+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97333+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97334+ return vma_m;
97335+}
97336+#endif
97337+
97338 /*
97339 * Verify that the stack growth is acceptable and
97340 * update accounting. This is shared with both the
97341@@ -2109,8 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97342
97343 /* Stack limit test */
97344 actual_size = size;
97345- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
97346- actual_size -= PAGE_SIZE;
97347+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
97348 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97349 return -ENOMEM;
97350
97351@@ -2121,6 +2426,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97352 locked = mm->locked_vm + grow;
97353 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97354 limit >>= PAGE_SHIFT;
97355+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97356 if (locked > limit && !capable(CAP_IPC_LOCK))
97357 return -ENOMEM;
97358 }
97359@@ -2150,37 +2456,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97360 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97361 * vma is the last one with address > vma->vm_end. Have to extend vma.
97362 */
97363+#ifndef CONFIG_IA64
97364+static
97365+#endif
97366 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97367 {
97368 int error;
97369+ bool locknext;
97370
97371 if (!(vma->vm_flags & VM_GROWSUP))
97372 return -EFAULT;
97373
97374+ /* Also guard against wrapping around to address 0. */
97375+ if (address < PAGE_ALIGN(address+1))
97376+ address = PAGE_ALIGN(address+1);
97377+ else
97378+ return -ENOMEM;
97379+
97380 /*
97381 * We must make sure the anon_vma is allocated
97382 * so that the anon_vma locking is not a noop.
97383 */
97384 if (unlikely(anon_vma_prepare(vma)))
97385 return -ENOMEM;
97386+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97387+ if (locknext && anon_vma_prepare(vma->vm_next))
97388+ return -ENOMEM;
97389 vma_lock_anon_vma(vma);
97390+ if (locknext)
97391+ vma_lock_anon_vma(vma->vm_next);
97392
97393 /*
97394 * vma->vm_start/vm_end cannot change under us because the caller
97395 * is required to hold the mmap_sem in read mode. We need the
97396- * anon_vma lock to serialize against concurrent expand_stacks.
97397- * Also guard against wrapping around to address 0.
97398+ * anon_vma locks to serialize against concurrent expand_stacks
97399+ * and expand_upwards.
97400 */
97401- if (address < PAGE_ALIGN(address+4))
97402- address = PAGE_ALIGN(address+4);
97403- else {
97404- vma_unlock_anon_vma(vma);
97405- return -ENOMEM;
97406- }
97407 error = 0;
97408
97409 /* Somebody else might have raced and expanded it already */
97410- if (address > vma->vm_end) {
97411+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97412+ error = -ENOMEM;
97413+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97414 unsigned long size, grow;
97415
97416 size = address - vma->vm_start;
97417@@ -2215,6 +2532,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97418 }
97419 }
97420 }
97421+ if (locknext)
97422+ vma_unlock_anon_vma(vma->vm_next);
97423 vma_unlock_anon_vma(vma);
97424 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97425 validate_mm(vma->vm_mm);
97426@@ -2229,6 +2548,8 @@ int expand_downwards(struct vm_area_struct *vma,
97427 unsigned long address)
97428 {
97429 int error;
97430+ bool lockprev = false;
97431+ struct vm_area_struct *prev;
97432
97433 /*
97434 * We must make sure the anon_vma is allocated
97435@@ -2242,6 +2563,15 @@ int expand_downwards(struct vm_area_struct *vma,
97436 if (error)
97437 return error;
97438
97439+ prev = vma->vm_prev;
97440+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97441+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97442+#endif
97443+ if (lockprev && anon_vma_prepare(prev))
97444+ return -ENOMEM;
97445+ if (lockprev)
97446+ vma_lock_anon_vma(prev);
97447+
97448 vma_lock_anon_vma(vma);
97449
97450 /*
97451@@ -2251,9 +2581,17 @@ int expand_downwards(struct vm_area_struct *vma,
97452 */
97453
97454 /* Somebody else might have raced and expanded it already */
97455- if (address < vma->vm_start) {
97456+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97457+ error = -ENOMEM;
97458+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97459 unsigned long size, grow;
97460
97461+#ifdef CONFIG_PAX_SEGMEXEC
97462+ struct vm_area_struct *vma_m;
97463+
97464+ vma_m = pax_find_mirror_vma(vma);
97465+#endif
97466+
97467 size = vma->vm_end - address;
97468 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97469
97470@@ -2278,13 +2616,27 @@ int expand_downwards(struct vm_area_struct *vma,
97471 vma->vm_pgoff -= grow;
97472 anon_vma_interval_tree_post_update_vma(vma);
97473 vma_gap_update(vma);
97474+
97475+#ifdef CONFIG_PAX_SEGMEXEC
97476+ if (vma_m) {
97477+ anon_vma_interval_tree_pre_update_vma(vma_m);
97478+ vma_m->vm_start -= grow << PAGE_SHIFT;
97479+ vma_m->vm_pgoff -= grow;
97480+ anon_vma_interval_tree_post_update_vma(vma_m);
97481+ vma_gap_update(vma_m);
97482+ }
97483+#endif
97484+
97485 spin_unlock(&vma->vm_mm->page_table_lock);
97486
97487+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97488 perf_event_mmap(vma);
97489 }
97490 }
97491 }
97492 vma_unlock_anon_vma(vma);
97493+ if (lockprev)
97494+ vma_unlock_anon_vma(prev);
97495 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97496 validate_mm(vma->vm_mm);
97497 return error;
97498@@ -2384,6 +2736,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97499 do {
97500 long nrpages = vma_pages(vma);
97501
97502+#ifdef CONFIG_PAX_SEGMEXEC
97503+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97504+ vma = remove_vma(vma);
97505+ continue;
97506+ }
97507+#endif
97508+
97509 if (vma->vm_flags & VM_ACCOUNT)
97510 nr_accounted += nrpages;
97511 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97512@@ -2428,6 +2787,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97513 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97514 vma->vm_prev = NULL;
97515 do {
97516+
97517+#ifdef CONFIG_PAX_SEGMEXEC
97518+ if (vma->vm_mirror) {
97519+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97520+ vma->vm_mirror->vm_mirror = NULL;
97521+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97522+ vma->vm_mirror = NULL;
97523+ }
97524+#endif
97525+
97526 vma_rb_erase(vma, &mm->mm_rb);
97527 mm->map_count--;
97528 tail_vma = vma;
97529@@ -2455,14 +2824,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97530 struct vm_area_struct *new;
97531 int err = -ENOMEM;
97532
97533+#ifdef CONFIG_PAX_SEGMEXEC
97534+ struct vm_area_struct *vma_m, *new_m = NULL;
97535+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97536+#endif
97537+
97538 if (is_vm_hugetlb_page(vma) && (addr &
97539 ~(huge_page_mask(hstate_vma(vma)))))
97540 return -EINVAL;
97541
97542+#ifdef CONFIG_PAX_SEGMEXEC
97543+ vma_m = pax_find_mirror_vma(vma);
97544+#endif
97545+
97546 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97547 if (!new)
97548 goto out_err;
97549
97550+#ifdef CONFIG_PAX_SEGMEXEC
97551+ if (vma_m) {
97552+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97553+ if (!new_m) {
97554+ kmem_cache_free(vm_area_cachep, new);
97555+ goto out_err;
97556+ }
97557+ }
97558+#endif
97559+
97560 /* most fields are the same, copy all, and then fixup */
97561 *new = *vma;
97562
97563@@ -2475,6 +2863,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97564 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97565 }
97566
97567+#ifdef CONFIG_PAX_SEGMEXEC
97568+ if (vma_m) {
97569+ *new_m = *vma_m;
97570+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97571+ new_m->vm_mirror = new;
97572+ new->vm_mirror = new_m;
97573+
97574+ if (new_below)
97575+ new_m->vm_end = addr_m;
97576+ else {
97577+ new_m->vm_start = addr_m;
97578+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97579+ }
97580+ }
97581+#endif
97582+
97583 err = vma_dup_policy(vma, new);
97584 if (err)
97585 goto out_free_vma;
97586@@ -2495,6 +2899,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97587 else
97588 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97589
97590+#ifdef CONFIG_PAX_SEGMEXEC
97591+ if (!err && vma_m) {
97592+ struct mempolicy *pol = vma_policy(new);
97593+
97594+ if (anon_vma_clone(new_m, vma_m))
97595+ goto out_free_mpol;
97596+
97597+ mpol_get(pol);
97598+ set_vma_policy(new_m, pol);
97599+
97600+ if (new_m->vm_file)
97601+ get_file(new_m->vm_file);
97602+
97603+ if (new_m->vm_ops && new_m->vm_ops->open)
97604+ new_m->vm_ops->open(new_m);
97605+
97606+ if (new_below)
97607+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97608+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97609+ else
97610+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97611+
97612+ if (err) {
97613+ if (new_m->vm_ops && new_m->vm_ops->close)
97614+ new_m->vm_ops->close(new_m);
97615+ if (new_m->vm_file)
97616+ fput(new_m->vm_file);
97617+ mpol_put(pol);
97618+ }
97619+ }
97620+#endif
97621+
97622 /* Success. */
97623 if (!err)
97624 return 0;
97625@@ -2504,10 +2940,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97626 new->vm_ops->close(new);
97627 if (new->vm_file)
97628 fput(new->vm_file);
97629- unlink_anon_vmas(new);
97630 out_free_mpol:
97631 mpol_put(vma_policy(new));
97632 out_free_vma:
97633+
97634+#ifdef CONFIG_PAX_SEGMEXEC
97635+ if (new_m) {
97636+ unlink_anon_vmas(new_m);
97637+ kmem_cache_free(vm_area_cachep, new_m);
97638+ }
97639+#endif
97640+
97641+ unlink_anon_vmas(new);
97642 kmem_cache_free(vm_area_cachep, new);
97643 out_err:
97644 return err;
97645@@ -2520,6 +2964,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97646 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97647 unsigned long addr, int new_below)
97648 {
97649+
97650+#ifdef CONFIG_PAX_SEGMEXEC
97651+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97652+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97653+ if (mm->map_count >= sysctl_max_map_count-1)
97654+ return -ENOMEM;
97655+ } else
97656+#endif
97657+
97658 if (mm->map_count >= sysctl_max_map_count)
97659 return -ENOMEM;
97660
97661@@ -2531,11 +2984,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97662 * work. This now handles partial unmappings.
97663 * Jeremy Fitzhardinge <jeremy@goop.org>
97664 */
97665+#ifdef CONFIG_PAX_SEGMEXEC
97666 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97667 {
97668+ int ret = __do_munmap(mm, start, len);
97669+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97670+ return ret;
97671+
97672+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97673+}
97674+
97675+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97676+#else
97677+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97678+#endif
97679+{
97680 unsigned long end;
97681 struct vm_area_struct *vma, *prev, *last;
97682
97683+ /*
97684+ * mm->mmap_sem is required to protect against another thread
97685+ * changing the mappings in case we sleep.
97686+ */
97687+ verify_mm_writelocked(mm);
97688+
97689 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97690 return -EINVAL;
97691
97692@@ -2613,6 +3085,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97693 /* Fix up all other VM information */
97694 remove_vma_list(mm, vma);
97695
97696+ track_exec_limit(mm, start, end, 0UL);
97697+
97698 return 0;
97699 }
97700
97701@@ -2621,6 +3095,13 @@ int vm_munmap(unsigned long start, size_t len)
97702 int ret;
97703 struct mm_struct *mm = current->mm;
97704
97705+
97706+#ifdef CONFIG_PAX_SEGMEXEC
97707+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97708+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97709+ return -EINVAL;
97710+#endif
97711+
97712 down_write(&mm->mmap_sem);
97713 ret = do_munmap(mm, start, len);
97714 up_write(&mm->mmap_sem);
97715@@ -2634,16 +3115,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97716 return vm_munmap(addr, len);
97717 }
97718
97719-static inline void verify_mm_writelocked(struct mm_struct *mm)
97720-{
97721-#ifdef CONFIG_DEBUG_VM
97722- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97723- WARN_ON(1);
97724- up_read(&mm->mmap_sem);
97725- }
97726-#endif
97727-}
97728-
97729 /*
97730 * this is really a simplified "do_mmap". it only handles
97731 * anonymous maps. eventually we may be able to do some
97732@@ -2657,6 +3128,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97733 struct rb_node **rb_link, *rb_parent;
97734 pgoff_t pgoff = addr >> PAGE_SHIFT;
97735 int error;
97736+ unsigned long charged;
97737
97738 len = PAGE_ALIGN(len);
97739 if (!len)
97740@@ -2664,10 +3136,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97741
97742 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97743
97744+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97745+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97746+ flags &= ~VM_EXEC;
97747+
97748+#ifdef CONFIG_PAX_MPROTECT
97749+ if (mm->pax_flags & MF_PAX_MPROTECT)
97750+ flags &= ~VM_MAYEXEC;
97751+#endif
97752+
97753+ }
97754+#endif
97755+
97756 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97757 if (error & ~PAGE_MASK)
97758 return error;
97759
97760+ charged = len >> PAGE_SHIFT;
97761+
97762 error = mlock_future_check(mm, mm->def_flags, len);
97763 if (error)
97764 return error;
97765@@ -2681,21 +3167,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97766 /*
97767 * Clear old maps. this also does some error checking for us
97768 */
97769- munmap_back:
97770 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97771 if (do_munmap(mm, addr, len))
97772 return -ENOMEM;
97773- goto munmap_back;
97774+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97775 }
97776
97777 /* Check against address space limits *after* clearing old maps... */
97778- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97779+ if (!may_expand_vm(mm, charged))
97780 return -ENOMEM;
97781
97782 if (mm->map_count > sysctl_max_map_count)
97783 return -ENOMEM;
97784
97785- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
97786+ if (security_vm_enough_memory_mm(mm, charged))
97787 return -ENOMEM;
97788
97789 /* Can we just expand an old private anonymous mapping? */
97790@@ -2709,7 +3194,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97791 */
97792 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97793 if (!vma) {
97794- vm_unacct_memory(len >> PAGE_SHIFT);
97795+ vm_unacct_memory(charged);
97796 return -ENOMEM;
97797 }
97798
97799@@ -2723,10 +3208,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97800 vma_link(mm, vma, prev, rb_link, rb_parent);
97801 out:
97802 perf_event_mmap(vma);
97803- mm->total_vm += len >> PAGE_SHIFT;
97804+ mm->total_vm += charged;
97805 if (flags & VM_LOCKED)
97806- mm->locked_vm += (len >> PAGE_SHIFT);
97807+ mm->locked_vm += charged;
97808 vma->vm_flags |= VM_SOFTDIRTY;
97809+ track_exec_limit(mm, addr, addr + len, flags);
97810 return addr;
97811 }
97812
97813@@ -2788,6 +3274,7 @@ void exit_mmap(struct mm_struct *mm)
97814 while (vma) {
97815 if (vma->vm_flags & VM_ACCOUNT)
97816 nr_accounted += vma_pages(vma);
97817+ vma->vm_mirror = NULL;
97818 vma = remove_vma(vma);
97819 }
97820 vm_unacct_memory(nr_accounted);
97821@@ -2805,6 +3292,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97822 struct vm_area_struct *prev;
97823 struct rb_node **rb_link, *rb_parent;
97824
97825+#ifdef CONFIG_PAX_SEGMEXEC
97826+ struct vm_area_struct *vma_m = NULL;
97827+#endif
97828+
97829+ if (security_mmap_addr(vma->vm_start))
97830+ return -EPERM;
97831+
97832 /*
97833 * The vm_pgoff of a purely anonymous vma should be irrelevant
97834 * until its first write fault, when page's anon_vma and index
97835@@ -2828,7 +3322,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97836 security_vm_enough_memory_mm(mm, vma_pages(vma)))
97837 return -ENOMEM;
97838
97839+#ifdef CONFIG_PAX_SEGMEXEC
97840+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
97841+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97842+ if (!vma_m)
97843+ return -ENOMEM;
97844+ }
97845+#endif
97846+
97847 vma_link(mm, vma, prev, rb_link, rb_parent);
97848+
97849+#ifdef CONFIG_PAX_SEGMEXEC
97850+ if (vma_m)
97851+ BUG_ON(pax_mirror_vma(vma_m, vma));
97852+#endif
97853+
97854 return 0;
97855 }
97856
97857@@ -2847,6 +3355,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97858 struct rb_node **rb_link, *rb_parent;
97859 bool faulted_in_anon_vma = true;
97860
97861+ BUG_ON(vma->vm_mirror);
97862+
97863 /*
97864 * If anonymous vma has not yet been faulted, update new pgoff
97865 * to match new location, to increase its chance of merging.
97866@@ -2911,6 +3421,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97867 return NULL;
97868 }
97869
97870+#ifdef CONFIG_PAX_SEGMEXEC
97871+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
97872+{
97873+ struct vm_area_struct *prev_m;
97874+ struct rb_node **rb_link_m, *rb_parent_m;
97875+ struct mempolicy *pol_m;
97876+
97877+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
97878+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
97879+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
97880+ *vma_m = *vma;
97881+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
97882+ if (anon_vma_clone(vma_m, vma))
97883+ return -ENOMEM;
97884+ pol_m = vma_policy(vma_m);
97885+ mpol_get(pol_m);
97886+ set_vma_policy(vma_m, pol_m);
97887+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
97888+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
97889+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
97890+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
97891+ if (vma_m->vm_file)
97892+ get_file(vma_m->vm_file);
97893+ if (vma_m->vm_ops && vma_m->vm_ops->open)
97894+ vma_m->vm_ops->open(vma_m);
97895+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
97896+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
97897+ vma_m->vm_mirror = vma;
97898+ vma->vm_mirror = vma_m;
97899+ return 0;
97900+}
97901+#endif
97902+
97903 /*
97904 * Return true if the calling process may expand its vm space by the passed
97905 * number of pages
97906@@ -2922,6 +3465,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
97907
97908 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
97909
97910+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
97911 if (cur + npages > lim)
97912 return 0;
97913 return 1;
97914@@ -3004,6 +3548,22 @@ static struct vm_area_struct *__install_special_mapping(
97915 vma->vm_start = addr;
97916 vma->vm_end = addr + len;
97917
97918+#ifdef CONFIG_PAX_MPROTECT
97919+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97920+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97921+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
97922+ return ERR_PTR(-EPERM);
97923+ if (!(vm_flags & VM_EXEC))
97924+ vm_flags &= ~VM_MAYEXEC;
97925+#else
97926+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97927+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97928+#endif
97929+ else
97930+ vm_flags &= ~VM_MAYWRITE;
97931+ }
97932+#endif
97933+
97934 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
97935 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97936
97937diff --git a/mm/mprotect.c b/mm/mprotect.c
97938index ace9345..63320dc 100644
97939--- a/mm/mprotect.c
97940+++ b/mm/mprotect.c
97941@@ -24,10 +24,18 @@
97942 #include <linux/migrate.h>
97943 #include <linux/perf_event.h>
97944 #include <linux/ksm.h>
97945+#include <linux/sched/sysctl.h>
97946+
97947+#ifdef CONFIG_PAX_MPROTECT
97948+#include <linux/elf.h>
97949+#include <linux/binfmts.h>
97950+#endif
97951+
97952 #include <asm/uaccess.h>
97953 #include <asm/pgtable.h>
97954 #include <asm/cacheflush.h>
97955 #include <asm/tlbflush.h>
97956+#include <asm/mmu_context.h>
97957
97958 /*
97959 * For a prot_numa update we only hold mmap_sem for read so there is a
97960@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
97961 return pages;
97962 }
97963
97964+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
97965+/* called while holding the mmap semaphor for writing except stack expansion */
97966+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
97967+{
97968+ unsigned long oldlimit, newlimit = 0UL;
97969+
97970+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
97971+ return;
97972+
97973+ spin_lock(&mm->page_table_lock);
97974+ oldlimit = mm->context.user_cs_limit;
97975+ if ((prot & VM_EXEC) && oldlimit < end)
97976+ /* USER_CS limit moved up */
97977+ newlimit = end;
97978+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
97979+ /* USER_CS limit moved down */
97980+ newlimit = start;
97981+
97982+ if (newlimit) {
97983+ mm->context.user_cs_limit = newlimit;
97984+
97985+#ifdef CONFIG_SMP
97986+ wmb();
97987+ cpus_clear(mm->context.cpu_user_cs_mask);
97988+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
97989+#endif
97990+
97991+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
97992+ }
97993+ spin_unlock(&mm->page_table_lock);
97994+ if (newlimit == end) {
97995+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
97996+
97997+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
97998+ if (is_vm_hugetlb_page(vma))
97999+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98000+ else
98001+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98002+ }
98003+}
98004+#endif
98005+
98006 int
98007 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98008 unsigned long start, unsigned long end, unsigned long newflags)
98009@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98010 int error;
98011 int dirty_accountable = 0;
98012
98013+#ifdef CONFIG_PAX_SEGMEXEC
98014+ struct vm_area_struct *vma_m = NULL;
98015+ unsigned long start_m, end_m;
98016+
98017+ start_m = start + SEGMEXEC_TASK_SIZE;
98018+ end_m = end + SEGMEXEC_TASK_SIZE;
98019+#endif
98020+
98021 if (newflags == oldflags) {
98022 *pprev = vma;
98023 return 0;
98024 }
98025
98026+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98027+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98028+
98029+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98030+ return -ENOMEM;
98031+
98032+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98033+ return -ENOMEM;
98034+ }
98035+
98036 /*
98037 * If we make a private mapping writable we increase our commit;
98038 * but (without finer accounting) cannot reduce our commit if we
98039@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98040 }
98041 }
98042
98043+#ifdef CONFIG_PAX_SEGMEXEC
98044+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98045+ if (start != vma->vm_start) {
98046+ error = split_vma(mm, vma, start, 1);
98047+ if (error)
98048+ goto fail;
98049+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98050+ *pprev = (*pprev)->vm_next;
98051+ }
98052+
98053+ if (end != vma->vm_end) {
98054+ error = split_vma(mm, vma, end, 0);
98055+ if (error)
98056+ goto fail;
98057+ }
98058+
98059+ if (pax_find_mirror_vma(vma)) {
98060+ error = __do_munmap(mm, start_m, end_m - start_m);
98061+ if (error)
98062+ goto fail;
98063+ } else {
98064+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98065+ if (!vma_m) {
98066+ error = -ENOMEM;
98067+ goto fail;
98068+ }
98069+ vma->vm_flags = newflags;
98070+ error = pax_mirror_vma(vma_m, vma);
98071+ if (error) {
98072+ vma->vm_flags = oldflags;
98073+ goto fail;
98074+ }
98075+ }
98076+ }
98077+#endif
98078+
98079 /*
98080 * First try to merge with previous and/or next vma.
98081 */
98082@@ -314,7 +418,19 @@ success:
98083 * vm_flags and vm_page_prot are protected by the mmap_sem
98084 * held in write mode.
98085 */
98086+
98087+#ifdef CONFIG_PAX_SEGMEXEC
98088+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98089+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98090+#endif
98091+
98092 vma->vm_flags = newflags;
98093+
98094+#ifdef CONFIG_PAX_MPROTECT
98095+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98096+ mm->binfmt->handle_mprotect(vma, newflags);
98097+#endif
98098+
98099 dirty_accountable = vma_wants_writenotify(vma);
98100 vma_set_page_prot(vma);
98101
98102@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98103 end = start + len;
98104 if (end <= start)
98105 return -ENOMEM;
98106+
98107+#ifdef CONFIG_PAX_SEGMEXEC
98108+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98109+ if (end > SEGMEXEC_TASK_SIZE)
98110+ return -EINVAL;
98111+ } else
98112+#endif
98113+
98114+ if (end > TASK_SIZE)
98115+ return -EINVAL;
98116+
98117 if (!arch_validate_prot(prot))
98118 return -EINVAL;
98119
98120@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98121 /*
98122 * Does the application expect PROT_READ to imply PROT_EXEC:
98123 */
98124- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98125+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98126 prot |= PROT_EXEC;
98127
98128 vm_flags = calc_vm_prot_bits(prot);
98129@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98130 if (start > vma->vm_start)
98131 prev = vma;
98132
98133+#ifdef CONFIG_PAX_MPROTECT
98134+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98135+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98136+#endif
98137+
98138 for (nstart = start ; ; ) {
98139 unsigned long newflags;
98140
98141@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98142
98143 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98144 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98145+ if (prot & (PROT_WRITE | PROT_EXEC))
98146+ gr_log_rwxmprotect(vma);
98147+
98148+ error = -EACCES;
98149+ goto out;
98150+ }
98151+
98152+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98153 error = -EACCES;
98154 goto out;
98155 }
98156@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98157 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98158 if (error)
98159 goto out;
98160+
98161+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98162+
98163 nstart = tmp;
98164
98165 if (nstart < prev->vm_end)
98166diff --git a/mm/mremap.c b/mm/mremap.c
98167index 17fa018..6f7892b 100644
98168--- a/mm/mremap.c
98169+++ b/mm/mremap.c
98170@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98171 continue;
98172 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98173 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98174+
98175+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98176+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98177+ pte = pte_exprotect(pte);
98178+#endif
98179+
98180 pte = move_soft_dirty_pte(pte);
98181 set_pte_at(mm, new_addr, new_pte, pte);
98182 }
98183@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98184 if (is_vm_hugetlb_page(vma))
98185 goto Einval;
98186
98187+#ifdef CONFIG_PAX_SEGMEXEC
98188+ if (pax_find_mirror_vma(vma))
98189+ goto Einval;
98190+#endif
98191+
98192 /* We can't remap across vm area boundaries */
98193 if (old_len > vma->vm_end - addr)
98194 goto Efault;
98195@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98196 unsigned long ret = -EINVAL;
98197 unsigned long charged = 0;
98198 unsigned long map_flags;
98199+ unsigned long pax_task_size = TASK_SIZE;
98200
98201 if (new_addr & ~PAGE_MASK)
98202 goto out;
98203
98204- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98205+#ifdef CONFIG_PAX_SEGMEXEC
98206+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98207+ pax_task_size = SEGMEXEC_TASK_SIZE;
98208+#endif
98209+
98210+ pax_task_size -= PAGE_SIZE;
98211+
98212+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98213 goto out;
98214
98215 /* Check if the location we're moving into overlaps the
98216 * old location at all, and fail if it does.
98217 */
98218- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98219- goto out;
98220-
98221- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98222+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98223 goto out;
98224
98225 ret = do_munmap(mm, new_addr, new_len);
98226@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98227 unsigned long ret = -EINVAL;
98228 unsigned long charged = 0;
98229 bool locked = false;
98230+ unsigned long pax_task_size = TASK_SIZE;
98231
98232 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98233 return ret;
98234@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98235 if (!new_len)
98236 return ret;
98237
98238+#ifdef CONFIG_PAX_SEGMEXEC
98239+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98240+ pax_task_size = SEGMEXEC_TASK_SIZE;
98241+#endif
98242+
98243+ pax_task_size -= PAGE_SIZE;
98244+
98245+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98246+ old_len > pax_task_size || addr > pax_task_size-old_len)
98247+ return ret;
98248+
98249 down_write(&current->mm->mmap_sem);
98250
98251 if (flags & MREMAP_FIXED) {
98252@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98253 new_addr = addr;
98254 }
98255 ret = addr;
98256+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98257 goto out;
98258 }
98259 }
98260@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98261 goto out;
98262 }
98263
98264+ map_flags = vma->vm_flags;
98265 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98266+ if (!(ret & ~PAGE_MASK)) {
98267+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98268+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98269+ }
98270 }
98271 out:
98272 if (ret & ~PAGE_MASK)
98273diff --git a/mm/nommu.c b/mm/nommu.c
98274index ae5baae..cbb2ed5 100644
98275--- a/mm/nommu.c
98276+++ b/mm/nommu.c
98277@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98278 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98279 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98280 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98281-int heap_stack_gap = 0;
98282
98283 atomic_long_t mmap_pages_allocated;
98284
98285@@ -858,15 +857,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98286 EXPORT_SYMBOL(find_vma);
98287
98288 /*
98289- * find a VMA
98290- * - we don't extend stack VMAs under NOMMU conditions
98291- */
98292-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98293-{
98294- return find_vma(mm, addr);
98295-}
98296-
98297-/*
98298 * expand a stack to a given address
98299 * - not supported under NOMMU conditions
98300 */
98301@@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98302
98303 /* most fields are the same, copy all, and then fixup */
98304 *new = *vma;
98305+ INIT_LIST_HEAD(&new->anon_vma_chain);
98306 *region = *vma->vm_region;
98307 new->vm_region = region;
98308
98309@@ -1990,8 +1981,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98310 }
98311 EXPORT_SYMBOL(generic_file_remap_pages);
98312
98313-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98314- unsigned long addr, void *buf, int len, int write)
98315+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98316+ unsigned long addr, void *buf, size_t len, int write)
98317 {
98318 struct vm_area_struct *vma;
98319
98320@@ -2032,8 +2023,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98321 *
98322 * The caller must hold a reference on @mm.
98323 */
98324-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98325- void *buf, int len, int write)
98326+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98327+ void *buf, size_t len, int write)
98328 {
98329 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98330 }
98331@@ -2042,7 +2033,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98332 * Access another process' address space.
98333 * - source/target buffer must be kernel space
98334 */
98335-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98336+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98337 {
98338 struct mm_struct *mm;
98339
98340diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98341index 6f43352..e44bf41 100644
98342--- a/mm/page-writeback.c
98343+++ b/mm/page-writeback.c
98344@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98345 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98346 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98347 */
98348-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98349+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98350 unsigned long thresh,
98351 unsigned long bg_thresh,
98352 unsigned long dirty,
98353diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98354index 8bbef06..a8d1989 100644
98355--- a/mm/page_alloc.c
98356+++ b/mm/page_alloc.c
98357@@ -60,6 +60,7 @@
98358 #include <linux/hugetlb.h>
98359 #include <linux/sched/rt.h>
98360 #include <linux/page_owner.h>
98361+#include <linux/random.h>
98362
98363 #include <asm/sections.h>
98364 #include <asm/tlbflush.h>
98365@@ -358,7 +359,7 @@ out:
98366 * This usage means that zero-order pages may not be compound.
98367 */
98368
98369-static void free_compound_page(struct page *page)
98370+void free_compound_page(struct page *page)
98371 {
98372 __free_pages_ok(page, compound_order(page));
98373 }
98374@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
98375 __mod_zone_freepage_state(zone, (1 << order), migratetype);
98376 }
98377 #else
98378-struct page_ext_operations debug_guardpage_ops = { NULL, };
98379+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
98380 static inline void set_page_guard(struct zone *zone, struct page *page,
98381 unsigned int order, int migratetype) {}
98382 static inline void clear_page_guard(struct zone *zone, struct page *page,
98383@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98384 int i;
98385 int bad = 0;
98386
98387+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98388+ unsigned long index = 1UL << order;
98389+#endif
98390+
98391 VM_BUG_ON_PAGE(PageTail(page), page);
98392 VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
98393
98394@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98395 debug_check_no_obj_freed(page_address(page),
98396 PAGE_SIZE << order);
98397 }
98398+
98399+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98400+ for (; index; --index)
98401+ sanitize_highpage(page + index - 1);
98402+#endif
98403+
98404 arch_free_page(page, order);
98405 kernel_map_pages(page, 1 << order, 0);
98406
98407@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98408 local_irq_restore(flags);
98409 }
98410
98411+#ifdef CONFIG_PAX_LATENT_ENTROPY
98412+bool __meminitdata extra_latent_entropy;
98413+
98414+static int __init setup_pax_extra_latent_entropy(char *str)
98415+{
98416+ extra_latent_entropy = true;
98417+ return 0;
98418+}
98419+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98420+
98421+volatile u64 latent_entropy __latent_entropy;
98422+EXPORT_SYMBOL(latent_entropy);
98423+#endif
98424+
98425 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98426 {
98427 unsigned int nr_pages = 1 << order;
98428@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98429 __ClearPageReserved(p);
98430 set_page_count(p, 0);
98431
98432+#ifdef CONFIG_PAX_LATENT_ENTROPY
98433+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98434+ u64 hash = 0;
98435+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98436+ const u64 *data = lowmem_page_address(page);
98437+
98438+ for (index = 0; index < end; index++)
98439+ hash ^= hash + data[index];
98440+ latent_entropy ^= hash;
98441+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98442+ }
98443+#endif
98444+
98445 page_zone(page)->managed_pages += nr_pages;
98446 set_page_refcounted(page);
98447 __free_pages(page, order);
98448@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98449 arch_alloc_page(page, order);
98450 kernel_map_pages(page, 1 << order, 1);
98451
98452+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98453 if (gfp_flags & __GFP_ZERO)
98454 prep_zero_page(page, order, gfp_flags);
98455+#endif
98456
98457 if (order && (gfp_flags & __GFP_COMP))
98458 prep_compound_page(page, order);
98459@@ -1700,7 +1740,7 @@ again:
98460 }
98461
98462 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98463- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98464+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98465 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
98466 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98467
98468@@ -2021,7 +2061,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98469 do {
98470 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98471 high_wmark_pages(zone) - low_wmark_pages(zone) -
98472- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98473+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98474 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98475 } while (zone++ != preferred_zone);
98476 }
98477@@ -5781,7 +5821,7 @@ static void __setup_per_zone_wmarks(void)
98478
98479 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98480 high_wmark_pages(zone) - low_wmark_pages(zone) -
98481- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98482+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98483
98484 setup_zone_migrate_reserve(zone);
98485 spin_unlock_irqrestore(&zone->lock, flags);
98486diff --git a/mm/percpu.c b/mm/percpu.c
98487index d39e2f4..de5f4b4 100644
98488--- a/mm/percpu.c
98489+++ b/mm/percpu.c
98490@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98491 static unsigned int pcpu_high_unit_cpu __read_mostly;
98492
98493 /* the address of the first chunk which starts with the kernel static area */
98494-void *pcpu_base_addr __read_mostly;
98495+void *pcpu_base_addr __read_only;
98496 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98497
98498 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98499diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98500index 5077afc..846c9ef 100644
98501--- a/mm/process_vm_access.c
98502+++ b/mm/process_vm_access.c
98503@@ -13,6 +13,7 @@
98504 #include <linux/uio.h>
98505 #include <linux/sched.h>
98506 #include <linux/highmem.h>
98507+#include <linux/security.h>
98508 #include <linux/ptrace.h>
98509 #include <linux/slab.h>
98510 #include <linux/syscalls.h>
98511@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98512 ssize_t iov_len;
98513 size_t total_len = iov_iter_count(iter);
98514
98515+ return -ENOSYS; // PaX: until properly audited
98516+
98517 /*
98518 * Work out how many pages of struct pages we're going to need
98519 * when eventually calling get_user_pages
98520 */
98521 for (i = 0; i < riovcnt; i++) {
98522 iov_len = rvec[i].iov_len;
98523- if (iov_len > 0) {
98524- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98525- + iov_len)
98526- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98527- / PAGE_SIZE + 1;
98528- nr_pages = max(nr_pages, nr_pages_iov);
98529- }
98530+ if (iov_len <= 0)
98531+ continue;
98532+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98533+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98534+ nr_pages = max(nr_pages, nr_pages_iov);
98535 }
98536
98537 if (nr_pages == 0)
98538@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98539 goto free_proc_pages;
98540 }
98541
98542+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98543+ rc = -EPERM;
98544+ goto put_task_struct;
98545+ }
98546+
98547 mm = mm_access(task, PTRACE_MODE_ATTACH);
98548 if (!mm || IS_ERR(mm)) {
98549 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
98550diff --git a/mm/rmap.c b/mm/rmap.c
98551index 71cd5bd..e259089 100644
98552--- a/mm/rmap.c
98553+++ b/mm/rmap.c
98554@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98555 struct anon_vma *anon_vma = vma->anon_vma;
98556 struct anon_vma_chain *avc;
98557
98558+#ifdef CONFIG_PAX_SEGMEXEC
98559+ struct anon_vma_chain *avc_m = NULL;
98560+#endif
98561+
98562 might_sleep();
98563 if (unlikely(!anon_vma)) {
98564 struct mm_struct *mm = vma->vm_mm;
98565@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98566 if (!avc)
98567 goto out_enomem;
98568
98569+#ifdef CONFIG_PAX_SEGMEXEC
98570+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98571+ if (!avc_m)
98572+ goto out_enomem_free_avc;
98573+#endif
98574+
98575 anon_vma = find_mergeable_anon_vma(vma);
98576 allocated = NULL;
98577 if (!anon_vma) {
98578@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98579 /* page_table_lock to protect against threads */
98580 spin_lock(&mm->page_table_lock);
98581 if (likely(!vma->anon_vma)) {
98582+
98583+#ifdef CONFIG_PAX_SEGMEXEC
98584+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98585+
98586+ if (vma_m) {
98587+ BUG_ON(vma_m->anon_vma);
98588+ vma_m->anon_vma = anon_vma;
98589+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98590+ anon_vma->degree++;
98591+ avc_m = NULL;
98592+ }
98593+#endif
98594+
98595 vma->anon_vma = anon_vma;
98596 anon_vma_chain_link(vma, avc, anon_vma);
98597 /* vma reference or self-parent link for new root */
98598@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98599
98600 if (unlikely(allocated))
98601 put_anon_vma(allocated);
98602+
98603+#ifdef CONFIG_PAX_SEGMEXEC
98604+ if (unlikely(avc_m))
98605+ anon_vma_chain_free(avc_m);
98606+#endif
98607+
98608 if (unlikely(avc))
98609 anon_vma_chain_free(avc);
98610 }
98611 return 0;
98612
98613 out_enomem_free_avc:
98614+
98615+#ifdef CONFIG_PAX_SEGMEXEC
98616+ if (avc_m)
98617+ anon_vma_chain_free(avc_m);
98618+#endif
98619+
98620 anon_vma_chain_free(avc);
98621 out_enomem:
98622 return -ENOMEM;
98623@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98624 * good chance of avoiding scanning the whole hierarchy when it searches where
98625 * page is mapped.
98626 */
98627-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98628+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98629 {
98630 struct anon_vma_chain *avc, *pavc;
98631 struct anon_vma *root = NULL;
98632@@ -296,7 +331,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98633 * the corresponding VMA in the parent process is attached to.
98634 * Returns 0 on success, non-zero on failure.
98635 */
98636-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98637+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98638 {
98639 struct anon_vma_chain *avc;
98640 struct anon_vma *anon_vma;
98641@@ -416,8 +451,10 @@ static void anon_vma_ctor(void *data)
98642 void __init anon_vma_init(void)
98643 {
98644 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98645- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98646- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98647+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98648+ anon_vma_ctor);
98649+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98650+ SLAB_PANIC|SLAB_NO_SANITIZE);
98651 }
98652
98653 /*
98654diff --git a/mm/shmem.c b/mm/shmem.c
98655index 993e6ba..a962ba3 100644
98656--- a/mm/shmem.c
98657+++ b/mm/shmem.c
98658@@ -33,7 +33,7 @@
98659 #include <linux/swap.h>
98660 #include <linux/aio.h>
98661
98662-static struct vfsmount *shm_mnt;
98663+struct vfsmount *shm_mnt;
98664
98665 #ifdef CONFIG_SHMEM
98666 /*
98667@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98668 #define BOGO_DIRENT_SIZE 20
98669
98670 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98671-#define SHORT_SYMLINK_LEN 128
98672+#define SHORT_SYMLINK_LEN 64
98673
98674 /*
98675 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98676@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98677 static int shmem_xattr_validate(const char *name)
98678 {
98679 struct { const char *prefix; size_t len; } arr[] = {
98680+
98681+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98682+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98683+#endif
98684+
98685 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98686 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98687 };
98688@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98689 if (err)
98690 return err;
98691
98692+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98693+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98694+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98695+ return -EOPNOTSUPP;
98696+ if (size > 8)
98697+ return -EINVAL;
98698+ }
98699+#endif
98700+
98701 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98702 }
98703
98704@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98705 int err = -ENOMEM;
98706
98707 /* Round up to L1_CACHE_BYTES to resist false sharing */
98708- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98709- L1_CACHE_BYTES), GFP_KERNEL);
98710+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98711 if (!sbinfo)
98712 return -ENOMEM;
98713
98714diff --git a/mm/slab.c b/mm/slab.c
98715index 65b5dcb..d53d866 100644
98716--- a/mm/slab.c
98717+++ b/mm/slab.c
98718@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98719 if ((x)->max_freeable < i) \
98720 (x)->max_freeable = i; \
98721 } while (0)
98722-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98723-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98724-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98725-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98726+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98727+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98728+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98729+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98730+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
98731+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
98732 #else
98733 #define STATS_INC_ACTIVE(x) do { } while (0)
98734 #define STATS_DEC_ACTIVE(x) do { } while (0)
98735@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98736 #define STATS_INC_ALLOCMISS(x) do { } while (0)
98737 #define STATS_INC_FREEHIT(x) do { } while (0)
98738 #define STATS_INC_FREEMISS(x) do { } while (0)
98739+#define STATS_INC_SANITIZED(x) do { } while (0)
98740+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
98741 #endif
98742
98743 #if DEBUG
98744@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
98745 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98746 */
98747 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98748- const struct page *page, void *obj)
98749+ const struct page *page, const void *obj)
98750 {
98751 u32 offset = (obj - page->s_mem);
98752 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98753@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
98754 * structures first. Without this, further allocations will bug.
98755 */
98756 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
98757- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
98758+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98759 slab_state = PARTIAL_NODE;
98760
98761 slab_early_init = 0;
98762@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
98763
98764 cachep = find_mergeable(size, align, flags, name, ctor);
98765 if (cachep) {
98766- cachep->refcount++;
98767+ atomic_inc(&cachep->refcount);
98768
98769 /*
98770 * Adjust the object sizes so that we clear
98771@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
98772 struct array_cache *ac = cpu_cache_get(cachep);
98773
98774 check_irq_off();
98775+
98776+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98777+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
98778+ STATS_INC_NOT_SANITIZED(cachep);
98779+ else {
98780+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
98781+
98782+ if (cachep->ctor)
98783+ cachep->ctor(objp);
98784+
98785+ STATS_INC_SANITIZED(cachep);
98786+ }
98787+#endif
98788+
98789 kmemleak_free_recursive(objp, cachep->flags);
98790 objp = cache_free_debugcheck(cachep, objp, caller);
98791
98792@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
98793 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
98794 }
98795
98796-void *__kmalloc_node(size_t size, gfp_t flags, int node)
98797+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
98798 {
98799 return __do_kmalloc_node(size, flags, node, _RET_IP_);
98800 }
98801@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
98802 * @flags: the type of memory to allocate (see kmalloc).
98803 * @caller: function caller for debug tracking of the caller
98804 */
98805-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
98806+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
98807 unsigned long caller)
98808 {
98809 struct kmem_cache *cachep;
98810@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
98811
98812 if (unlikely(ZERO_OR_NULL_PTR(objp)))
98813 return;
98814+ VM_BUG_ON(!virt_addr_valid(objp));
98815 local_irq_save(flags);
98816 kfree_debugcheck(objp);
98817 c = virt_to_cache(objp);
98818@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
98819 }
98820 /* cpu stats */
98821 {
98822- unsigned long allochit = atomic_read(&cachep->allochit);
98823- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98824- unsigned long freehit = atomic_read(&cachep->freehit);
98825- unsigned long freemiss = atomic_read(&cachep->freemiss);
98826+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98827+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98828+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98829+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98830
98831 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98832 allochit, allocmiss, freehit, freemiss);
98833 }
98834+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98835+ {
98836+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
98837+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
98838+
98839+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
98840+ }
98841+#endif
98842 #endif
98843 }
98844
98845@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
98846 static int __init slab_proc_init(void)
98847 {
98848 #ifdef CONFIG_DEBUG_SLAB_LEAK
98849- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98850+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
98851 #endif
98852 return 0;
98853 }
98854 module_init(slab_proc_init);
98855 #endif
98856
98857+bool is_usercopy_object(const void *ptr)
98858+{
98859+ struct page *page;
98860+ struct kmem_cache *cachep;
98861+
98862+ if (ZERO_OR_NULL_PTR(ptr))
98863+ return false;
98864+
98865+ if (!slab_is_available())
98866+ return false;
98867+
98868+ if (!virt_addr_valid(ptr))
98869+ return false;
98870+
98871+ page = virt_to_head_page(ptr);
98872+
98873+ if (!PageSlab(page))
98874+ return false;
98875+
98876+ cachep = page->slab_cache;
98877+ return cachep->flags & SLAB_USERCOPY;
98878+}
98879+
98880+#ifdef CONFIG_PAX_USERCOPY
98881+const char *check_heap_object(const void *ptr, unsigned long n)
98882+{
98883+ struct page *page;
98884+ struct kmem_cache *cachep;
98885+ unsigned int objnr;
98886+ unsigned long offset;
98887+
98888+ if (ZERO_OR_NULL_PTR(ptr))
98889+ return "<null>";
98890+
98891+ if (!virt_addr_valid(ptr))
98892+ return NULL;
98893+
98894+ page = virt_to_head_page(ptr);
98895+
98896+ if (!PageSlab(page))
98897+ return NULL;
98898+
98899+ cachep = page->slab_cache;
98900+ if (!(cachep->flags & SLAB_USERCOPY))
98901+ return cachep->name;
98902+
98903+ objnr = obj_to_index(cachep, page, ptr);
98904+ BUG_ON(objnr >= cachep->num);
98905+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
98906+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
98907+ return NULL;
98908+
98909+ return cachep->name;
98910+}
98911+#endif
98912+
98913 /**
98914 * ksize - get the actual amount of memory allocated for a given object
98915 * @objp: Pointer to the object
98916diff --git a/mm/slab.h b/mm/slab.h
98917index 1cf40054..10ad563 100644
98918--- a/mm/slab.h
98919+++ b/mm/slab.h
98920@@ -22,7 +22,7 @@ struct kmem_cache {
98921 unsigned int align; /* Alignment as calculated */
98922 unsigned long flags; /* Active flags on the slab */
98923 const char *name; /* Slab name for sysfs */
98924- int refcount; /* Use counter */
98925+ atomic_t refcount; /* Use counter */
98926 void (*ctor)(void *); /* Called on object slot creation */
98927 struct list_head list; /* List of all slab caches on the system */
98928 };
98929@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
98930 /* The slab cache that manages slab cache information */
98931 extern struct kmem_cache *kmem_cache;
98932
98933+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98934+#ifdef CONFIG_X86_64
98935+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
98936+#else
98937+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
98938+#endif
98939+enum pax_sanitize_mode {
98940+ PAX_SANITIZE_SLAB_OFF = 0,
98941+ PAX_SANITIZE_SLAB_FAST,
98942+ PAX_SANITIZE_SLAB_FULL,
98943+};
98944+extern enum pax_sanitize_mode pax_sanitize_slab;
98945+#endif
98946+
98947 unsigned long calculate_alignment(unsigned long flags,
98948 unsigned long align, unsigned long size);
98949
98950@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
98951
98952 /* Legal flag mask for kmem_cache_create(), for various configurations */
98953 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
98954- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
98955+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
98956+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
98957
98958 #if defined(CONFIG_DEBUG_SLAB)
98959 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
98960@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
98961 return s;
98962
98963 page = virt_to_head_page(x);
98964+
98965+ BUG_ON(!PageSlab(page));
98966+
98967 cachep = page->slab_cache;
98968 if (slab_equal_or_root(cachep, s))
98969 return cachep;
98970diff --git a/mm/slab_common.c b/mm/slab_common.c
98971index e03dd6f..c475838 100644
98972--- a/mm/slab_common.c
98973+++ b/mm/slab_common.c
98974@@ -25,11 +25,35 @@
98975
98976 #include "slab.h"
98977
98978-enum slab_state slab_state;
98979+enum slab_state slab_state __read_only;
98980 LIST_HEAD(slab_caches);
98981 DEFINE_MUTEX(slab_mutex);
98982 struct kmem_cache *kmem_cache;
98983
98984+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98985+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
98986+static int __init pax_sanitize_slab_setup(char *str)
98987+{
98988+ if (!str)
98989+ return 0;
98990+
98991+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
98992+ pr_info("PaX slab sanitization: %s\n", "disabled");
98993+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
98994+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
98995+ pr_info("PaX slab sanitization: %s\n", "fast");
98996+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
98997+ } else if (!strcmp(str, "full")) {
98998+ pr_info("PaX slab sanitization: %s\n", "full");
98999+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99000+ } else
99001+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99002+
99003+ return 0;
99004+}
99005+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99006+#endif
99007+
99008 /*
99009 * Set of flags that will prevent slab merging
99010 */
99011@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99012 * Merge control. If this is set then no merging of slab caches will occur.
99013 * (Could be removed. This was introduced to pacify the merge skeptics.)
99014 */
99015-static int slab_nomerge;
99016+static int slab_nomerge = 1;
99017
99018 static int __init setup_slab_nomerge(char *str)
99019 {
99020@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
99021 /*
99022 * We may have set a slab to be unmergeable during bootstrap.
99023 */
99024- if (s->refcount < 0)
99025+ if (atomic_read(&s->refcount) < 0)
99026 return 1;
99027
99028 return 0;
99029@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99030 if (err)
99031 goto out_free_cache;
99032
99033- s->refcount = 1;
99034+ atomic_set(&s->refcount, 1);
99035 list_add(&s->list, &slab_caches);
99036 out:
99037 if (err)
99038@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99039 */
99040 flags &= CACHE_CREATE_MASK;
99041
99042+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99043+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99044+ flags |= SLAB_NO_SANITIZE;
99045+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99046+ flags &= ~SLAB_NO_SANITIZE;
99047+#endif
99048+
99049 s = __kmem_cache_alias(name, size, align, flags, ctor);
99050 if (s)
99051 goto out_unlock;
99052@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99053
99054 mutex_lock(&slab_mutex);
99055
99056- s->refcount--;
99057- if (s->refcount)
99058+ if (!atomic_dec_and_test(&s->refcount))
99059 goto out_unlock;
99060
99061 if (memcg_cleanup_cache_params(s) != 0)
99062@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99063 rcu_barrier();
99064
99065 memcg_free_cache_params(s);
99066-#ifdef SLAB_SUPPORTS_SYSFS
99067+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99068 sysfs_slab_remove(s);
99069 #else
99070 slab_kmem_cache_release(s);
99071@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99072 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99073 name, size, err);
99074
99075- s->refcount = -1; /* Exempt from merging for now */
99076+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99077 }
99078
99079 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99080@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99081
99082 create_boot_cache(s, name, size, flags);
99083 list_add(&s->list, &slab_caches);
99084- s->refcount = 1;
99085+ atomic_set(&s->refcount, 1);
99086 return s;
99087 }
99088
99089@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99090 EXPORT_SYMBOL(kmalloc_dma_caches);
99091 #endif
99092
99093+#ifdef CONFIG_PAX_USERCOPY_SLABS
99094+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99095+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99096+#endif
99097+
99098 /*
99099 * Conversion table for small slabs sizes / 8 to the index in the
99100 * kmalloc array. This is necessary for slabs < 192 since we have non power
99101@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99102 return kmalloc_dma_caches[index];
99103
99104 #endif
99105+
99106+#ifdef CONFIG_PAX_USERCOPY_SLABS
99107+ if (unlikely((flags & GFP_USERCOPY)))
99108+ return kmalloc_usercopy_caches[index];
99109+
99110+#endif
99111+
99112 return kmalloc_caches[index];
99113 }
99114
99115@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99116 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99117 if (!kmalloc_caches[i]) {
99118 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99119- 1 << i, flags);
99120+ 1 << i, SLAB_USERCOPY | flags);
99121 }
99122
99123 /*
99124@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99125 * earlier power of two caches
99126 */
99127 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99128- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99129+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99130
99131 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99132- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99133+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99134 }
99135
99136 /* Kmalloc array is now usable */
99137@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99138 }
99139 }
99140 #endif
99141+
99142+#ifdef CONFIG_PAX_USERCOPY_SLABS
99143+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99144+ struct kmem_cache *s = kmalloc_caches[i];
99145+
99146+ if (s) {
99147+ int size = kmalloc_size(i);
99148+ char *n = kasprintf(GFP_NOWAIT,
99149+ "usercopy-kmalloc-%d", size);
99150+
99151+ BUG_ON(!n);
99152+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99153+ size, SLAB_USERCOPY | flags);
99154+ }
99155+ }
99156+#endif
99157+
99158 }
99159 #endif /* !CONFIG_SLOB */
99160
99161@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
99162 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99163 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99164 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99165+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99166+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99167+#endif
99168 #endif
99169 seq_putc(m, '\n');
99170 }
99171@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
99172 module_init(slab_proc_init);
99173 #endif /* CONFIG_SLABINFO */
99174
99175-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
99176+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
99177 gfp_t flags)
99178 {
99179 void *ret;
99180diff --git a/mm/slob.c b/mm/slob.c
99181index 96a8620..46b3f12 100644
99182--- a/mm/slob.c
99183+++ b/mm/slob.c
99184@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99185 /*
99186 * Return the size of a slob block.
99187 */
99188-static slobidx_t slob_units(slob_t *s)
99189+static slobidx_t slob_units(const slob_t *s)
99190 {
99191 if (s->units > 0)
99192 return s->units;
99193@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99194 /*
99195 * Return the next free slob block pointer after this one.
99196 */
99197-static slob_t *slob_next(slob_t *s)
99198+static slob_t *slob_next(const slob_t *s)
99199 {
99200 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99201 slobidx_t next;
99202@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99203 /*
99204 * Returns true if s is the last free block in its page.
99205 */
99206-static int slob_last(slob_t *s)
99207+static int slob_last(const slob_t *s)
99208 {
99209 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99210 }
99211
99212-static void *slob_new_pages(gfp_t gfp, int order, int node)
99213+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99214 {
99215- void *page;
99216+ struct page *page;
99217
99218 #ifdef CONFIG_NUMA
99219 if (node != NUMA_NO_NODE)
99220@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99221 if (!page)
99222 return NULL;
99223
99224- return page_address(page);
99225+ __SetPageSlab(page);
99226+ return page;
99227 }
99228
99229-static void slob_free_pages(void *b, int order)
99230+static void slob_free_pages(struct page *sp, int order)
99231 {
99232 if (current->reclaim_state)
99233 current->reclaim_state->reclaimed_slab += 1 << order;
99234- free_pages((unsigned long)b, order);
99235+ __ClearPageSlab(sp);
99236+ page_mapcount_reset(sp);
99237+ sp->private = 0;
99238+ __free_pages(sp, order);
99239 }
99240
99241 /*
99242@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99243
99244 /* Not enough space: must allocate a new page */
99245 if (!b) {
99246- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99247- if (!b)
99248+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99249+ if (!sp)
99250 return NULL;
99251- sp = virt_to_page(b);
99252- __SetPageSlab(sp);
99253+ b = page_address(sp);
99254
99255 spin_lock_irqsave(&slob_lock, flags);
99256 sp->units = SLOB_UNITS(PAGE_SIZE);
99257 sp->freelist = b;
99258+ sp->private = 0;
99259 INIT_LIST_HEAD(&sp->lru);
99260 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99261 set_slob_page_free(sp, slob_list);
99262@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99263 /*
99264 * slob_free: entry point into the slob allocator.
99265 */
99266-static void slob_free(void *block, int size)
99267+static void slob_free(struct kmem_cache *c, void *block, int size)
99268 {
99269 struct page *sp;
99270 slob_t *prev, *next, *b = (slob_t *)block;
99271@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99272 if (slob_page_free(sp))
99273 clear_slob_page_free(sp);
99274 spin_unlock_irqrestore(&slob_lock, flags);
99275- __ClearPageSlab(sp);
99276- page_mapcount_reset(sp);
99277- slob_free_pages(b, 0);
99278+ slob_free_pages(sp, 0);
99279 return;
99280 }
99281
99282+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99283+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99284+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99285+#endif
99286+
99287 if (!slob_page_free(sp)) {
99288 /* This slob page is about to become partially free. Easy! */
99289 sp->units = units;
99290@@ -424,11 +431,10 @@ out:
99291 */
99292
99293 static __always_inline void *
99294-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99295+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99296 {
99297- unsigned int *m;
99298- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99299- void *ret;
99300+ slob_t *m;
99301+ void *ret = NULL;
99302
99303 gfp &= gfp_allowed_mask;
99304
99305@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99306
99307 if (!m)
99308 return NULL;
99309- *m = size;
99310+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99311+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99312+ m[0].units = size;
99313+ m[1].units = align;
99314 ret = (void *)m + align;
99315
99316 trace_kmalloc_node(caller, ret,
99317 size, size + align, gfp, node);
99318 } else {
99319 unsigned int order = get_order(size);
99320+ struct page *page;
99321
99322 if (likely(order))
99323 gfp |= __GFP_COMP;
99324- ret = slob_new_pages(gfp, order, node);
99325+ page = slob_new_pages(gfp, order, node);
99326+ if (page) {
99327+ ret = page_address(page);
99328+ page->private = size;
99329+ }
99330
99331 trace_kmalloc_node(caller, ret,
99332 size, PAGE_SIZE << order, gfp, node);
99333 }
99334
99335- kmemleak_alloc(ret, size, 1, gfp);
99336 return ret;
99337 }
99338
99339-void *__kmalloc(size_t size, gfp_t gfp)
99340+static __always_inline void *
99341+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99342+{
99343+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99344+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99345+
99346+ if (!ZERO_OR_NULL_PTR(ret))
99347+ kmemleak_alloc(ret, size, 1, gfp);
99348+ return ret;
99349+}
99350+
99351+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
99352 {
99353 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
99354 }
99355@@ -491,34 +515,112 @@ void kfree(const void *block)
99356 return;
99357 kmemleak_free(block);
99358
99359+ VM_BUG_ON(!virt_addr_valid(block));
99360 sp = virt_to_page(block);
99361- if (PageSlab(sp)) {
99362+ VM_BUG_ON(!PageSlab(sp));
99363+ if (!sp->private) {
99364 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99365- unsigned int *m = (unsigned int *)(block - align);
99366- slob_free(m, *m + align);
99367- } else
99368+ slob_t *m = (slob_t *)(block - align);
99369+ slob_free(NULL, m, m[0].units + align);
99370+ } else {
99371+ __ClearPageSlab(sp);
99372+ page_mapcount_reset(sp);
99373+ sp->private = 0;
99374 __free_pages(sp, compound_order(sp));
99375+ }
99376 }
99377 EXPORT_SYMBOL(kfree);
99378
99379+bool is_usercopy_object(const void *ptr)
99380+{
99381+ if (!slab_is_available())
99382+ return false;
99383+
99384+ // PAX: TODO
99385+
99386+ return false;
99387+}
99388+
99389+#ifdef CONFIG_PAX_USERCOPY
99390+const char *check_heap_object(const void *ptr, unsigned long n)
99391+{
99392+ struct page *page;
99393+ const slob_t *free;
99394+ const void *base;
99395+ unsigned long flags;
99396+
99397+ if (ZERO_OR_NULL_PTR(ptr))
99398+ return "<null>";
99399+
99400+ if (!virt_addr_valid(ptr))
99401+ return NULL;
99402+
99403+ page = virt_to_head_page(ptr);
99404+ if (!PageSlab(page))
99405+ return NULL;
99406+
99407+ if (page->private) {
99408+ base = page;
99409+ if (base <= ptr && n <= page->private - (ptr - base))
99410+ return NULL;
99411+ return "<slob>";
99412+ }
99413+
99414+ /* some tricky double walking to find the chunk */
99415+ spin_lock_irqsave(&slob_lock, flags);
99416+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99417+ free = page->freelist;
99418+
99419+ while (!slob_last(free) && (void *)free <= ptr) {
99420+ base = free + slob_units(free);
99421+ free = slob_next(free);
99422+ }
99423+
99424+ while (base < (void *)free) {
99425+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99426+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99427+ int offset;
99428+
99429+ if (ptr < base + align)
99430+ break;
99431+
99432+ offset = ptr - base - align;
99433+ if (offset >= m) {
99434+ base += size;
99435+ continue;
99436+ }
99437+
99438+ if (n > m - offset)
99439+ break;
99440+
99441+ spin_unlock_irqrestore(&slob_lock, flags);
99442+ return NULL;
99443+ }
99444+
99445+ spin_unlock_irqrestore(&slob_lock, flags);
99446+ return "<slob>";
99447+}
99448+#endif
99449+
99450 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99451 size_t ksize(const void *block)
99452 {
99453 struct page *sp;
99454 int align;
99455- unsigned int *m;
99456+ slob_t *m;
99457
99458 BUG_ON(!block);
99459 if (unlikely(block == ZERO_SIZE_PTR))
99460 return 0;
99461
99462 sp = virt_to_page(block);
99463- if (unlikely(!PageSlab(sp)))
99464- return PAGE_SIZE << compound_order(sp);
99465+ VM_BUG_ON(!PageSlab(sp));
99466+ if (sp->private)
99467+ return sp->private;
99468
99469 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99470- m = (unsigned int *)(block - align);
99471- return SLOB_UNITS(*m) * SLOB_UNIT;
99472+ m = (slob_t *)(block - align);
99473+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99474 }
99475 EXPORT_SYMBOL(ksize);
99476
99477@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99478
99479 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99480 {
99481- void *b;
99482+ void *b = NULL;
99483
99484 flags &= gfp_allowed_mask;
99485
99486 lockdep_trace_alloc(flags);
99487
99488+#ifdef CONFIG_PAX_USERCOPY_SLABS
99489+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99490+#else
99491 if (c->size < PAGE_SIZE) {
99492 b = slob_alloc(c->size, flags, c->align, node);
99493 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99494 SLOB_UNITS(c->size) * SLOB_UNIT,
99495 flags, node);
99496 } else {
99497- b = slob_new_pages(flags, get_order(c->size), node);
99498+ struct page *sp;
99499+
99500+ sp = slob_new_pages(flags, get_order(c->size), node);
99501+ if (sp) {
99502+ b = page_address(sp);
99503+ sp->private = c->size;
99504+ }
99505 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99506 PAGE_SIZE << get_order(c->size),
99507 flags, node);
99508 }
99509+#endif
99510
99511 if (b && c->ctor)
99512 c->ctor(b);
99513@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
99514 EXPORT_SYMBOL(kmem_cache_alloc);
99515
99516 #ifdef CONFIG_NUMA
99517-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99518+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
99519 {
99520 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
99521 }
99522@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99523 EXPORT_SYMBOL(kmem_cache_alloc_node);
99524 #endif
99525
99526-static void __kmem_cache_free(void *b, int size)
99527+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99528 {
99529- if (size < PAGE_SIZE)
99530- slob_free(b, size);
99531+ struct page *sp;
99532+
99533+ sp = virt_to_page(b);
99534+ BUG_ON(!PageSlab(sp));
99535+ if (!sp->private)
99536+ slob_free(c, b, size);
99537 else
99538- slob_free_pages(b, get_order(size));
99539+ slob_free_pages(sp, get_order(size));
99540 }
99541
99542 static void kmem_rcu_free(struct rcu_head *head)
99543@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99544 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99545 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99546
99547- __kmem_cache_free(b, slob_rcu->size);
99548+ __kmem_cache_free(NULL, b, slob_rcu->size);
99549 }
99550
99551 void kmem_cache_free(struct kmem_cache *c, void *b)
99552 {
99553+ int size = c->size;
99554+
99555+#ifdef CONFIG_PAX_USERCOPY_SLABS
99556+ if (size + c->align < PAGE_SIZE) {
99557+ size += c->align;
99558+ b -= c->align;
99559+ }
99560+#endif
99561+
99562 kmemleak_free_recursive(b, c->flags);
99563 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99564 struct slob_rcu *slob_rcu;
99565- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99566- slob_rcu->size = c->size;
99567+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99568+ slob_rcu->size = size;
99569 call_rcu(&slob_rcu->head, kmem_rcu_free);
99570 } else {
99571- __kmem_cache_free(b, c->size);
99572+ __kmem_cache_free(c, b, size);
99573 }
99574
99575+#ifdef CONFIG_PAX_USERCOPY_SLABS
99576+ trace_kfree(_RET_IP_, b);
99577+#else
99578 trace_kmem_cache_free(_RET_IP_, b);
99579+#endif
99580+
99581 }
99582 EXPORT_SYMBOL(kmem_cache_free);
99583
99584diff --git a/mm/slub.c b/mm/slub.c
99585index fe376fe..2f5757c 100644
99586--- a/mm/slub.c
99587+++ b/mm/slub.c
99588@@ -197,7 +197,7 @@ struct track {
99589
99590 enum track_item { TRACK_ALLOC, TRACK_FREE };
99591
99592-#ifdef CONFIG_SYSFS
99593+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99594 static int sysfs_slab_add(struct kmem_cache *);
99595 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99596 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99597@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
99598 if (!t->addr)
99599 return;
99600
99601- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99602+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99603 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99604 #ifdef CONFIG_STACKTRACE
99605 {
99606@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99607
99608 slab_free_hook(s, x);
99609
99610+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99611+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99612+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99613+ if (s->ctor)
99614+ s->ctor(x);
99615+ }
99616+#endif
99617+
99618 redo:
99619 /*
99620 * Determine the currently cpus per cpu slab.
99621@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99622 s->inuse = size;
99623
99624 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99625+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99626+ (!(flags & SLAB_NO_SANITIZE)) ||
99627+#endif
99628 s->ctor)) {
99629 /*
99630 * Relocate free pointer after the object if it is not
99631@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
99632
99633 __setup("slub_min_objects=", setup_slub_min_objects);
99634
99635-void *__kmalloc(size_t size, gfp_t flags)
99636+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
99637 {
99638 struct kmem_cache *s;
99639 void *ret;
99640@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
99641 return ptr;
99642 }
99643
99644-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99645+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99646 {
99647 struct kmem_cache *s;
99648 void *ret;
99649@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99650 EXPORT_SYMBOL(__kmalloc_node);
99651 #endif
99652
99653+bool is_usercopy_object(const void *ptr)
99654+{
99655+ struct page *page;
99656+ struct kmem_cache *s;
99657+
99658+ if (ZERO_OR_NULL_PTR(ptr))
99659+ return false;
99660+
99661+ if (!slab_is_available())
99662+ return false;
99663+
99664+ if (!virt_addr_valid(ptr))
99665+ return false;
99666+
99667+ page = virt_to_head_page(ptr);
99668+
99669+ if (!PageSlab(page))
99670+ return false;
99671+
99672+ s = page->slab_cache;
99673+ return s->flags & SLAB_USERCOPY;
99674+}
99675+
99676+#ifdef CONFIG_PAX_USERCOPY
99677+const char *check_heap_object(const void *ptr, unsigned long n)
99678+{
99679+ struct page *page;
99680+ struct kmem_cache *s;
99681+ unsigned long offset;
99682+
99683+ if (ZERO_OR_NULL_PTR(ptr))
99684+ return "<null>";
99685+
99686+ if (!virt_addr_valid(ptr))
99687+ return NULL;
99688+
99689+ page = virt_to_head_page(ptr);
99690+
99691+ if (!PageSlab(page))
99692+ return NULL;
99693+
99694+ s = page->slab_cache;
99695+ if (!(s->flags & SLAB_USERCOPY))
99696+ return s->name;
99697+
99698+ offset = (ptr - page_address(page)) % s->size;
99699+ if (offset <= s->object_size && n <= s->object_size - offset)
99700+ return NULL;
99701+
99702+ return s->name;
99703+}
99704+#endif
99705+
99706 size_t ksize(const void *object)
99707 {
99708 struct page *page;
99709@@ -3336,6 +3400,7 @@ void kfree(const void *x)
99710 if (unlikely(ZERO_OR_NULL_PTR(x)))
99711 return;
99712
99713+ VM_BUG_ON(!virt_addr_valid(x));
99714 page = virt_to_head_page(x);
99715 if (unlikely(!PageSlab(page))) {
99716 BUG_ON(!PageCompound(page));
99717@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99718 int i;
99719 struct kmem_cache *c;
99720
99721- s->refcount++;
99722+ atomic_inc(&s->refcount);
99723
99724 /*
99725 * Adjust the object sizes so that we clear
99726@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99727 }
99728
99729 if (sysfs_slab_alias(s, name)) {
99730- s->refcount--;
99731+ atomic_dec(&s->refcount);
99732 s = NULL;
99733 }
99734 }
99735@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
99736 }
99737 #endif
99738
99739-#ifdef CONFIG_SYSFS
99740+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99741 static int count_inuse(struct page *page)
99742 {
99743 return page->inuse;
99744@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
99745 len += sprintf(buf + len, "%7ld ", l->count);
99746
99747 if (l->addr)
99748+#ifdef CONFIG_GRKERNSEC_HIDESYM
99749+ len += sprintf(buf + len, "%pS", NULL);
99750+#else
99751 len += sprintf(buf + len, "%pS", (void *)l->addr);
99752+#endif
99753 else
99754 len += sprintf(buf + len, "<not-available>");
99755
99756@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
99757 validate_slab_cache(kmalloc_caches[9]);
99758 }
99759 #else
99760-#ifdef CONFIG_SYSFS
99761+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99762 static void resiliency_test(void) {};
99763 #endif
99764 #endif
99765
99766-#ifdef CONFIG_SYSFS
99767+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99768 enum slab_stat_type {
99769 SL_ALL, /* All slabs */
99770 SL_PARTIAL, /* Only partially allocated slabs */
99771@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
99772 {
99773 if (!s->ctor)
99774 return 0;
99775+#ifdef CONFIG_GRKERNSEC_HIDESYM
99776+ return sprintf(buf, "%pS\n", NULL);
99777+#else
99778 return sprintf(buf, "%pS\n", s->ctor);
99779+#endif
99780 }
99781 SLAB_ATTR_RO(ctor);
99782
99783 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99784 {
99785- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
99786+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
99787 }
99788 SLAB_ATTR_RO(aliases);
99789
99790@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
99791 SLAB_ATTR_RO(cache_dma);
99792 #endif
99793
99794+#ifdef CONFIG_PAX_USERCOPY_SLABS
99795+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
99796+{
99797+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
99798+}
99799+SLAB_ATTR_RO(usercopy);
99800+#endif
99801+
99802+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99803+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
99804+{
99805+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
99806+}
99807+SLAB_ATTR_RO(sanitize);
99808+#endif
99809+
99810 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
99811 {
99812 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
99813@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
99814 * as well as cause other issues like converting a mergeable
99815 * cache into an umergeable one.
99816 */
99817- if (s->refcount > 1)
99818+ if (atomic_read(&s->refcount) > 1)
99819 return -EINVAL;
99820
99821 s->flags &= ~SLAB_TRACE;
99822@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
99823 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
99824 size_t length)
99825 {
99826- if (s->refcount > 1)
99827+ if (atomic_read(&s->refcount) > 1)
99828 return -EINVAL;
99829
99830 s->flags &= ~SLAB_FAILSLAB;
99831@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
99832 #ifdef CONFIG_ZONE_DMA
99833 &cache_dma_attr.attr,
99834 #endif
99835+#ifdef CONFIG_PAX_USERCOPY_SLABS
99836+ &usercopy_attr.attr,
99837+#endif
99838+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99839+ &sanitize_attr.attr,
99840+#endif
99841 #ifdef CONFIG_NUMA
99842 &remote_node_defrag_ratio_attr.attr,
99843 #endif
99844@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
99845 return name;
99846 }
99847
99848+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99849 static int sysfs_slab_add(struct kmem_cache *s)
99850 {
99851 int err;
99852@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
99853 kobject_del(&s->kobj);
99854 kobject_put(&s->kobj);
99855 }
99856+#endif
99857
99858 /*
99859 * Need to buffer aliases during bootup until sysfs becomes
99860@@ -5161,6 +5258,7 @@ struct saved_alias {
99861
99862 static struct saved_alias *alias_list;
99863
99864+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99865 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99866 {
99867 struct saved_alias *al;
99868@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99869 alias_list = al;
99870 return 0;
99871 }
99872+#endif
99873
99874 static int __init slab_sysfs_init(void)
99875 {
99876diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
99877index 4cba9c2..b4f9fcc 100644
99878--- a/mm/sparse-vmemmap.c
99879+++ b/mm/sparse-vmemmap.c
99880@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
99881 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
99882 if (!p)
99883 return NULL;
99884- pud_populate(&init_mm, pud, p);
99885+ pud_populate_kernel(&init_mm, pud, p);
99886 }
99887 return pud;
99888 }
99889@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
99890 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
99891 if (!p)
99892 return NULL;
99893- pgd_populate(&init_mm, pgd, p);
99894+ pgd_populate_kernel(&init_mm, pgd, p);
99895 }
99896 return pgd;
99897 }
99898diff --git a/mm/sparse.c b/mm/sparse.c
99899index d1b48b6..6e8590e 100644
99900--- a/mm/sparse.c
99901+++ b/mm/sparse.c
99902@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
99903
99904 for (i = 0; i < PAGES_PER_SECTION; i++) {
99905 if (PageHWPoison(&memmap[i])) {
99906- atomic_long_sub(1, &num_poisoned_pages);
99907+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
99908 ClearPageHWPoison(&memmap[i]);
99909 }
99910 }
99911diff --git a/mm/swap.c b/mm/swap.c
99912index 8a12b33..7068e78 100644
99913--- a/mm/swap.c
99914+++ b/mm/swap.c
99915@@ -31,6 +31,7 @@
99916 #include <linux/memcontrol.h>
99917 #include <linux/gfp.h>
99918 #include <linux/uio.h>
99919+#include <linux/hugetlb.h>
99920
99921 #include "internal.h"
99922
99923@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
99924
99925 __page_cache_release(page);
99926 dtor = get_compound_page_dtor(page);
99927+ if (!PageHuge(page))
99928+ BUG_ON(dtor != free_compound_page);
99929 (*dtor)(page);
99930 }
99931
99932diff --git a/mm/swapfile.c b/mm/swapfile.c
99933index 63f55cc..31874e6 100644
99934--- a/mm/swapfile.c
99935+++ b/mm/swapfile.c
99936@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
99937
99938 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
99939 /* Activity counter to indicate that a swapon or swapoff has occurred */
99940-static atomic_t proc_poll_event = ATOMIC_INIT(0);
99941+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
99942
99943 static inline unsigned char swap_count(unsigned char ent)
99944 {
99945@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
99946 spin_unlock(&swap_lock);
99947
99948 err = 0;
99949- atomic_inc(&proc_poll_event);
99950+ atomic_inc_unchecked(&proc_poll_event);
99951 wake_up_interruptible(&proc_poll_wait);
99952
99953 out_dput:
99954@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
99955
99956 poll_wait(file, &proc_poll_wait, wait);
99957
99958- if (seq->poll_event != atomic_read(&proc_poll_event)) {
99959- seq->poll_event = atomic_read(&proc_poll_event);
99960+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
99961+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
99962 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
99963 }
99964
99965@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
99966 return ret;
99967
99968 seq = file->private_data;
99969- seq->poll_event = atomic_read(&proc_poll_event);
99970+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
99971 return 0;
99972 }
99973
99974@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
99975 (frontswap_map) ? "FS" : "");
99976
99977 mutex_unlock(&swapon_mutex);
99978- atomic_inc(&proc_poll_event);
99979+ atomic_inc_unchecked(&proc_poll_event);
99980 wake_up_interruptible(&proc_poll_wait);
99981
99982 if (S_ISREG(inode->i_mode))
99983diff --git a/mm/util.c b/mm/util.c
99984index fec39d4..3e60325 100644
99985--- a/mm/util.c
99986+++ b/mm/util.c
99987@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
99988 void arch_pick_mmap_layout(struct mm_struct *mm)
99989 {
99990 mm->mmap_base = TASK_UNMAPPED_BASE;
99991+
99992+#ifdef CONFIG_PAX_RANDMMAP
99993+ if (mm->pax_flags & MF_PAX_RANDMMAP)
99994+ mm->mmap_base += mm->delta_mmap;
99995+#endif
99996+
99997 mm->get_unmapped_area = arch_get_unmapped_area;
99998 }
99999 #endif
100000@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100001 if (!mm->arg_end)
100002 goto out_mm; /* Shh! No looking before we're done */
100003
100004+ if (gr_acl_handle_procpidmem(task))
100005+ goto out_mm;
100006+
100007 len = mm->arg_end - mm->arg_start;
100008
100009 if (len > buflen)
100010diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100011index 39c3388..7d976d4 100644
100012--- a/mm/vmalloc.c
100013+++ b/mm/vmalloc.c
100014@@ -39,20 +39,65 @@ struct vfree_deferred {
100015 struct work_struct wq;
100016 };
100017 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100018+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100019+
100020+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100021+struct stack_deferred_llist {
100022+ struct llist_head list;
100023+ void *stack;
100024+ void *lowmem_stack;
100025+};
100026+
100027+struct stack_deferred {
100028+ struct stack_deferred_llist list;
100029+ struct work_struct wq;
100030+};
100031+
100032+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100033+#endif
100034
100035 static void __vunmap(const void *, int);
100036
100037-static void free_work(struct work_struct *w)
100038+static void vfree_work(struct work_struct *w)
100039+{
100040+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100041+ struct llist_node *llnode = llist_del_all(&p->list);
100042+ while (llnode) {
100043+ void *x = llnode;
100044+ llnode = llist_next(llnode);
100045+ __vunmap(x, 1);
100046+ }
100047+}
100048+
100049+static void vunmap_work(struct work_struct *w)
100050 {
100051 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100052 struct llist_node *llnode = llist_del_all(&p->list);
100053 while (llnode) {
100054 void *p = llnode;
100055 llnode = llist_next(llnode);
100056- __vunmap(p, 1);
100057+ __vunmap(p, 0);
100058 }
100059 }
100060
100061+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100062+static void unmap_work(struct work_struct *w)
100063+{
100064+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100065+ struct llist_node *llnode = llist_del_all(&p->list.list);
100066+ while (llnode) {
100067+ struct stack_deferred_llist *x =
100068+ llist_entry((struct llist_head *)llnode,
100069+ struct stack_deferred_llist, list);
100070+ void *stack = ACCESS_ONCE(x->stack);
100071+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100072+ llnode = llist_next(llnode);
100073+ __vunmap(stack, 0);
100074+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100075+ }
100076+}
100077+#endif
100078+
100079 /*** Page table manipulation functions ***/
100080
100081 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100082@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100083
100084 pte = pte_offset_kernel(pmd, addr);
100085 do {
100086- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100087- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100088+
100089+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100090+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100091+ BUG_ON(!pte_exec(*pte));
100092+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100093+ continue;
100094+ }
100095+#endif
100096+
100097+ {
100098+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100099+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100100+ }
100101 } while (pte++, addr += PAGE_SIZE, addr != end);
100102 }
100103
100104@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100105 pte = pte_alloc_kernel(pmd, addr);
100106 if (!pte)
100107 return -ENOMEM;
100108+
100109+ pax_open_kernel();
100110 do {
100111 struct page *page = pages[*nr];
100112
100113- if (WARN_ON(!pte_none(*pte)))
100114+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100115+ if (pgprot_val(prot) & _PAGE_NX)
100116+#endif
100117+
100118+ if (!pte_none(*pte)) {
100119+ pax_close_kernel();
100120+ WARN_ON(1);
100121 return -EBUSY;
100122- if (WARN_ON(!page))
100123+ }
100124+ if (!page) {
100125+ pax_close_kernel();
100126+ WARN_ON(1);
100127 return -ENOMEM;
100128+ }
100129 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100130 (*nr)++;
100131 } while (pte++, addr += PAGE_SIZE, addr != end);
100132+ pax_close_kernel();
100133 return 0;
100134 }
100135
100136@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100137 pmd_t *pmd;
100138 unsigned long next;
100139
100140- pmd = pmd_alloc(&init_mm, pud, addr);
100141+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100142 if (!pmd)
100143 return -ENOMEM;
100144 do {
100145@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100146 pud_t *pud;
100147 unsigned long next;
100148
100149- pud = pud_alloc(&init_mm, pgd, addr);
100150+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100151 if (!pud)
100152 return -ENOMEM;
100153 do {
100154@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
100155 if (addr >= MODULES_VADDR && addr < MODULES_END)
100156 return 1;
100157 #endif
100158+
100159+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100160+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100161+ return 1;
100162+#endif
100163+
100164 return is_vmalloc_addr(x);
100165 }
100166
100167@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100168
100169 if (!pgd_none(*pgd)) {
100170 pud_t *pud = pud_offset(pgd, addr);
100171+#ifdef CONFIG_X86
100172+ if (!pud_large(*pud))
100173+#endif
100174 if (!pud_none(*pud)) {
100175 pmd_t *pmd = pmd_offset(pud, addr);
100176+#ifdef CONFIG_X86
100177+ if (!pmd_large(*pmd))
100178+#endif
100179 if (!pmd_none(*pmd)) {
100180 pte_t *ptep, pte;
100181
100182@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
100183 * Allocate a region of KVA of the specified size and alignment, within the
100184 * vstart and vend.
100185 */
100186-static struct vmap_area *alloc_vmap_area(unsigned long size,
100187+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
100188 unsigned long align,
100189 unsigned long vstart, unsigned long vend,
100190 int node, gfp_t gfp_mask)
100191@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
100192 for_each_possible_cpu(i) {
100193 struct vmap_block_queue *vbq;
100194 struct vfree_deferred *p;
100195+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100196+ struct stack_deferred *p2;
100197+#endif
100198
100199 vbq = &per_cpu(vmap_block_queue, i);
100200 spin_lock_init(&vbq->lock);
100201 INIT_LIST_HEAD(&vbq->free);
100202+
100203 p = &per_cpu(vfree_deferred, i);
100204 init_llist_head(&p->list);
100205- INIT_WORK(&p->wq, free_work);
100206+ INIT_WORK(&p->wq, vfree_work);
100207+
100208+ p = &per_cpu(vunmap_deferred, i);
100209+ init_llist_head(&p->list);
100210+ INIT_WORK(&p->wq, vunmap_work);
100211+
100212+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100213+ p2 = &per_cpu(stack_deferred, i);
100214+ init_llist_head(&p2->list.list);
100215+ INIT_WORK(&p2->wq, unmap_work);
100216+#endif
100217 }
100218
100219 /* Import existing vmlist entries. */
100220@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100221 struct vm_struct *area;
100222
100223 BUG_ON(in_interrupt());
100224+
100225+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100226+ if (flags & VM_KERNEXEC) {
100227+ if (start != VMALLOC_START || end != VMALLOC_END)
100228+ return NULL;
100229+ start = (unsigned long)MODULES_EXEC_VADDR;
100230+ end = (unsigned long)MODULES_EXEC_END;
100231+ }
100232+#endif
100233+
100234 if (flags & VM_IOREMAP)
100235 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100236
100237@@ -1511,13 +1616,37 @@ EXPORT_SYMBOL(vfree);
100238 */
100239 void vunmap(const void *addr)
100240 {
100241- BUG_ON(in_interrupt());
100242- might_sleep();
100243- if (addr)
100244+ if (!addr)
100245+ return;
100246+
100247+ if (unlikely(in_interrupt())) {
100248+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
100249+ if (llist_add((struct llist_node *)addr, &p->list))
100250+ schedule_work(&p->wq);
100251+ } else {
100252+ might_sleep();
100253 __vunmap(addr, 0);
100254+ }
100255 }
100256 EXPORT_SYMBOL(vunmap);
100257
100258+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100259+void unmap_process_stacks(struct task_struct *task)
100260+{
100261+ if (unlikely(in_interrupt())) {
100262+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
100263+ struct stack_deferred_llist *list = task->stack;
100264+ list->stack = task->stack;
100265+ list->lowmem_stack = task->lowmem_stack;
100266+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100267+ schedule_work(&p->wq);
100268+ } else {
100269+ __vunmap(task->stack, 0);
100270+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100271+ }
100272+}
100273+#endif
100274+
100275 /**
100276 * vmap - map an array of pages into virtually contiguous space
100277 * @pages: array of page pointers
100278@@ -1538,6 +1667,11 @@ void *vmap(struct page **pages, unsigned int count,
100279 if (count > totalram_pages)
100280 return NULL;
100281
100282+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100283+ if (!(pgprot_val(prot) & _PAGE_NX))
100284+ flags |= VM_KERNEXEC;
100285+#endif
100286+
100287 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100288 __builtin_return_address(0));
100289 if (!area)
100290@@ -1640,6 +1774,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100291 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100292 goto fail;
100293
100294+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100295+ if (!(pgprot_val(prot) & _PAGE_NX))
100296+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100297+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100298+ else
100299+#endif
100300+
100301 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100302 start, end, node, gfp_mask, caller);
100303 if (!area)
100304@@ -1816,10 +1957,9 @@ EXPORT_SYMBOL(vzalloc_node);
100305 * For tight control over page level allocator and protection flags
100306 * use __vmalloc() instead.
100307 */
100308-
100309 void *vmalloc_exec(unsigned long size)
100310 {
100311- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100312+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100313 NUMA_NO_NODE, __builtin_return_address(0));
100314 }
100315
100316@@ -2126,6 +2266,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100317 {
100318 struct vm_struct *area;
100319
100320+ BUG_ON(vma->vm_mirror);
100321+
100322 size = PAGE_ALIGN(size);
100323
100324 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100325@@ -2608,7 +2750,11 @@ static int s_show(struct seq_file *m, void *p)
100326 v->addr, v->addr + v->size, v->size);
100327
100328 if (v->caller)
100329+#ifdef CONFIG_GRKERNSEC_HIDESYM
100330+ seq_printf(m, " %pK", v->caller);
100331+#else
100332 seq_printf(m, " %pS", v->caller);
100333+#endif
100334
100335 if (v->nr_pages)
100336 seq_printf(m, " pages=%d", v->nr_pages);
100337diff --git a/mm/vmstat.c b/mm/vmstat.c
100338index cdac773..7dd324e 100644
100339--- a/mm/vmstat.c
100340+++ b/mm/vmstat.c
100341@@ -24,6 +24,7 @@
100342 #include <linux/mm_inline.h>
100343 #include <linux/page_ext.h>
100344 #include <linux/page_owner.h>
100345+#include <linux/grsecurity.h>
100346
100347 #include "internal.h"
100348
100349@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
100350 *
100351 * vm_stat contains the global counters
100352 */
100353-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100354+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100355 EXPORT_SYMBOL(vm_stat);
100356
100357 #ifdef CONFIG_SMP
100358@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
100359
100360 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100361 if (diff[i]) {
100362- atomic_long_add(diff[i], &vm_stat[i]);
100363+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100364 changes++;
100365 }
100366 return changes;
100367@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
100368 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100369 if (v) {
100370
100371- atomic_long_add(v, &zone->vm_stat[i]);
100372+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100373 global_diff[i] += v;
100374 #ifdef CONFIG_NUMA
100375 /* 3 seconds idle till flush */
100376@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
100377
100378 v = p->vm_stat_diff[i];
100379 p->vm_stat_diff[i] = 0;
100380- atomic_long_add(v, &zone->vm_stat[i]);
100381+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100382 global_diff[i] += v;
100383 }
100384 }
100385@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100386 if (pset->vm_stat_diff[i]) {
100387 int v = pset->vm_stat_diff[i];
100388 pset->vm_stat_diff[i] = 0;
100389- atomic_long_add(v, &zone->vm_stat[i]);
100390- atomic_long_add(v, &vm_stat[i]);
100391+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100392+ atomic_long_add_unchecked(v, &vm_stat[i]);
100393 }
100394 }
100395 #endif
100396@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100397 stat_items_size += sizeof(struct vm_event_state);
100398 #endif
100399
100400- v = kmalloc(stat_items_size, GFP_KERNEL);
100401+ v = kzalloc(stat_items_size, GFP_KERNEL);
100402 m->private = v;
100403 if (!v)
100404 return ERR_PTR(-ENOMEM);
100405+
100406+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100407+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100408+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100409+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100410+ && !in_group_p(grsec_proc_gid)
100411+#endif
100412+ )
100413+ return (unsigned long *)m->private + *pos;
100414+#endif
100415+#endif
100416+
100417 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100418 v[i] = global_page_state(i);
100419 v += NR_VM_ZONE_STAT_ITEMS;
100420@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
100421 cpu_notifier_register_done();
100422 #endif
100423 #ifdef CONFIG_PROC_FS
100424- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100425- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100426- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100427- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100428+ {
100429+ mode_t gr_mode = S_IRUGO;
100430+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100431+ gr_mode = S_IRUSR;
100432+#endif
100433+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100434+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100435+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100436+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100437+ }
100438 #endif
100439 return 0;
100440 }
100441diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100442index 64c6bed..b79a5de 100644
100443--- a/net/8021q/vlan.c
100444+++ b/net/8021q/vlan.c
100445@@ -481,7 +481,7 @@ out:
100446 return NOTIFY_DONE;
100447 }
100448
100449-static struct notifier_block vlan_notifier_block __read_mostly = {
100450+static struct notifier_block vlan_notifier_block = {
100451 .notifier_call = vlan_device_event,
100452 };
100453
100454@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100455 err = -EPERM;
100456 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100457 break;
100458- if ((args.u.name_type >= 0) &&
100459- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100460+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100461 struct vlan_net *vn;
100462
100463 vn = net_generic(net, vlan_net_id);
100464diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
100465index 8ac8a5c..991defc 100644
100466--- a/net/8021q/vlan_netlink.c
100467+++ b/net/8021q/vlan_netlink.c
100468@@ -238,7 +238,7 @@ nla_put_failure:
100469 return -EMSGSIZE;
100470 }
100471
100472-struct rtnl_link_ops vlan_link_ops __read_mostly = {
100473+struct rtnl_link_ops vlan_link_ops = {
100474 .kind = "vlan",
100475 .maxtype = IFLA_VLAN_MAX,
100476 .policy = vlan_policy,
100477diff --git a/net/9p/client.c b/net/9p/client.c
100478index e86a9bea..e91f70e 100644
100479--- a/net/9p/client.c
100480+++ b/net/9p/client.c
100481@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100482 len - inline_len);
100483 } else {
100484 err = copy_from_user(ename + inline_len,
100485- uidata, len - inline_len);
100486+ (char __force_user *)uidata, len - inline_len);
100487 if (err) {
100488 err = -EFAULT;
100489 goto out_err;
100490@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100491 kernel_buf = 1;
100492 indata = data;
100493 } else
100494- indata = (__force char *)udata;
100495+ indata = (__force_kernel char *)udata;
100496 /*
100497 * response header len is 11
100498 * PDU Header(7) + IO Size (4)
100499@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100500 kernel_buf = 1;
100501 odata = data;
100502 } else
100503- odata = (char *)udata;
100504+ odata = (char __force_kernel *)udata;
100505 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100506 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100507 fid->fid, offset, rsize);
100508diff --git a/net/9p/mod.c b/net/9p/mod.c
100509index 6ab36ae..6f1841b 100644
100510--- a/net/9p/mod.c
100511+++ b/net/9p/mod.c
100512@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100513 void v9fs_register_trans(struct p9_trans_module *m)
100514 {
100515 spin_lock(&v9fs_trans_lock);
100516- list_add_tail(&m->list, &v9fs_trans_list);
100517+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100518 spin_unlock(&v9fs_trans_lock);
100519 }
100520 EXPORT_SYMBOL(v9fs_register_trans);
100521@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100522 void v9fs_unregister_trans(struct p9_trans_module *m)
100523 {
100524 spin_lock(&v9fs_trans_lock);
100525- list_del_init(&m->list);
100526+ pax_list_del_init((struct list_head *)&m->list);
100527 spin_unlock(&v9fs_trans_lock);
100528 }
100529 EXPORT_SYMBOL(v9fs_unregister_trans);
100530diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100531index 80d08f6..de63fd1 100644
100532--- a/net/9p/trans_fd.c
100533+++ b/net/9p/trans_fd.c
100534@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100535 oldfs = get_fs();
100536 set_fs(get_ds());
100537 /* The cast to a user pointer is valid due to the set_fs() */
100538- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100539+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100540 set_fs(oldfs);
100541
100542 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100543diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100544index af46bc4..f9adfcd 100644
100545--- a/net/appletalk/atalk_proc.c
100546+++ b/net/appletalk/atalk_proc.c
100547@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100548 struct proc_dir_entry *p;
100549 int rc = -ENOMEM;
100550
100551- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100552+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100553 if (!atalk_proc_dir)
100554 goto out;
100555
100556diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100557index 876fbe8..8bbea9f 100644
100558--- a/net/atm/atm_misc.c
100559+++ b/net/atm/atm_misc.c
100560@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100561 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100562 return 1;
100563 atm_return(vcc, truesize);
100564- atomic_inc(&vcc->stats->rx_drop);
100565+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100566 return 0;
100567 }
100568 EXPORT_SYMBOL(atm_charge);
100569@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100570 }
100571 }
100572 atm_return(vcc, guess);
100573- atomic_inc(&vcc->stats->rx_drop);
100574+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100575 return NULL;
100576 }
100577 EXPORT_SYMBOL(atm_alloc_charge);
100578@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100579
100580 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100581 {
100582-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100583+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100584 __SONET_ITEMS
100585 #undef __HANDLE_ITEM
100586 }
100587@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100588
100589 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100590 {
100591-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100592+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
100593 __SONET_ITEMS
100594 #undef __HANDLE_ITEM
100595 }
100596diff --git a/net/atm/lec.c b/net/atm/lec.c
100597index 4b98f89..5a2f6cb 100644
100598--- a/net/atm/lec.c
100599+++ b/net/atm/lec.c
100600@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100601 }
100602
100603 static struct lane2_ops lane2_ops = {
100604- lane2_resolve, /* resolve, spec 3.1.3 */
100605- lane2_associate_req, /* associate_req, spec 3.1.4 */
100606- NULL /* associate indicator, spec 3.1.5 */
100607+ .resolve = lane2_resolve,
100608+ .associate_req = lane2_associate_req,
100609+ .associate_indicator = NULL
100610 };
100611
100612 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100613diff --git a/net/atm/lec.h b/net/atm/lec.h
100614index 4149db1..f2ab682 100644
100615--- a/net/atm/lec.h
100616+++ b/net/atm/lec.h
100617@@ -48,7 +48,7 @@ struct lane2_ops {
100618 const u8 *tlvs, u32 sizeoftlvs);
100619 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100620 const u8 *tlvs, u32 sizeoftlvs);
100621-};
100622+} __no_const;
100623
100624 /*
100625 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
100626diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100627index d1b2d9a..d549f7f 100644
100628--- a/net/atm/mpoa_caches.c
100629+++ b/net/atm/mpoa_caches.c
100630@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100631
100632
100633 static struct in_cache_ops ingress_ops = {
100634- in_cache_add_entry, /* add_entry */
100635- in_cache_get, /* get */
100636- in_cache_get_with_mask, /* get_with_mask */
100637- in_cache_get_by_vcc, /* get_by_vcc */
100638- in_cache_put, /* put */
100639- in_cache_remove_entry, /* remove_entry */
100640- cache_hit, /* cache_hit */
100641- clear_count_and_expired, /* clear_count */
100642- check_resolving_entries, /* check_resolving */
100643- refresh_entries, /* refresh */
100644- in_destroy_cache /* destroy_cache */
100645+ .add_entry = in_cache_add_entry,
100646+ .get = in_cache_get,
100647+ .get_with_mask = in_cache_get_with_mask,
100648+ .get_by_vcc = in_cache_get_by_vcc,
100649+ .put = in_cache_put,
100650+ .remove_entry = in_cache_remove_entry,
100651+ .cache_hit = cache_hit,
100652+ .clear_count = clear_count_and_expired,
100653+ .check_resolving = check_resolving_entries,
100654+ .refresh = refresh_entries,
100655+ .destroy_cache = in_destroy_cache
100656 };
100657
100658 static struct eg_cache_ops egress_ops = {
100659- eg_cache_add_entry, /* add_entry */
100660- eg_cache_get_by_cache_id, /* get_by_cache_id */
100661- eg_cache_get_by_tag, /* get_by_tag */
100662- eg_cache_get_by_vcc, /* get_by_vcc */
100663- eg_cache_get_by_src_ip, /* get_by_src_ip */
100664- eg_cache_put, /* put */
100665- eg_cache_remove_entry, /* remove_entry */
100666- update_eg_cache_entry, /* update */
100667- clear_expired, /* clear_expired */
100668- eg_destroy_cache /* destroy_cache */
100669+ .add_entry = eg_cache_add_entry,
100670+ .get_by_cache_id = eg_cache_get_by_cache_id,
100671+ .get_by_tag = eg_cache_get_by_tag,
100672+ .get_by_vcc = eg_cache_get_by_vcc,
100673+ .get_by_src_ip = eg_cache_get_by_src_ip,
100674+ .put = eg_cache_put,
100675+ .remove_entry = eg_cache_remove_entry,
100676+ .update = update_eg_cache_entry,
100677+ .clear_expired = clear_expired,
100678+ .destroy_cache = eg_destroy_cache
100679 };
100680
100681
100682diff --git a/net/atm/proc.c b/net/atm/proc.c
100683index bbb6461..cf04016 100644
100684--- a/net/atm/proc.c
100685+++ b/net/atm/proc.c
100686@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100687 const struct k_atm_aal_stats *stats)
100688 {
100689 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100690- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100691- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100692- atomic_read(&stats->rx_drop));
100693+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
100694+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
100695+ atomic_read_unchecked(&stats->rx_drop));
100696 }
100697
100698 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100699diff --git a/net/atm/resources.c b/net/atm/resources.c
100700index 0447d5d..3cf4728 100644
100701--- a/net/atm/resources.c
100702+++ b/net/atm/resources.c
100703@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100704 static void copy_aal_stats(struct k_atm_aal_stats *from,
100705 struct atm_aal_stats *to)
100706 {
100707-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100708+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100709 __AAL_STAT_ITEMS
100710 #undef __HANDLE_ITEM
100711 }
100712@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100713 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100714 struct atm_aal_stats *to)
100715 {
100716-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100717+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100718 __AAL_STAT_ITEMS
100719 #undef __HANDLE_ITEM
100720 }
100721diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100722index 919a5ce..cc6b444 100644
100723--- a/net/ax25/sysctl_net_ax25.c
100724+++ b/net/ax25/sysctl_net_ax25.c
100725@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100726 {
100727 char path[sizeof("net/ax25/") + IFNAMSIZ];
100728 int k;
100729- struct ctl_table *table;
100730+ ctl_table_no_const *table;
100731
100732 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
100733 if (!table)
100734diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
100735index 1e80539..676c37a 100644
100736--- a/net/batman-adv/bat_iv_ogm.c
100737+++ b/net/batman-adv/bat_iv_ogm.c
100738@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
100739
100740 /* randomize initial seqno to avoid collision */
100741 get_random_bytes(&random_seqno, sizeof(random_seqno));
100742- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100743+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100744
100745 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
100746 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
100747@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
100748 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
100749
100750 /* change sequence number to network order */
100751- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
100752+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
100753 batadv_ogm_packet->seqno = htonl(seqno);
100754- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
100755+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
100756
100757 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
100758
100759@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
100760 return;
100761
100762 /* could be changed by schedule_own_packet() */
100763- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
100764+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
100765
100766 if (ogm_packet->flags & BATADV_DIRECTLINK)
100767 has_directlink_flag = true;
100768diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
100769index 00f9e14..e1c7203 100644
100770--- a/net/batman-adv/fragmentation.c
100771+++ b/net/batman-adv/fragmentation.c
100772@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
100773 frag_header.packet_type = BATADV_UNICAST_FRAG;
100774 frag_header.version = BATADV_COMPAT_VERSION;
100775 frag_header.ttl = BATADV_TTL;
100776- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
100777+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
100778 frag_header.reserved = 0;
100779 frag_header.no = 0;
100780 frag_header.total_size = htons(skb->len);
100781diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
100782index 5467955..75ad4e3 100644
100783--- a/net/batman-adv/soft-interface.c
100784+++ b/net/batman-adv/soft-interface.c
100785@@ -296,7 +296,7 @@ send:
100786 primary_if->net_dev->dev_addr);
100787
100788 /* set broadcast sequence number */
100789- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
100790+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
100791 bcast_packet->seqno = htonl(seqno);
100792
100793 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
100794@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100795 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
100796
100797 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
100798- atomic_set(&bat_priv->bcast_seqno, 1);
100799+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
100800 atomic_set(&bat_priv->tt.vn, 0);
100801 atomic_set(&bat_priv->tt.local_changes, 0);
100802 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
100803@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100804
100805 /* randomize initial seqno to avoid collision */
100806 get_random_bytes(&random_seqno, sizeof(random_seqno));
100807- atomic_set(&bat_priv->frag_seqno, random_seqno);
100808+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
100809
100810 bat_priv->primary_if = NULL;
100811 bat_priv->num_ifaces = 0;
100812@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
100813 return 0;
100814 }
100815
100816-struct rtnl_link_ops batadv_link_ops __read_mostly = {
100817+struct rtnl_link_ops batadv_link_ops = {
100818 .kind = "batadv",
100819 .priv_size = sizeof(struct batadv_priv),
100820 .setup = batadv_softif_init_early,
100821diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
100822index 8854c05..ee5d5497 100644
100823--- a/net/batman-adv/types.h
100824+++ b/net/batman-adv/types.h
100825@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
100826 struct batadv_hard_iface_bat_iv {
100827 unsigned char *ogm_buff;
100828 int ogm_buff_len;
100829- atomic_t ogm_seqno;
100830+ atomic_unchecked_t ogm_seqno;
100831 };
100832
100833 /**
100834@@ -768,7 +768,7 @@ struct batadv_priv {
100835 atomic_t bonding;
100836 atomic_t fragmentation;
100837 atomic_t packet_size_max;
100838- atomic_t frag_seqno;
100839+ atomic_unchecked_t frag_seqno;
100840 #ifdef CONFIG_BATMAN_ADV_BLA
100841 atomic_t bridge_loop_avoidance;
100842 #endif
100843@@ -787,7 +787,7 @@ struct batadv_priv {
100844 #endif
100845 uint32_t isolation_mark;
100846 uint32_t isolation_mark_mask;
100847- atomic_t bcast_seqno;
100848+ atomic_unchecked_t bcast_seqno;
100849 atomic_t bcast_queue_left;
100850 atomic_t batman_queue_left;
100851 char num_ifaces;
100852diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
100853index 2c245fd..dccf543 100644
100854--- a/net/bluetooth/hci_sock.c
100855+++ b/net/bluetooth/hci_sock.c
100856@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
100857 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
100858 }
100859
100860- len = min_t(unsigned int, len, sizeof(uf));
100861+ len = min((size_t)len, sizeof(uf));
100862 if (copy_from_user(&uf, optval, len)) {
100863 err = -EFAULT;
100864 break;
100865diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
100866index d04dc00..d25d576 100644
100867--- a/net/bluetooth/l2cap_core.c
100868+++ b/net/bluetooth/l2cap_core.c
100869@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
100870 break;
100871
100872 case L2CAP_CONF_RFC:
100873- if (olen == sizeof(rfc))
100874- memcpy(&rfc, (void *)val, olen);
100875+ if (olen != sizeof(rfc))
100876+ break;
100877+
100878+ memcpy(&rfc, (void *)val, olen);
100879
100880 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
100881 rfc.mode != chan->mode)
100882diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
100883index f65caf4..c07110c 100644
100884--- a/net/bluetooth/l2cap_sock.c
100885+++ b/net/bluetooth/l2cap_sock.c
100886@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100887 struct sock *sk = sock->sk;
100888 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
100889 struct l2cap_options opts;
100890- int len, err = 0;
100891+ int err = 0;
100892+ size_t len = optlen;
100893 u32 opt;
100894
100895 BT_DBG("sk %p", sk);
100896@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100897 opts.max_tx = chan->max_tx;
100898 opts.txwin_size = chan->tx_win;
100899
100900- len = min_t(unsigned int, sizeof(opts), optlen);
100901+ len = min(sizeof(opts), len);
100902 if (copy_from_user((char *) &opts, optval, len)) {
100903 err = -EFAULT;
100904 break;
100905@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100906 struct bt_security sec;
100907 struct bt_power pwr;
100908 struct l2cap_conn *conn;
100909- int len, err = 0;
100910+ int err = 0;
100911+ size_t len = optlen;
100912 u32 opt;
100913
100914 BT_DBG("sk %p", sk);
100915@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100916
100917 sec.level = BT_SECURITY_LOW;
100918
100919- len = min_t(unsigned int, sizeof(sec), optlen);
100920+ len = min(sizeof(sec), len);
100921 if (copy_from_user((char *) &sec, optval, len)) {
100922 err = -EFAULT;
100923 break;
100924@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100925
100926 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
100927
100928- len = min_t(unsigned int, sizeof(pwr), optlen);
100929+ len = min(sizeof(pwr), len);
100930 if (copy_from_user((char *) &pwr, optval, len)) {
100931 err = -EFAULT;
100932 break;
100933diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
100934index 2348176..b9b6cf2 100644
100935--- a/net/bluetooth/rfcomm/sock.c
100936+++ b/net/bluetooth/rfcomm/sock.c
100937@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
100938 struct sock *sk = sock->sk;
100939 struct bt_security sec;
100940 int err = 0;
100941- size_t len;
100942+ size_t len = optlen;
100943 u32 opt;
100944
100945 BT_DBG("sk %p", sk);
100946@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
100947
100948 sec.level = BT_SECURITY_LOW;
100949
100950- len = min_t(unsigned int, sizeof(sec), optlen);
100951+ len = min(sizeof(sec), len);
100952 if (copy_from_user((char *) &sec, optval, len)) {
100953 err = -EFAULT;
100954 break;
100955diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
100956index 8e385a0..a5bdd8e 100644
100957--- a/net/bluetooth/rfcomm/tty.c
100958+++ b/net/bluetooth/rfcomm/tty.c
100959@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
100960 BT_DBG("tty %p id %d", tty, tty->index);
100961
100962 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
100963- dev->channel, dev->port.count);
100964+ dev->channel, atomic_read(&dev->port.count));
100965
100966 err = tty_port_open(&dev->port, tty, filp);
100967 if (err)
100968@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
100969 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
100970
100971 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
100972- dev->port.count);
100973+ atomic_read(&dev->port.count));
100974
100975 tty_port_close(&dev->port, tty, filp);
100976 }
100977diff --git a/net/bridge/br.c b/net/bridge/br.c
100978index 44425af..4ee730e 100644
100979--- a/net/bridge/br.c
100980+++ b/net/bridge/br.c
100981@@ -147,6 +147,8 @@ static int __init br_init(void)
100982 {
100983 int err;
100984
100985+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
100986+
100987 err = stp_proto_register(&br_stp_proto);
100988 if (err < 0) {
100989 pr_err("bridge: can't register sap for STP\n");
100990diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
100991index 9f5eb55..45ab9c5 100644
100992--- a/net/bridge/br_netlink.c
100993+++ b/net/bridge/br_netlink.c
100994@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
100995 .get_link_af_size = br_get_link_af_size,
100996 };
100997
100998-struct rtnl_link_ops br_link_ops __read_mostly = {
100999+struct rtnl_link_ops br_link_ops = {
101000 .kind = "bridge",
101001 .priv_size = sizeof(struct net_bridge),
101002 .setup = br_dev_setup,
101003diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101004index d9a8c05..8dadc6c6 100644
101005--- a/net/bridge/netfilter/ebtables.c
101006+++ b/net/bridge/netfilter/ebtables.c
101007@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101008 tmp.valid_hooks = t->table->valid_hooks;
101009 }
101010 mutex_unlock(&ebt_mutex);
101011- if (copy_to_user(user, &tmp, *len) != 0) {
101012+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101013 BUGPRINT("c2u Didn't work\n");
101014 ret = -EFAULT;
101015 break;
101016@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101017 goto out;
101018 tmp.valid_hooks = t->valid_hooks;
101019
101020- if (copy_to_user(user, &tmp, *len) != 0) {
101021+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101022 ret = -EFAULT;
101023 break;
101024 }
101025@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101026 tmp.entries_size = t->table->entries_size;
101027 tmp.valid_hooks = t->table->valid_hooks;
101028
101029- if (copy_to_user(user, &tmp, *len) != 0) {
101030+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101031 ret = -EFAULT;
101032 break;
101033 }
101034diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101035index f5afda1..dcf770a 100644
101036--- a/net/caif/cfctrl.c
101037+++ b/net/caif/cfctrl.c
101038@@ -10,6 +10,7 @@
101039 #include <linux/spinlock.h>
101040 #include <linux/slab.h>
101041 #include <linux/pkt_sched.h>
101042+#include <linux/sched.h>
101043 #include <net/caif/caif_layer.h>
101044 #include <net/caif/cfpkt.h>
101045 #include <net/caif/cfctrl.h>
101046@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101047 memset(&dev_info, 0, sizeof(dev_info));
101048 dev_info.id = 0xff;
101049 cfsrvl_init(&this->serv, 0, &dev_info, false);
101050- atomic_set(&this->req_seq_no, 1);
101051- atomic_set(&this->rsp_seq_no, 1);
101052+ atomic_set_unchecked(&this->req_seq_no, 1);
101053+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101054 this->serv.layer.receive = cfctrl_recv;
101055 sprintf(this->serv.layer.name, "ctrl");
101056 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101057@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101058 struct cfctrl_request_info *req)
101059 {
101060 spin_lock_bh(&ctrl->info_list_lock);
101061- atomic_inc(&ctrl->req_seq_no);
101062- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101063+ atomic_inc_unchecked(&ctrl->req_seq_no);
101064+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101065 list_add_tail(&req->list, &ctrl->list);
101066 spin_unlock_bh(&ctrl->info_list_lock);
101067 }
101068@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101069 if (p != first)
101070 pr_warn("Requests are not received in order\n");
101071
101072- atomic_set(&ctrl->rsp_seq_no,
101073+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101074 p->sequence_no);
101075 list_del(&p->list);
101076 goto out;
101077diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101078index 67a4a36..8d28068 100644
101079--- a/net/caif/chnl_net.c
101080+++ b/net/caif/chnl_net.c
101081@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101082 };
101083
101084
101085-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101086+static struct rtnl_link_ops ipcaif_link_ops = {
101087 .kind = "caif",
101088 .priv_size = sizeof(struct chnl_net),
101089 .setup = ipcaif_net_setup,
101090diff --git a/net/can/af_can.c b/net/can/af_can.c
101091index 66e0804..93bcf05 100644
101092--- a/net/can/af_can.c
101093+++ b/net/can/af_can.c
101094@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop)
101095 goto inval_skb;
101096 }
101097
101098+ skb->ip_summed = CHECKSUM_UNNECESSARY;
101099+
101100+ skb_reset_mac_header(skb);
101101 skb_reset_network_header(skb);
101102 skb_reset_transport_header(skb);
101103
101104@@ -881,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101105 };
101106
101107 /* notifier block for netdevice event */
101108-static struct notifier_block can_netdev_notifier __read_mostly = {
101109+static struct notifier_block can_netdev_notifier = {
101110 .notifier_call = can_notifier,
101111 };
101112
101113diff --git a/net/can/bcm.c b/net/can/bcm.c
101114index ee9ffd9..dfdf3d4 100644
101115--- a/net/can/bcm.c
101116+++ b/net/can/bcm.c
101117@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101118 }
101119
101120 /* create /proc/net/can-bcm directory */
101121- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101122+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101123 return 0;
101124 }
101125
101126diff --git a/net/can/gw.c b/net/can/gw.c
101127index 295f62e..0c3b09e 100644
101128--- a/net/can/gw.c
101129+++ b/net/can/gw.c
101130@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101131 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101132
101133 static HLIST_HEAD(cgw_list);
101134-static struct notifier_block notifier;
101135
101136 static struct kmem_cache *cgw_cache __read_mostly;
101137
101138@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101139 return err;
101140 }
101141
101142+static struct notifier_block notifier = {
101143+ .notifier_call = cgw_notifier
101144+};
101145+
101146 static __init int cgw_module_init(void)
101147 {
101148 /* sanitize given module parameter */
101149@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101150 return -ENOMEM;
101151
101152 /* set notifier */
101153- notifier.notifier_call = cgw_notifier;
101154 register_netdevice_notifier(&notifier);
101155
101156 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
101157diff --git a/net/can/proc.c b/net/can/proc.c
101158index 1a19b98..df2b4ec 100644
101159--- a/net/can/proc.c
101160+++ b/net/can/proc.c
101161@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101162 void can_init_proc(void)
101163 {
101164 /* create /proc/net/can directory */
101165- can_dir = proc_mkdir("can", init_net.proc_net);
101166+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101167
101168 if (!can_dir) {
101169 printk(KERN_INFO "can: failed to create /proc/net/can . "
101170diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101171index 33a2f20..371bd09 100644
101172--- a/net/ceph/messenger.c
101173+++ b/net/ceph/messenger.c
101174@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101175 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101176
101177 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101178-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101179+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101180
101181 static struct page *zero_page; /* used in certain error cases */
101182
101183@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101184 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101185 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101186
101187- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101188+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101189 s = addr_str[i];
101190
101191 switch (ss->ss_family) {
101192diff --git a/net/compat.c b/net/compat.c
101193index 94d3d5e..2bd2649 100644
101194--- a/net/compat.c
101195+++ b/net/compat.c
101196@@ -93,20 +93,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
101197
101198 #define CMSG_COMPAT_FIRSTHDR(msg) \
101199 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101200- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101201+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101202 (struct compat_cmsghdr __user *)NULL)
101203
101204 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101205 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101206 (ucmlen) <= (unsigned long) \
101207 ((mhdr)->msg_controllen - \
101208- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101209+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101210
101211 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101212 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101213 {
101214 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101215- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101216+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101217 msg->msg_controllen)
101218 return NULL;
101219 return (struct compat_cmsghdr __user *)ptr;
101220@@ -196,7 +196,7 @@ Efault:
101221
101222 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101223 {
101224- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101225+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101226 struct compat_cmsghdr cmhdr;
101227 struct compat_timeval ctv;
101228 struct compat_timespec cts[3];
101229@@ -252,7 +252,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101230
101231 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101232 {
101233- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101234+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101235 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101236 int fdnum = scm->fp->count;
101237 struct file **fp = scm->fp->fp;
101238@@ -340,7 +340,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101239 return -EFAULT;
101240 old_fs = get_fs();
101241 set_fs(KERNEL_DS);
101242- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101243+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101244 set_fs(old_fs);
101245
101246 return err;
101247@@ -401,7 +401,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101248 len = sizeof(ktime);
101249 old_fs = get_fs();
101250 set_fs(KERNEL_DS);
101251- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101252+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101253 set_fs(old_fs);
101254
101255 if (!err) {
101256@@ -544,7 +544,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101257 case MCAST_JOIN_GROUP:
101258 case MCAST_LEAVE_GROUP:
101259 {
101260- struct compat_group_req __user *gr32 = (void *)optval;
101261+ struct compat_group_req __user *gr32 = (void __user *)optval;
101262 struct group_req __user *kgr =
101263 compat_alloc_user_space(sizeof(struct group_req));
101264 u32 interface;
101265@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101266 case MCAST_BLOCK_SOURCE:
101267 case MCAST_UNBLOCK_SOURCE:
101268 {
101269- struct compat_group_source_req __user *gsr32 = (void *)optval;
101270+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101271 struct group_source_req __user *kgsr = compat_alloc_user_space(
101272 sizeof(struct group_source_req));
101273 u32 interface;
101274@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101275 }
101276 case MCAST_MSFILTER:
101277 {
101278- struct compat_group_filter __user *gf32 = (void *)optval;
101279+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101280 struct group_filter __user *kgf;
101281 u32 interface, fmode, numsrc;
101282
101283@@ -624,7 +624,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101284 char __user *optval, int __user *optlen,
101285 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101286 {
101287- struct compat_group_filter __user *gf32 = (void *)optval;
101288+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101289 struct group_filter __user *kgf;
101290 int __user *koptlen;
101291 u32 interface, fmode, numsrc;
101292@@ -768,7 +768,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101293
101294 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101295 return -EINVAL;
101296- if (copy_from_user(a, args, nas[call]))
101297+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101298 return -EFAULT;
101299 a0 = a[0];
101300 a1 = a[1];
101301diff --git a/net/core/datagram.c b/net/core/datagram.c
101302index df493d6..1145766 100644
101303--- a/net/core/datagram.c
101304+++ b/net/core/datagram.c
101305@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101306 }
101307
101308 kfree_skb(skb);
101309- atomic_inc(&sk->sk_drops);
101310+ atomic_inc_unchecked(&sk->sk_drops);
101311 sk_mem_reclaim_partial(sk);
101312
101313 return err;
101314diff --git a/net/core/dev.c b/net/core/dev.c
101315index 4ff46f8..e877e78 100644
101316--- a/net/core/dev.c
101317+++ b/net/core/dev.c
101318@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101319 {
101320 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101321 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101322- atomic_long_inc(&dev->rx_dropped);
101323+ atomic_long_inc_unchecked(&dev->rx_dropped);
101324 kfree_skb(skb);
101325 return NET_RX_DROP;
101326 }
101327 }
101328
101329 if (unlikely(!is_skb_forwardable(dev, skb))) {
101330- atomic_long_inc(&dev->rx_dropped);
101331+ atomic_long_inc_unchecked(&dev->rx_dropped);
101332 kfree_skb(skb);
101333 return NET_RX_DROP;
101334 }
101335@@ -2958,7 +2958,7 @@ recursion_alert:
101336 drop:
101337 rcu_read_unlock_bh();
101338
101339- atomic_long_inc(&dev->tx_dropped);
101340+ atomic_long_inc_unchecked(&dev->tx_dropped);
101341 kfree_skb_list(skb);
101342 return rc;
101343 out:
101344@@ -3301,7 +3301,7 @@ enqueue:
101345
101346 local_irq_restore(flags);
101347
101348- atomic_long_inc(&skb->dev->rx_dropped);
101349+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101350 kfree_skb(skb);
101351 return NET_RX_DROP;
101352 }
101353@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
101354 }
101355 EXPORT_SYMBOL(netif_rx_ni);
101356
101357-static void net_tx_action(struct softirq_action *h)
101358+static __latent_entropy void net_tx_action(void)
101359 {
101360 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101361
101362@@ -3711,7 +3711,7 @@ ncls:
101363 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101364 } else {
101365 drop:
101366- atomic_long_inc(&skb->dev->rx_dropped);
101367+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101368 kfree_skb(skb);
101369 /* Jamal, now you will not able to escape explaining
101370 * me how you were going to use this. :-)
101371@@ -4599,7 +4599,7 @@ out_unlock:
101372 return work;
101373 }
101374
101375-static void net_rx_action(struct softirq_action *h)
101376+static __latent_entropy void net_rx_action(void)
101377 {
101378 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101379 unsigned long time_limit = jiffies + 2;
101380@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101381 } else {
101382 netdev_stats_to_stats64(storage, &dev->stats);
101383 }
101384- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101385- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101386+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101387+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101388 return storage;
101389 }
101390 EXPORT_SYMBOL(dev_get_stats);
101391diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101392index b94b1d2..da3ed7c 100644
101393--- a/net/core/dev_ioctl.c
101394+++ b/net/core/dev_ioctl.c
101395@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
101396 no_module = !dev;
101397 if (no_module && capable(CAP_NET_ADMIN))
101398 no_module = request_module("netdev-%s", name);
101399- if (no_module && capable(CAP_SYS_MODULE))
101400+ if (no_module && capable(CAP_SYS_MODULE)) {
101401+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101402+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101403+#else
101404 request_module("%s", name);
101405+#endif
101406+ }
101407 }
101408 EXPORT_SYMBOL(dev_load);
101409
101410diff --git a/net/core/filter.c b/net/core/filter.c
101411index ec9baea..dd6195d 100644
101412--- a/net/core/filter.c
101413+++ b/net/core/filter.c
101414@@ -533,7 +533,11 @@ do_pass:
101415
101416 /* Unkown instruction. */
101417 default:
101418- goto err;
101419+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
101420+ fp->code, fp->jt, fp->jf, fp->k);
101421+ kfree(addrs);
101422+ BUG();
101423+ return -EINVAL;
101424 }
101425
101426 insn++;
101427@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101428 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101429 int pc, ret = 0;
101430
101431- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101432+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101433
101434 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101435 if (!masks)
101436@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101437 if (!fp)
101438 return -ENOMEM;
101439
101440- memcpy(fp->insns, fprog->filter, fsize);
101441+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101442
101443 fp->len = fprog->len;
101444 /* Since unattached filters are not copied back to user
101445diff --git a/net/core/flow.c b/net/core/flow.c
101446index 1033725..340f65d 100644
101447--- a/net/core/flow.c
101448+++ b/net/core/flow.c
101449@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101450 static int flow_entry_valid(struct flow_cache_entry *fle,
101451 struct netns_xfrm *xfrm)
101452 {
101453- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101454+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101455 return 0;
101456 if (fle->object && !fle->object->ops->check(fle->object))
101457 return 0;
101458@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101459 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101460 fcp->hash_count++;
101461 }
101462- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101463+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101464 flo = fle->object;
101465 if (!flo)
101466 goto ret_object;
101467@@ -263,7 +263,7 @@ nocache:
101468 }
101469 flo = resolver(net, key, family, dir, flo, ctx);
101470 if (fle) {
101471- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101472+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101473 if (!IS_ERR(flo))
101474 fle->object = flo;
101475 else
101476diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101477index 8d614c9..55752ea 100644
101478--- a/net/core/neighbour.c
101479+++ b/net/core/neighbour.c
101480@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101481 void __user *buffer, size_t *lenp, loff_t *ppos)
101482 {
101483 int size, ret;
101484- struct ctl_table tmp = *ctl;
101485+ ctl_table_no_const tmp = *ctl;
101486
101487 tmp.extra1 = &zero;
101488 tmp.extra2 = &unres_qlen_max;
101489@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101490 void __user *buffer,
101491 size_t *lenp, loff_t *ppos)
101492 {
101493- struct ctl_table tmp = *ctl;
101494+ ctl_table_no_const tmp = *ctl;
101495 int ret;
101496
101497 tmp.extra1 = &zero;
101498diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101499index 2bf8329..2eb1423 100644
101500--- a/net/core/net-procfs.c
101501+++ b/net/core/net-procfs.c
101502@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101503 struct rtnl_link_stats64 temp;
101504 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101505
101506- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101507+ if (gr_proc_is_restricted())
101508+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101509+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101510+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101511+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101512+ else
101513+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101514 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101515 dev->name, stats->rx_bytes, stats->rx_packets,
101516 stats->rx_errors,
101517@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101518 return 0;
101519 }
101520
101521-static const struct seq_operations dev_seq_ops = {
101522+const struct seq_operations dev_seq_ops = {
101523 .start = dev_seq_start,
101524 .next = dev_seq_next,
101525 .stop = dev_seq_stop,
101526@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101527
101528 static int softnet_seq_open(struct inode *inode, struct file *file)
101529 {
101530- return seq_open(file, &softnet_seq_ops);
101531+ return seq_open_restrict(file, &softnet_seq_ops);
101532 }
101533
101534 static const struct file_operations softnet_seq_fops = {
101535@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101536 else
101537 seq_printf(seq, "%04x", ntohs(pt->type));
101538
101539+#ifdef CONFIG_GRKERNSEC_HIDESYM
101540+ seq_printf(seq, " %-8s %pf\n",
101541+ pt->dev ? pt->dev->name : "", NULL);
101542+#else
101543 seq_printf(seq, " %-8s %pf\n",
101544 pt->dev ? pt->dev->name : "", pt->func);
101545+#endif
101546 }
101547
101548 return 0;
101549diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101550index 9993412..2a4672b 100644
101551--- a/net/core/net-sysfs.c
101552+++ b/net/core/net-sysfs.c
101553@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101554 {
101555 struct net_device *netdev = to_net_dev(dev);
101556 return sprintf(buf, fmt_dec,
101557- atomic_read(&netdev->carrier_changes));
101558+ atomic_read_unchecked(&netdev->carrier_changes));
101559 }
101560 static DEVICE_ATTR_RO(carrier_changes);
101561
101562diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101563index ce780c7..6d296b3 100644
101564--- a/net/core/net_namespace.c
101565+++ b/net/core/net_namespace.c
101566@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
101567 int error;
101568 LIST_HEAD(net_exit_list);
101569
101570- list_add_tail(&ops->list, list);
101571+ pax_list_add_tail((struct list_head *)&ops->list, list);
101572 if (ops->init || (ops->id && ops->size)) {
101573 for_each_net(net) {
101574 error = ops_init(ops, net);
101575@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
101576
101577 out_undo:
101578 /* If I have an error cleanup all namespaces I initialized */
101579- list_del(&ops->list);
101580+ pax_list_del((struct list_head *)&ops->list);
101581 ops_exit_list(ops, &net_exit_list);
101582 ops_free_list(ops, &net_exit_list);
101583 return error;
101584@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101585 struct net *net;
101586 LIST_HEAD(net_exit_list);
101587
101588- list_del(&ops->list);
101589+ pax_list_del((struct list_head *)&ops->list);
101590 for_each_net(net)
101591 list_add_tail(&net->exit_list, &net_exit_list);
101592 ops_exit_list(ops, &net_exit_list);
101593@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
101594 mutex_lock(&net_mutex);
101595 error = register_pernet_operations(&pernet_list, ops);
101596 if (!error && (first_device == &pernet_list))
101597- first_device = &ops->list;
101598+ first_device = (struct list_head *)&ops->list;
101599 mutex_unlock(&net_mutex);
101600 return error;
101601 }
101602diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101603index e0ad5d1..04fa7f7 100644
101604--- a/net/core/netpoll.c
101605+++ b/net/core/netpoll.c
101606@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101607 struct udphdr *udph;
101608 struct iphdr *iph;
101609 struct ethhdr *eth;
101610- static atomic_t ip_ident;
101611+ static atomic_unchecked_t ip_ident;
101612 struct ipv6hdr *ip6h;
101613
101614 udp_len = len + sizeof(*udph);
101615@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101616 put_unaligned(0x45, (unsigned char *)iph);
101617 iph->tos = 0;
101618 put_unaligned(htons(ip_len), &(iph->tot_len));
101619- iph->id = htons(atomic_inc_return(&ip_ident));
101620+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101621 iph->frag_off = 0;
101622 iph->ttl = 64;
101623 iph->protocol = IPPROTO_UDP;
101624diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101625index 352d183..1bddfaf 100644
101626--- a/net/core/pktgen.c
101627+++ b/net/core/pktgen.c
101628@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
101629 pn->net = net;
101630 INIT_LIST_HEAD(&pn->pktgen_threads);
101631 pn->pktgen_exiting = false;
101632- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101633+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101634 if (!pn->proc_dir) {
101635 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101636 return -ENODEV;
101637diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101638index 76ec6c5..9cfb81c 100644
101639--- a/net/core/rtnetlink.c
101640+++ b/net/core/rtnetlink.c
101641@@ -60,7 +60,7 @@ struct rtnl_link {
101642 rtnl_doit_func doit;
101643 rtnl_dumpit_func dumpit;
101644 rtnl_calcit_func calcit;
101645-};
101646+} __no_const;
101647
101648 static DEFINE_MUTEX(rtnl_mutex);
101649
101650@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101651 * to use the ops for creating device. So do not
101652 * fill up dellink as well. That disables rtnl_dellink.
101653 */
101654- if (ops->setup && !ops->dellink)
101655- ops->dellink = unregister_netdevice_queue;
101656+ if (ops->setup && !ops->dellink) {
101657+ pax_open_kernel();
101658+ *(void **)&ops->dellink = unregister_netdevice_queue;
101659+ pax_close_kernel();
101660+ }
101661
101662- list_add_tail(&ops->list, &link_ops);
101663+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101664 return 0;
101665 }
101666 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101667@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101668 for_each_net(net) {
101669 __rtnl_kill_links(net, ops);
101670 }
101671- list_del(&ops->list);
101672+ pax_list_del((struct list_head *)&ops->list);
101673 }
101674 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101675
101676@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101677 (dev->ifalias &&
101678 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101679 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101680- atomic_read(&dev->carrier_changes)))
101681+ atomic_read_unchecked(&dev->carrier_changes)))
101682 goto nla_put_failure;
101683
101684 if (1) {
101685@@ -2094,6 +2097,10 @@ replay:
101686 if (IS_ERR(dest_net))
101687 return PTR_ERR(dest_net);
101688
101689+ err = -EPERM;
101690+ if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
101691+ goto out;
101692+
101693 dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
101694 if (IS_ERR(dev)) {
101695 err = PTR_ERR(dev);
101696diff --git a/net/core/scm.c b/net/core/scm.c
101697index 3b6899b..cf36238 100644
101698--- a/net/core/scm.c
101699+++ b/net/core/scm.c
101700@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
101701 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101702 {
101703 struct cmsghdr __user *cm
101704- = (__force struct cmsghdr __user *)msg->msg_control;
101705+ = (struct cmsghdr __force_user *)msg->msg_control;
101706 struct cmsghdr cmhdr;
101707 int cmlen = CMSG_LEN(len);
101708 int err;
101709@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101710 err = -EFAULT;
101711 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101712 goto out;
101713- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101714+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101715 goto out;
101716 cmlen = CMSG_SPACE(len);
101717 if (msg->msg_controllen < cmlen)
101718@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
101719 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101720 {
101721 struct cmsghdr __user *cm
101722- = (__force struct cmsghdr __user*)msg->msg_control;
101723+ = (struct cmsghdr __force_user *)msg->msg_control;
101724
101725 int fdmax = 0;
101726 int fdnum = scm->fp->count;
101727@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101728 if (fdnum < fdmax)
101729 fdmax = fdnum;
101730
101731- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101732+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101733 i++, cmfptr++)
101734 {
101735 struct socket *sock;
101736diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101737index 62c67be..01893a0a 100644
101738--- a/net/core/skbuff.c
101739+++ b/net/core/skbuff.c
101740@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
101741 __wsum skb_checksum(const struct sk_buff *skb, int offset,
101742 int len, __wsum csum)
101743 {
101744- const struct skb_checksum_ops ops = {
101745+ static const struct skb_checksum_ops ops = {
101746 .update = csum_partial_ext,
101747 .combine = csum_block_add_ext,
101748 };
101749@@ -3363,12 +3363,14 @@ void __init skb_init(void)
101750 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
101751 sizeof(struct sk_buff),
101752 0,
101753- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101754+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101755+ SLAB_NO_SANITIZE,
101756 NULL);
101757 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
101758 sizeof(struct sk_buff_fclones),
101759 0,
101760- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101761+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101762+ SLAB_NO_SANITIZE,
101763 NULL);
101764 }
101765
101766diff --git a/net/core/sock.c b/net/core/sock.c
101767index 1c7a33d..a3817e2 100644
101768--- a/net/core/sock.c
101769+++ b/net/core/sock.c
101770@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101771 struct sk_buff_head *list = &sk->sk_receive_queue;
101772
101773 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
101774- atomic_inc(&sk->sk_drops);
101775+ atomic_inc_unchecked(&sk->sk_drops);
101776 trace_sock_rcvqueue_full(sk, skb);
101777 return -ENOMEM;
101778 }
101779@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101780 return err;
101781
101782 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
101783- atomic_inc(&sk->sk_drops);
101784+ atomic_inc_unchecked(&sk->sk_drops);
101785 return -ENOBUFS;
101786 }
101787
101788@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101789 skb_dst_force(skb);
101790
101791 spin_lock_irqsave(&list->lock, flags);
101792- skb->dropcount = atomic_read(&sk->sk_drops);
101793+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
101794 __skb_queue_tail(list, skb);
101795 spin_unlock_irqrestore(&list->lock, flags);
101796
101797@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101798 skb->dev = NULL;
101799
101800 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
101801- atomic_inc(&sk->sk_drops);
101802+ atomic_inc_unchecked(&sk->sk_drops);
101803 goto discard_and_relse;
101804 }
101805 if (nested)
101806@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101807 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
101808 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
101809 bh_unlock_sock(sk);
101810- atomic_inc(&sk->sk_drops);
101811+ atomic_inc_unchecked(&sk->sk_drops);
101812 goto discard_and_relse;
101813 }
101814
101815@@ -888,6 +888,7 @@ set_rcvbuf:
101816 }
101817 break;
101818
101819+#ifndef GRKERNSEC_BPF_HARDEN
101820 case SO_ATTACH_BPF:
101821 ret = -EINVAL;
101822 if (optlen == sizeof(u32)) {
101823@@ -900,7 +901,7 @@ set_rcvbuf:
101824 ret = sk_attach_bpf(ufd, sk);
101825 }
101826 break;
101827-
101828+#endif
101829 case SO_DETACH_FILTER:
101830 ret = sk_detach_filter(sk);
101831 break;
101832@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101833 struct timeval tm;
101834 } v;
101835
101836- int lv = sizeof(int);
101837- int len;
101838+ unsigned int lv = sizeof(int);
101839+ unsigned int len;
101840
101841 if (get_user(len, optlen))
101842 return -EFAULT;
101843- if (len < 0)
101844+ if (len > INT_MAX)
101845 return -EINVAL;
101846
101847 memset(&v, 0, sizeof(v));
101848@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101849
101850 case SO_PEERNAME:
101851 {
101852- char address[128];
101853+ char address[_K_SS_MAXSIZE];
101854
101855 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
101856 return -ENOTCONN;
101857- if (lv < len)
101858+ if (lv < len || sizeof address < len)
101859 return -EINVAL;
101860 if (copy_to_user(optval, address, len))
101861 return -EFAULT;
101862@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101863
101864 if (len > lv)
101865 len = lv;
101866- if (copy_to_user(optval, &v, len))
101867+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
101868 return -EFAULT;
101869 lenout:
101870 if (put_user(len, optlen))
101871@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
101872 */
101873 smp_wmb();
101874 atomic_set(&sk->sk_refcnt, 1);
101875- atomic_set(&sk->sk_drops, 0);
101876+ atomic_set_unchecked(&sk->sk_drops, 0);
101877 }
101878 EXPORT_SYMBOL(sock_init_data);
101879
101880@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
101881 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
101882 int level, int type)
101883 {
101884+ struct sock_extended_err ee;
101885 struct sock_exterr_skb *serr;
101886 struct sk_buff *skb;
101887 int copied, err;
101888@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
101889 sock_recv_timestamp(msg, sk, skb);
101890
101891 serr = SKB_EXT_ERR(skb);
101892- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
101893+ ee = serr->ee;
101894+ put_cmsg(msg, level, type, sizeof ee, &ee);
101895
101896 msg->msg_flags |= MSG_ERRQUEUE;
101897 err = copied;
101898diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
101899index ad704c7..ca48aff 100644
101900--- a/net/core/sock_diag.c
101901+++ b/net/core/sock_diag.c
101902@@ -9,26 +9,33 @@
101903 #include <linux/inet_diag.h>
101904 #include <linux/sock_diag.h>
101905
101906-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
101907+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
101908 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
101909 static DEFINE_MUTEX(sock_diag_table_mutex);
101910
101911 int sock_diag_check_cookie(void *sk, __u32 *cookie)
101912 {
101913+#ifndef CONFIG_GRKERNSEC_HIDESYM
101914 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
101915 cookie[1] != INET_DIAG_NOCOOKIE) &&
101916 ((u32)(unsigned long)sk != cookie[0] ||
101917 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
101918 return -ESTALE;
101919 else
101920+#endif
101921 return 0;
101922 }
101923 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
101924
101925 void sock_diag_save_cookie(void *sk, __u32 *cookie)
101926 {
101927+#ifdef CONFIG_GRKERNSEC_HIDESYM
101928+ cookie[0] = 0;
101929+ cookie[1] = 0;
101930+#else
101931 cookie[0] = (u32)(unsigned long)sk;
101932 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
101933+#endif
101934 }
101935 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
101936
101937@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
101938 mutex_lock(&sock_diag_table_mutex);
101939 if (sock_diag_handlers[hndl->family])
101940 err = -EBUSY;
101941- else
101942+ else {
101943+ pax_open_kernel();
101944 sock_diag_handlers[hndl->family] = hndl;
101945+ pax_close_kernel();
101946+ }
101947 mutex_unlock(&sock_diag_table_mutex);
101948
101949 return err;
101950@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
101951
101952 mutex_lock(&sock_diag_table_mutex);
101953 BUG_ON(sock_diag_handlers[family] != hnld);
101954+ pax_open_kernel();
101955 sock_diag_handlers[family] = NULL;
101956+ pax_close_kernel();
101957 mutex_unlock(&sock_diag_table_mutex);
101958 }
101959 EXPORT_SYMBOL_GPL(sock_diag_unregister);
101960diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
101961index 31baba2..754e2e5 100644
101962--- a/net/core/sysctl_net_core.c
101963+++ b/net/core/sysctl_net_core.c
101964@@ -25,6 +25,8 @@
101965 static int zero = 0;
101966 static int one = 1;
101967 static int ushort_max = USHRT_MAX;
101968+static int min_sndbuf = SOCK_MIN_SNDBUF;
101969+static int min_rcvbuf = SOCK_MIN_RCVBUF;
101970
101971 static int net_msg_warn; /* Unused, but still a sysctl */
101972
101973@@ -34,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
101974 {
101975 unsigned int orig_size, size;
101976 int ret, i;
101977- struct ctl_table tmp = {
101978+ ctl_table_no_const tmp = {
101979 .data = &size,
101980 .maxlen = sizeof(size),
101981 .mode = table->mode
101982@@ -202,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
101983 void __user *buffer, size_t *lenp, loff_t *ppos)
101984 {
101985 char id[IFNAMSIZ];
101986- struct ctl_table tbl = {
101987+ ctl_table_no_const tbl = {
101988 .data = id,
101989 .maxlen = IFNAMSIZ,
101990 };
101991@@ -220,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
101992 static int proc_do_rss_key(struct ctl_table *table, int write,
101993 void __user *buffer, size_t *lenp, loff_t *ppos)
101994 {
101995- struct ctl_table fake_table;
101996+ ctl_table_no_const fake_table;
101997 char buf[NETDEV_RSS_KEY_LEN * 3];
101998
101999 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102000@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = {
102001 .maxlen = sizeof(int),
102002 .mode = 0644,
102003 .proc_handler = proc_dointvec_minmax,
102004- .extra1 = &one,
102005+ .extra1 = &min_sndbuf,
102006 },
102007 {
102008 .procname = "rmem_max",
102009@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = {
102010 .maxlen = sizeof(int),
102011 .mode = 0644,
102012 .proc_handler = proc_dointvec_minmax,
102013- .extra1 = &one,
102014+ .extra1 = &min_rcvbuf,
102015 },
102016 {
102017 .procname = "wmem_default",
102018@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = {
102019 .maxlen = sizeof(int),
102020 .mode = 0644,
102021 .proc_handler = proc_dointvec_minmax,
102022- .extra1 = &one,
102023+ .extra1 = &min_sndbuf,
102024 },
102025 {
102026 .procname = "rmem_default",
102027@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = {
102028 .maxlen = sizeof(int),
102029 .mode = 0644,
102030 .proc_handler = proc_dointvec_minmax,
102031- .extra1 = &one,
102032+ .extra1 = &min_rcvbuf,
102033 },
102034 {
102035 .procname = "dev_weight",
102036@@ -284,7 +286,7 @@ static struct ctl_table net_core_table[] = {
102037 .mode = 0444,
102038 .proc_handler = proc_do_rss_key,
102039 },
102040-#ifdef CONFIG_BPF_JIT
102041+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102042 {
102043 .procname = "bpf_jit_enable",
102044 .data = &bpf_jit_enable,
102045@@ -400,13 +402,12 @@ static struct ctl_table netns_core_table[] = {
102046
102047 static __net_init int sysctl_core_net_init(struct net *net)
102048 {
102049- struct ctl_table *tbl;
102050+ ctl_table_no_const *tbl = NULL;
102051
102052 net->core.sysctl_somaxconn = SOMAXCONN;
102053
102054- tbl = netns_core_table;
102055 if (!net_eq(net, &init_net)) {
102056- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102057+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102058 if (tbl == NULL)
102059 goto err_dup;
102060
102061@@ -416,17 +417,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102062 if (net->user_ns != &init_user_ns) {
102063 tbl[0].procname = NULL;
102064 }
102065- }
102066-
102067- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102068+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102069+ } else
102070+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102071 if (net->core.sysctl_hdr == NULL)
102072 goto err_reg;
102073
102074 return 0;
102075
102076 err_reg:
102077- if (tbl != netns_core_table)
102078- kfree(tbl);
102079+ kfree(tbl);
102080 err_dup:
102081 return -ENOMEM;
102082 }
102083@@ -441,7 +441,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102084 kfree(tbl);
102085 }
102086
102087-static __net_initdata struct pernet_operations sysctl_core_ops = {
102088+static __net_initconst struct pernet_operations sysctl_core_ops = {
102089 .init = sysctl_core_net_init,
102090 .exit = sysctl_core_net_exit,
102091 };
102092diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102093index 8102286..a0c2755 100644
102094--- a/net/decnet/af_decnet.c
102095+++ b/net/decnet/af_decnet.c
102096@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102097 .sysctl_rmem = sysctl_decnet_rmem,
102098 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102099 .obj_size = sizeof(struct dn_sock),
102100+ .slab_flags = SLAB_USERCOPY,
102101 };
102102
102103 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102104diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102105index 4400da7..3429972 100644
102106--- a/net/decnet/dn_dev.c
102107+++ b/net/decnet/dn_dev.c
102108@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102109 .extra1 = &min_t3,
102110 .extra2 = &max_t3
102111 },
102112- {0}
102113+ { }
102114 },
102115 };
102116
102117diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102118index 5325b54..a0d4d69 100644
102119--- a/net/decnet/sysctl_net_decnet.c
102120+++ b/net/decnet/sysctl_net_decnet.c
102121@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102122
102123 if (len > *lenp) len = *lenp;
102124
102125- if (copy_to_user(buffer, addr, len))
102126+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102127 return -EFAULT;
102128
102129 *lenp = len;
102130@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102131
102132 if (len > *lenp) len = *lenp;
102133
102134- if (copy_to_user(buffer, devname, len))
102135+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102136 return -EFAULT;
102137
102138 *lenp = len;
102139diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102140index a2c7e4c..3dc9f67 100644
102141--- a/net/hsr/hsr_netlink.c
102142+++ b/net/hsr/hsr_netlink.c
102143@@ -102,7 +102,7 @@ nla_put_failure:
102144 return -EMSGSIZE;
102145 }
102146
102147-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102148+static struct rtnl_link_ops hsr_link_ops = {
102149 .kind = "hsr",
102150 .maxtype = IFLA_HSR_MAX,
102151 .policy = hsr_policy,
102152diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
102153index 27eaa65..7083217 100644
102154--- a/net/ieee802154/6lowpan_rtnl.c
102155+++ b/net/ieee802154/6lowpan_rtnl.c
102156@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102157 dev_put(real_dev);
102158 }
102159
102160-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102161+static struct rtnl_link_ops lowpan_link_ops = {
102162 .kind = "lowpan",
102163 .priv_size = sizeof(struct lowpan_dev_info),
102164 .setup = lowpan_setup,
102165diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102166index 9d980ed..7d01e12 100644
102167--- a/net/ieee802154/reassembly.c
102168+++ b/net/ieee802154/reassembly.c
102169@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102170
102171 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102172 {
102173- struct ctl_table *table;
102174+ ctl_table_no_const *table = NULL;
102175 struct ctl_table_header *hdr;
102176 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102177 net_ieee802154_lowpan(net);
102178
102179- table = lowpan_frags_ns_ctl_table;
102180 if (!net_eq(net, &init_net)) {
102181- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102182+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102183 GFP_KERNEL);
102184 if (table == NULL)
102185 goto err_alloc;
102186@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102187 /* Don't export sysctls to unprivileged users */
102188 if (net->user_ns != &init_user_ns)
102189 table[0].procname = NULL;
102190- }
102191-
102192- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102193+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102194+ } else
102195+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102196 if (hdr == NULL)
102197 goto err_reg;
102198
102199@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102200 return 0;
102201
102202 err_reg:
102203- if (!net_eq(net, &init_net))
102204- kfree(table);
102205+ kfree(table);
102206 err_alloc:
102207 return -ENOMEM;
102208 }
102209diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102210index a44773c..a6ae415 100644
102211--- a/net/ipv4/af_inet.c
102212+++ b/net/ipv4/af_inet.c
102213@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102214 return ip_recv_error(sk, msg, len, addr_len);
102215 #if IS_ENABLED(CONFIG_IPV6)
102216 if (sk->sk_family == AF_INET6)
102217- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102218+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102219 #endif
102220 return -EINVAL;
102221 }
102222diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102223index 214882e..ec032f6 100644
102224--- a/net/ipv4/devinet.c
102225+++ b/net/ipv4/devinet.c
102226@@ -69,7 +69,8 @@
102227
102228 static struct ipv4_devconf ipv4_devconf = {
102229 .data = {
102230- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102231+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102232+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102233 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102234 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102235 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102236@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102237
102238 static struct ipv4_devconf ipv4_devconf_dflt = {
102239 .data = {
102240- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102241+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102242+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102243 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102244 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102245 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102246@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102247 idx = 0;
102248 head = &net->dev_index_head[h];
102249 rcu_read_lock();
102250- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102251+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102252 net->dev_base_seq;
102253 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102254 if (idx < s_idx)
102255@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102256 idx = 0;
102257 head = &net->dev_index_head[h];
102258 rcu_read_lock();
102259- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102260+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102261 net->dev_base_seq;
102262 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102263 if (idx < s_idx)
102264@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102265 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102266 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102267
102268-static struct devinet_sysctl_table {
102269+static const struct devinet_sysctl_table {
102270 struct ctl_table_header *sysctl_header;
102271 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102272 } devinet_sysctl = {
102273@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102274 int err;
102275 struct ipv4_devconf *all, *dflt;
102276 #ifdef CONFIG_SYSCTL
102277- struct ctl_table *tbl = ctl_forward_entry;
102278+ ctl_table_no_const *tbl = NULL;
102279 struct ctl_table_header *forw_hdr;
102280 #endif
102281
102282@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102283 goto err_alloc_dflt;
102284
102285 #ifdef CONFIG_SYSCTL
102286- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102287+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102288 if (tbl == NULL)
102289 goto err_alloc_ctl;
102290
102291@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102292 goto err_reg_dflt;
102293
102294 err = -ENOMEM;
102295- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102296+ if (!net_eq(net, &init_net))
102297+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102298+ else
102299+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102300 if (forw_hdr == NULL)
102301 goto err_reg_ctl;
102302 net->ipv4.forw_hdr = forw_hdr;
102303@@ -2287,8 +2292,7 @@ err_reg_ctl:
102304 err_reg_dflt:
102305 __devinet_sysctl_unregister(all);
102306 err_reg_all:
102307- if (tbl != ctl_forward_entry)
102308- kfree(tbl);
102309+ kfree(tbl);
102310 err_alloc_ctl:
102311 #endif
102312 if (dflt != &ipv4_devconf_dflt)
102313diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102314index 23104a3..9f5570b 100644
102315--- a/net/ipv4/fib_frontend.c
102316+++ b/net/ipv4/fib_frontend.c
102317@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102318 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102319 fib_sync_up(dev);
102320 #endif
102321- atomic_inc(&net->ipv4.dev_addr_genid);
102322+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102323 rt_cache_flush(dev_net(dev));
102324 break;
102325 case NETDEV_DOWN:
102326 fib_del_ifaddr(ifa, NULL);
102327- atomic_inc(&net->ipv4.dev_addr_genid);
102328+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102329 if (ifa->ifa_dev->ifa_list == NULL) {
102330 /* Last address was deleted from this interface.
102331 * Disable IP.
102332@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102333 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102334 fib_sync_up(dev);
102335 #endif
102336- atomic_inc(&net->ipv4.dev_addr_genid);
102337+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102338 rt_cache_flush(net);
102339 break;
102340 case NETDEV_DOWN:
102341diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102342index f99f41b..1879da9 100644
102343--- a/net/ipv4/fib_semantics.c
102344+++ b/net/ipv4/fib_semantics.c
102345@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102346 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102347 nh->nh_gw,
102348 nh->nh_parent->fib_scope);
102349- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102350+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102351
102352 return nh->nh_saddr;
102353 }
102354diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
102355index e34dccb..4eeba4e 100644
102356--- a/net/ipv4/inet_diag.c
102357+++ b/net/ipv4/inet_diag.c
102358@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
102359 mutex_unlock(&inet_diag_table_mutex);
102360 }
102361
102362+static size_t inet_sk_attr_size(void)
102363+{
102364+ return nla_total_size(sizeof(struct tcp_info))
102365+ + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
102366+ + nla_total_size(1) /* INET_DIAG_TOS */
102367+ + nla_total_size(1) /* INET_DIAG_TCLASS */
102368+ + nla_total_size(sizeof(struct inet_diag_meminfo))
102369+ + nla_total_size(sizeof(struct inet_diag_msg))
102370+ + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
102371+ + nla_total_size(TCP_CA_NAME_MAX)
102372+ + nla_total_size(sizeof(struct tcpvegas_info))
102373+ + 64;
102374+}
102375+
102376 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
102377 struct sk_buff *skb, struct inet_diag_req_v2 *req,
102378 struct user_namespace *user_ns,
102379@@ -324,9 +338,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
102380 if (err)
102381 goto out;
102382
102383- rep = nlmsg_new(sizeof(struct inet_diag_msg) +
102384- sizeof(struct inet_diag_meminfo) +
102385- sizeof(struct tcp_info) + 64, GFP_KERNEL);
102386+ rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
102387 if (!rep) {
102388 err = -ENOMEM;
102389 goto out;
102390diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102391index 9111a4e..3576905 100644
102392--- a/net/ipv4/inet_hashtables.c
102393+++ b/net/ipv4/inet_hashtables.c
102394@@ -18,6 +18,7 @@
102395 #include <linux/sched.h>
102396 #include <linux/slab.h>
102397 #include <linux/wait.h>
102398+#include <linux/security.h>
102399
102400 #include <net/inet_connection_sock.h>
102401 #include <net/inet_hashtables.h>
102402@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102403 return inet_ehashfn(net, laddr, lport, faddr, fport);
102404 }
102405
102406+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102407+
102408 /*
102409 * Allocate and initialize a new local port bind bucket.
102410 * The bindhash mutex for snum's hash chain must be held here.
102411@@ -554,6 +557,8 @@ ok:
102412 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102413 spin_unlock(&head->lock);
102414
102415+ gr_update_task_in_ip_table(inet_sk(sk));
102416+
102417 if (tw) {
102418 inet_twsk_deschedule(tw, death_row);
102419 while (twrefcnt) {
102420diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102421index 241afd7..31b95d5 100644
102422--- a/net/ipv4/inetpeer.c
102423+++ b/net/ipv4/inetpeer.c
102424@@ -461,7 +461,7 @@ relookup:
102425 if (p) {
102426 p->daddr = *daddr;
102427 atomic_set(&p->refcnt, 1);
102428- atomic_set(&p->rid, 0);
102429+ atomic_set_unchecked(&p->rid, 0);
102430 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102431 p->rate_tokens = 0;
102432 /* 60*HZ is arbitrary, but chosen enough high so that the first
102433diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102434index 145a50c..5dd8cc5 100644
102435--- a/net/ipv4/ip_fragment.c
102436+++ b/net/ipv4/ip_fragment.c
102437@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
102438 return 0;
102439
102440 start = qp->rid;
102441- end = atomic_inc_return(&peer->rid);
102442+ end = atomic_inc_return_unchecked(&peer->rid);
102443 qp->rid = end;
102444
102445 rc = qp->q.fragments && (end - start) > max;
102446@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102447
102448 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102449 {
102450- struct ctl_table *table;
102451+ ctl_table_no_const *table = NULL;
102452 struct ctl_table_header *hdr;
102453
102454- table = ip4_frags_ns_ctl_table;
102455 if (!net_eq(net, &init_net)) {
102456- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102457+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102458 if (table == NULL)
102459 goto err_alloc;
102460
102461@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102462 /* Don't export sysctls to unprivileged users */
102463 if (net->user_ns != &init_user_ns)
102464 table[0].procname = NULL;
102465- }
102466+ hdr = register_net_sysctl(net, "net/ipv4", table);
102467+ } else
102468+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102469
102470- hdr = register_net_sysctl(net, "net/ipv4", table);
102471 if (hdr == NULL)
102472 goto err_reg;
102473
102474@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102475 return 0;
102476
102477 err_reg:
102478- if (!net_eq(net, &init_net))
102479- kfree(table);
102480+ kfree(table);
102481 err_alloc:
102482 return -ENOMEM;
102483 }
102484diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102485index 4f4bf5b..2c936fe 100644
102486--- a/net/ipv4/ip_gre.c
102487+++ b/net/ipv4/ip_gre.c
102488@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102489 module_param(log_ecn_error, bool, 0644);
102490 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102491
102492-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102493+static struct rtnl_link_ops ipgre_link_ops;
102494 static int ipgre_tunnel_init(struct net_device *dev);
102495
102496 static int ipgre_net_id __read_mostly;
102497@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102498 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
102499 };
102500
102501-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102502+static struct rtnl_link_ops ipgre_link_ops = {
102503 .kind = "gre",
102504 .maxtype = IFLA_GRE_MAX,
102505 .policy = ipgre_policy,
102506@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102507 .fill_info = ipgre_fill_info,
102508 };
102509
102510-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102511+static struct rtnl_link_ops ipgre_tap_ops = {
102512 .kind = "gretap",
102513 .maxtype = IFLA_GRE_MAX,
102514 .policy = ipgre_policy,
102515diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102516index 3d4da2c..40f9c29 100644
102517--- a/net/ipv4/ip_input.c
102518+++ b/net/ipv4/ip_input.c
102519@@ -147,6 +147,10 @@
102520 #include <linux/mroute.h>
102521 #include <linux/netlink.h>
102522
102523+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102524+extern int grsec_enable_blackhole;
102525+#endif
102526+
102527 /*
102528 * Process Router Attention IP option (RFC 2113)
102529 */
102530@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102531 if (!raw) {
102532 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102533 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102534+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102535+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102536+#endif
102537 icmp_send(skb, ICMP_DEST_UNREACH,
102538 ICMP_PROT_UNREACH, 0);
102539 }
102540diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102541index 6b85adb..cd7e5d3 100644
102542--- a/net/ipv4/ip_sockglue.c
102543+++ b/net/ipv4/ip_sockglue.c
102544@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102545 len = min_t(unsigned int, len, opt->optlen);
102546 if (put_user(len, optlen))
102547 return -EFAULT;
102548- if (copy_to_user(optval, opt->__data, len))
102549+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102550+ copy_to_user(optval, opt->__data, len))
102551 return -EFAULT;
102552 return 0;
102553 }
102554@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102555 if (sk->sk_type != SOCK_STREAM)
102556 return -ENOPROTOOPT;
102557
102558- msg.msg_control = (__force void *) optval;
102559+ msg.msg_control = (__force_kernel void *) optval;
102560 msg.msg_controllen = len;
102561 msg.msg_flags = flags;
102562
102563diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102564index 1a7e979..fd05aa4 100644
102565--- a/net/ipv4/ip_vti.c
102566+++ b/net/ipv4/ip_vti.c
102567@@ -45,7 +45,7 @@
102568 #include <net/net_namespace.h>
102569 #include <net/netns/generic.h>
102570
102571-static struct rtnl_link_ops vti_link_ops __read_mostly;
102572+static struct rtnl_link_ops vti_link_ops;
102573
102574 static int vti_net_id __read_mostly;
102575 static int vti_tunnel_init(struct net_device *dev);
102576@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102577 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102578 };
102579
102580-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102581+static struct rtnl_link_ops vti_link_ops = {
102582 .kind = "vti",
102583 .maxtype = IFLA_VTI_MAX,
102584 .policy = vti_policy,
102585diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102586index 7fa18bc..bea16af 100644
102587--- a/net/ipv4/ipconfig.c
102588+++ b/net/ipv4/ipconfig.c
102589@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102590
102591 mm_segment_t oldfs = get_fs();
102592 set_fs(get_ds());
102593- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102594+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102595 set_fs(oldfs);
102596 return res;
102597 }
102598@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102599
102600 mm_segment_t oldfs = get_fs();
102601 set_fs(get_ds());
102602- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102603+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102604 set_fs(oldfs);
102605 return res;
102606 }
102607@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102608
102609 mm_segment_t oldfs = get_fs();
102610 set_fs(get_ds());
102611- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102612+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102613 set_fs(oldfs);
102614 return res;
102615 }
102616diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102617index 40403114..c35c647 100644
102618--- a/net/ipv4/ipip.c
102619+++ b/net/ipv4/ipip.c
102620@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102621 static int ipip_net_id __read_mostly;
102622
102623 static int ipip_tunnel_init(struct net_device *dev);
102624-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102625+static struct rtnl_link_ops ipip_link_ops;
102626
102627 static int ipip_err(struct sk_buff *skb, u32 info)
102628 {
102629@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102630 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
102631 };
102632
102633-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102634+static struct rtnl_link_ops ipip_link_ops = {
102635 .kind = "ipip",
102636 .maxtype = IFLA_IPTUN_MAX,
102637 .policy = ipip_policy,
102638diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102639index f95b6f9..2ee2097 100644
102640--- a/net/ipv4/netfilter/arp_tables.c
102641+++ b/net/ipv4/netfilter/arp_tables.c
102642@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102643 #endif
102644
102645 static int get_info(struct net *net, void __user *user,
102646- const int *len, int compat)
102647+ int len, int compat)
102648 {
102649 char name[XT_TABLE_MAXNAMELEN];
102650 struct xt_table *t;
102651 int ret;
102652
102653- if (*len != sizeof(struct arpt_getinfo)) {
102654- duprintf("length %u != %Zu\n", *len,
102655+ if (len != sizeof(struct arpt_getinfo)) {
102656+ duprintf("length %u != %Zu\n", len,
102657 sizeof(struct arpt_getinfo));
102658 return -EINVAL;
102659 }
102660@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102661 info.size = private->size;
102662 strcpy(info.name, name);
102663
102664- if (copy_to_user(user, &info, *len) != 0)
102665+ if (copy_to_user(user, &info, len) != 0)
102666 ret = -EFAULT;
102667 else
102668 ret = 0;
102669@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102670
102671 switch (cmd) {
102672 case ARPT_SO_GET_INFO:
102673- ret = get_info(sock_net(sk), user, len, 1);
102674+ ret = get_info(sock_net(sk), user, *len, 1);
102675 break;
102676 case ARPT_SO_GET_ENTRIES:
102677 ret = compat_get_entries(sock_net(sk), user, len);
102678@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102679
102680 switch (cmd) {
102681 case ARPT_SO_GET_INFO:
102682- ret = get_info(sock_net(sk), user, len, 0);
102683+ ret = get_info(sock_net(sk), user, *len, 0);
102684 break;
102685
102686 case ARPT_SO_GET_ENTRIES:
102687diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102688index 99e810f..3711b81 100644
102689--- a/net/ipv4/netfilter/ip_tables.c
102690+++ b/net/ipv4/netfilter/ip_tables.c
102691@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102692 #endif
102693
102694 static int get_info(struct net *net, void __user *user,
102695- const int *len, int compat)
102696+ int len, int compat)
102697 {
102698 char name[XT_TABLE_MAXNAMELEN];
102699 struct xt_table *t;
102700 int ret;
102701
102702- if (*len != sizeof(struct ipt_getinfo)) {
102703- duprintf("length %u != %zu\n", *len,
102704+ if (len != sizeof(struct ipt_getinfo)) {
102705+ duprintf("length %u != %zu\n", len,
102706 sizeof(struct ipt_getinfo));
102707 return -EINVAL;
102708 }
102709@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102710 info.size = private->size;
102711 strcpy(info.name, name);
102712
102713- if (copy_to_user(user, &info, *len) != 0)
102714+ if (copy_to_user(user, &info, len) != 0)
102715 ret = -EFAULT;
102716 else
102717 ret = 0;
102718@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102719
102720 switch (cmd) {
102721 case IPT_SO_GET_INFO:
102722- ret = get_info(sock_net(sk), user, len, 1);
102723+ ret = get_info(sock_net(sk), user, *len, 1);
102724 break;
102725 case IPT_SO_GET_ENTRIES:
102726 ret = compat_get_entries(sock_net(sk), user, len);
102727@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102728
102729 switch (cmd) {
102730 case IPT_SO_GET_INFO:
102731- ret = get_info(sock_net(sk), user, len, 0);
102732+ ret = get_info(sock_net(sk), user, *len, 0);
102733 break;
102734
102735 case IPT_SO_GET_ENTRIES:
102736diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102737index e90f83a..3e6acca 100644
102738--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102739+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102740@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102741 spin_lock_init(&cn->lock);
102742
102743 #ifdef CONFIG_PROC_FS
102744- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102745+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102746 if (!cn->procdir) {
102747 pr_err("Unable to proc dir entry\n");
102748 return -ENOMEM;
102749diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102750index 0ae28f5..d32b565 100644
102751--- a/net/ipv4/ping.c
102752+++ b/net/ipv4/ping.c
102753@@ -59,7 +59,7 @@ struct ping_table {
102754 };
102755
102756 static struct ping_table ping_table;
102757-struct pingv6_ops pingv6_ops;
102758+struct pingv6_ops *pingv6_ops;
102759 EXPORT_SYMBOL_GPL(pingv6_ops);
102760
102761 static u16 ping_port_rover;
102762@@ -358,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102763 return -ENODEV;
102764 }
102765 }
102766- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102767+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102768 scoped);
102769 rcu_read_unlock();
102770
102771@@ -566,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102772 }
102773 #if IS_ENABLED(CONFIG_IPV6)
102774 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102775- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102776+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102777 #endif
102778 }
102779
102780@@ -584,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102781 info, (u8 *)icmph);
102782 #if IS_ENABLED(CONFIG_IPV6)
102783 } else if (family == AF_INET6) {
102784- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102785+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102786 info, (u8 *)icmph);
102787 #endif
102788 }
102789@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102790 }
102791
102792 if (inet6_sk(sk)->rxopt.all)
102793- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102794+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102795 if (skb->protocol == htons(ETH_P_IPV6) &&
102796 inet6_sk(sk)->rxopt.all)
102797- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102798+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102799 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102800 ip_cmsg_recv(msg, skb);
102801 #endif
102802@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
102803 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
102804 0, sock_i_ino(sp),
102805 atomic_read(&sp->sk_refcnt), sp,
102806- atomic_read(&sp->sk_drops));
102807+ atomic_read_unchecked(&sp->sk_drops));
102808 }
102809
102810 static int ping_v4_seq_show(struct seq_file *seq, void *v)
102811diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
102812index 0bb68df..59405fc 100644
102813--- a/net/ipv4/raw.c
102814+++ b/net/ipv4/raw.c
102815@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
102816 int raw_rcv(struct sock *sk, struct sk_buff *skb)
102817 {
102818 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
102819- atomic_inc(&sk->sk_drops);
102820+ atomic_inc_unchecked(&sk->sk_drops);
102821 kfree_skb(skb);
102822 return NET_RX_DROP;
102823 }
102824@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
102825
102826 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
102827 {
102828+ struct icmp_filter filter;
102829+
102830 if (optlen > sizeof(struct icmp_filter))
102831 optlen = sizeof(struct icmp_filter);
102832- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
102833+ if (copy_from_user(&filter, optval, optlen))
102834 return -EFAULT;
102835+ raw_sk(sk)->filter = filter;
102836 return 0;
102837 }
102838
102839 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
102840 {
102841 int len, ret = -EFAULT;
102842+ struct icmp_filter filter;
102843
102844 if (get_user(len, optlen))
102845 goto out;
102846@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
102847 if (len > sizeof(struct icmp_filter))
102848 len = sizeof(struct icmp_filter);
102849 ret = -EFAULT;
102850- if (put_user(len, optlen) ||
102851- copy_to_user(optval, &raw_sk(sk)->filter, len))
102852+ filter = raw_sk(sk)->filter;
102853+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
102854 goto out;
102855 ret = 0;
102856 out: return ret;
102857@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
102858 0, 0L, 0,
102859 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
102860 0, sock_i_ino(sp),
102861- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
102862+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
102863 }
102864
102865 static int raw_seq_show(struct seq_file *seq, void *v)
102866diff --git a/net/ipv4/route.c b/net/ipv4/route.c
102867index 52e1f2b..e736cb4 100644
102868--- a/net/ipv4/route.c
102869+++ b/net/ipv4/route.c
102870@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
102871
102872 static int rt_cache_seq_open(struct inode *inode, struct file *file)
102873 {
102874- return seq_open(file, &rt_cache_seq_ops);
102875+ return seq_open_restrict(file, &rt_cache_seq_ops);
102876 }
102877
102878 static const struct file_operations rt_cache_seq_fops = {
102879@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
102880
102881 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
102882 {
102883- return seq_open(file, &rt_cpu_seq_ops);
102884+ return seq_open_restrict(file, &rt_cpu_seq_ops);
102885 }
102886
102887 static const struct file_operations rt_cpu_seq_fops = {
102888@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
102889
102890 static int rt_acct_proc_open(struct inode *inode, struct file *file)
102891 {
102892- return single_open(file, rt_acct_proc_show, NULL);
102893+ return single_open_restrict(file, rt_acct_proc_show, NULL);
102894 }
102895
102896 static const struct file_operations rt_acct_proc_fops = {
102897@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
102898
102899 #define IP_IDENTS_SZ 2048u
102900 struct ip_ident_bucket {
102901- atomic_t id;
102902+ atomic_unchecked_t id;
102903 u32 stamp32;
102904 };
102905
102906-static struct ip_ident_bucket *ip_idents __read_mostly;
102907+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
102908
102909 /* In order to protect privacy, we add a perturbation to identifiers
102910 * if one generator is seldom used. This makes hard for an attacker
102911@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
102912 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
102913 delta = prandom_u32_max(now - old);
102914
102915- return atomic_add_return(segs + delta, &bucket->id) - segs;
102916+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
102917 }
102918 EXPORT_SYMBOL(ip_idents_reserve);
102919
102920@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
102921 .maxlen = sizeof(int),
102922 .mode = 0200,
102923 .proc_handler = ipv4_sysctl_rtcache_flush,
102924+ .extra1 = &init_net,
102925 },
102926 { },
102927 };
102928
102929 static __net_init int sysctl_route_net_init(struct net *net)
102930 {
102931- struct ctl_table *tbl;
102932+ ctl_table_no_const *tbl = NULL;
102933
102934- tbl = ipv4_route_flush_table;
102935 if (!net_eq(net, &init_net)) {
102936- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
102937+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
102938 if (tbl == NULL)
102939 goto err_dup;
102940
102941 /* Don't export sysctls to unprivileged users */
102942 if (net->user_ns != &init_user_ns)
102943 tbl[0].procname = NULL;
102944- }
102945- tbl[0].extra1 = net;
102946+ tbl[0].extra1 = net;
102947+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
102948+ } else
102949+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
102950
102951- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
102952 if (net->ipv4.route_hdr == NULL)
102953 goto err_reg;
102954 return 0;
102955
102956 err_reg:
102957- if (tbl != ipv4_route_flush_table)
102958- kfree(tbl);
102959+ kfree(tbl);
102960 err_dup:
102961 return -ENOMEM;
102962 }
102963@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
102964
102965 static __net_init int rt_genid_init(struct net *net)
102966 {
102967- atomic_set(&net->ipv4.rt_genid, 0);
102968- atomic_set(&net->fnhe_genid, 0);
102969+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
102970+ atomic_set_unchecked(&net->fnhe_genid, 0);
102971 get_random_bytes(&net->ipv4.dev_addr_genid,
102972 sizeof(net->ipv4.dev_addr_genid));
102973 return 0;
102974@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
102975 {
102976 int rc = 0;
102977
102978- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
102979- if (!ip_idents)
102980- panic("IP: failed to allocate ip_idents\n");
102981-
102982- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
102983+ prandom_bytes(ip_idents, sizeof(ip_idents));
102984
102985 #ifdef CONFIG_IP_ROUTE_CLASSID
102986 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
102987diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
102988index e0ee384..e2688d9 100644
102989--- a/net/ipv4/sysctl_net_ipv4.c
102990+++ b/net/ipv4/sysctl_net_ipv4.c
102991@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
102992 container_of(table->data, struct net, ipv4.ip_local_ports.range);
102993 int ret;
102994 int range[2];
102995- struct ctl_table tmp = {
102996+ ctl_table_no_const tmp = {
102997 .data = &range,
102998 .maxlen = sizeof(range),
102999 .mode = table->mode,
103000@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103001 int ret;
103002 gid_t urange[2];
103003 kgid_t low, high;
103004- struct ctl_table tmp = {
103005+ ctl_table_no_const tmp = {
103006 .data = &urange,
103007 .maxlen = sizeof(urange),
103008 .mode = table->mode,
103009@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103010 void __user *buffer, size_t *lenp, loff_t *ppos)
103011 {
103012 char val[TCP_CA_NAME_MAX];
103013- struct ctl_table tbl = {
103014+ ctl_table_no_const tbl = {
103015 .data = val,
103016 .maxlen = TCP_CA_NAME_MAX,
103017 };
103018@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103019 void __user *buffer, size_t *lenp,
103020 loff_t *ppos)
103021 {
103022- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103023+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103024 int ret;
103025
103026 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103027@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103028 void __user *buffer, size_t *lenp,
103029 loff_t *ppos)
103030 {
103031- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103032+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103033 int ret;
103034
103035 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103036@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103037 void __user *buffer, size_t *lenp,
103038 loff_t *ppos)
103039 {
103040- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103041+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103042 struct tcp_fastopen_context *ctxt;
103043 int ret;
103044 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103045@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
103046
103047 static __net_init int ipv4_sysctl_init_net(struct net *net)
103048 {
103049- struct ctl_table *table;
103050+ ctl_table_no_const *table = NULL;
103051
103052- table = ipv4_net_table;
103053 if (!net_eq(net, &init_net)) {
103054 int i;
103055
103056- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103057+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103058 if (table == NULL)
103059 goto err_alloc;
103060
103061@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103062 table[i].data += (void *)net - (void *)&init_net;
103063 }
103064
103065- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103066+ if (!net_eq(net, &init_net))
103067+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103068+ else
103069+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103070 if (net->ipv4.ipv4_hdr == NULL)
103071 goto err_reg;
103072
103073diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103074index 075ab4d..623bb9d 100644
103075--- a/net/ipv4/tcp_input.c
103076+++ b/net/ipv4/tcp_input.c
103077@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103078 * without any lock. We want to make sure compiler wont store
103079 * intermediate values in this location.
103080 */
103081- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103082+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103083 sk->sk_max_pacing_rate);
103084 }
103085
103086@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103087 * simplifies code)
103088 */
103089 static void
103090-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103091+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103092 struct sk_buff *head, struct sk_buff *tail,
103093 u32 start, u32 end)
103094 {
103095@@ -5506,6 +5506,7 @@ discard:
103096 tcp_paws_reject(&tp->rx_opt, 0))
103097 goto discard_and_undo;
103098
103099+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103100 if (th->syn) {
103101 /* We see SYN without ACK. It is attempt of
103102 * simultaneous connect with crossed SYNs.
103103@@ -5556,6 +5557,7 @@ discard:
103104 goto discard;
103105 #endif
103106 }
103107+#endif
103108 /* "fifth, if neither of the SYN or RST bits is set then
103109 * drop the segment and return."
103110 */
103111@@ -5602,7 +5604,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103112 goto discard;
103113
103114 if (th->syn) {
103115- if (th->fin)
103116+ if (th->fin || th->urg || th->psh)
103117 goto discard;
103118 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103119 return 1;
103120diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103121index d22f544..62f6787 100644
103122--- a/net/ipv4/tcp_ipv4.c
103123+++ b/net/ipv4/tcp_ipv4.c
103124@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103125 int sysctl_tcp_low_latency __read_mostly;
103126 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103127
103128+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103129+extern int grsec_enable_blackhole;
103130+#endif
103131+
103132 #ifdef CONFIG_TCP_MD5SIG
103133 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103134 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103135@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103136 return 0;
103137
103138 reset:
103139+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103140+ if (!grsec_enable_blackhole)
103141+#endif
103142 tcp_v4_send_reset(rsk, skb);
103143 discard:
103144 kfree_skb(skb);
103145@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103146 TCP_SKB_CB(skb)->sacked = 0;
103147
103148 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103149- if (!sk)
103150+ if (!sk) {
103151+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103152+ ret = 1;
103153+#endif
103154 goto no_tcp_socket;
103155-
103156+ }
103157 process:
103158- if (sk->sk_state == TCP_TIME_WAIT)
103159+ if (sk->sk_state == TCP_TIME_WAIT) {
103160+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103161+ ret = 2;
103162+#endif
103163 goto do_time_wait;
103164+ }
103165
103166 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103167 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103168@@ -1698,6 +1712,10 @@ csum_error:
103169 bad_packet:
103170 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103171 } else {
103172+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103173+ if (!grsec_enable_blackhole || (ret == 1 &&
103174+ (skb->dev->flags & IFF_LOOPBACK)))
103175+#endif
103176 tcp_v4_send_reset(NULL, skb);
103177 }
103178
103179diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103180index 63d2680..2db9d6b 100644
103181--- a/net/ipv4/tcp_minisocks.c
103182+++ b/net/ipv4/tcp_minisocks.c
103183@@ -27,6 +27,10 @@
103184 #include <net/inet_common.h>
103185 #include <net/xfrm.h>
103186
103187+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103188+extern int grsec_enable_blackhole;
103189+#endif
103190+
103191 int sysctl_tcp_syncookies __read_mostly = 1;
103192 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103193
103194@@ -739,7 +743,10 @@ embryonic_reset:
103195 * avoid becoming vulnerable to outside attack aiming at
103196 * resetting legit local connections.
103197 */
103198- req->rsk_ops->send_reset(sk, skb);
103199+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103200+ if (!grsec_enable_blackhole)
103201+#endif
103202+ req->rsk_ops->send_reset(sk, skb);
103203 } else if (fastopen) { /* received a valid RST pkt */
103204 reqsk_fastopen_remove(sk, req, true);
103205 tcp_reset(sk);
103206diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103207index ebf5ff5..4d1ff32 100644
103208--- a/net/ipv4/tcp_probe.c
103209+++ b/net/ipv4/tcp_probe.c
103210@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103211 if (cnt + width >= len)
103212 break;
103213
103214- if (copy_to_user(buf + cnt, tbuf, width))
103215+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103216 return -EFAULT;
103217 cnt += width;
103218 }
103219diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103220index 1829c7f..c0b3d52 100644
103221--- a/net/ipv4/tcp_timer.c
103222+++ b/net/ipv4/tcp_timer.c
103223@@ -22,6 +22,10 @@
103224 #include <linux/gfp.h>
103225 #include <net/tcp.h>
103226
103227+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103228+extern int grsec_lastack_retries;
103229+#endif
103230+
103231 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103232 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103233 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103234@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
103235 }
103236 }
103237
103238+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103239+ if ((sk->sk_state == TCP_LAST_ACK) &&
103240+ (grsec_lastack_retries > 0) &&
103241+ (grsec_lastack_retries < retry_until))
103242+ retry_until = grsec_lastack_retries;
103243+#endif
103244+
103245 if (retransmits_timed_out(sk, retry_until,
103246 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103247 /* Has it gone just too far? */
103248diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103249index 13b4dcf..b866a2a 100644
103250--- a/net/ipv4/udp.c
103251+++ b/net/ipv4/udp.c
103252@@ -87,6 +87,7 @@
103253 #include <linux/types.h>
103254 #include <linux/fcntl.h>
103255 #include <linux/module.h>
103256+#include <linux/security.h>
103257 #include <linux/socket.h>
103258 #include <linux/sockios.h>
103259 #include <linux/igmp.h>
103260@@ -114,6 +115,10 @@
103261 #include <net/busy_poll.h>
103262 #include "udp_impl.h"
103263
103264+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103265+extern int grsec_enable_blackhole;
103266+#endif
103267+
103268 struct udp_table udp_table __read_mostly;
103269 EXPORT_SYMBOL(udp_table);
103270
103271@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103272 return true;
103273 }
103274
103275+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103276+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103277+
103278 /*
103279 * This routine is called by the ICMP module when it gets some
103280 * sort of error condition. If err < 0 then the socket should
103281@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103282 dport = usin->sin_port;
103283 if (dport == 0)
103284 return -EINVAL;
103285+
103286+ err = gr_search_udp_sendmsg(sk, usin);
103287+ if (err)
103288+ return err;
103289 } else {
103290 if (sk->sk_state != TCP_ESTABLISHED)
103291 return -EDESTADDRREQ;
103292+
103293+ err = gr_search_udp_sendmsg(sk, NULL);
103294+ if (err)
103295+ return err;
103296+
103297 daddr = inet->inet_daddr;
103298 dport = inet->inet_dport;
103299 /* Open fast path for connected socket.
103300@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
103301 IS_UDPLITE(sk));
103302 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103303 IS_UDPLITE(sk));
103304- atomic_inc(&sk->sk_drops);
103305+ atomic_inc_unchecked(&sk->sk_drops);
103306 __skb_unlink(skb, rcvq);
103307 __skb_queue_tail(&list_kill, skb);
103308 }
103309@@ -1275,6 +1292,10 @@ try_again:
103310 if (!skb)
103311 goto out;
103312
103313+ err = gr_search_udp_recvmsg(sk, skb);
103314+ if (err)
103315+ goto out_free;
103316+
103317 ulen = skb->len - sizeof(struct udphdr);
103318 copied = len;
103319 if (copied > ulen)
103320@@ -1307,7 +1328,7 @@ try_again:
103321 if (unlikely(err)) {
103322 trace_kfree_skb(skb, udp_recvmsg);
103323 if (!peeked) {
103324- atomic_inc(&sk->sk_drops);
103325+ atomic_inc_unchecked(&sk->sk_drops);
103326 UDP_INC_STATS_USER(sock_net(sk),
103327 UDP_MIB_INERRORS, is_udplite);
103328 }
103329@@ -1605,7 +1626,7 @@ csum_error:
103330 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103331 drop:
103332 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103333- atomic_inc(&sk->sk_drops);
103334+ atomic_inc_unchecked(&sk->sk_drops);
103335 kfree_skb(skb);
103336 return -1;
103337 }
103338@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103339 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103340
103341 if (!skb1) {
103342- atomic_inc(&sk->sk_drops);
103343+ atomic_inc_unchecked(&sk->sk_drops);
103344 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103345 IS_UDPLITE(sk));
103346 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103347@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103348 goto csum_error;
103349
103350 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103351+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103352+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103353+#endif
103354 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103355
103356 /*
103357@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103358 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103359 0, sock_i_ino(sp),
103360 atomic_read(&sp->sk_refcnt), sp,
103361- atomic_read(&sp->sk_drops));
103362+ atomic_read_unchecked(&sp->sk_drops));
103363 }
103364
103365 int udp4_seq_show(struct seq_file *seq, void *v)
103366diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103367index 6156f68..d6ab46d 100644
103368--- a/net/ipv4/xfrm4_policy.c
103369+++ b/net/ipv4/xfrm4_policy.c
103370@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103371 fl4->flowi4_tos = iph->tos;
103372 }
103373
103374-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103375+static int xfrm4_garbage_collect(struct dst_ops *ops)
103376 {
103377 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103378
103379- xfrm4_policy_afinfo.garbage_collect(net);
103380+ xfrm_garbage_collect_deferred(net);
103381 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103382 }
103383
103384@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103385
103386 static int __net_init xfrm4_net_init(struct net *net)
103387 {
103388- struct ctl_table *table;
103389+ ctl_table_no_const *table = NULL;
103390 struct ctl_table_header *hdr;
103391
103392- table = xfrm4_policy_table;
103393 if (!net_eq(net, &init_net)) {
103394- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103395+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103396 if (!table)
103397 goto err_alloc;
103398
103399 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103400- }
103401-
103402- hdr = register_net_sysctl(net, "net/ipv4", table);
103403+ hdr = register_net_sysctl(net, "net/ipv4", table);
103404+ } else
103405+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103406 if (!hdr)
103407 goto err_reg;
103408
103409@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103410 return 0;
103411
103412 err_reg:
103413- if (!net_eq(net, &init_net))
103414- kfree(table);
103415+ kfree(table);
103416 err_alloc:
103417 return -ENOMEM;
103418 }
103419diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103420index dac9419..534fa31 100644
103421--- a/net/ipv6/addrconf.c
103422+++ b/net/ipv6/addrconf.c
103423@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103424 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103425 .mtu6 = IPV6_MIN_MTU,
103426 .accept_ra = 1,
103427- .accept_redirects = 1,
103428+ .accept_redirects = 0,
103429 .autoconf = 1,
103430 .force_mld_version = 0,
103431 .mldv1_unsolicited_report_interval = 10 * HZ,
103432@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103433 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103434 .mtu6 = IPV6_MIN_MTU,
103435 .accept_ra = 1,
103436- .accept_redirects = 1,
103437+ .accept_redirects = 0,
103438 .autoconf = 1,
103439 .force_mld_version = 0,
103440 .mldv1_unsolicited_report_interval = 10 * HZ,
103441@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103442 idx = 0;
103443 head = &net->dev_index_head[h];
103444 rcu_read_lock();
103445- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103446+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103447 net->dev_base_seq;
103448 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103449 if (idx < s_idx)
103450@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103451 p.iph.ihl = 5;
103452 p.iph.protocol = IPPROTO_IPV6;
103453 p.iph.ttl = 64;
103454- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103455+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103456
103457 if (ops->ndo_do_ioctl) {
103458 mm_segment_t oldfs = get_fs();
103459@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
103460 .release = seq_release_net,
103461 };
103462
103463+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103464+extern void unregister_ipv6_seq_ops_addr(void);
103465+
103466 static int __net_init if6_proc_net_init(struct net *net)
103467 {
103468- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103469+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103470+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103471+ unregister_ipv6_seq_ops_addr();
103472 return -ENOMEM;
103473+ }
103474 return 0;
103475 }
103476
103477 static void __net_exit if6_proc_net_exit(struct net *net)
103478 {
103479 remove_proc_entry("if_inet6", net->proc_net);
103480+ unregister_ipv6_seq_ops_addr();
103481 }
103482
103483 static struct pernet_operations if6_proc_net_ops = {
103484@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103485 s_ip_idx = ip_idx = cb->args[2];
103486
103487 rcu_read_lock();
103488- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103489+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103490 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103491 idx = 0;
103492 head = &net->dev_index_head[h];
103493@@ -4840,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103494 rt_genid_bump_ipv6(net);
103495 break;
103496 }
103497- atomic_inc(&net->ipv6.dev_addr_genid);
103498+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103499 }
103500
103501 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103502@@ -4860,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103503 int *valp = ctl->data;
103504 int val = *valp;
103505 loff_t pos = *ppos;
103506- struct ctl_table lctl;
103507+ ctl_table_no_const lctl;
103508 int ret;
103509
103510 /*
103511@@ -4945,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103512 int *valp = ctl->data;
103513 int val = *valp;
103514 loff_t pos = *ppos;
103515- struct ctl_table lctl;
103516+ ctl_table_no_const lctl;
103517 int ret;
103518
103519 /*
103520diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103521index e8c4400..a4cd5da 100644
103522--- a/net/ipv6/af_inet6.c
103523+++ b/net/ipv6/af_inet6.c
103524@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103525 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103526 net->ipv6.sysctl.flowlabel_consistency = 1;
103527 net->ipv6.sysctl.auto_flowlabels = 0;
103528- atomic_set(&net->ipv6.fib6_sernum, 1);
103529+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
103530
103531 err = ipv6_init_mibs(net);
103532 if (err)
103533diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103534index 49f5e73..ae02d54 100644
103535--- a/net/ipv6/datagram.c
103536+++ b/net/ipv6/datagram.c
103537@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103538 0,
103539 sock_i_ino(sp),
103540 atomic_read(&sp->sk_refcnt), sp,
103541- atomic_read(&sp->sk_drops));
103542+ atomic_read_unchecked(&sp->sk_drops));
103543 }
103544diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103545index d674152..fb5a01d 100644
103546--- a/net/ipv6/icmp.c
103547+++ b/net/ipv6/icmp.c
103548@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103549
103550 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103551 {
103552- struct ctl_table *table;
103553+ ctl_table_no_const *table;
103554
103555 table = kmemdup(ipv6_icmp_table_template,
103556 sizeof(ipv6_icmp_table_template),
103557diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
103558index f1c6d5e..faabef6 100644
103559--- a/net/ipv6/ip6_fib.c
103560+++ b/net/ipv6/ip6_fib.c
103561@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
103562 int new, old;
103563
103564 do {
103565- old = atomic_read(&net->ipv6.fib6_sernum);
103566+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
103567 new = old < INT_MAX ? old + 1 : 1;
103568- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
103569+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
103570 old, new) != old);
103571 return new;
103572 }
103573diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103574index 01ccc28..66861c7 100644
103575--- a/net/ipv6/ip6_gre.c
103576+++ b/net/ipv6/ip6_gre.c
103577@@ -71,8 +71,8 @@ struct ip6gre_net {
103578 struct net_device *fb_tunnel_dev;
103579 };
103580
103581-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103582-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103583+static struct rtnl_link_ops ip6gre_link_ops;
103584+static struct rtnl_link_ops ip6gre_tap_ops;
103585 static int ip6gre_tunnel_init(struct net_device *dev);
103586 static void ip6gre_tunnel_setup(struct net_device *dev);
103587 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103588@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103589 }
103590
103591
103592-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103593+static struct inet6_protocol ip6gre_protocol = {
103594 .handler = ip6gre_rcv,
103595 .err_handler = ip6gre_err,
103596 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103597@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103598 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103599 };
103600
103601-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103602+static struct rtnl_link_ops ip6gre_link_ops = {
103603 .kind = "ip6gre",
103604 .maxtype = IFLA_GRE_MAX,
103605 .policy = ip6gre_policy,
103606@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103607 .fill_info = ip6gre_fill_info,
103608 };
103609
103610-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103611+static struct rtnl_link_ops ip6gre_tap_ops = {
103612 .kind = "ip6gretap",
103613 .maxtype = IFLA_GRE_MAX,
103614 .policy = ip6gre_policy,
103615diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103616index 92b3da5..77837b8 100644
103617--- a/net/ipv6/ip6_tunnel.c
103618+++ b/net/ipv6/ip6_tunnel.c
103619@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103620
103621 static int ip6_tnl_dev_init(struct net_device *dev);
103622 static void ip6_tnl_dev_setup(struct net_device *dev);
103623-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103624+static struct rtnl_link_ops ip6_link_ops;
103625
103626 static int ip6_tnl_net_id __read_mostly;
103627 struct ip6_tnl_net {
103628@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103629 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103630 };
103631
103632-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103633+static struct rtnl_link_ops ip6_link_ops = {
103634 .kind = "ip6tnl",
103635 .maxtype = IFLA_IPTUN_MAX,
103636 .policy = ip6_tnl_policy,
103637diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103638index ace10d0..97a8b49 100644
103639--- a/net/ipv6/ip6_vti.c
103640+++ b/net/ipv6/ip6_vti.c
103641@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103642
103643 static int vti6_dev_init(struct net_device *dev);
103644 static void vti6_dev_setup(struct net_device *dev);
103645-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103646+static struct rtnl_link_ops vti6_link_ops;
103647
103648 static int vti6_net_id __read_mostly;
103649 struct vti6_net {
103650@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103651 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103652 };
103653
103654-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103655+static struct rtnl_link_ops vti6_link_ops = {
103656 .kind = "vti6",
103657 .maxtype = IFLA_VTI_MAX,
103658 .policy = vti6_policy,
103659diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103660index 66980d8d..8aef0d1 100644
103661--- a/net/ipv6/ipv6_sockglue.c
103662+++ b/net/ipv6/ipv6_sockglue.c
103663@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103664 if (sk->sk_type != SOCK_STREAM)
103665 return -ENOPROTOOPT;
103666
103667- msg.msg_control = optval;
103668+ msg.msg_control = (void __force_kernel *)optval;
103669 msg.msg_controllen = len;
103670 msg.msg_flags = flags;
103671
103672diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103673index e080fbb..412b3cf 100644
103674--- a/net/ipv6/netfilter/ip6_tables.c
103675+++ b/net/ipv6/netfilter/ip6_tables.c
103676@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103677 #endif
103678
103679 static int get_info(struct net *net, void __user *user,
103680- const int *len, int compat)
103681+ int len, int compat)
103682 {
103683 char name[XT_TABLE_MAXNAMELEN];
103684 struct xt_table *t;
103685 int ret;
103686
103687- if (*len != sizeof(struct ip6t_getinfo)) {
103688- duprintf("length %u != %zu\n", *len,
103689+ if (len != sizeof(struct ip6t_getinfo)) {
103690+ duprintf("length %u != %zu\n", len,
103691 sizeof(struct ip6t_getinfo));
103692 return -EINVAL;
103693 }
103694@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103695 info.size = private->size;
103696 strcpy(info.name, name);
103697
103698- if (copy_to_user(user, &info, *len) != 0)
103699+ if (copy_to_user(user, &info, len) != 0)
103700 ret = -EFAULT;
103701 else
103702 ret = 0;
103703@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103704
103705 switch (cmd) {
103706 case IP6T_SO_GET_INFO:
103707- ret = get_info(sock_net(sk), user, len, 1);
103708+ ret = get_info(sock_net(sk), user, *len, 1);
103709 break;
103710 case IP6T_SO_GET_ENTRIES:
103711 ret = compat_get_entries(sock_net(sk), user, len);
103712@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103713
103714 switch (cmd) {
103715 case IP6T_SO_GET_INFO:
103716- ret = get_info(sock_net(sk), user, len, 0);
103717+ ret = get_info(sock_net(sk), user, *len, 0);
103718 break;
103719
103720 case IP6T_SO_GET_ENTRIES:
103721diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103722index 6f187c8..34b367f 100644
103723--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103724+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103725@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103726
103727 static int nf_ct_frag6_sysctl_register(struct net *net)
103728 {
103729- struct ctl_table *table;
103730+ ctl_table_no_const *table = NULL;
103731 struct ctl_table_header *hdr;
103732
103733- table = nf_ct_frag6_sysctl_table;
103734 if (!net_eq(net, &init_net)) {
103735- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103736+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103737 GFP_KERNEL);
103738 if (table == NULL)
103739 goto err_alloc;
103740@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103741 table[2].data = &net->nf_frag.frags.high_thresh;
103742 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103743 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103744- }
103745-
103746- hdr = register_net_sysctl(net, "net/netfilter", table);
103747+ hdr = register_net_sysctl(net, "net/netfilter", table);
103748+ } else
103749+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103750 if (hdr == NULL)
103751 goto err_reg;
103752
103753@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103754 return 0;
103755
103756 err_reg:
103757- if (!net_eq(net, &init_net))
103758- kfree(table);
103759+ kfree(table);
103760 err_alloc:
103761 return -ENOMEM;
103762 }
103763diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103764index fe7e3e4..47aba96 100644
103765--- a/net/ipv6/ping.c
103766+++ b/net/ipv6/ping.c
103767@@ -242,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
103768 };
103769 #endif
103770
103771+static struct pingv6_ops real_pingv6_ops = {
103772+ .ipv6_recv_error = ipv6_recv_error,
103773+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103774+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103775+ .icmpv6_err_convert = icmpv6_err_convert,
103776+ .ipv6_icmp_error = ipv6_icmp_error,
103777+ .ipv6_chk_addr = ipv6_chk_addr,
103778+};
103779+
103780+static struct pingv6_ops dummy_pingv6_ops = {
103781+ .ipv6_recv_error = dummy_ipv6_recv_error,
103782+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103783+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103784+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103785+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103786+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103787+};
103788+
103789 int __init pingv6_init(void)
103790 {
103791 #ifdef CONFIG_PROC_FS
103792@@ -249,13 +267,7 @@ int __init pingv6_init(void)
103793 if (ret)
103794 return ret;
103795 #endif
103796- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
103797- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
103798- pingv6_ops.ip6_datagram_recv_specific_ctl =
103799- ip6_datagram_recv_specific_ctl;
103800- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
103801- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
103802- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
103803+ pingv6_ops = &real_pingv6_ops;
103804 return inet6_register_protosw(&pingv6_protosw);
103805 }
103806
103807@@ -264,14 +276,9 @@ int __init pingv6_init(void)
103808 */
103809 void pingv6_exit(void)
103810 {
103811- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
103812- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
103813- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
103814- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
103815- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
103816- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
103817 #ifdef CONFIG_PROC_FS
103818 unregister_pernet_subsys(&ping_v6_net_ops);
103819 #endif
103820+ pingv6_ops = &dummy_pingv6_ops;
103821 inet6_unregister_protosw(&pingv6_protosw);
103822 }
103823diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
103824index 679253d0..70b653c 100644
103825--- a/net/ipv6/proc.c
103826+++ b/net/ipv6/proc.c
103827@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
103828 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
103829 goto proc_snmp6_fail;
103830
103831- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
103832+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
103833 if (!net->mib.proc_net_devsnmp6)
103834 goto proc_dev_snmp6_fail;
103835 return 0;
103836diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
103837index ee25631..3c3ac5d 100644
103838--- a/net/ipv6/raw.c
103839+++ b/net/ipv6/raw.c
103840@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
103841 {
103842 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
103843 skb_checksum_complete(skb)) {
103844- atomic_inc(&sk->sk_drops);
103845+ atomic_inc_unchecked(&sk->sk_drops);
103846 kfree_skb(skb);
103847 return NET_RX_DROP;
103848 }
103849@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103850 struct raw6_sock *rp = raw6_sk(sk);
103851
103852 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
103853- atomic_inc(&sk->sk_drops);
103854+ atomic_inc_unchecked(&sk->sk_drops);
103855 kfree_skb(skb);
103856 return NET_RX_DROP;
103857 }
103858@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103859
103860 if (inet->hdrincl) {
103861 if (skb_checksum_complete(skb)) {
103862- atomic_inc(&sk->sk_drops);
103863+ atomic_inc_unchecked(&sk->sk_drops);
103864 kfree_skb(skb);
103865 return NET_RX_DROP;
103866 }
103867@@ -609,7 +609,7 @@ out:
103868 return err;
103869 }
103870
103871-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
103872+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
103873 struct flowi6 *fl6, struct dst_entry **dstp,
103874 unsigned int flags)
103875 {
103876@@ -916,12 +916,15 @@ do_confirm:
103877 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
103878 char __user *optval, int optlen)
103879 {
103880+ struct icmp6_filter filter;
103881+
103882 switch (optname) {
103883 case ICMPV6_FILTER:
103884 if (optlen > sizeof(struct icmp6_filter))
103885 optlen = sizeof(struct icmp6_filter);
103886- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
103887+ if (copy_from_user(&filter, optval, optlen))
103888 return -EFAULT;
103889+ raw6_sk(sk)->filter = filter;
103890 return 0;
103891 default:
103892 return -ENOPROTOOPT;
103893@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103894 char __user *optval, int __user *optlen)
103895 {
103896 int len;
103897+ struct icmp6_filter filter;
103898
103899 switch (optname) {
103900 case ICMPV6_FILTER:
103901@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103902 len = sizeof(struct icmp6_filter);
103903 if (put_user(len, optlen))
103904 return -EFAULT;
103905- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
103906+ filter = raw6_sk(sk)->filter;
103907+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
103908 return -EFAULT;
103909 return 0;
103910 default:
103911diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
103912index d7d70e6..bd5e9fc 100644
103913--- a/net/ipv6/reassembly.c
103914+++ b/net/ipv6/reassembly.c
103915@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
103916
103917 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103918 {
103919- struct ctl_table *table;
103920+ ctl_table_no_const *table = NULL;
103921 struct ctl_table_header *hdr;
103922
103923- table = ip6_frags_ns_ctl_table;
103924 if (!net_eq(net, &init_net)) {
103925- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
103926+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
103927 if (table == NULL)
103928 goto err_alloc;
103929
103930@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103931 /* Don't export sysctls to unprivileged users */
103932 if (net->user_ns != &init_user_ns)
103933 table[0].procname = NULL;
103934- }
103935+ hdr = register_net_sysctl(net, "net/ipv6", table);
103936+ } else
103937+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
103938
103939- hdr = register_net_sysctl(net, "net/ipv6", table);
103940 if (hdr == NULL)
103941 goto err_reg;
103942
103943@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103944 return 0;
103945
103946 err_reg:
103947- if (!net_eq(net, &init_net))
103948- kfree(table);
103949+ kfree(table);
103950 err_alloc:
103951 return -ENOMEM;
103952 }
103953diff --git a/net/ipv6/route.c b/net/ipv6/route.c
103954index 1528d84..f393960 100644
103955--- a/net/ipv6/route.c
103956+++ b/net/ipv6/route.c
103957@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
103958
103959 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
103960 {
103961- struct ctl_table *table;
103962+ ctl_table_no_const *table;
103963
103964 table = kmemdup(ipv6_route_table_template,
103965 sizeof(ipv6_route_table_template),
103966diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
103967index cdbfe5a..e13eb31 100644
103968--- a/net/ipv6/sit.c
103969+++ b/net/ipv6/sit.c
103970@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
103971 static void ipip6_dev_free(struct net_device *dev);
103972 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
103973 __be32 *v4dst);
103974-static struct rtnl_link_ops sit_link_ops __read_mostly;
103975+static struct rtnl_link_ops sit_link_ops;
103976
103977 static int sit_net_id __read_mostly;
103978 struct sit_net {
103979@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
103980 unregister_netdevice_queue(dev, head);
103981 }
103982
103983-static struct rtnl_link_ops sit_link_ops __read_mostly = {
103984+static struct rtnl_link_ops sit_link_ops = {
103985 .kind = "sit",
103986 .maxtype = IFLA_IPTUN_MAX,
103987 .policy = ipip6_policy,
103988diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
103989index c5c10fa..2577d51 100644
103990--- a/net/ipv6/sysctl_net_ipv6.c
103991+++ b/net/ipv6/sysctl_net_ipv6.c
103992@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
103993
103994 static int __net_init ipv6_sysctl_net_init(struct net *net)
103995 {
103996- struct ctl_table *ipv6_table;
103997+ ctl_table_no_const *ipv6_table;
103998 struct ctl_table *ipv6_route_table;
103999 struct ctl_table *ipv6_icmp_table;
104000 int err;
104001diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104002index 9c0b54e..5e7bd8f 100644
104003--- a/net/ipv6/tcp_ipv6.c
104004+++ b/net/ipv6/tcp_ipv6.c
104005@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104006 }
104007 }
104008
104009+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104010+extern int grsec_enable_blackhole;
104011+#endif
104012+
104013 static void tcp_v6_hash(struct sock *sk)
104014 {
104015 if (sk->sk_state != TCP_CLOSE) {
104016@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104017 return 0;
104018
104019 reset:
104020+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104021+ if (!grsec_enable_blackhole)
104022+#endif
104023 tcp_v6_send_reset(sk, skb);
104024 discard:
104025 if (opt_skb)
104026@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104027
104028 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
104029 inet6_iif(skb));
104030- if (!sk)
104031+ if (!sk) {
104032+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104033+ ret = 1;
104034+#endif
104035 goto no_tcp_socket;
104036+ }
104037
104038 process:
104039- if (sk->sk_state == TCP_TIME_WAIT)
104040+ if (sk->sk_state == TCP_TIME_WAIT) {
104041+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104042+ ret = 2;
104043+#endif
104044 goto do_time_wait;
104045+ }
104046
104047 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104048 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104049@@ -1499,6 +1514,10 @@ csum_error:
104050 bad_packet:
104051 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104052 } else {
104053+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104054+ if (!grsec_enable_blackhole || (ret == 1 &&
104055+ (skb->dev->flags & IFF_LOOPBACK)))
104056+#endif
104057 tcp_v6_send_reset(NULL, skb);
104058 }
104059
104060diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104061index 189dc4a..458bec0 100644
104062--- a/net/ipv6/udp.c
104063+++ b/net/ipv6/udp.c
104064@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104065 udp_ipv6_hash_secret + net_hash_mix(net));
104066 }
104067
104068+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104069+extern int grsec_enable_blackhole;
104070+#endif
104071+
104072 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104073 {
104074 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104075@@ -448,7 +452,7 @@ try_again:
104076 if (unlikely(err)) {
104077 trace_kfree_skb(skb, udpv6_recvmsg);
104078 if (!peeked) {
104079- atomic_inc(&sk->sk_drops);
104080+ atomic_inc_unchecked(&sk->sk_drops);
104081 if (is_udp4)
104082 UDP_INC_STATS_USER(sock_net(sk),
104083 UDP_MIB_INERRORS,
104084@@ -714,7 +718,7 @@ csum_error:
104085 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104086 drop:
104087 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104088- atomic_inc(&sk->sk_drops);
104089+ atomic_inc_unchecked(&sk->sk_drops);
104090 kfree_skb(skb);
104091 return -1;
104092 }
104093@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104094 if (likely(skb1 == NULL))
104095 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104096 if (!skb1) {
104097- atomic_inc(&sk->sk_drops);
104098+ atomic_inc_unchecked(&sk->sk_drops);
104099 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104100 IS_UDPLITE(sk));
104101 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104102@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104103 goto csum_error;
104104
104105 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104106+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104107+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104108+#endif
104109 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104110
104111 kfree_skb(skb);
104112diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104113index 48bf5a0..691985a 100644
104114--- a/net/ipv6/xfrm6_policy.c
104115+++ b/net/ipv6/xfrm6_policy.c
104116@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104117 }
104118 }
104119
104120-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104121+static int xfrm6_garbage_collect(struct dst_ops *ops)
104122 {
104123 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104124
104125- xfrm6_policy_afinfo.garbage_collect(net);
104126+ xfrm_garbage_collect_deferred(net);
104127 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104128 }
104129
104130@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104131
104132 static int __net_init xfrm6_net_init(struct net *net)
104133 {
104134- struct ctl_table *table;
104135+ ctl_table_no_const *table = NULL;
104136 struct ctl_table_header *hdr;
104137
104138- table = xfrm6_policy_table;
104139 if (!net_eq(net, &init_net)) {
104140- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104141+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104142 if (!table)
104143 goto err_alloc;
104144
104145 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104146- }
104147+ hdr = register_net_sysctl(net, "net/ipv6", table);
104148+ } else
104149+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104150
104151- hdr = register_net_sysctl(net, "net/ipv6", table);
104152 if (!hdr)
104153 goto err_reg;
104154
104155@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104156 return 0;
104157
104158 err_reg:
104159- if (!net_eq(net, &init_net))
104160- kfree(table);
104161+ kfree(table);
104162 err_alloc:
104163 return -ENOMEM;
104164 }
104165diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104166index c1d247e..9e5949d 100644
104167--- a/net/ipx/ipx_proc.c
104168+++ b/net/ipx/ipx_proc.c
104169@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104170 struct proc_dir_entry *p;
104171 int rc = -ENOMEM;
104172
104173- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104174+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104175
104176 if (!ipx_proc_dir)
104177 goto out;
104178diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104179index 4efe486..dee966e 100644
104180--- a/net/irda/ircomm/ircomm_tty.c
104181+++ b/net/irda/ircomm/ircomm_tty.c
104182@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104183 add_wait_queue(&port->open_wait, &wait);
104184
104185 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104186- __FILE__, __LINE__, tty->driver->name, port->count);
104187+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104188
104189 spin_lock_irqsave(&port->lock, flags);
104190- port->count--;
104191+ atomic_dec(&port->count);
104192 port->blocked_open++;
104193 spin_unlock_irqrestore(&port->lock, flags);
104194
104195@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104196 }
104197
104198 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104199- __FILE__, __LINE__, tty->driver->name, port->count);
104200+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104201
104202 schedule();
104203 }
104204@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104205
104206 spin_lock_irqsave(&port->lock, flags);
104207 if (!tty_hung_up_p(filp))
104208- port->count++;
104209+ atomic_inc(&port->count);
104210 port->blocked_open--;
104211 spin_unlock_irqrestore(&port->lock, flags);
104212
104213 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104214- __FILE__, __LINE__, tty->driver->name, port->count);
104215+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104216
104217 if (!retval)
104218 port->flags |= ASYNC_NORMAL_ACTIVE;
104219@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104220
104221 /* ++ is not atomic, so this should be protected - Jean II */
104222 spin_lock_irqsave(&self->port.lock, flags);
104223- self->port.count++;
104224+ atomic_inc(&self->port.count);
104225 spin_unlock_irqrestore(&self->port.lock, flags);
104226 tty_port_tty_set(&self->port, tty);
104227
104228 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104229- self->line, self->port.count);
104230+ self->line, atomic_read(&self->port.count));
104231
104232 /* Not really used by us, but lets do it anyway */
104233 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104234@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104235 tty_kref_put(port->tty);
104236 }
104237 port->tty = NULL;
104238- port->count = 0;
104239+ atomic_set(&port->count, 0);
104240 spin_unlock_irqrestore(&port->lock, flags);
104241
104242 wake_up_interruptible(&port->open_wait);
104243@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104244 seq_putc(m, '\n');
104245
104246 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104247- seq_printf(m, "Open count: %d\n", self->port.count);
104248+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104249 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104250 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104251
104252diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104253index b9ac598..f88cc56 100644
104254--- a/net/irda/irproc.c
104255+++ b/net/irda/irproc.c
104256@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104257 {
104258 int i;
104259
104260- proc_irda = proc_mkdir("irda", init_net.proc_net);
104261+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104262 if (proc_irda == NULL)
104263 return;
104264
104265diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104266index 2e9953b..ed06350 100644
104267--- a/net/iucv/af_iucv.c
104268+++ b/net/iucv/af_iucv.c
104269@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104270 {
104271 char name[12];
104272
104273- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104274+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104275 while (__iucv_get_sock_by_name(name)) {
104276 sprintf(name, "%08x",
104277- atomic_inc_return(&iucv_sk_list.autobind_name));
104278+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104279 }
104280 memcpy(iucv->src_name, name, 8);
104281 }
104282diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104283index 2a6a1fd..6c112b0 100644
104284--- a/net/iucv/iucv.c
104285+++ b/net/iucv/iucv.c
104286@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104287 return NOTIFY_OK;
104288 }
104289
104290-static struct notifier_block __refdata iucv_cpu_notifier = {
104291+static struct notifier_block iucv_cpu_notifier = {
104292 .notifier_call = iucv_cpu_notify,
104293 };
104294
104295diff --git a/net/key/af_key.c b/net/key/af_key.c
104296index f8ac939..1e189bf 100644
104297--- a/net/key/af_key.c
104298+++ b/net/key/af_key.c
104299@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104300 static u32 get_acqseq(void)
104301 {
104302 u32 res;
104303- static atomic_t acqseq;
104304+ static atomic_unchecked_t acqseq;
104305
104306 do {
104307- res = atomic_inc_return(&acqseq);
104308+ res = atomic_inc_return_unchecked(&acqseq);
104309 } while (!res);
104310 return res;
104311 }
104312diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104313index 781b3a2..73a7434 100644
104314--- a/net/l2tp/l2tp_eth.c
104315+++ b/net/l2tp/l2tp_eth.c
104316@@ -42,12 +42,12 @@ struct l2tp_eth {
104317 struct sock *tunnel_sock;
104318 struct l2tp_session *session;
104319 struct list_head list;
104320- atomic_long_t tx_bytes;
104321- atomic_long_t tx_packets;
104322- atomic_long_t tx_dropped;
104323- atomic_long_t rx_bytes;
104324- atomic_long_t rx_packets;
104325- atomic_long_t rx_errors;
104326+ atomic_long_unchecked_t tx_bytes;
104327+ atomic_long_unchecked_t tx_packets;
104328+ atomic_long_unchecked_t tx_dropped;
104329+ atomic_long_unchecked_t rx_bytes;
104330+ atomic_long_unchecked_t rx_packets;
104331+ atomic_long_unchecked_t rx_errors;
104332 };
104333
104334 /* via l2tp_session_priv() */
104335@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104336 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104337
104338 if (likely(ret == NET_XMIT_SUCCESS)) {
104339- atomic_long_add(len, &priv->tx_bytes);
104340- atomic_long_inc(&priv->tx_packets);
104341+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104342+ atomic_long_inc_unchecked(&priv->tx_packets);
104343 } else {
104344- atomic_long_inc(&priv->tx_dropped);
104345+ atomic_long_inc_unchecked(&priv->tx_dropped);
104346 }
104347 return NETDEV_TX_OK;
104348 }
104349@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104350 {
104351 struct l2tp_eth *priv = netdev_priv(dev);
104352
104353- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104354- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104355- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104356- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104357- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104358- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104359+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104360+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104361+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104362+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104363+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104364+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104365 return stats;
104366 }
104367
104368@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104369 nf_reset(skb);
104370
104371 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104372- atomic_long_inc(&priv->rx_packets);
104373- atomic_long_add(data_len, &priv->rx_bytes);
104374+ atomic_long_inc_unchecked(&priv->rx_packets);
104375+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104376 } else {
104377- atomic_long_inc(&priv->rx_errors);
104378+ atomic_long_inc_unchecked(&priv->rx_errors);
104379 }
104380 return;
104381
104382 error:
104383- atomic_long_inc(&priv->rx_errors);
104384+ atomic_long_inc_unchecked(&priv->rx_errors);
104385 kfree_skb(skb);
104386 }
104387
104388diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104389index 1a3c7e0..80f8b0c 100644
104390--- a/net/llc/llc_proc.c
104391+++ b/net/llc/llc_proc.c
104392@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104393 int rc = -ENOMEM;
104394 struct proc_dir_entry *p;
104395
104396- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104397+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104398 if (!llc_proc_dir)
104399 goto out;
104400
104401diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104402index e75d5c5..429fc95 100644
104403--- a/net/mac80211/cfg.c
104404+++ b/net/mac80211/cfg.c
104405@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104406 ret = ieee80211_vif_use_channel(sdata, chandef,
104407 IEEE80211_CHANCTX_EXCLUSIVE);
104408 }
104409- } else if (local->open_count == local->monitors) {
104410+ } else if (local_read(&local->open_count) == local->monitors) {
104411 local->_oper_chandef = *chandef;
104412 ieee80211_hw_config(local, 0);
104413 }
104414@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104415 else
104416 local->probe_req_reg--;
104417
104418- if (!local->open_count)
104419+ if (!local_read(&local->open_count))
104420 break;
104421
104422 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104423@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104424 if (chanctx_conf) {
104425 *chandef = sdata->vif.bss_conf.chandef;
104426 ret = 0;
104427- } else if (local->open_count > 0 &&
104428- local->open_count == local->monitors &&
104429+ } else if (local_read(&local->open_count) > 0 &&
104430+ local_read(&local->open_count) == local->monitors &&
104431 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104432 if (local->use_chanctx)
104433 *chandef = local->monitor_chandef;
104434diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104435index cc6e964..029a3a3 100644
104436--- a/net/mac80211/ieee80211_i.h
104437+++ b/net/mac80211/ieee80211_i.h
104438@@ -29,6 +29,7 @@
104439 #include <net/ieee80211_radiotap.h>
104440 #include <net/cfg80211.h>
104441 #include <net/mac80211.h>
104442+#include <asm/local.h>
104443 #include "key.h"
104444 #include "sta_info.h"
104445 #include "debug.h"
104446@@ -1114,7 +1115,7 @@ struct ieee80211_local {
104447 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104448 spinlock_t queue_stop_reason_lock;
104449
104450- int open_count;
104451+ local_t open_count;
104452 int monitors, cooked_mntrs;
104453 /* number of interfaces with corresponding FIF_ flags */
104454 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
104455diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104456index 4173553..e3b5a3f 100644
104457--- a/net/mac80211/iface.c
104458+++ b/net/mac80211/iface.c
104459@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104460 break;
104461 }
104462
104463- if (local->open_count == 0) {
104464+ if (local_read(&local->open_count) == 0) {
104465 res = drv_start(local);
104466 if (res)
104467 goto err_del_bss;
104468@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104469 res = drv_add_interface(local, sdata);
104470 if (res)
104471 goto err_stop;
104472- } else if (local->monitors == 0 && local->open_count == 0) {
104473+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104474 res = ieee80211_add_virtual_monitor(local);
104475 if (res)
104476 goto err_stop;
104477@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104478 atomic_inc(&local->iff_promiscs);
104479
104480 if (coming_up)
104481- local->open_count++;
104482+ local_inc(&local->open_count);
104483
104484 if (hw_reconf_flags)
104485 ieee80211_hw_config(local, hw_reconf_flags);
104486@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104487 err_del_interface:
104488 drv_remove_interface(local, sdata);
104489 err_stop:
104490- if (!local->open_count)
104491+ if (!local_read(&local->open_count))
104492 drv_stop(local);
104493 err_del_bss:
104494 sdata->bss = NULL;
104495@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104496 }
104497
104498 if (going_down)
104499- local->open_count--;
104500+ local_dec(&local->open_count);
104501
104502 switch (sdata->vif.type) {
104503 case NL80211_IFTYPE_AP_VLAN:
104504@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104505 }
104506 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104507
104508- if (local->open_count == 0)
104509+ if (local_read(&local->open_count) == 0)
104510 ieee80211_clear_tx_pending(local);
104511
104512 /*
104513@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104514 if (cancel_scan)
104515 flush_delayed_work(&local->scan_work);
104516
104517- if (local->open_count == 0) {
104518+ if (local_read(&local->open_count) == 0) {
104519 ieee80211_stop_device(local);
104520
104521 /* no reconfiguring after stop! */
104522@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104523 ieee80211_configure_filter(local);
104524 ieee80211_hw_config(local, hw_reconf_flags);
104525
104526- if (local->monitors == local->open_count)
104527+ if (local->monitors == local_read(&local->open_count))
104528 ieee80211_add_virtual_monitor(local);
104529 }
104530
104531diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104532index 6ab99da..f9502d4 100644
104533--- a/net/mac80211/main.c
104534+++ b/net/mac80211/main.c
104535@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104536 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104537 IEEE80211_CONF_CHANGE_POWER);
104538
104539- if (changed && local->open_count) {
104540+ if (changed && local_read(&local->open_count)) {
104541 ret = drv_config(local, changed);
104542 /*
104543 * Goal:
104544diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104545index 4a95fe3..0bfd713 100644
104546--- a/net/mac80211/pm.c
104547+++ b/net/mac80211/pm.c
104548@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104549 struct ieee80211_sub_if_data *sdata;
104550 struct sta_info *sta;
104551
104552- if (!local->open_count)
104553+ if (!local_read(&local->open_count))
104554 goto suspend;
104555
104556 ieee80211_scan_cancel(local);
104557@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104558 cancel_work_sync(&local->dynamic_ps_enable_work);
104559 del_timer_sync(&local->dynamic_ps_timer);
104560
104561- local->wowlan = wowlan && local->open_count;
104562+ local->wowlan = wowlan && local_read(&local->open_count);
104563 if (local->wowlan) {
104564 int err = drv_suspend(local, wowlan);
104565 if (err < 0) {
104566@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104567 WARN_ON(!list_empty(&local->chanctx_list));
104568
104569 /* stop hardware - this must stop RX */
104570- if (local->open_count)
104571+ if (local_read(&local->open_count))
104572 ieee80211_stop_device(local);
104573
104574 suspend:
104575diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104576index d53355b..21f583a 100644
104577--- a/net/mac80211/rate.c
104578+++ b/net/mac80211/rate.c
104579@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104580
104581 ASSERT_RTNL();
104582
104583- if (local->open_count)
104584+ if (local_read(&local->open_count))
104585 return -EBUSY;
104586
104587 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104588diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104589index 974ebe7..57bcd3c 100644
104590--- a/net/mac80211/util.c
104591+++ b/net/mac80211/util.c
104592@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104593 }
104594 #endif
104595 /* everything else happens only if HW was up & running */
104596- if (!local->open_count)
104597+ if (!local_read(&local->open_count))
104598 goto wake_up;
104599
104600 /*
104601@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104602 local->in_reconfig = false;
104603 barrier();
104604
104605- if (local->monitors == local->open_count && local->monitors > 0)
104606+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104607 ieee80211_add_virtual_monitor(local);
104608
104609 /*
104610diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104611index b02660f..c0f791c 100644
104612--- a/net/netfilter/Kconfig
104613+++ b/net/netfilter/Kconfig
104614@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
104615
104616 To compile it as a module, choose M here. If unsure, say N.
104617
104618+config NETFILTER_XT_MATCH_GRADM
104619+ tristate '"gradm" match support'
104620+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104621+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104622+ ---help---
104623+ The gradm match allows to match on grsecurity RBAC being enabled.
104624+ It is useful when iptables rules are applied early on bootup to
104625+ prevent connections to the machine (except from a trusted host)
104626+ while the RBAC system is disabled.
104627+
104628 config NETFILTER_XT_MATCH_HASHLIMIT
104629 tristate '"hashlimit" match support'
104630 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
104631diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104632index 89f73a9..e4e5bd9 100644
104633--- a/net/netfilter/Makefile
104634+++ b/net/netfilter/Makefile
104635@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104636 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104637 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104638 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104639+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104640 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104641 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104642 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
104643diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104644index d259da3..6a32b2c 100644
104645--- a/net/netfilter/ipset/ip_set_core.c
104646+++ b/net/netfilter/ipset/ip_set_core.c
104647@@ -1952,7 +1952,7 @@ done:
104648 return ret;
104649 }
104650
104651-static struct nf_sockopt_ops so_set __read_mostly = {
104652+static struct nf_sockopt_ops so_set = {
104653 .pf = PF_INET,
104654 .get_optmin = SO_IP_SET,
104655 .get_optmax = SO_IP_SET + 1,
104656diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104657index b0f7b62..0541842 100644
104658--- a/net/netfilter/ipvs/ip_vs_conn.c
104659+++ b/net/netfilter/ipvs/ip_vs_conn.c
104660@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104661 /* Increase the refcnt counter of the dest */
104662 ip_vs_dest_hold(dest);
104663
104664- conn_flags = atomic_read(&dest->conn_flags);
104665+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104666 if (cp->protocol != IPPROTO_UDP)
104667 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104668 flags = cp->flags;
104669@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
104670
104671 cp->control = NULL;
104672 atomic_set(&cp->n_control, 0);
104673- atomic_set(&cp->in_pkts, 0);
104674+ atomic_set_unchecked(&cp->in_pkts, 0);
104675
104676 cp->packet_xmit = NULL;
104677 cp->app = NULL;
104678@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104679
104680 /* Don't drop the entry if its number of incoming packets is not
104681 located in [0, 8] */
104682- i = atomic_read(&cp->in_pkts);
104683+ i = atomic_read_unchecked(&cp->in_pkts);
104684 if (i > 8 || i < 0) return 0;
104685
104686 if (!todrop_rate[i]) return 0;
104687diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104688index b87ca32..76c7799 100644
104689--- a/net/netfilter/ipvs/ip_vs_core.c
104690+++ b/net/netfilter/ipvs/ip_vs_core.c
104691@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104692 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104693 /* do not touch skb anymore */
104694
104695- atomic_inc(&cp->in_pkts);
104696+ atomic_inc_unchecked(&cp->in_pkts);
104697 ip_vs_conn_put(cp);
104698 return ret;
104699 }
104700@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104701 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104702 pkts = sysctl_sync_threshold(ipvs);
104703 else
104704- pkts = atomic_add_return(1, &cp->in_pkts);
104705+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104706
104707 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104708 ip_vs_sync_conn(net, cp, pkts);
104709diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104710index b8295a4..17ff579 100644
104711--- a/net/netfilter/ipvs/ip_vs_ctl.c
104712+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104713@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104714 */
104715 ip_vs_rs_hash(ipvs, dest);
104716 }
104717- atomic_set(&dest->conn_flags, conn_flags);
104718+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104719
104720 /* bind the service */
104721 old_svc = rcu_dereference_protected(dest->svc, 1);
104722@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104723 * align with netns init in ip_vs_control_net_init()
104724 */
104725
104726-static struct ctl_table vs_vars[] = {
104727+static ctl_table_no_const vs_vars[] __read_only = {
104728 {
104729 .procname = "amemthresh",
104730 .maxlen = sizeof(int),
104731@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104732 " %-7s %-6d %-10d %-10d\n",
104733 &dest->addr.in6,
104734 ntohs(dest->port),
104735- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104736+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104737 atomic_read(&dest->weight),
104738 atomic_read(&dest->activeconns),
104739 atomic_read(&dest->inactconns));
104740@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104741 "%-7s %-6d %-10d %-10d\n",
104742 ntohl(dest->addr.ip),
104743 ntohs(dest->port),
104744- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104745+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104746 atomic_read(&dest->weight),
104747 atomic_read(&dest->activeconns),
104748 atomic_read(&dest->inactconns));
104749@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104750
104751 entry.addr = dest->addr.ip;
104752 entry.port = dest->port;
104753- entry.conn_flags = atomic_read(&dest->conn_flags);
104754+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104755 entry.weight = atomic_read(&dest->weight);
104756 entry.u_threshold = dest->u_threshold;
104757 entry.l_threshold = dest->l_threshold;
104758@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104759 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104760 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104761 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104762- (atomic_read(&dest->conn_flags) &
104763+ (atomic_read_unchecked(&dest->conn_flags) &
104764 IP_VS_CONN_F_FWD_MASK)) ||
104765 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104766 atomic_read(&dest->weight)) ||
104767@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104768 {
104769 int idx;
104770 struct netns_ipvs *ipvs = net_ipvs(net);
104771- struct ctl_table *tbl;
104772+ ctl_table_no_const *tbl;
104773
104774 atomic_set(&ipvs->dropentry, 0);
104775 spin_lock_init(&ipvs->dropentry_lock);
104776diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104777index 127f140..553d652 100644
104778--- a/net/netfilter/ipvs/ip_vs_lblc.c
104779+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104780@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104781 * IPVS LBLC sysctl table
104782 */
104783 #ifdef CONFIG_SYSCTL
104784-static struct ctl_table vs_vars_table[] = {
104785+static ctl_table_no_const vs_vars_table[] __read_only = {
104786 {
104787 .procname = "lblc_expiration",
104788 .data = NULL,
104789diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
104790index 2229d2d..b32b785 100644
104791--- a/net/netfilter/ipvs/ip_vs_lblcr.c
104792+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
104793@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
104794 * IPVS LBLCR sysctl table
104795 */
104796
104797-static struct ctl_table vs_vars_table[] = {
104798+static ctl_table_no_const vs_vars_table[] __read_only = {
104799 {
104800 .procname = "lblcr_expiration",
104801 .data = NULL,
104802diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
104803index c47ffd7..d233a81 100644
104804--- a/net/netfilter/ipvs/ip_vs_sync.c
104805+++ b/net/netfilter/ipvs/ip_vs_sync.c
104806@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
104807 cp = cp->control;
104808 if (cp) {
104809 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104810- pkts = atomic_add_return(1, &cp->in_pkts);
104811+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104812 else
104813 pkts = sysctl_sync_threshold(ipvs);
104814 ip_vs_sync_conn(net, cp->control, pkts);
104815@@ -771,7 +771,7 @@ control:
104816 if (!cp)
104817 return;
104818 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104819- pkts = atomic_add_return(1, &cp->in_pkts);
104820+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104821 else
104822 pkts = sysctl_sync_threshold(ipvs);
104823 goto sloop;
104824@@ -900,7 +900,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
104825
104826 if (opt)
104827 memcpy(&cp->in_seq, opt, sizeof(*opt));
104828- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104829+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104830 cp->state = state;
104831 cp->old_state = cp->state;
104832 /*
104833diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
104834index 3aedbda..6a63567 100644
104835--- a/net/netfilter/ipvs/ip_vs_xmit.c
104836+++ b/net/netfilter/ipvs/ip_vs_xmit.c
104837@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
104838 else
104839 rc = NF_ACCEPT;
104840 /* do not touch skb anymore */
104841- atomic_inc(&cp->in_pkts);
104842+ atomic_inc_unchecked(&cp->in_pkts);
104843 goto out;
104844 }
104845
104846@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
104847 else
104848 rc = NF_ACCEPT;
104849 /* do not touch skb anymore */
104850- atomic_inc(&cp->in_pkts);
104851+ atomic_inc_unchecked(&cp->in_pkts);
104852 goto out;
104853 }
104854
104855diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
104856index a4b5e2a..13b1de3 100644
104857--- a/net/netfilter/nf_conntrack_acct.c
104858+++ b/net/netfilter/nf_conntrack_acct.c
104859@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
104860 #ifdef CONFIG_SYSCTL
104861 static int nf_conntrack_acct_init_sysctl(struct net *net)
104862 {
104863- struct ctl_table *table;
104864+ ctl_table_no_const *table;
104865
104866 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
104867 GFP_KERNEL);
104868diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
104869index 46d1b26..b7f3b76 100644
104870--- a/net/netfilter/nf_conntrack_core.c
104871+++ b/net/netfilter/nf_conntrack_core.c
104872@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
104873 #define DYING_NULLS_VAL ((1<<30)+1)
104874 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
104875
104876+#ifdef CONFIG_GRKERNSEC_HIDESYM
104877+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
104878+#endif
104879+
104880 int nf_conntrack_init_net(struct net *net)
104881 {
104882 int ret = -ENOMEM;
104883@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
104884 if (!net->ct.stat)
104885 goto err_pcpu_lists;
104886
104887+#ifdef CONFIG_GRKERNSEC_HIDESYM
104888+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
104889+#else
104890 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
104891+#endif
104892 if (!net->ct.slabname)
104893 goto err_slabname;
104894
104895diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
104896index 4e78c57..ec8fb74 100644
104897--- a/net/netfilter/nf_conntrack_ecache.c
104898+++ b/net/netfilter/nf_conntrack_ecache.c
104899@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
104900 #ifdef CONFIG_SYSCTL
104901 static int nf_conntrack_event_init_sysctl(struct net *net)
104902 {
104903- struct ctl_table *table;
104904+ ctl_table_no_const *table;
104905
104906 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
104907 GFP_KERNEL);
104908diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
104909index bd9d315..989947e 100644
104910--- a/net/netfilter/nf_conntrack_helper.c
104911+++ b/net/netfilter/nf_conntrack_helper.c
104912@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
104913
104914 static int nf_conntrack_helper_init_sysctl(struct net *net)
104915 {
104916- struct ctl_table *table;
104917+ ctl_table_no_const *table;
104918
104919 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
104920 GFP_KERNEL);
104921diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
104922index b65d586..beec902 100644
104923--- a/net/netfilter/nf_conntrack_proto.c
104924+++ b/net/netfilter/nf_conntrack_proto.c
104925@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
104926
104927 static void
104928 nf_ct_unregister_sysctl(struct ctl_table_header **header,
104929- struct ctl_table **table,
104930+ ctl_table_no_const **table,
104931 unsigned int users)
104932 {
104933 if (users > 0)
104934diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
104935index fc823fa..8311af3 100644
104936--- a/net/netfilter/nf_conntrack_standalone.c
104937+++ b/net/netfilter/nf_conntrack_standalone.c
104938@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
104939
104940 static int nf_conntrack_standalone_init_sysctl(struct net *net)
104941 {
104942- struct ctl_table *table;
104943+ ctl_table_no_const *table;
104944
104945 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
104946 GFP_KERNEL);
104947diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
104948index 7a394df..bd91a8a 100644
104949--- a/net/netfilter/nf_conntrack_timestamp.c
104950+++ b/net/netfilter/nf_conntrack_timestamp.c
104951@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
104952 #ifdef CONFIG_SYSCTL
104953 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
104954 {
104955- struct ctl_table *table;
104956+ ctl_table_no_const *table;
104957
104958 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
104959 GFP_KERNEL);
104960diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
104961index 43c926c..a5731d8 100644
104962--- a/net/netfilter/nf_log.c
104963+++ b/net/netfilter/nf_log.c
104964@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
104965
104966 #ifdef CONFIG_SYSCTL
104967 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
104968-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
104969+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
104970
104971 static int nf_log_proc_dostring(struct ctl_table *table, int write,
104972 void __user *buffer, size_t *lenp, loff_t *ppos)
104973@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
104974 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
104975 mutex_unlock(&nf_log_mutex);
104976 } else {
104977+ ctl_table_no_const nf_log_table = *table;
104978+
104979 mutex_lock(&nf_log_mutex);
104980 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
104981 if (!logger)
104982- table->data = "NONE";
104983+ nf_log_table.data = "NONE";
104984 else
104985- table->data = logger->name;
104986- r = proc_dostring(table, write, buffer, lenp, ppos);
104987+ nf_log_table.data = logger->name;
104988+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
104989 mutex_unlock(&nf_log_mutex);
104990 }
104991
104992diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
104993index c68c1e5..8b5d670 100644
104994--- a/net/netfilter/nf_sockopt.c
104995+++ b/net/netfilter/nf_sockopt.c
104996@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
104997 }
104998 }
104999
105000- list_add(&reg->list, &nf_sockopts);
105001+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105002 out:
105003 mutex_unlock(&nf_sockopt_mutex);
105004 return ret;
105005@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105006 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105007 {
105008 mutex_lock(&nf_sockopt_mutex);
105009- list_del(&reg->list);
105010+ pax_list_del((struct list_head *)&reg->list);
105011 mutex_unlock(&nf_sockopt_mutex);
105012 }
105013 EXPORT_SYMBOL(nf_unregister_sockopt);
105014diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105015index 11d85b3..7fcc420 100644
105016--- a/net/netfilter/nfnetlink_log.c
105017+++ b/net/netfilter/nfnetlink_log.c
105018@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
105019 struct nfnl_log_net {
105020 spinlock_t instances_lock;
105021 struct hlist_head instance_table[INSTANCE_BUCKETS];
105022- atomic_t global_seq;
105023+ atomic_unchecked_t global_seq;
105024 };
105025
105026 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105027@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105028 /* global sequence number */
105029 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105030 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105031- htonl(atomic_inc_return(&log->global_seq))))
105032+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105033 goto nla_put_failure;
105034
105035 if (data_len) {
105036diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105037new file mode 100644
105038index 0000000..c566332
105039--- /dev/null
105040+++ b/net/netfilter/xt_gradm.c
105041@@ -0,0 +1,51 @@
105042+/*
105043+ * gradm match for netfilter
105044