1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index a311db8..415b28c 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but their use
267+ should be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed of
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 176d4fe..17ceefa 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1191,6 +1191,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+
327 hashdist= [KNL,NUMA] Large hashes allocated during boot
328 are distributed across NUMA nodes. Defaults on
329 for 64-bit NUMA, off otherwise.
330@@ -2283,6 +2287,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
331 noexec=on: enable non-executable mappings (default)
332 noexec=off: disable non-executable mappings
333
334+ nopcid [X86-64]
335+ Disable PCID (Process-Context IDentifier) even if it
336+ is supported by the processor.
337+
338 nosmap [X86]
339 Disable SMAP (Supervisor Mode Access Prevention)
340 even if it is supported by processor.
341@@ -2584,6 +2592,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
342 the specified number of seconds. This is to be used if
343 your oopses keep scrolling off the screen.
344
345+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
346+ virtualization environments that don't cope well with the
347+ expand down segment used by UDEREF on X86-32 or the frequent
348+ page table updates on X86-64.
349+
350+ pax_sanitize_slab=
351+ Format: { 0 | 1 | off | fast | full }
352+ Options '0' and '1' are only provided for backward
353+ compatibility, 'off' or 'fast' should be used instead.
354+ 0|off : disable slab object sanitization
355+ 1|fast: enable slab object sanitization excluding
356+ whitelisted slabs (default)
357+ full : sanitize all slabs, even the whitelisted ones
358+
359+ pax_softmode= 0/1 to disable/enable PaX softmode at boot time.
360+
361+ pax_extra_latent_entropy
362+ Enable a very simple form of latent entropy extraction
363+ from the first 4GB of memory as the bootmem allocator
364+ passes the memory pages to the buddy allocator.
365+
366+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
367+ when the processor supports PCID.
368+
369 pcbit= [HW,ISDN]
370
371 pcd. [PARIDE]
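
As background for the pax_sanitize_slab= format documented above: a minimal
sketch of how a kernel boot parameter of this shape is typically parsed via a
__setup() handler. The handler, enum and variable names below are illustrative
assumptions, not the actual implementation carried by this patch.

        #include <linux/init.h>
        #include <linux/string.h>

        /* Hypothetical states mirroring the documented off/fast/full modes. */
        enum { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };
        static int sanitize_mode = SANITIZE_FAST;      /* documented default */

        static int __init sanitize_slab_setup(char *str)
        {
                if (!str)
                        return 0;
                if (!strcmp(str, "0") || !strcmp(str, "off"))
                        sanitize_mode = SANITIZE_OFF;
                else if (!strcmp(str, "1") || !strcmp(str, "fast"))
                        sanitize_mode = SANITIZE_FAST;
                else if (!strcmp(str, "full"))
                        sanitize_mode = SANITIZE_FULL;
                else
                        return 0;       /* unrecognized value */
                return 1;               /* parameter handled */
        }
        __setup("pax_sanitize_slab=", sanitize_slab_setup);
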
372diff --git a/Makefile b/Makefile
373index 713bf26..9ceae96 100644
374--- a/Makefile
375+++ b/Makefile
376@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
377 HOSTCC = gcc
378 HOSTCXX = g++
379 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
380-HOSTCXXFLAGS = -O2
381+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
382+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
383+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
384
385 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
386 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
387@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
388 # Rules shared between *config targets and build targets
389
390 # Basic helpers built in scripts/
391-PHONY += scripts_basic
392-scripts_basic:
393+PHONY += scripts_basic gcc-plugins
394+scripts_basic: gcc-plugins
395 $(Q)$(MAKE) $(build)=scripts/basic
396 $(Q)rm -f .tmp_quiet_recordmcount
397
398@@ -622,6 +624,72 @@ endif
399 # Tell gcc to never replace conditional load with a non-conditional one
400 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
401
402+ifndef DISABLE_PAX_PLUGINS
403+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
404+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
405+else
406+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
407+endif
408+ifneq ($(PLUGINCC),)
409+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
410+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
411+endif
412+ifdef CONFIG_PAX_MEMORY_STACKLEAK
413+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
414+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
415+endif
416+ifdef CONFIG_KALLOCSTAT_PLUGIN
417+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
418+endif
419+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
420+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
421+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
422+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
423+endif
424+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
425+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
426+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
427+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
428+endif
429+endif
430+ifdef CONFIG_CHECKER_PLUGIN
431+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
432+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
433+endif
434+endif
435+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
436+ifdef CONFIG_PAX_SIZE_OVERFLOW
437+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
438+endif
439+ifdef CONFIG_PAX_LATENT_ENTROPY
440+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
441+endif
442+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
443+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
444+endif
445+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
446+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
447+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
448+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
450+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
451+ifeq ($(KBUILD_EXTMOD),)
452+gcc-plugins:
453+ $(Q)$(MAKE) $(build)=tools/gcc
454+else
455+gcc-plugins: ;
456+endif
457+else
458+gcc-plugins:
459+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
460+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
461+else
462+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
463+endif
464+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
465+endif
466+endif
467+
468 ifdef CONFIG_READABLE_ASM
469 # Disable optimizations that make assembler listings hard to read.
470 # reorder blocks reorders the control in the function
471@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
472 else
473 KBUILD_CFLAGS += -g
474 endif
475-KBUILD_AFLAGS += -Wa,-gdwarf-2
476+KBUILD_AFLAGS += -Wa,--gdwarf-2
477 endif
478 ifdef CONFIG_DEBUG_INFO_DWARF4
479 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
480@@ -879,7 +947,7 @@ export mod_sign_cmd
481
482
483 ifeq ($(KBUILD_EXTMOD),)
484-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
485+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
486
487 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
488 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
489@@ -926,6 +994,8 @@ endif
490
491 # The actual objects are generated when descending,
492 # make sure no implicit rule kicks in
493+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
496
497 # Handle descending into subdirectories listed in $(vmlinux-dirs)
498@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499 # Error messages still appears in the original language
500
501 PHONY += $(vmlinux-dirs)
502-$(vmlinux-dirs): prepare scripts
503+$(vmlinux-dirs): gcc-plugins prepare scripts
504 $(Q)$(MAKE) $(build)=$@
505
506 define filechk_kernel.release
507@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
508
509 archprepare: archheaders archscripts prepare1 scripts_basic
510
511+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
512+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
513 prepare0: archprepare FORCE
514 $(Q)$(MAKE) $(build)=.
515
516 # All the preparing..
517+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
518 prepare: prepare0
519
520 # Generate some files
521@@ -1095,6 +1168,8 @@ all: modules
522 # using awk while concatenating to the final file.
523
524 PHONY += modules
525+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
526+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
527 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
528 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
529 @$(kecho) ' Building modules, stage 2.';
530@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
531
532 # Target to prepare building external modules
533 PHONY += modules_prepare
534-modules_prepare: prepare scripts
535+modules_prepare: gcc-plugins prepare scripts
536
537 # Target to install modules
538 PHONY += modules_install
539@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
540 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
541 signing_key.priv signing_key.x509 x509.genkey \
542 extra_certificates signing_key.x509.keyid \
543- signing_key.x509.signer
544+ signing_key.x509.signer \
545+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
546+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
547+ tools/gcc/randomize_layout_seed.h
548
549 # clean - Delete most, but leave enough to build external modules
550 #
551@@ -1215,7 +1293,7 @@ distclean: mrproper
552 @find $(srctree) $(RCS_FIND_IGNORE) \
553 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
554 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
555- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
556+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
557 -type f -print | xargs rm -f
558
559
560@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
561 $(module-dirs): crmodverdir $(objtree)/Module.symvers
562 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
563
564+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
565+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
566 modules: $(module-dirs)
567 @$(kecho) ' Building modules, stage 2.';
568 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
569@@ -1521,17 +1601,21 @@ else
570 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
571 endif
572
573-%.s: %.c prepare scripts FORCE
574+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
575+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
576+%.s: %.c gcc-plugins prepare scripts FORCE
577 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
578 %.i: %.c prepare scripts FORCE
579 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
580-%.o: %.c prepare scripts FORCE
581+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
582+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
583+%.o: %.c gcc-plugins prepare scripts FORCE
584 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
585 %.lst: %.c prepare scripts FORCE
586 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
587-%.s: %.S prepare scripts FORCE
588+%.s: %.S gcc-plugins prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.o: %.S prepare scripts FORCE
591+%.o: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593 %.symtypes: %.c prepare scripts FORCE
594 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
595@@ -1543,11 +1627,15 @@ endif
596 $(build)=$(build-dir)
597 # Make sure the latest headers are built for Documentation
598 Documentation/: headers_install
599-%/: prepare scripts FORCE
600+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
601+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
602+%/: gcc-plugins prepare scripts FORCE
603 $(cmd_crmodverdir)
604 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
605 $(build)=$(build-dir)
606-%.ko: prepare scripts FORCE
607+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
608+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
609+%.ko: gcc-plugins prepare scripts FORCE
610 $(cmd_crmodverdir)
611 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
612 $(build)=$(build-dir) $(@:.ko=.o)
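
For orientation, every -fplugin=$(objtree)/tools/gcc/*.so flag wired up in the
Makefile hunks above loads a GCC plugin built from the new tools/gcc directory.
The sketch below shows the entry points any such plugin must expose; it is
generic GCC plugin boilerplate under assumed names, not this patch's plugin
code.

        #include <gcc-plugin.h>
        #include <plugin-version.h>

        /* GCC refuses to load plugins that do not declare GPL compatibility. */
        int plugin_is_GPL_compatible;

        int plugin_init(struct plugin_name_args *plugin_info,
                        struct plugin_gcc_version *version)
        {
                /* Reject plugins compiled against a different GCC release. */
                if (!plugin_default_version_check(version, &gcc_version))
                        return 1;

                /* A real plugin (constify, stackleak, size_overflow, ...)
                   would register its passes, attributes and
                   -fplugin-arg-* handling here. */
                return 0;
        }
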
613diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
614index 8f8eafb..3405f46 100644
615--- a/arch/alpha/include/asm/atomic.h
616+++ b/arch/alpha/include/asm/atomic.h
617@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
618 #define atomic_dec(v) atomic_sub(1,(v))
619 #define atomic64_dec(v) atomic64_sub(1,(v))
620
621+#define atomic64_read_unchecked(v) atomic64_read(v)
622+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
623+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
624+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
625+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
626+#define atomic64_inc_unchecked(v) atomic64_inc(v)
627+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
628+#define atomic64_dec_unchecked(v) atomic64_dec(v)
629+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
630+
631 #endif /* _ALPHA_ATOMIC_H */
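
The point of the *_unchecked aliases added above: on an architecture without
REFCOUNT instrumentation they are plain pass-throughs, but they preserve the
source-level distinction between counters allowed to wrap and reference counts
that must not. A hedged usage sketch follows; atomic64_unchecked_t is defined
elsewhere in the patch and assumed here.

        /* Reference counts use the checked type: on REFCOUNT-instrumented
           architectures an overflowing increment traps instead of wrapping. */
        static atomic64_t obj_refs = ATOMIC64_INIT(1);

        /* Wrap-tolerant statistics opt out via the unchecked variants. */
        static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

        static void on_packet(unsigned long len)
        {
                atomic64_add_unchecked(len, &rx_bytes); /* may wrap, by design */
                atomic64_inc(&obj_refs);                /* overflow would trap */
        }
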
632diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
633index ad368a9..fbe0f25 100644
634--- a/arch/alpha/include/asm/cache.h
635+++ b/arch/alpha/include/asm/cache.h
636@@ -4,19 +4,19 @@
637 #ifndef __ARCH_ALPHA_CACHE_H
638 #define __ARCH_ALPHA_CACHE_H
639
640+#include <linux/const.h>
641
642 /* Bytes per L1 (data) cache line. */
643 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
644-# define L1_CACHE_BYTES 64
645 # define L1_CACHE_SHIFT 6
646 #else
647 /* Both EV4 and EV5 are write-through, read-allocate,
648 direct-mapped, physical.
649 */
650-# define L1_CACHE_BYTES 32
651 # define L1_CACHE_SHIFT 5
652 #endif
653
654+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
655 #define SMP_CACHE_BYTES L1_CACHE_BYTES
656
657 #endif
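
Why the switch to _AC(1,UL) << L1_CACHE_SHIFT above: this header is also pulled
into assembly files, where a UL literal suffix is a syntax error. _AC() from
linux/const.h drops the suffix under __ASSEMBLY__ and keeps it for C, and
deriving BYTES from SHIFT removes any chance of the two constants drifting
apart. Condensed illustration:

        #include <linux/const.h>

        #define L1_CACHE_SHIFT  6
        /* expands to (1UL << 6) in C, and to (1 << 6) in assembly */
        #define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)
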
658diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
659index 968d999..d36b2df 100644
660--- a/arch/alpha/include/asm/elf.h
661+++ b/arch/alpha/include/asm/elf.h
662@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
663
664 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
665
666+#ifdef CONFIG_PAX_ASLR
667+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
668+
669+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
670+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
671+#endif
672+
673 /* $0 is set by ld.so to a pointer to a function which might be
674 registered using atexit. This provides a mean for the dynamic
675 linker to call DT_FINI functions for shared libraries that have
676diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
677index aab14a0..b4fa3e7 100644
678--- a/arch/alpha/include/asm/pgalloc.h
679+++ b/arch/alpha/include/asm/pgalloc.h
680@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
681 pgd_set(pgd, pmd);
682 }
683
684+static inline void
685+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
686+{
687+ pgd_populate(mm, pgd, pmd);
688+}
689+
690 extern pgd_t *pgd_alloc(struct mm_struct *mm);
691
692 static inline void
693diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
694index d8f9b7e..f6222fa 100644
695--- a/arch/alpha/include/asm/pgtable.h
696+++ b/arch/alpha/include/asm/pgtable.h
697@@ -102,6 +102,17 @@ struct vm_area_struct;
698 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
699 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
700 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
704+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
705+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
706+#else
707+# define PAGE_SHARED_NOEXEC PAGE_SHARED
708+# define PAGE_COPY_NOEXEC PAGE_COPY
709+# define PAGE_READONLY_NOEXEC PAGE_READONLY
710+#endif
711+
712 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
713
714 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
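
The *_NOEXEC protections introduced above give PAGEEXEC a way to hand out
mappings whose instruction fetches fault (_PAGE_FOE, fault-on-execute) while
data access still works. Roughly how the selection would look, as a simplified
sketch of protection_map-style logic rather than the patch's actual code:

        /* Pick a non-executable pgprot for mappings lacking VM_EXEC, so an
           instruction fetch raises the fault that pax_handle_fetch_fault()
           in arch/alpha/mm/fault.c then inspects. */
        static pgprot_t pageexec_prot(unsigned long vm_flags)
        {
                if (vm_flags & VM_EXEC)
                        return PAGE_COPY;       /* executable private mapping */
                return PAGE_COPY_NOEXEC;        /* fetch -> _PAGE_FOE fault */
        }
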
715diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
716index 2fd00b7..cfd5069 100644
717--- a/arch/alpha/kernel/module.c
718+++ b/arch/alpha/kernel/module.c
719@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
720
721 /* The small sections were sorted to the end of the segment.
722 The following should definitely cover them. */
723- gp = (u64)me->module_core + me->core_size - 0x8000;
724+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
725 got = sechdrs[me->arch.gotsecindex].sh_addr;
726
727 for (i = 0; i < n; i++) {
728diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
729index e51f578..16c64a3 100644
730--- a/arch/alpha/kernel/osf_sys.c
731+++ b/arch/alpha/kernel/osf_sys.c
732@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
733 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
734
735 static unsigned long
736-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
737- unsigned long limit)
738+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
739+ unsigned long limit, unsigned long flags)
740 {
741 struct vm_unmapped_area_info info;
742+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
743
744 info.flags = 0;
745 info.length = len;
746@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
747 info.high_limit = limit;
748 info.align_mask = 0;
749 info.align_offset = 0;
750+ info.threadstack_offset = offset;
751 return vm_unmapped_area(&info);
752 }
753
754@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
755 merely specific addresses, but regions of memory -- perhaps
756 this feature should be incorporated into all ports? */
757
758+#ifdef CONFIG_PAX_RANDMMAP
759+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
760+#endif
761+
762 if (addr) {
763- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
764+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
765 if (addr != (unsigned long) -ENOMEM)
766 return addr;
767 }
768
769 /* Next, try allocating at TASK_UNMAPPED_BASE. */
770- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
771- len, limit);
772+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
773+
774 if (addr != (unsigned long) -ENOMEM)
775 return addr;
776
777 /* Finally, try allocating in low memory. */
778- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
779+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
780
781 return addr;
782 }
783diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
784index 9d0ac09..479a962 100644
785--- a/arch/alpha/mm/fault.c
786+++ b/arch/alpha/mm/fault.c
787@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
788 __reload_thread(pcb);
789 }
790
791+#ifdef CONFIG_PAX_PAGEEXEC
792+/*
793+ * PaX: decide what to do with offenders (regs->pc = fault address)
794+ *
795+ * returns 1 when task should be killed
796+ * 2 when patched PLT trampoline was detected
797+ * 3 when unpatched PLT trampoline was detected
798+ */
799+static int pax_handle_fetch_fault(struct pt_regs *regs)
800+{
801+
802+#ifdef CONFIG_PAX_EMUPLT
803+ int err;
804+
805+ do { /* PaX: patched PLT emulation #1 */
806+ unsigned int ldah, ldq, jmp;
807+
808+ err = get_user(ldah, (unsigned int *)regs->pc);
809+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
810+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
811+
812+ if (err)
813+ break;
814+
815+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
816+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
817+ jmp == 0x6BFB0000U)
818+ {
819+ unsigned long r27, addr;
820+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
821+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
822+
823+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
824+ err = get_user(r27, (unsigned long *)addr);
825+ if (err)
826+ break;
827+
828+ regs->r27 = r27;
829+ regs->pc = r27;
830+ return 2;
831+ }
832+ } while (0);
833+
834+ do { /* PaX: patched PLT emulation #2 */
835+ unsigned int ldah, lda, br;
836+
837+ err = get_user(ldah, (unsigned int *)regs->pc);
838+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
839+ err |= get_user(br, (unsigned int *)(regs->pc+8));
840+
841+ if (err)
842+ break;
843+
844+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
845+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
846+ (br & 0xFFE00000U) == 0xC3E00000U)
847+ {
848+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
849+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
850+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
851+
852+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
853+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
854+ return 2;
855+ }
856+ } while (0);
857+
858+ do { /* PaX: unpatched PLT emulation */
859+ unsigned int br;
860+
861+ err = get_user(br, (unsigned int *)regs->pc);
862+
863+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
864+ unsigned int br2, ldq, nop, jmp;
865+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
866+
867+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
868+ err = get_user(br2, (unsigned int *)addr);
869+ err |= get_user(ldq, (unsigned int *)(addr+4));
870+ err |= get_user(nop, (unsigned int *)(addr+8));
871+ err |= get_user(jmp, (unsigned int *)(addr+12));
872+ err |= get_user(resolver, (unsigned long *)(addr+16));
873+
874+ if (err)
875+ break;
876+
877+ if (br2 == 0xC3600000U &&
878+ ldq == 0xA77B000CU &&
879+ nop == 0x47FF041FU &&
880+ jmp == 0x6B7B0000U)
881+ {
882+ regs->r28 = regs->pc+4;
883+ regs->r27 = addr+16;
884+ regs->pc = resolver;
885+ return 3;
886+ }
887+ }
888+ } while (0);
889+#endif
890+
891+ return 1;
892+}
893+
894+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
895+{
896+ unsigned long i;
897+
898+ printk(KERN_ERR "PAX: bytes at PC: ");
899+ for (i = 0; i < 5; i++) {
900+ unsigned int c;
901+ if (get_user(c, (unsigned int *)pc+i))
902+ printk(KERN_CONT "???????? ");
903+ else
904+ printk(KERN_CONT "%08x ", c);
905+ }
906+ printk("\n");
907+}
908+#endif
909
910 /*
911 * This routine handles page faults. It determines the address,
912@@ -133,8 +251,29 @@ retry:
913 good_area:
914 si_code = SEGV_ACCERR;
915 if (cause < 0) {
916- if (!(vma->vm_flags & VM_EXEC))
917+ if (!(vma->vm_flags & VM_EXEC)) {
918+
919+#ifdef CONFIG_PAX_PAGEEXEC
920+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
921+ goto bad_area;
922+
923+ up_read(&mm->mmap_sem);
924+ switch (pax_handle_fetch_fault(regs)) {
925+
926+#ifdef CONFIG_PAX_EMUPLT
927+ case 2:
928+ case 3:
929+ return;
930+#endif
931+
932+ }
933+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
934+ do_group_exit(SIGKILL);
935+#else
936 goto bad_area;
937+#endif
938+
939+ }
940 } else if (!cause) {
941 /* Allow reads even for write-only mappings */
942 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
943diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
944index 97d07ed..2931f2b 100644
945--- a/arch/arm/Kconfig
946+++ b/arch/arm/Kconfig
947@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP
948
949 config UACCESS_WITH_MEMCPY
950 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
951- depends on MMU
952+ depends on MMU && !PAX_MEMORY_UDEREF
953 default y if CPU_FEROCEON
954 help
955 Implement faster copy_to_user and clear_user methods for CPU
956@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
957 config KEXEC
958 bool "Kexec system call (EXPERIMENTAL)"
959 depends on (!SMP || PM_SLEEP_SMP)
960+ depends on !GRKERNSEC_KMEM
961 help
962 kexec is a system call that implements the ability to shutdown your
963 current kernel, and to start another kernel. It is like a reboot
964diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
965index e22c119..eaa807d 100644
966--- a/arch/arm/include/asm/atomic.h
967+++ b/arch/arm/include/asm/atomic.h
968@@ -18,17 +18,41 @@
969 #include <asm/barrier.h>
970 #include <asm/cmpxchg.h>
971
972+#ifdef CONFIG_GENERIC_ATOMIC64
973+#include <asm-generic/atomic64.h>
974+#endif
975+
976 #define ATOMIC_INIT(i) { (i) }
977
978 #ifdef __KERNEL__
979
980+#ifdef CONFIG_THUMB2_KERNEL
981+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
982+#else
983+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
984+#endif
985+
986+#define _ASM_EXTABLE(from, to) \
987+" .pushsection __ex_table,\"a\"\n"\
988+" .align 3\n" \
989+" .long " #from ", " #to"\n" \
990+" .popsection"
991+
992 /*
993 * On ARM, ordinary assignment (str instruction) doesn't clear the local
994 * strex/ldrex monitor on some implementations. The reason we can use it for
995 * atomic_set() is the clrex or dummy strex done on every exception return.
996 */
997 #define atomic_read(v) ACCESS_ONCE((v)->counter)
998+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
999+{
1000+ return ACCESS_ONCE(v->counter);
1001+}
1002 #define atomic_set(v,i) (((v)->counter) = (i))
1003+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1004+{
1005+ v->counter = i;
1006+}
1007
1008 #if __LINUX_ARM_ARCH__ >= 6
1009
1010@@ -38,26 +62,50 @@
1011 * to ensure that the update happens.
1012 */
1013
1014-#define ATOMIC_OP(op, c_op, asm_op) \
1015-static inline void atomic_##op(int i, atomic_t *v) \
1016+#ifdef CONFIG_PAX_REFCOUNT
1017+#define __OVERFLOW_POST \
1018+ " bvc 3f\n" \
1019+ "2: " REFCOUNT_TRAP_INSN "\n"\
1020+ "3:\n"
1021+#define __OVERFLOW_POST_RETURN \
1022+ " bvc 3f\n" \
1023+" mov %0, %1\n" \
1024+ "2: " REFCOUNT_TRAP_INSN "\n"\
1025+ "3:\n"
1026+#define __OVERFLOW_EXTABLE \
1027+ "4:\n" \
1028+ _ASM_EXTABLE(2b, 4b)
1029+#else
1030+#define __OVERFLOW_POST
1031+#define __OVERFLOW_POST_RETURN
1032+#define __OVERFLOW_EXTABLE
1033+#endif
1034+
1035+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1036+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1037 { \
1038 unsigned long tmp; \
1039 int result; \
1040 \
1041 prefetchw(&v->counter); \
1042- __asm__ __volatile__("@ atomic_" #op "\n" \
1043+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1044 "1: ldrex %0, [%3]\n" \
1045 " " #asm_op " %0, %0, %4\n" \
1046+ post_op \
1047 " strex %1, %0, [%3]\n" \
1048 " teq %1, #0\n" \
1049-" bne 1b" \
1050+" bne 1b\n" \
1051+ extable \
1052 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1053 : "r" (&v->counter), "Ir" (i) \
1054 : "cc"); \
1055 } \
1056
1057-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1058-static inline int atomic_##op##_return(int i, atomic_t *v) \
1059+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\
1060+ __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1061+
1062+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1063+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1064 { \
1065 unsigned long tmp; \
1066 int result; \
1067@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1068 smp_mb(); \
1069 prefetchw(&v->counter); \
1070 \
1071- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1072+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1073 "1: ldrex %0, [%3]\n" \
1074 " " #asm_op " %0, %0, %4\n" \
1075+ post_op \
1076 " strex %1, %0, [%3]\n" \
1077 " teq %1, #0\n" \
1078-" bne 1b" \
1079+" bne 1b\n" \
1080+ extable \
1081 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1082 : "r" (&v->counter), "Ir" (i) \
1083 : "cc"); \
1084@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1085 return result; \
1086 }
1087
1088+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\
1089+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1090+
1091 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1092 {
1093 int oldval;
1094@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1095 __asm__ __volatile__ ("@ atomic_add_unless\n"
1096 "1: ldrex %0, [%4]\n"
1097 " teq %0, %5\n"
1098-" beq 2f\n"
1099-" add %1, %0, %6\n"
1100+" beq 4f\n"
1101+" adds %1, %0, %6\n"
1102+
1103+#ifdef CONFIG_PAX_REFCOUNT
1104+" bvc 3f\n"
1105+"2: " REFCOUNT_TRAP_INSN "\n"
1106+"3:\n"
1107+#endif
1108+
1109 " strex %2, %1, [%4]\n"
1110 " teq %2, #0\n"
1111 " bne 1b\n"
1112-"2:"
1113+"4:"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+ _ASM_EXTABLE(2b, 4b)
1117+#endif
1118+
1119 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1120 : "r" (&v->counter), "r" (u), "r" (a)
1121 : "cc");
1122@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1123 return oldval;
1124 }
1125
1126+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1127+{
1128+ unsigned long oldval, res;
1129+
1130+ smp_mb();
1131+
1132+ do {
1133+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1134+ "ldrex %1, [%3]\n"
1135+ "mov %0, #0\n"
1136+ "teq %1, %4\n"
1137+ "strexeq %0, %5, [%3]\n"
1138+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1139+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1140+ : "cc");
1141+ } while (res);
1142+
1143+ smp_mb();
1144+
1145+ return oldval;
1146+}
1147+
1148 #else /* ARM_ARCH_6 */
1149
1150 #ifdef CONFIG_SMP
1151 #error SMP not supported on pre-ARMv6 CPUs
1152 #endif
1153
1154-#define ATOMIC_OP(op, c_op, asm_op) \
1155-static inline void atomic_##op(int i, atomic_t *v) \
1156+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1157+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1158 { \
1159 unsigned long flags; \
1160 \
1161@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1162 raw_local_irq_restore(flags); \
1163 } \
1164
1165-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1166-static inline int atomic_##op##_return(int i, atomic_t *v) \
1167+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1168+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1169+
1170+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1171+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1172 { \
1173 unsigned long flags; \
1174 int val; \
1175@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1176 return val; \
1177 }
1178
1179+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1180+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1181+
1182 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1183 {
1184 int ret;
1185@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 return ret;
1187 }
1188
1189+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1190+{
1191+ return atomic_cmpxchg((atomic_t *)v, old, new);
1192+}
1193+
1194 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1195 {
1196 int c, old;
1197@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1198
1199 #undef ATOMIC_OPS
1200 #undef ATOMIC_OP_RETURN
1201+#undef __ATOMIC_OP_RETURN
1202 #undef ATOMIC_OP
1203+#undef __ATOMIC_OP
1204
1205 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1206+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1207+{
1208+ return xchg(&v->counter, new);
1209+}
1210
1211 #define atomic_inc(v) atomic_add(1, v)
1212+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1213+{
1214+ atomic_add_unchecked(1, v);
1215+}
1216 #define atomic_dec(v) atomic_sub(1, v)
1217+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1218+{
1219+ atomic_sub_unchecked(1, v);
1220+}
1221
1222 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1223+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1224+{
1225+ return atomic_add_return_unchecked(1, v) == 0;
1226+}
1227 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1228 #define atomic_inc_return(v) (atomic_add_return(1, v))
1229+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1230+{
1231+ return atomic_add_return_unchecked(1, v);
1232+}
1233 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1234 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1235
1236@@ -216,6 +336,14 @@ typedef struct {
1237 long long counter;
1238 } atomic64_t;
1239
1240+#ifdef CONFIG_PAX_REFCOUNT
1241+typedef struct {
1242+ long long counter;
1243+} atomic64_unchecked_t;
1244+#else
1245+typedef atomic64_t atomic64_unchecked_t;
1246+#endif
1247+
1248 #define ATOMIC64_INIT(i) { (i) }
1249
1250 #ifdef CONFIG_ARM_LPAE
1251@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1252 return result;
1253 }
1254
1255+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1256+{
1257+ long long result;
1258+
1259+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1260+" ldrd %0, %H0, [%1]"
1261+ : "=&r" (result)
1262+ : "r" (&v->counter), "Qo" (v->counter)
1263+ );
1264+
1265+ return result;
1266+}
1267+
1268 static inline void atomic64_set(atomic64_t *v, long long i)
1269 {
1270 __asm__ __volatile__("@ atomic64_set\n"
1271@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1272 : "r" (&v->counter), "r" (i)
1273 );
1274 }
1275+
1276+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1277+{
1278+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1279+" strd %2, %H2, [%1]"
1280+ : "=Qo" (v->counter)
1281+ : "r" (&v->counter), "r" (i)
1282+ );
1283+}
1284 #else
1285 static inline long long atomic64_read(const atomic64_t *v)
1286 {
1287@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1288 return result;
1289 }
1290
1291+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1292+{
1293+ long long result;
1294+
1295+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1296+" ldrexd %0, %H0, [%1]"
1297+ : "=&r" (result)
1298+ : "r" (&v->counter), "Qo" (v->counter)
1299+ );
1300+
1301+ return result;
1302+}
1303+
1304 static inline void atomic64_set(atomic64_t *v, long long i)
1305 {
1306 long long tmp;
1307@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1308 : "r" (&v->counter), "r" (i)
1309 : "cc");
1310 }
1311+
1312+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1313+{
1314+ long long tmp;
1315+
1316+ prefetchw(&v->counter);
1317+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1318+"1: ldrexd %0, %H0, [%2]\n"
1319+" strexd %0, %3, %H3, [%2]\n"
1320+" teq %0, #0\n"
1321+" bne 1b"
1322+ : "=&r" (tmp), "=Qo" (v->counter)
1323+ : "r" (&v->counter), "r" (i)
1324+ : "cc");
1325+}
1326 #endif
1327
1328-#define ATOMIC64_OP(op, op1, op2) \
1329-static inline void atomic64_##op(long long i, atomic64_t *v) \
1330+#undef __OVERFLOW_POST_RETURN
1331+#define __OVERFLOW_POST_RETURN \
1332+ " bvc 3f\n" \
1333+" mov %0, %1\n" \
1334+" mov %H0, %H1\n" \
1335+ "2: " REFCOUNT_TRAP_INSN "\n"\
1336+ "3:\n"
1337+
1338+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1339+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1340 { \
1341 long long result; \
1342 unsigned long tmp; \
1343 \
1344 prefetchw(&v->counter); \
1345- __asm__ __volatile__("@ atomic64_" #op "\n" \
1346+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1347 "1: ldrexd %0, %H0, [%3]\n" \
1348 " " #op1 " %Q0, %Q0, %Q4\n" \
1349 " " #op2 " %R0, %R0, %R4\n" \
1350+ post_op \
1351 " strexd %1, %0, %H0, [%3]\n" \
1352 " teq %1, #0\n" \
1353-" bne 1b" \
1354+" bne 1b\n" \
1355+ extable \
1356 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1357 : "r" (&v->counter), "r" (i) \
1358 : "cc"); \
1359 } \
1360
1361-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1362-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1363+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \
1364+ __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1365+
1366+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1367+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1368 { \
1369 long long result; \
1370 unsigned long tmp; \
1371@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1372 smp_mb(); \
1373 prefetchw(&v->counter); \
1374 \
1375- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1376+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1377 "1: ldrexd %0, %H0, [%3]\n" \
1378 " " #op1 " %Q0, %Q0, %Q4\n" \
1379 " " #op2 " %R0, %R0, %R4\n" \
1380+ post_op \
1381 " strexd %1, %0, %H0, [%3]\n" \
1382 " teq %1, #0\n" \
1383-" bne 1b" \
1384+" bne 1b\n" \
1385+ extable \
1386 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1387 : "r" (&v->counter), "r" (i) \
1388 : "cc"); \
1389@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1390 return result; \
1391 }
1392
1393+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \
1394+ __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1395+
1396 #define ATOMIC64_OPS(op, op1, op2) \
1397 ATOMIC64_OP(op, op1, op2) \
1398 ATOMIC64_OP_RETURN(op, op1, op2)
1399@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1400
1401 #undef ATOMIC64_OPS
1402 #undef ATOMIC64_OP_RETURN
1403+#undef __ATOMIC64_OP_RETURN
1404 #undef ATOMIC64_OP
1405+#undef __ATOMIC64_OP
1406+#undef __OVERFLOW_EXTABLE
1407+#undef __OVERFLOW_POST_RETURN
1408+#undef __OVERFLOW_POST
1409
1410 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1411 long long new)
1412@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1413 return oldval;
1414 }
1415
1416+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1417+ long long new)
1418+{
1419+ long long oldval;
1420+ unsigned long res;
1421+
1422+ smp_mb();
1423+
1424+ do {
1425+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1426+ "ldrexd %1, %H1, [%3]\n"
1427+ "mov %0, #0\n"
1428+ "teq %1, %4\n"
1429+ "teqeq %H1, %H4\n"
1430+ "strexdeq %0, %5, %H5, [%3]"
1431+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1432+ : "r" (&ptr->counter), "r" (old), "r" (new)
1433+ : "cc");
1434+ } while (res);
1435+
1436+ smp_mb();
1437+
1438+ return oldval;
1439+}
1440+
1441 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1442 {
1443 long long result;
1444@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1446 {
1447 long long result;
1448- unsigned long tmp;
1449+ u64 tmp;
1450
1451 smp_mb();
1452 prefetchw(&v->counter);
1453
1454 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1455-"1: ldrexd %0, %H0, [%3]\n"
1456-" subs %Q0, %Q0, #1\n"
1457-" sbc %R0, %R0, #0\n"
1458+"1: ldrexd %1, %H1, [%3]\n"
1459+" subs %Q0, %Q1, #1\n"
1460+" sbcs %R0, %R1, #0\n"
1461+
1462+#ifdef CONFIG_PAX_REFCOUNT
1463+" bvc 3f\n"
1464+" mov %Q0, %Q1\n"
1465+" mov %R0, %R1\n"
1466+"2: " REFCOUNT_TRAP_INSN "\n"
1467+"3:\n"
1468+#endif
1469+
1470 " teq %R0, #0\n"
1471-" bmi 2f\n"
1472+" bmi 4f\n"
1473 " strexd %1, %0, %H0, [%3]\n"
1474 " teq %1, #0\n"
1475 " bne 1b\n"
1476-"2:"
1477+"4:\n"
1478+
1479+#ifdef CONFIG_PAX_REFCOUNT
1480+ _ASM_EXTABLE(2b, 4b)
1481+#endif
1482+
1483 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1484 : "r" (&v->counter)
1485 : "cc");
1486@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1487 " teq %0, %5\n"
1488 " teqeq %H0, %H5\n"
1489 " moveq %1, #0\n"
1490-" beq 2f\n"
1491+" beq 4f\n"
1492 " adds %Q0, %Q0, %Q6\n"
1493-" adc %R0, %R0, %R6\n"
1494+" adcs %R0, %R0, %R6\n"
1495+
1496+#ifdef CONFIG_PAX_REFCOUNT
1497+" bvc 3f\n"
1498+"2: " REFCOUNT_TRAP_INSN "\n"
1499+"3:\n"
1500+#endif
1501+
1502 " strexd %2, %0, %H0, [%4]\n"
1503 " teq %2, #0\n"
1504 " bne 1b\n"
1505-"2:"
1506+"4:\n"
1507+
1508+#ifdef CONFIG_PAX_REFCOUNT
1509+ _ASM_EXTABLE(2b, 4b)
1510+#endif
1511+
1512 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1513 : "r" (&v->counter), "r" (u), "r" (a)
1514 : "cc");
1515@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1516
1517 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1518 #define atomic64_inc(v) atomic64_add(1LL, (v))
1519+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1520 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1521+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1522 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1523 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1524 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1525+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1526 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1527 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1528 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
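
The __OVERFLOW_POST machinery above is easier to read as C: the flag-setting
adds/adcs variants expose the V (overflow) flag, bvc skips the trap when no
overflow occurred, and the exception-table entry makes the bkpt recoverable so
only the offending task is killed. A C-level model of the semantics, assuming
GCC's __builtin_add_overflow and leaving out the ldrex/strex retry loop:

        static inline void atomic_add_checked_model(int i, int *counter)
        {
                int sum;

                /* 'adds' + 'bvc' in the real code: detect signed overflow... */
                if (__builtin_add_overflow(*counter, i, &sum))
                        __builtin_trap();  /* ...and hit REFCOUNT_TRAP_INSN */

                /* 'strex' in the real code: store only a valid result */
                *counter = sum;
        }
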
1529diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1530index d2f81e6..3c4dba5 100644
1531--- a/arch/arm/include/asm/barrier.h
1532+++ b/arch/arm/include/asm/barrier.h
1533@@ -67,7 +67,7 @@
1534 do { \
1535 compiletime_assert_atomic_type(*p); \
1536 smp_mb(); \
1537- ACCESS_ONCE(*p) = (v); \
1538+ ACCESS_ONCE_RW(*p) = (v); \
1539 } while (0)
1540
1541 #define smp_load_acquire(p) \
1542diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1543index 75fe66b..ba3dee4 100644
1544--- a/arch/arm/include/asm/cache.h
1545+++ b/arch/arm/include/asm/cache.h
1546@@ -4,8 +4,10 @@
1547 #ifndef __ASMARM_CACHE_H
1548 #define __ASMARM_CACHE_H
1549
1550+#include <linux/const.h>
1551+
1552 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1553-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1554+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1555
1556 /*
1557 * Memory returned by kmalloc() may be used for DMA, so we must make
1558@@ -24,5 +26,6 @@
1559 #endif
1560
1561 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1562+#define __read_only __attribute__ ((__section__(".data..read_only")))
1563
1564 #endif
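
A short note on the new __read_only attribute: it places a variable in the
.data..read_only section, which (under KERNEXEC, as assumed here) is mapped
read-only once boot completes, so write-once configuration gets hardware
enforcement afterwards. Illustrative use:

        /* Written while the section is still writable during init,
           immutable thereafter. */
        static unsigned long boot_chosen_limit __read_only = 128;
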
1565diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1566index 2d46862..a35415b 100644
1567--- a/arch/arm/include/asm/cacheflush.h
1568+++ b/arch/arm/include/asm/cacheflush.h
1569@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1570 void (*dma_unmap_area)(const void *, size_t, int);
1571
1572 void (*dma_flush_range)(const void *, const void *);
1573-};
1574+} __no_const;
1575
1576 /*
1577 * Select the calling method
1578diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1579index 5233151..87a71fa 100644
1580--- a/arch/arm/include/asm/checksum.h
1581+++ b/arch/arm/include/asm/checksum.h
1582@@ -37,7 +37,19 @@ __wsum
1583 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1584
1585 __wsum
1586-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1587+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1588+
1589+static inline __wsum
1590+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1591+{
1592+ __wsum ret;
1593+ pax_open_userland();
1594+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1595+ pax_close_userland();
1596+ return ret;
1597+}
1598+
1599+
1600
1601 /*
1602 * Fold a partial checksum without adding pseudo headers
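
The csum_partial_copy_from_user() wrapper above shows the general UDEREF
pattern: userland is inaccessible by default, and each primitive that
legitimately touches user memory opens a window around exactly that access.
The futex.h hunks below repeat the same shape; generically, as a sketch rather
than patch code:

        /* Open userland only for the duration of one well-defined access. */
        static inline long uaccess_window(long (*raw_user_op)(void __user *),
                                          void __user *uptr)
        {
                long ret;

                pax_open_userland();    /* user domain becomes accessible */
                ret = raw_user_op(uptr);
                pax_close_userland();   /* and back to no-access */
                return ret;
        }
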
1603diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1604index abb2c37..96db950 100644
1605--- a/arch/arm/include/asm/cmpxchg.h
1606+++ b/arch/arm/include/asm/cmpxchg.h
1607@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1608
1609 #define xchg(ptr,x) \
1610 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1611+#define xchg_unchecked(ptr,x) \
1612+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1613
1614 #include <asm-generic/cmpxchg-local.h>
1615
1616diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1617index 6ddbe44..b5e38b1a 100644
1618--- a/arch/arm/include/asm/domain.h
1619+++ b/arch/arm/include/asm/domain.h
1620@@ -48,18 +48,37 @@
1621 * Domain types
1622 */
1623 #define DOMAIN_NOACCESS 0
1624-#define DOMAIN_CLIENT 1
1625 #ifdef CONFIG_CPU_USE_DOMAINS
1626+#define DOMAIN_USERCLIENT 1
1627+#define DOMAIN_KERNELCLIENT 1
1628 #define DOMAIN_MANAGER 3
1629+#define DOMAIN_VECTORS DOMAIN_USER
1630 #else
1631+
1632+#ifdef CONFIG_PAX_KERNEXEC
1633 #define DOMAIN_MANAGER 1
1634+#define DOMAIN_KERNEXEC 3
1635+#else
1636+#define DOMAIN_MANAGER 1
1637+#endif
1638+
1639+#ifdef CONFIG_PAX_MEMORY_UDEREF
1640+#define DOMAIN_USERCLIENT 0
1641+#define DOMAIN_UDEREF 1
1642+#define DOMAIN_VECTORS DOMAIN_KERNEL
1643+#else
1644+#define DOMAIN_USERCLIENT 1
1645+#define DOMAIN_VECTORS DOMAIN_USER
1646+#endif
1647+#define DOMAIN_KERNELCLIENT 1
1648+
1649 #endif
1650
1651 #define domain_val(dom,type) ((type) << (2*(dom)))
1652
1653 #ifndef __ASSEMBLY__
1654
1655-#ifdef CONFIG_CPU_USE_DOMAINS
1656+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1657 static inline void set_domain(unsigned val)
1658 {
1659 asm volatile(
1660@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1661 isb();
1662 }
1663
1664-#define modify_domain(dom,type) \
1665- do { \
1666- struct thread_info *thread = current_thread_info(); \
1667- unsigned int domain = thread->cpu_domain; \
1668- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1669- thread->cpu_domain = domain | domain_val(dom, type); \
1670- set_domain(thread->cpu_domain); \
1671- } while (0)
1672-
1673+extern void modify_domain(unsigned int dom, unsigned int type);
1674 #else
1675 static inline void set_domain(unsigned val) { }
1676 static inline void modify_domain(unsigned dom, unsigned type) { }
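
For reference, domain_val() packs one 2-bit access-control field per ARM domain
into the Domain Access Control Register, which is how the constants redefined
above take effect. A worked composition under the UDEREF assignments; the
domain indexes (DOMAIN_KERNEL, DOMAIN_USER, DOMAIN_IO) come from this header
and the values shown are illustrative:

        /* Each domain occupies 2 bits in the DACR. */
        unsigned long dacr =
                domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | /* kernel: client */
                domain_val(DOMAIN_USER,   DOMAIN_NOACCESS)     | /* user: fault    */
                domain_val(DOMAIN_IO,     DOMAIN_KERNELCLIENT);  /* I/O: client    */
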
1677diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1678index afb9caf..9a0bac0 100644
1679--- a/arch/arm/include/asm/elf.h
1680+++ b/arch/arm/include/asm/elf.h
1681@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1682 the loader. We need to make sure that it is out of the way of the program
1683 that it will "exec", and that there is sufficient room for the brk. */
1684
1685-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1686+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1687+
1688+#ifdef CONFIG_PAX_ASLR
1689+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1690+
1691+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1692+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1693+#endif
1694
1695 /* When the program starts, a1 contains a pointer to a function to be
1696 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1697@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1698 extern void elf_set_personality(const struct elf32_hdr *);
1699 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1700
1701-struct mm_struct;
1702-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1703-#define arch_randomize_brk arch_randomize_brk
1704-
1705 #ifdef CONFIG_MMU
1706 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1707 struct linux_binprm;
1708diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1709index de53547..52b9a28 100644
1710--- a/arch/arm/include/asm/fncpy.h
1711+++ b/arch/arm/include/asm/fncpy.h
1712@@ -81,7 +81,9 @@
1713 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1714 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1715 \
1716+ pax_open_kernel(); \
1717 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1718+ pax_close_kernel(); \
1719 flush_icache_range((unsigned long)(dest_buf), \
1720 (unsigned long)(dest_buf) + (size)); \
1721 \
1722diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1723index 53e69da..3fdc896 100644
1724--- a/arch/arm/include/asm/futex.h
1725+++ b/arch/arm/include/asm/futex.h
1726@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1727 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1728 return -EFAULT;
1729
1730+ pax_open_userland();
1731+
1732 smp_mb();
1733 /* Prefetching cannot fault */
1734 prefetchw(uaddr);
1735@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1736 : "cc", "memory");
1737 smp_mb();
1738
1739+ pax_close_userland();
1740+
1741 *uval = val;
1742 return ret;
1743 }
1744@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1745 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1746 return -EFAULT;
1747
1748+ pax_open_userland();
1749+
1750 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1751 "1: " TUSER(ldr) " %1, [%4]\n"
1752 " teq %1, %2\n"
1753@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1754 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1755 : "cc", "memory");
1756
1757+ pax_close_userland();
1758+
1759 *uval = val;
1760 return ret;
1761 }
1762@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1763 return -EFAULT;
1764
1765 pagefault_disable(); /* implies preempt_disable() */
1766+ pax_open_userland();
1767
1768 switch (op) {
1769 case FUTEX_OP_SET:
1770@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1771 ret = -ENOSYS;
1772 }
1773
1774+ pax_close_userland();
1775 pagefault_enable(); /* subsumes preempt_enable() */
1776
1777 if (!ret) {
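
Every futex primitive above gets the same treatment: the user access is bracketed by pax_open_userland()/pax_close_userland(), because under UDEREF the user domain is NOACCESS whenever the kernel is not deliberately touching userland. A toy model of the discipline (plain C, with a flag standing in for the DACR state; illustration only, not the kernel's implementation):

    #include <stdio.h>

    static int userland_open;   /* stand-in for the user-domain DACR state */

    static void open_userland(void)  { userland_open = 1; }
    static void close_userland(void) { userland_open = 0; }

    /* Analogue of futex_atomic_op_inuser(): the user access sits strictly
     * between open and close, and close runs on the failure path too. */
    static int user_op(int *uaddr, int fault)
    {
        int ret = 0;
        open_userland();
        if (fault)
            ret = -14;          /* stands in for -EFAULT */
        else
            *uaddr += 1;
        close_userland();
        return ret;
    }

    int main(void)
    {
        int v = 0;
        user_op(&v, 0);
        user_op(&v, 1);
        printf("v=%d window_open=%d\n", v, userland_open);  /* v=1 window_open=0 */
        return 0;
    }
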
1778diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1779index 83eb2f7..ed77159 100644
1780--- a/arch/arm/include/asm/kmap_types.h
1781+++ b/arch/arm/include/asm/kmap_types.h
1782@@ -4,6 +4,6 @@
1783 /*
1784 * This is the "bare minimum". AIO seems to require this.
1785 */
1786-#define KM_TYPE_NR 16
1787+#define KM_TYPE_NR 17
1788
1789 #endif
1790diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1791index 9e614a1..3302cca 100644
1792--- a/arch/arm/include/asm/mach/dma.h
1793+++ b/arch/arm/include/asm/mach/dma.h
1794@@ -22,7 +22,7 @@ struct dma_ops {
1795 int (*residue)(unsigned int, dma_t *); /* optional */
1796 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1797 const char *type;
1798-};
1799+} __do_const;
1800
1801 struct dma_struct {
1802 void *addr; /* single DMA address */
1803diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1804index f98c7f3..e5c626d 100644
1805--- a/arch/arm/include/asm/mach/map.h
1806+++ b/arch/arm/include/asm/mach/map.h
1807@@ -23,17 +23,19 @@ struct map_desc {
1808
1809 /* types 0-3 are defined in asm/io.h */
1810 enum {
1811- MT_UNCACHED = 4,
1812- MT_CACHECLEAN,
1813- MT_MINICLEAN,
1814+ MT_UNCACHED_RW = 4,
1815+ MT_CACHECLEAN_RO,
1816+ MT_MINICLEAN_RO,
1817 MT_LOW_VECTORS,
1818 MT_HIGH_VECTORS,
1819- MT_MEMORY_RWX,
1820+ __MT_MEMORY_RWX,
1821 MT_MEMORY_RW,
1822- MT_ROM,
1823- MT_MEMORY_RWX_NONCACHED,
1824+ MT_MEMORY_RX,
1825+ MT_ROM_RX,
1826+ MT_MEMORY_RW_NONCACHED,
1827+ MT_MEMORY_RX_NONCACHED,
1828 MT_MEMORY_RW_DTCM,
1829- MT_MEMORY_RWX_ITCM,
1830+ MT_MEMORY_RX_ITCM,
1831 MT_MEMORY_RW_SO,
1832 MT_MEMORY_DMA_READY,
1833 };
1834diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1835index 891a56b..48f337e 100644
1836--- a/arch/arm/include/asm/outercache.h
1837+++ b/arch/arm/include/asm/outercache.h
1838@@ -36,7 +36,7 @@ struct outer_cache_fns {
1839
1840 /* This is an ARM L2C thing */
1841 void (*write_sec)(unsigned long, unsigned);
1842-};
1843+} __no_const;
1844
1845 extern struct outer_cache_fns outer_cache;
1846
1847diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1848index 4355f0e..cd9168e 100644
1849--- a/arch/arm/include/asm/page.h
1850+++ b/arch/arm/include/asm/page.h
1851@@ -23,6 +23,7 @@
1852
1853 #else
1854
1855+#include <linux/compiler.h>
1856 #include <asm/glue.h>
1857
1858 /*
1859@@ -114,7 +115,7 @@ struct cpu_user_fns {
1860 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1861 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1862 unsigned long vaddr, struct vm_area_struct *vma);
1863-};
1864+} __no_const;
1865
1866 #ifdef MULTI_USER
1867 extern struct cpu_user_fns cpu_user;
1868diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1869index 19cfab5..3f5c7e9 100644
1870--- a/arch/arm/include/asm/pgalloc.h
1871+++ b/arch/arm/include/asm/pgalloc.h
1872@@ -17,6 +17,7 @@
1873 #include <asm/processor.h>
1874 #include <asm/cacheflush.h>
1875 #include <asm/tlbflush.h>
1876+#include <asm/system_info.h>
1877
1878 #define check_pgt_cache() do { } while (0)
1879
1880@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1881 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1882 }
1883
1884+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1885+{
1886+ pud_populate(mm, pud, pmd);
1887+}
1888+
1889 #else /* !CONFIG_ARM_LPAE */
1890
1891 /*
1892@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1893 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1894 #define pmd_free(mm, pmd) do { } while (0)
1895 #define pud_populate(mm,pmd,pte) BUG()
1896+#define pud_populate_kernel(mm,pmd,pte) BUG()
1897
1898 #endif /* CONFIG_ARM_LPAE */
1899
1900@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1901 __free_page(pte);
1902 }
1903
1904+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1905+{
1906+#ifdef CONFIG_ARM_LPAE
1907+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1908+#else
1909+ if (addr & SECTION_SIZE)
1910+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1911+ else
1912+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1913+#endif
1914+ flush_pmd_entry(pmdp);
1915+}
1916+
1917 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1918 pmdval_t prot)
1919 {
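
__section_update() above has to cope with classic (non-LPAE) ARM, where one pmd entry holds two 1 MB section descriptors, pmdp[0] and pmdp[1]; the SECTION_SIZE address bit selects which half to modify. A quick runnable check of the selection logic (SECTION_SIZE assumed to be 1 MB, as on non-LPAE ARM):

    #include <stdio.h>
    #include <stdint.h>

    #define SECTION_SIZE (1UL << 20)   /* 1 MB sections on non-LPAE ARM */

    /* Which half of the two-section pmd entry does an address fall in? */
    static int section_index(uintptr_t addr)
    {
        return (addr & SECTION_SIZE) ? 1 : 0;
    }

    int main(void)
    {
        printf("%#lx -> pmdp[%d]\n", 0xc0000000UL, section_index(0xc0000000UL));
        printf("%#lx -> pmdp[%d]\n", 0xc0100000UL, section_index(0xc0100000UL));
        return 0;
    }
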
1920diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1921index 5e68278..1869bae 100644
1922--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1923+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924@@ -27,7 +27,7 @@
1925 /*
1926 * - section
1927 */
1928-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1929+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1930 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1931 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1932 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1933@@ -39,6 +39,7 @@
1934 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1935 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1936 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1937+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1938
1939 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1940 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1941@@ -68,6 +69,7 @@
1942 * - extended small page/tiny page
1943 */
1944 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1945+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1946 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1947 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1948 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1949diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1950index f027941..f36ce30 100644
1951--- a/arch/arm/include/asm/pgtable-2level.h
1952+++ b/arch/arm/include/asm/pgtable-2level.h
1953@@ -126,6 +126,9 @@
1954 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1955 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1956
1957+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1958+#define L_PTE_PXN (_AT(pteval_t, 0))
1959+
1960 /*
1961 * These are the memory types, defined to be compatible with
1962 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1963diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1964index a31ecdad..95e98d4 100644
1965--- a/arch/arm/include/asm/pgtable-3level.h
1966+++ b/arch/arm/include/asm/pgtable-3level.h
1967@@ -81,6 +81,7 @@
1968 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1969 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1970 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1971+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1973 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1974 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1975@@ -92,10 +93,12 @@
1976 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1977 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1978 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1979+#define PMD_SECT_RDONLY PMD_SECT_AP2
1980
1981 /*
1982 * To be used in assembly code with the upper page attributes.
1983 */
1984+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1985 #define L_PTE_XN_HIGH (1 << (54 - 32))
1986 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1987
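
With LPAE the PTEs are 64-bit, PXN lives at bit 53, and the *_HIGH constants re-express the same bits relative to the upper 32-bit word for assembly use. The following check confirms the two forms agree:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define L_PTE_PXN       (UINT64_C(1) << 53)
    #define L_PTE_XN        (UINT64_C(1) << 54)
    #define L_PTE_PXN_HIGH  (1u << (53 - 32))
    #define L_PTE_XN_HIGH   (1u << (54 - 32))

    int main(void)
    {
        /* The _HIGH forms are just the upper word of the 64-bit constants */
        assert((uint32_t)(L_PTE_PXN >> 32) == L_PTE_PXN_HIGH);
        assert((uint32_t)(L_PTE_XN  >> 32) == L_PTE_XN_HIGH);
        printf("PXN_HIGH=%#x XN_HIGH=%#x\n", L_PTE_PXN_HIGH, L_PTE_XN_HIGH);
        return 0;
    }
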
1988diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1989index d5cac54..906ea3e 100644
1990--- a/arch/arm/include/asm/pgtable.h
1991+++ b/arch/arm/include/asm/pgtable.h
1992@@ -33,6 +33,9 @@
1993 #include <asm/pgtable-2level.h>
1994 #endif
1995
1996+#define ktla_ktva(addr) (addr)
1997+#define ktva_ktla(addr) (addr)
1998+
1999 /*
2000 * Just any arbitrary offset to the start of the vmalloc VM area: the
2001 * current 8MB value just means that there will be a 8MB "hole" after the
2002@@ -48,6 +51,9 @@
2003 #define LIBRARY_TEXT_START 0x0c000000
2004
2005 #ifndef __ASSEMBLY__
2006+extern pteval_t __supported_pte_mask;
2007+extern pmdval_t __supported_pmd_mask;
2008+
2009 extern void __pte_error(const char *file, int line, pte_t);
2010 extern void __pmd_error(const char *file, int line, pmd_t);
2011 extern void __pgd_error(const char *file, int line, pgd_t);
2012@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2013 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2014 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2015
2016+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2017+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2018+
2019+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2020+#include <asm/domain.h>
2021+#include <linux/thread_info.h>
2022+#include <linux/preempt.h>
2023+
2024+static inline int test_domain(int domain, int domaintype)
2025+{
2026+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2027+}
2028+#endif
2029+
2030+#ifdef CONFIG_PAX_KERNEXEC
2031+static inline unsigned long pax_open_kernel(void) {
2032+#ifdef CONFIG_ARM_LPAE
2033+ /* TODO */
2034+#else
2035+ preempt_disable();
2036+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2037+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2038+#endif
2039+ return 0;
2040+}
2041+
2042+static inline unsigned long pax_close_kernel(void) {
2043+#ifdef CONFIG_ARM_LPAE
2044+ /* TODO */
2045+#else
2046+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2047+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2048+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2049+ preempt_enable_no_resched();
2050+#endif
2051+ return 0;
2052+}
2053+#else
2054+static inline unsigned long pax_open_kernel(void) { return 0; }
2055+static inline unsigned long pax_close_kernel(void) { return 0; }
2056+#endif
2057+
2058 /*
2059 * This is the lowest virtual address we can permit any user space
2060 * mapping to be mapped at. This is particularly important for
2061@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2062 /*
2063 * The pgprot_* and protection_map entries will be fixed up in runtime
2064 * to include the cachable and bufferable bits based on memory policy,
2065- * as well as any architecture dependent bits like global/ASID and SMP
2066- * shared mapping bits.
2067+ * as well as any architecture dependent bits like global/ASID, PXN,
2068+ * and SMP shared mapping bits.
2069 */
2070 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2071
2072@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2073 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2074 {
2075 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2076- L_PTE_NONE | L_PTE_VALID;
2077+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2078 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2079 return pte;
2080 }
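
pax_open_kernel()/pax_close_kernel() above open a short, preemption-disabled window in which DOMAIN_KERNEL is a manager, and every deliberate write to otherwise read-only kernel memory in this patch (fncpy, fiq, tcm, the TLS emulation word) is bracketed by the pair. A userspace analogue of the same write-window discipline, using mprotect() in place of the domain switch (illustration only; the kernel version never touches page tables):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        strcpy(p, "original");
        mprotect(p, pagesz, PROT_READ);           /* "read-only kernel data" */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        memcpy(p, "patched!", 9);                     /* the sanctioned write */
        mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("%s\n", p);
        return 0;
    }
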
2081diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2082index c25ef3e..735f14b 100644
2083--- a/arch/arm/include/asm/psci.h
2084+++ b/arch/arm/include/asm/psci.h
2085@@ -32,7 +32,7 @@ struct psci_operations {
2086 int (*affinity_info)(unsigned long target_affinity,
2087 unsigned long lowest_affinity_level);
2088 int (*migrate_info_type)(void);
2089-};
2090+} __no_const;
2091
2092 extern struct psci_operations psci_ops;
2093 extern struct smp_operations psci_smp_ops;
2094diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2095index 18f5a55..5072a40 100644
2096--- a/arch/arm/include/asm/smp.h
2097+++ b/arch/arm/include/asm/smp.h
2098@@ -107,7 +107,7 @@ struct smp_operations {
2099 int (*cpu_disable)(unsigned int cpu);
2100 #endif
2101 #endif
2102-};
2103+} __no_const;
2104
2105 struct of_cpu_method {
2106 const char *method;
2107diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2108index d890e41..3921292 100644
2109--- a/arch/arm/include/asm/thread_info.h
2110+++ b/arch/arm/include/asm/thread_info.h
2111@@ -78,9 +78,9 @@ struct thread_info {
2112 .flags = 0, \
2113 .preempt_count = INIT_PREEMPT_COUNT, \
2114 .addr_limit = KERNEL_DS, \
2115- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2116- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2117- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2118+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2119+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2120+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2121 .restart_block = { \
2122 .fn = do_no_restart_syscall, \
2123 }, \
2124@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2125 #define TIF_SYSCALL_AUDIT 9
2126 #define TIF_SYSCALL_TRACEPOINT 10
2127 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2128-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2129+/* within 8 bits of TIF_SYSCALL_TRACE
2130+ * to meet flexible second operand requirements
2131+ */
2132+#define TIF_GRSEC_SETXID 12
2133+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2134 #define TIF_USING_IWMMXT 17
2135 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2136 #define TIF_RESTORE_SIGMASK 20
2137@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2138 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2139 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2140 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2141+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2142
2143 /* Checks for any syscall work in entry-common.S */
2144 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2145- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2146+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2147
2148 /*
2149 * Change these and you break ASM code in entry-common.S
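
The relocated TIF_NOHZ and the "within 8 bits" comment above exist because an ARM data-processing instruction can only encode an 8-bit constant rotated right by an even amount, so every flag tested in one tst in entry-common.S must fit a single such window. A runnable encodability check (TIF_SYSCALL_TRACE assumed to be bit 8 on this kernel):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t rol32(uint32_t v, unsigned s)
    {
        return s ? (v << s) | (v >> (32 - s)) : v;
    }

    /* Can v be encoded as an ARM immediate: an 8-bit value rotated
     * right by an even amount? */
    static int arm_imm_ok(uint32_t v)
    {
        for (unsigned rot = 0; rot < 32; rot += 2)
            if (rol32(v, rot) <= 0xff)
                return 1;
        return 0;
    }

    int main(void)
    {
        /* _TIF_SYSCALL_WORK with the new TIF_GRSEC_SETXID=12 member:
         * bits 8..12 still form one rotated 8-bit immediate. */
        uint32_t work = (1u<<8)|(1u<<9)|(1u<<10)|(1u<<11)|(1u<<12);
        printf("mask %#x encodable: %d\n", work, arm_imm_ok(work));
        /* A 9-bit-wide spread (bits 8 and 16) would not be: */
        printf("mask %#x encodable: %d\n", (1u<<8)|(1u<<16),
               arm_imm_ok((1u<<8)|(1u<<16)));
        return 0;
    }
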
2150diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2151index 5f833f7..76e6644 100644
2152--- a/arch/arm/include/asm/tls.h
2153+++ b/arch/arm/include/asm/tls.h
2154@@ -3,6 +3,7 @@
2155
2156 #include <linux/compiler.h>
2157 #include <asm/thread_info.h>
2158+#include <asm/pgtable.h>
2159
2160 #ifdef __ASSEMBLY__
2161 #include <asm/asm-offsets.h>
2162@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2163 * at 0xffff0fe0 must be used instead. (see
2164 * entry-armv.S for details)
2165 */
2166+ pax_open_kernel();
2167 *((unsigned int *)0xffff0ff0) = val;
2168+ pax_close_kernel();
2169 #endif
2170 }
2171
2172diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2173index 4767eb9..bf00668 100644
2174--- a/arch/arm/include/asm/uaccess.h
2175+++ b/arch/arm/include/asm/uaccess.h
2176@@ -18,6 +18,7 @@
2177 #include <asm/domain.h>
2178 #include <asm/unified.h>
2179 #include <asm/compiler.h>
2180+#include <asm/pgtable.h>
2181
2182 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2183 #include <asm-generic/uaccess-unaligned.h>
2184@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2185 static inline void set_fs(mm_segment_t fs)
2186 {
2187 current_thread_info()->addr_limit = fs;
2188- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2189+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2190 }
2191
2192 #define segment_eq(a,b) ((a) == (b))
2193
2194+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2195+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2196+
2197+static inline void pax_open_userland(void)
2198+{
2199+
2200+#ifdef CONFIG_PAX_MEMORY_UDEREF
2201+ if (segment_eq(get_fs(), USER_DS)) {
2202+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2203+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2204+ }
2205+#endif
2206+
2207+}
2208+
2209+static inline void pax_close_userland(void)
2210+{
2211+
2212+#ifdef CONFIG_PAX_MEMORY_UDEREF
2213+ if (segment_eq(get_fs(), USER_DS)) {
2214+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2215+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2216+ }
2217+#endif
2218+
2219+}
2220+
2221 #define __addr_ok(addr) ({ \
2222 unsigned long flag; \
2223 __asm__("cmp %2, %0; movlo %0, #0" \
2224@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2225
2226 #define get_user(x,p) \
2227 ({ \
2228+ int __e; \
2229 might_fault(); \
2230- __get_user_check(x,p); \
2231+ pax_open_userland(); \
2232+ __e = __get_user_check(x,p); \
2233+ pax_close_userland(); \
2234+ __e; \
2235 })
2236
2237 extern int __put_user_1(void *, unsigned int);
2238@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2239
2240 #define put_user(x,p) \
2241 ({ \
2242+ int __e; \
2243 might_fault(); \
2244- __put_user_check(x,p); \
2245+ pax_open_userland(); \
2246+ __e = __put_user_check(x,p); \
2247+ pax_close_userland(); \
2248+ __e; \
2249 })
2250
2251 #else /* CONFIG_MMU */
2252@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2253
2254 #endif /* CONFIG_MMU */
2255
2256+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2257 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2258
2259 #define user_addr_max() \
2260@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2261 #define __get_user(x,ptr) \
2262 ({ \
2263 long __gu_err = 0; \
2264+ pax_open_userland(); \
2265 __get_user_err((x),(ptr),__gu_err); \
2266+ pax_close_userland(); \
2267 __gu_err; \
2268 })
2269
2270 #define __get_user_error(x,ptr,err) \
2271 ({ \
2272+ pax_open_userland(); \
2273 __get_user_err((x),(ptr),err); \
2274+ pax_close_userland(); \
2275 (void) 0; \
2276 })
2277
2278@@ -368,13 +409,17 @@ do { \
2279 #define __put_user(x,ptr) \
2280 ({ \
2281 long __pu_err = 0; \
2282+ pax_open_userland(); \
2283 __put_user_err((x),(ptr),__pu_err); \
2284+ pax_close_userland(); \
2285 __pu_err; \
2286 })
2287
2288 #define __put_user_error(x,ptr,err) \
2289 ({ \
2290+ pax_open_userland(); \
2291 __put_user_err((x),(ptr),err); \
2292+ pax_close_userland(); \
2293 (void) 0; \
2294 })
2295
2296@@ -474,11 +519,44 @@ do { \
2297
2298
2299 #ifdef CONFIG_MMU
2300-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2301-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2302+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2303+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2304+
2305+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2306+{
2307+ unsigned long ret;
2308+
2309+ check_object_size(to, n, false);
2310+ pax_open_userland();
2311+ ret = ___copy_from_user(to, from, n);
2312+ pax_close_userland();
2313+ return ret;
2314+}
2315+
2316+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2317+{
2318+ unsigned long ret;
2319+
2320+ check_object_size(from, n, true);
2321+ pax_open_userland();
2322+ ret = ___copy_to_user(to, from, n);
2323+ pax_close_userland();
2324+ return ret;
2325+}
2326+
2327 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2328-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2329+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2330 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2331+
2332+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2333+{
2334+ unsigned long ret;
2335+ pax_open_userland();
2336+ ret = ___clear_user(addr, n);
2337+ pax_close_userland();
2338+ return ret;
2339+}
2340+
2341 #else
2342 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2343 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2344@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2345
2346 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2347 {
2348+ if ((long)n < 0)
2349+ return n;
2350+
2351 if (access_ok(VERIFY_READ, from, n))
2352 n = __copy_from_user(to, from, n);
2353 else /* security hole - plug it */
2354@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2355
2356 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2357 {
2358+ if ((long)n < 0)
2359+ return n;
2360+
2361 if (access_ok(VERIFY_WRITE, to, n))
2362 n = __copy_to_user(to, from, n);
2363 return n;
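
The added (long)n < 0 test in copy_from_user()/copy_to_user() rejects lengths with the top bit set before they ever reach access_ok(); such lengths are the classic product of signed arithmetic underflow on attacker-influenced sizes. A small demonstration of the failure mode the guard is aimed at:

    #include <stdio.h>

    /* How a signed length bug turns into a huge size_t. */
    int main(void)
    {
        int claimed = 16, header = 32;
        size_t n = claimed - header;     /* -16 wraps to a huge unsigned value */
        printf("n = %zu, (long)n < 0: %d\n", n, (long)n < 0);
        return 0;
    }
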
2364diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2365index 5af0ed1..cea83883 100644
2366--- a/arch/arm/include/uapi/asm/ptrace.h
2367+++ b/arch/arm/include/uapi/asm/ptrace.h
2368@@ -92,7 +92,7 @@
2369 * ARMv7 groups of PSR bits
2370 */
2371 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2372-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2373+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2374 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2375 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2376
2377diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2378index a88671c..1cc895e 100644
2379--- a/arch/arm/kernel/armksyms.c
2380+++ b/arch/arm/kernel/armksyms.c
2381@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2382
2383 /* networking */
2384 EXPORT_SYMBOL(csum_partial);
2385-EXPORT_SYMBOL(csum_partial_copy_from_user);
2386+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2387 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2388 EXPORT_SYMBOL(__csum_ipv6_magic);
2389
2390@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2391 #ifdef CONFIG_MMU
2392 EXPORT_SYMBOL(copy_page);
2393
2394-EXPORT_SYMBOL(__copy_from_user);
2395-EXPORT_SYMBOL(__copy_to_user);
2396-EXPORT_SYMBOL(__clear_user);
2397+EXPORT_SYMBOL(___copy_from_user);
2398+EXPORT_SYMBOL(___copy_to_user);
2399+EXPORT_SYMBOL(___clear_user);
2400
2401 EXPORT_SYMBOL(__get_user_1);
2402 EXPORT_SYMBOL(__get_user_2);
2403diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2404index 2f5555d..d493c91 100644
2405--- a/arch/arm/kernel/entry-armv.S
2406+++ b/arch/arm/kernel/entry-armv.S
2407@@ -47,6 +47,87 @@
2408 9997:
2409 .endm
2410
2411+ .macro pax_enter_kernel
2412+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2413+ @ make aligned space for saved DACR
2414+ sub sp, sp, #8
2415+ @ save regs
2416+ stmdb sp!, {r1, r2}
2417+ @ read DACR from cpu_domain into r1
2418+ mov r2, sp
2419+ @ assume 8K pages, since we have to split the immediate in two
2420+ bic r2, r2, #(0x1fc0)
2421+ bic r2, r2, #(0x3f)
2422+ ldr r1, [r2, #TI_CPU_DOMAIN]
2423+ @ store old DACR on stack
2424+ str r1, [sp, #8]
2425+#ifdef CONFIG_PAX_KERNEXEC
2426+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2427+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2428+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2429+#endif
2430+#ifdef CONFIG_PAX_MEMORY_UDEREF
2431+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2432+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2433+#endif
2434+ @ write r1 to current_thread_info()->cpu_domain
2435+ str r1, [r2, #TI_CPU_DOMAIN]
2436+ @ write r1 to DACR
2437+ mcr p15, 0, r1, c3, c0, 0
2438+ @ instruction sync
2439+ instr_sync
2440+ @ restore regs
2441+ ldmia sp!, {r1, r2}
2442+#endif
2443+ .endm
2444+
2445+ .macro pax_open_userland
2446+#ifdef CONFIG_PAX_MEMORY_UDEREF
2447+ @ save regs
2448+ stmdb sp!, {r0, r1}
2449+ @ read DACR from cpu_domain into r1
2450+ mov r0, sp
2451+ @ assume 8K pages, since we have to split the immediate in two
2452+ bic r0, r0, #(0x1fc0)
2453+ bic r0, r0, #(0x3f)
2454+ ldr r1, [r0, #TI_CPU_DOMAIN]
2455+	@ set current DOMAIN_USER to DOMAIN_UDEREF
2456+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2457+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2458+ @ write r1 to current_thread_info()->cpu_domain
2459+ str r1, [r0, #TI_CPU_DOMAIN]
2460+ @ write r1 to DACR
2461+ mcr p15, 0, r1, c3, c0, 0
2462+ @ instruction sync
2463+ instr_sync
2464+ @ restore regs
2465+ ldmia sp!, {r0, r1}
2466+#endif
2467+ .endm
2468+
2469+ .macro pax_close_userland
2470+#ifdef CONFIG_PAX_MEMORY_UDEREF
2471+ @ save regs
2472+ stmdb sp!, {r0, r1}
2473+ @ read DACR from cpu_domain into r1
2474+ mov r0, sp
2475+ @ assume 8K pages, since we have to split the immediate in two
2476+ bic r0, r0, #(0x1fc0)
2477+ bic r0, r0, #(0x3f)
2478+ ldr r1, [r0, #TI_CPU_DOMAIN]
2479+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2480+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2481+ @ write r1 to current_thread_info()->cpu_domain
2482+ str r1, [r0, #TI_CPU_DOMAIN]
2483+ @ write r1 to DACR
2484+ mcr p15, 0, r1, c3, c0, 0
2485+ @ instruction sync
2486+ instr_sync
2487+ @ restore regs
2488+ ldmia sp!, {r0, r1}
2489+#endif
2490+ .endm
2491+
2492 .macro pabt_helper
2493 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2494 #ifdef MULTI_PABORT
2495@@ -89,11 +170,15 @@
2496 * Invalid mode handlers
2497 */
2498 .macro inv_entry, reason
2499+
2500+ pax_enter_kernel
2501+
2502 sub sp, sp, #S_FRAME_SIZE
2503 ARM( stmib sp, {r1 - lr} )
2504 THUMB( stmia sp, {r0 - r12} )
2505 THUMB( str sp, [sp, #S_SP] )
2506 THUMB( str lr, [sp, #S_LR] )
2507+
2508 mov r1, #\reason
2509 .endm
2510
2511@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2512 .macro svc_entry, stack_hole=0, trace=1
2513 UNWIND(.fnstart )
2514 UNWIND(.save {r0 - pc} )
2515+
2516+ pax_enter_kernel
2517+
2518 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2519+
2520 #ifdef CONFIG_THUMB2_KERNEL
2521 SPFIX( str r0, [sp] ) @ temporarily saved
2522 SPFIX( mov r0, sp )
2523@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2524 ldmia r0, {r3 - r5}
2525 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2526 mov r6, #-1 @ "" "" "" ""
2527+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2528+ @ offset sp by 8 as done in pax_enter_kernel
2529+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2530+#else
2531 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2532+#endif
2533 SPFIX( addeq r2, r2, #4 )
2534 str r3, [sp, #-4]! @ save the "real" r0 copied
2535 @ from the exception stack
2536@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
2537 .macro usr_entry, trace=1
2538 UNWIND(.fnstart )
2539 UNWIND(.cantunwind ) @ don't unwind the user space
2540+
2541+ pax_enter_kernel_user
2542+
2543 sub sp, sp, #S_FRAME_SIZE
2544 ARM( stmib sp, {r1 - r12} )
2545 THUMB( stmia sp, {r0 - r12} )
2546@@ -478,7 +575,9 @@ __und_usr:
2547 tst r3, #PSR_T_BIT @ Thumb mode?
2548 bne __und_usr_thumb
2549 sub r4, r2, #4 @ ARM instr at LR - 4
2550+ pax_open_userland
2551 1: ldrt r0, [r4]
2552+ pax_close_userland
2553 ARM_BE8(rev r0, r0) @ little endian instruction
2554
2555 @ r0 = 32-bit ARM instruction which caused the exception
2556@@ -512,11 +611,15 @@ __und_usr_thumb:
2557 */
2558 .arch armv6t2
2559 #endif
2560+ pax_open_userland
2561 2: ldrht r5, [r4]
2562+ pax_close_userland
2563 ARM_BE8(rev16 r5, r5) @ little endian instruction
2564 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2565 blo __und_usr_fault_16 @ 16bit undefined instruction
2566+ pax_open_userland
2567 3: ldrht r0, [r2]
2568+ pax_close_userland
2569 ARM_BE8(rev16 r0, r0) @ little endian instruction
2570 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2571 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2572@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
2573 */
2574 .pushsection .fixup, "ax"
2575 .align 2
2576-4: str r4, [sp, #S_PC] @ retry current instruction
2577+4: pax_close_userland
2578+ str r4, [sp, #S_PC] @ retry current instruction
2579 ret r9
2580 .popsection
2581 .pushsection __ex_table,"a"
2582@@ -766,7 +870,7 @@ ENTRY(__switch_to)
2583 THUMB( str lr, [ip], #4 )
2584 ldr r4, [r2, #TI_TP_VALUE]
2585 ldr r5, [r2, #TI_TP_VALUE + 4]
2586-#ifdef CONFIG_CPU_USE_DOMAINS
2587+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2588 ldr r6, [r2, #TI_CPU_DOMAIN]
2589 #endif
2590 switch_tls r1, r4, r5, r3, r7
2591@@ -775,7 +879,7 @@ ENTRY(__switch_to)
2592 ldr r8, =__stack_chk_guard
2593 ldr r7, [r7, #TSK_STACK_CANARY]
2594 #endif
2595-#ifdef CONFIG_CPU_USE_DOMAINS
2596+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2598 #endif
2599 mov r5, r0
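
All of the new macros above recover current_thread_info() from sp with the pair of bic instructions: the 8 KB stack mask 0x1fff is not a valid ARM immediate, so it is cleared as 0x1fc0 plus 0x3f (hence the "split the immediate in two" comments). The equivalence is easy to verify:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t sp = 0xc123ef40;
        /* The two bic steps from pax_enter_kernel: */
        uintptr_t ti = (sp & ~(uintptr_t)0x1fc0) & ~(uintptr_t)0x3f;
        printf("sp=%#lx thread_info=%#lx\n",
               (unsigned long)sp, (unsigned long)ti);
        /* ...round sp down to the 8 KB stack base in one go: */
        printf("matches ~0x1fff mask: %d\n",
               ti == (sp & ~(uintptr_t)0x1fff));
        return 0;
    }
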
2600diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2601index f8ccc21..83d192f 100644
2602--- a/arch/arm/kernel/entry-common.S
2603+++ b/arch/arm/kernel/entry-common.S
2604@@ -11,18 +11,46 @@
2605 #include <asm/assembler.h>
2606 #include <asm/unistd.h>
2607 #include <asm/ftrace.h>
2608+#include <asm/domain.h>
2609 #include <asm/unwind.h>
2610
2611+#include "entry-header.S"
2612+
2613 #ifdef CONFIG_NEED_RET_TO_USER
2614 #include <mach/entry-macro.S>
2615 #else
2616 .macro arch_ret_to_user, tmp1, tmp2
2617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2618+ @ save regs
2619+ stmdb sp!, {r1, r2}
2620+ @ read DACR from cpu_domain into r1
2621+ mov r2, sp
2622+ @ assume 8K pages, since we have to split the immediate in two
2623+ bic r2, r2, #(0x1fc0)
2624+ bic r2, r2, #(0x3f)
2625+ ldr r1, [r2, #TI_CPU_DOMAIN]
2626+#ifdef CONFIG_PAX_KERNEXEC
2627+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2628+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2629+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2630+#endif
2631+#ifdef CONFIG_PAX_MEMORY_UDEREF
2632+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2633+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2635+#endif
2636+ @ write r1 to current_thread_info()->cpu_domain
2637+ str r1, [r2, #TI_CPU_DOMAIN]
2638+ @ write r1 to DACR
2639+ mcr p15, 0, r1, c3, c0, 0
2640+ @ instruction sync
2641+ instr_sync
2642+ @ restore regs
2643+ ldmia sp!, {r1, r2}
2644+#endif
2645 .endm
2646 #endif
2647
2648-#include "entry-header.S"
2649-
2650-
2651 .align 5
2652 /*
2653 * This is the fast syscall return path. We do as little as
2654@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2655 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2656 #endif
2657
2658+ /*
2659+	 * do this here to avoid the performance hit of wrapping the code
2660+	 * above, which directly dereferences userland to parse the SWI instruction
2661+ */
2662+ pax_enter_kernel_user
2663+
2664 adr tbl, sys_call_table @ load syscall table pointer
2665
2666 #if defined(CONFIG_OABI_COMPAT)
2667diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2668index 1a0045a..9b4f34d 100644
2669--- a/arch/arm/kernel/entry-header.S
2670+++ b/arch/arm/kernel/entry-header.S
2671@@ -196,6 +196,60 @@
2672 msr cpsr_c, \rtemp @ switch back to the SVC mode
2673 .endm
2674
2675+ .macro pax_enter_kernel_user
2676+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2677+ @ save regs
2678+ stmdb sp!, {r0, r1}
2679+ @ read DACR from cpu_domain into r1
2680+ mov r0, sp
2681+ @ assume 8K pages, since we have to split the immediate in two
2682+ bic r0, r0, #(0x1fc0)
2683+ bic r0, r0, #(0x3f)
2684+ ldr r1, [r0, #TI_CPU_DOMAIN]
2685+#ifdef CONFIG_PAX_MEMORY_UDEREF
2686+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2687+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2688+#endif
2689+#ifdef CONFIG_PAX_KERNEXEC
2690+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2691+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2692+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2693+#endif
2694+ @ write r1 to current_thread_info()->cpu_domain
2695+ str r1, [r0, #TI_CPU_DOMAIN]
2696+ @ write r1 to DACR
2697+ mcr p15, 0, r1, c3, c0, 0
2698+ @ instruction sync
2699+ instr_sync
2700+ @ restore regs
2701+ ldmia sp!, {r0, r1}
2702+#endif
2703+ .endm
2704+
2705+ .macro pax_exit_kernel
2706+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2707+ @ save regs
2708+ stmdb sp!, {r0, r1}
2709+ @ read old DACR from stack into r1
2710+ ldr r1, [sp, #(8 + S_SP)]
2711+ sub r1, r1, #8
2712+ ldr r1, [r1]
2713+
2714+ @ write r1 to current_thread_info()->cpu_domain
2715+ mov r0, sp
2716+ @ assume 8K pages, since we have to split the immediate in two
2717+ bic r0, r0, #(0x1fc0)
2718+ bic r0, r0, #(0x3f)
2719+ str r1, [r0, #TI_CPU_DOMAIN]
2720+ @ write r1 to DACR
2721+ mcr p15, 0, r1, c3, c0, 0
2722+ @ instruction sync
2723+ instr_sync
2724+ @ restore regs
2725+ ldmia sp!, {r0, r1}
2726+#endif
2727+ .endm
2728+
2729 #ifndef CONFIG_THUMB2_KERNEL
2730 .macro svc_exit, rpsr, irq = 0
2731 .if \irq != 0
2732@@ -215,6 +269,9 @@
2733 blne trace_hardirqs_off
2734 #endif
2735 .endif
2736+
2737+ pax_exit_kernel
2738+
2739 msr spsr_cxsf, \rpsr
2740 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2741 @ We must avoid clrex due to Cortex-A15 erratum #830321
2742@@ -291,6 +348,9 @@
2743 blne trace_hardirqs_off
2744 #endif
2745 .endif
2746+
2747+ pax_exit_kernel
2748+
2749 ldr lr, [sp, #S_SP] @ top of the stack
2750 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2751
2752diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2753index 059c3da..8e45cfc 100644
2754--- a/arch/arm/kernel/fiq.c
2755+++ b/arch/arm/kernel/fiq.c
2756@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2757 void *base = vectors_page;
2758 unsigned offset = FIQ_OFFSET;
2759
2760+ pax_open_kernel();
2761 memcpy(base + offset, start, length);
2762+ pax_close_kernel();
2763+
2764 if (!cache_is_vipt_nonaliasing())
2765 flush_icache_range((unsigned long)base + offset, offset +
2766 length);
2767diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2768index 664eee8..f470938 100644
2769--- a/arch/arm/kernel/head.S
2770+++ b/arch/arm/kernel/head.S
2771@@ -437,7 +437,7 @@ __enable_mmu:
2772 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2773 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2774 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2775- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2776+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2777 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2778 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2779 #endif
2780diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2781index bea7db9..a210d10 100644
2782--- a/arch/arm/kernel/module.c
2783+++ b/arch/arm/kernel/module.c
2784@@ -38,12 +38,39 @@
2785 #endif
2786
2787 #ifdef CONFIG_MMU
2788-void *module_alloc(unsigned long size)
2789+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2790 {
2791+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2792+ return NULL;
2793 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2794- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2795+ GFP_KERNEL, prot, NUMA_NO_NODE,
2796 __builtin_return_address(0));
2797 }
2798+
2799+void *module_alloc(unsigned long size)
2800+{
2801+
2802+#ifdef CONFIG_PAX_KERNEXEC
2803+ return __module_alloc(size, PAGE_KERNEL);
2804+#else
2805+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2806+#endif
2807+
2808+}
2809+
2810+#ifdef CONFIG_PAX_KERNEXEC
2811+void module_memfree_exec(void *module_region)
2812+{
2813+ module_memfree(module_region);
2814+}
2815+EXPORT_SYMBOL(module_memfree_exec);
2816+
2817+void *module_alloc_exec(unsigned long size)
2818+{
2819+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2820+}
2821+EXPORT_SYMBOL(module_alloc_exec);
2822+#endif
2823 #endif
2824
2825 int
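
The module_alloc() rework above means that, under KERNEXEC, ordinary module allocations come back non-executable and only module_alloc_exec() returns executable memory, so no module region is ever writable and executable at once. A loose userspace analogue of the split using mmap() protections (a sketch, not the kernel's allocator; a real code region would be populated before being made executable):

    #include <stdio.h>
    #include <sys/mman.h>

    /* Data allocations are RW, code allocations RX, never both W and X. */
    static void *alloc_prot(size_t size, int prot)
    {
        return mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    int main(void)
    {
        void *data = alloc_prot(4096, PROT_READ | PROT_WRITE);
        void *code = alloc_prot(4096, PROT_READ | PROT_EXEC);
        printf("data=%p code=%p\n", data, code);
        return 0;
    }
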
2826diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2827index 5038960..4aa71d8 100644
2828--- a/arch/arm/kernel/patch.c
2829+++ b/arch/arm/kernel/patch.c
2830@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2831 else
2832 __acquire(&patch_lock);
2833
2834+ pax_open_kernel();
2835 if (thumb2 && __opcode_is_thumb16(insn)) {
2836 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2837 size = sizeof(u16);
2838@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2839 *(u32 *)waddr = insn;
2840 size = sizeof(u32);
2841 }
2842+ pax_close_kernel();
2843
2844 if (waddr != addr) {
2845 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2846diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2847index fdfa3a7..5d208b8 100644
2848--- a/arch/arm/kernel/process.c
2849+++ b/arch/arm/kernel/process.c
2850@@ -207,6 +207,7 @@ void machine_power_off(void)
2851
2852 if (pm_power_off)
2853 pm_power_off();
2854+ BUG();
2855 }
2856
2857 /*
2858@@ -220,7 +221,7 @@ void machine_power_off(void)
2859 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2860 * to use. Implementing such co-ordination would be essentially impossible.
2861 */
2862-void machine_restart(char *cmd)
2863+__noreturn void machine_restart(char *cmd)
2864 {
2865 local_irq_disable();
2866 smp_send_stop();
2867@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2868
2869 show_regs_print_info(KERN_DEFAULT);
2870
2871- print_symbol("PC is at %s\n", instruction_pointer(regs));
2872- print_symbol("LR is at %s\n", regs->ARM_lr);
2873+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2874+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2875 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2876 "sp : %08lx ip : %08lx fp : %08lx\n",
2877 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2878@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2879 return 0;
2880 }
2881
2882-unsigned long arch_randomize_brk(struct mm_struct *mm)
2883-{
2884- unsigned long range_end = mm->brk + 0x02000000;
2885- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2886-}
2887-
2888 #ifdef CONFIG_MMU
2889 #ifdef CONFIG_KUSER_HELPERS
2890 /*
2891@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2892
2893 static int __init gate_vma_init(void)
2894 {
2895- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2896+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2897 return 0;
2898 }
2899 arch_initcall(gate_vma_init);
2900@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2901 return is_gate_vma(vma) ? "[vectors]" : NULL;
2902 }
2903
2904-/* If possible, provide a placement hint at a random offset from the
2905- * stack for the signal page.
2906- */
2907-static unsigned long sigpage_addr(const struct mm_struct *mm,
2908- unsigned int npages)
2909-{
2910- unsigned long offset;
2911- unsigned long first;
2912- unsigned long last;
2913- unsigned long addr;
2914- unsigned int slots;
2915-
2916- first = PAGE_ALIGN(mm->start_stack);
2917-
2918- last = TASK_SIZE - (npages << PAGE_SHIFT);
2919-
2920- /* No room after stack? */
2921- if (first > last)
2922- return 0;
2923-
2924- /* Just enough room? */
2925- if (first == last)
2926- return first;
2927-
2928- slots = ((last - first) >> PAGE_SHIFT) + 1;
2929-
2930- offset = get_random_int() % slots;
2931-
2932- addr = first + (offset << PAGE_SHIFT);
2933-
2934- return addr;
2935-}
2936-
2937-static struct page *signal_page;
2938-extern struct page *get_signal_page(void);
2939-
2940-static const struct vm_special_mapping sigpage_mapping = {
2941- .name = "[sigpage]",
2942- .pages = &signal_page,
2943-};
2944-
2945 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2946 {
2947 struct mm_struct *mm = current->mm;
2948- struct vm_area_struct *vma;
2949- unsigned long addr;
2950- unsigned long hint;
2951- int ret = 0;
2952-
2953- if (!signal_page)
2954- signal_page = get_signal_page();
2955- if (!signal_page)
2956- return -ENOMEM;
2957
2958 down_write(&mm->mmap_sem);
2959- hint = sigpage_addr(mm, 1);
2960- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2961- if (IS_ERR_VALUE(addr)) {
2962- ret = addr;
2963- goto up_fail;
2964- }
2965-
2966- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2967- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2968- &sigpage_mapping);
2969-
2970- if (IS_ERR(vma)) {
2971- ret = PTR_ERR(vma);
2972- goto up_fail;
2973- }
2974-
2975- mm->context.sigpage = addr;
2976-
2977- up_fail:
2978+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2979 up_write(&mm->mmap_sem);
2980- return ret;
2981+ return 0;
2982 }
2983 #endif
2984diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2985index f73891b..cf3004e 100644
2986--- a/arch/arm/kernel/psci.c
2987+++ b/arch/arm/kernel/psci.c
2988@@ -28,7 +28,7 @@
2989 #include <asm/psci.h>
2990 #include <asm/system_misc.h>
2991
2992-struct psci_operations psci_ops;
2993+struct psci_operations psci_ops __read_only;
2994
2995 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2996 typedef int (*psci_initcall_t)(const struct device_node *);
2997diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2998index ef9119f..31995a3 100644
2999--- a/arch/arm/kernel/ptrace.c
3000+++ b/arch/arm/kernel/ptrace.c
3001@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3002 regs->ARM_ip = ip;
3003 }
3004
3005+#ifdef CONFIG_GRKERNSEC_SETXID
3006+extern void gr_delayed_cred_worker(void);
3007+#endif
3008+
3009 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3010 {
3011 current_thread_info()->syscall = scno;
3012
3013+#ifdef CONFIG_GRKERNSEC_SETXID
3014+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3015+ gr_delayed_cred_worker();
3016+#endif
3017+
3018 /* Do the secure computing check first; failures should be fast. */
3019 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3020 if (secure_computing() == -1)
3021diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3022index e55408e..14d9998 100644
3023--- a/arch/arm/kernel/setup.c
3024+++ b/arch/arm/kernel/setup.c
3025@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3026 unsigned int elf_hwcap2 __read_mostly;
3027 EXPORT_SYMBOL(elf_hwcap2);
3028
3029+pteval_t __supported_pte_mask __read_only;
3030+pmdval_t __supported_pmd_mask __read_only;
3031
3032 #ifdef MULTI_CPU
3033-struct processor processor __read_mostly;
3034+struct processor processor __read_only;
3035 #endif
3036 #ifdef MULTI_TLB
3037-struct cpu_tlb_fns cpu_tlb __read_mostly;
3038+struct cpu_tlb_fns cpu_tlb __read_only;
3039 #endif
3040 #ifdef MULTI_USER
3041-struct cpu_user_fns cpu_user __read_mostly;
3042+struct cpu_user_fns cpu_user __read_only;
3043 #endif
3044 #ifdef MULTI_CACHE
3045-struct cpu_cache_fns cpu_cache __read_mostly;
3046+struct cpu_cache_fns cpu_cache __read_only;
3047 #endif
3048 #ifdef CONFIG_OUTER_CACHE
3049-struct outer_cache_fns outer_cache __read_mostly;
3050+struct outer_cache_fns outer_cache __read_only;
3051 EXPORT_SYMBOL(outer_cache);
3052 #endif
3053
3054@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3055 asm("mrc p15, 0, %0, c0, c1, 4"
3056 : "=r" (mmfr0));
3057 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3058- (mmfr0 & 0x000000f0) >= 0x00000030)
3059+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3060 cpu_arch = CPU_ARCH_ARMv7;
3061- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3062+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3063+ __supported_pte_mask |= L_PTE_PXN;
3064+ __supported_pmd_mask |= PMD_PXNTABLE;
3065+ }
3066+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3067 (mmfr0 & 0x000000f0) == 0x00000020)
3068 cpu_arch = CPU_ARCH_ARMv6;
3069 else
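
The setup.c hunk decodes the low nibble of ID_MMFR0, the VMSA support field, and enables the PXN mask bits only when it reads 4 or 5 (as I read the ARM ARM encoding: VMSAv7 with PXN, and with PXN plus the long-descriptor format, respectively). A small decoder in the same spirit (the field labels are my reading, not quoted from the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Decode the VMSA field of ID_MMFR0 the way the hunk above does. */
    static const char *vmsa(uint32_t mmfr0)
    {
        switch (mmfr0 & 0xf) {
        case 0x3: return "VMSAv7";
        case 0x4: return "VMSAv7 + PXN";
        case 0x5: return "VMSAv7 + PXN + long descriptors";
        default:  return "pre-v7 or unknown";
        }
    }

    int main(void)
    {
        uint32_t samples[] = { 0x00100003, 0x10201105, 0x00000002 };
        for (unsigned i = 0; i < sizeof samples / sizeof *samples; i++)
            printf("MMFR0=%#010x -> %s\n", samples[i], vmsa(samples[i]));
        return 0;
    }
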
3070diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3071index 8aa6f1b..0899e08 100644
3072--- a/arch/arm/kernel/signal.c
3073+++ b/arch/arm/kernel/signal.c
3074@@ -24,8 +24,6 @@
3075
3076 extern const unsigned long sigreturn_codes[7];
3077
3078-static unsigned long signal_return_offset;
3079-
3080 #ifdef CONFIG_CRUNCH
3081 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3082 {
3083@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3084 * except when the MPU has protected the vectors
3085 * page from PL0
3086 */
3087- retcode = mm->context.sigpage + signal_return_offset +
3088- (idx << 2) + thumb;
3089+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3090 } else
3091 #endif
3092 {
3093@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3094 } while (thread_flags & _TIF_WORK_MASK);
3095 return 0;
3096 }
3097-
3098-struct page *get_signal_page(void)
3099-{
3100- unsigned long ptr;
3101- unsigned offset;
3102- struct page *page;
3103- void *addr;
3104-
3105- page = alloc_pages(GFP_KERNEL, 0);
3106-
3107- if (!page)
3108- return NULL;
3109-
3110- addr = page_address(page);
3111-
3112- /* Give the signal return code some randomness */
3113- offset = 0x200 + (get_random_int() & 0x7fc);
3114- signal_return_offset = offset;
3115-
3116- /*
3117- * Copy signal return handlers into the vector page, and
3118- * set sigreturn to be a pointer to these.
3119- */
3120- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3121-
3122- ptr = (unsigned long)addr + offset;
3123- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3124-
3125- return page;
3126-}
3127diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3128index 86ef244..c518451 100644
3129--- a/arch/arm/kernel/smp.c
3130+++ b/arch/arm/kernel/smp.c
3131@@ -76,7 +76,7 @@ enum ipi_msg_type {
3132
3133 static DECLARE_COMPLETION(cpu_running);
3134
3135-static struct smp_operations smp_ops;
3136+static struct smp_operations smp_ops __read_only;
3137
3138 void __init smp_set_ops(struct smp_operations *ops)
3139 {
3140diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3141index 7a3be1d..b00c7de 100644
3142--- a/arch/arm/kernel/tcm.c
3143+++ b/arch/arm/kernel/tcm.c
3144@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3145 .virtual = ITCM_OFFSET,
3146 .pfn = __phys_to_pfn(ITCM_OFFSET),
3147 .length = 0,
3148- .type = MT_MEMORY_RWX_ITCM,
3149+ .type = MT_MEMORY_RX_ITCM,
3150 }
3151 };
3152
3153@@ -267,7 +267,9 @@ no_dtcm:
3154 start = &__sitcm_text;
3155 end = &__eitcm_text;
3156 ram = &__itcm_start;
3157+ pax_open_kernel();
3158 memcpy(start, ram, itcm_code_sz);
3159+ pax_close_kernel();
3160 pr_debug("CPU ITCM: copied code from %p - %p\n",
3161 start, end);
3162 itcm_present = true;
3163diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3164index 788e23f..6fa06a1 100644
3165--- a/arch/arm/kernel/traps.c
3166+++ b/arch/arm/kernel/traps.c
3167@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3168 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3169 {
3170 #ifdef CONFIG_KALLSYMS
3171- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3172+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3173 #else
3174 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3175 #endif
3176@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3177 static int die_owner = -1;
3178 static unsigned int die_nest_count;
3179
3180+extern void gr_handle_kernel_exploit(void);
3181+
3182 static unsigned long oops_begin(void)
3183 {
3184 int cpu;
3185@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3186 panic("Fatal exception in interrupt");
3187 if (panic_on_oops)
3188 panic("Fatal exception");
3189+
3190+ gr_handle_kernel_exploit();
3191+
3192 if (signr)
3193 do_exit(signr);
3194 }
3195@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3196 kuser_init(vectors_base);
3197
3198 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3199- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3200+
3201+#ifndef CONFIG_PAX_MEMORY_UDEREF
3202+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3203+#endif
3204+
3205 #else /* ifndef CONFIG_CPU_V7M */
3206 /*
3207 * on V7-M there is no need to copy the vector table to a dedicated
3208diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3209index b31aa73..cc4b7a1 100644
3210--- a/arch/arm/kernel/vmlinux.lds.S
3211+++ b/arch/arm/kernel/vmlinux.lds.S
3212@@ -37,7 +37,7 @@
3213 #endif
3214
3215 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3216- defined(CONFIG_GENERIC_BUG)
3217+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3218 #define ARM_EXIT_KEEP(x) x
3219 #define ARM_EXIT_DISCARD(x)
3220 #else
3221@@ -123,6 +123,8 @@ SECTIONS
3222 #ifdef CONFIG_DEBUG_RODATA
3223 . = ALIGN(1<<SECTION_SHIFT);
3224 #endif
3225+ _etext = .; /* End of text section */
3226+
3227 RO_DATA(PAGE_SIZE)
3228
3229 . = ALIGN(4);
3230@@ -153,8 +155,6 @@ SECTIONS
3231
3232 NOTES
3233
3234- _etext = .; /* End of text and rodata section */
3235-
3236 #ifndef CONFIG_XIP_KERNEL
3237 # ifdef CONFIG_ARM_KERNMEM_PERMS
3238 . = ALIGN(1<<SECTION_SHIFT);
3239diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3240index 0b0d58a..988cb45 100644
3241--- a/arch/arm/kvm/arm.c
3242+++ b/arch/arm/kvm/arm.c
3243@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3244 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3245
3246 /* The VMID used in the VTTBR */
3247-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3248+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3249 static u8 kvm_next_vmid;
3250 static DEFINE_SPINLOCK(kvm_vmid_lock);
3251
3252@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
3253 */
3254 static bool need_new_vmid_gen(struct kvm *kvm)
3255 {
3256- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3257+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3258 }
3259
3260 /**
3261@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
3262
3263 /* First user of a new VMID generation? */
3264 if (unlikely(kvm_next_vmid == 0)) {
3265- atomic64_inc(&kvm_vmid_gen);
3266+ atomic64_inc_unchecked(&kvm_vmid_gen);
3267 kvm_next_vmid = 1;
3268
3269 /*
3270@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
3271 kvm_call_hyp(__kvm_flush_vm_context);
3272 }
3273
3274- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3275+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3276 kvm->arch.vmid = kvm_next_vmid;
3277 kvm_next_vmid++;
3278
3279@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
3280 /**
3281 * Initialize Hyp-mode and memory mappings on all CPUs.
3282 */
3283-int kvm_arch_init(void *opaque)
3284+int kvm_arch_init(const void *opaque)
3285 {
3286 int err;
3287 int ret, cpu;
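
kvm_vmid_gen becomes atomic64_unchecked_t because PAX_REFCOUNT makes ordinary atomics trap on overflow; a VMID generation counter is expected to wrap eventually, and the equality test in need_new_vmid_gen() stays correct when it does, so it is deliberately exempted. A miniature of why equality-based generation checks survive wraparound:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t gen = 0xff;     /* tiny width to force a wrap quickly */
        uint8_t cached = gen;
        gen++;                  /* wraps to 0 without harm */
        printf("gen=%u cached=%u stale=%d\n", gen, cached, cached != gen);
        return 0;
    }
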
3288diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3289index 14a0d98..7771a7d 100644
3290--- a/arch/arm/lib/clear_user.S
3291+++ b/arch/arm/lib/clear_user.S
3292@@ -12,14 +12,14 @@
3293
3294 .text
3295
3296-/* Prototype: int __clear_user(void *addr, size_t sz)
3297+/* Prototype: int ___clear_user(void *addr, size_t sz)
3298 * Purpose : clear some user memory
3299 * Params : addr - user memory address to clear
3300 * : sz - number of bytes to clear
3301 * Returns : number of bytes NOT cleared
3302 */
3303 ENTRY(__clear_user_std)
3304-WEAK(__clear_user)
3305+WEAK(___clear_user)
3306 stmfd sp!, {r1, lr}
3307 mov r2, #0
3308 cmp r1, #4
3309@@ -44,7 +44,7 @@ WEAK(__clear_user)
3310 USER( strnebt r2, [r0])
3311 mov r0, #0
3312 ldmfd sp!, {r1, pc}
3313-ENDPROC(__clear_user)
3314+ENDPROC(___clear_user)
3315 ENDPROC(__clear_user_std)
3316
3317 .pushsection .fixup,"ax"
3318diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3319index 7a235b9..73a0556 100644
3320--- a/arch/arm/lib/copy_from_user.S
3321+++ b/arch/arm/lib/copy_from_user.S
3322@@ -17,7 +17,7 @@
3323 /*
3324 * Prototype:
3325 *
3326- * size_t __copy_from_user(void *to, const void *from, size_t n)
3327+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3328 *
3329 * Purpose:
3330 *
3331@@ -89,11 +89,11 @@
3332
3333 .text
3334
3335-ENTRY(__copy_from_user)
3336+ENTRY(___copy_from_user)
3337
3338 #include "copy_template.S"
3339
3340-ENDPROC(__copy_from_user)
3341+ENDPROC(___copy_from_user)
3342
3343 .pushsection .fixup,"ax"
3344 .align 0
3345diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3346index 6ee2f67..d1cce76 100644
3347--- a/arch/arm/lib/copy_page.S
3348+++ b/arch/arm/lib/copy_page.S
3349@@ -10,6 +10,7 @@
3350 * ASM optimised string functions
3351 */
3352 #include <linux/linkage.h>
3353+#include <linux/const.h>
3354 #include <asm/assembler.h>
3355 #include <asm/asm-offsets.h>
3356 #include <asm/cache.h>
3357diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3358index a9d3db1..164b089 100644
3359--- a/arch/arm/lib/copy_to_user.S
3360+++ b/arch/arm/lib/copy_to_user.S
3361@@ -17,7 +17,7 @@
3362 /*
3363 * Prototype:
3364 *
3365- * size_t __copy_to_user(void *to, const void *from, size_t n)
3366+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3367 *
3368 * Purpose:
3369 *
3370@@ -93,11 +93,11 @@
3371 .text
3372
3373 ENTRY(__copy_to_user_std)
3374-WEAK(__copy_to_user)
3375+WEAK(___copy_to_user)
3376
3377 #include "copy_template.S"
3378
3379-ENDPROC(__copy_to_user)
3380+ENDPROC(___copy_to_user)
3381 ENDPROC(__copy_to_user_std)
3382
3383 .pushsection .fixup,"ax"
3384diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3385index 7d08b43..f7ca7ea 100644
3386--- a/arch/arm/lib/csumpartialcopyuser.S
3387+++ b/arch/arm/lib/csumpartialcopyuser.S
3388@@ -57,8 +57,8 @@
3389 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3390 */
3391
3392-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3393-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3394+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3395+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3396
3397 #include "csumpartialcopygeneric.S"
3398
3399diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3400index 312d43e..21d2322 100644
3401--- a/arch/arm/lib/delay.c
3402+++ b/arch/arm/lib/delay.c
3403@@ -29,7 +29,7 @@
3404 /*
3405 * Default to the loop-based delay implementation.
3406 */
3407-struct arm_delay_ops arm_delay_ops = {
3408+struct arm_delay_ops arm_delay_ops __read_only = {
3409 .delay = __loop_delay,
3410 .const_udelay = __loop_const_udelay,
3411 .udelay = __loop_udelay,
3412diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3413index 3e58d71..029817c 100644
3414--- a/arch/arm/lib/uaccess_with_memcpy.c
3415+++ b/arch/arm/lib/uaccess_with_memcpy.c
3416@@ -136,7 +136,7 @@ out:
3417 }
3418
3419 unsigned long
3420-__copy_to_user(void __user *to, const void *from, unsigned long n)
3421+___copy_to_user(void __user *to, const void *from, unsigned long n)
3422 {
3423 /*
3424 * This test is stubbed out of the main function above to keep
3425@@ -190,7 +190,7 @@ out:
3426 return n;
3427 }
3428
3429-unsigned long __clear_user(void __user *addr, unsigned long n)
3430+unsigned long ___clear_user(void __user *addr, unsigned long n)
3431 {
3432 /* See rational for this in __copy_to_user() above. */
3433 if (n < 64)
3434diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3435index ce25e85..3dd7850 100644
3436--- a/arch/arm/mach-at91/setup.c
3437+++ b/arch/arm/mach-at91/setup.c
3438@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3439
3440 desc->pfn = __phys_to_pfn(base);
3441 desc->length = length;
3442- desc->type = MT_MEMORY_RWX_NONCACHED;
3443+ desc->type = MT_MEMORY_RW_NONCACHED;
3444
3445 pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3446 base, length, desc->virtual);
3447diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3448index f8e7dcd..17ee921 100644
3449--- a/arch/arm/mach-exynos/suspend.c
3450+++ b/arch/arm/mach-exynos/suspend.c
3451@@ -18,6 +18,7 @@
3452 #include <linux/syscore_ops.h>
3453 #include <linux/cpu_pm.h>
3454 #include <linux/io.h>
3455+#include <linux/irq.h>
3456 #include <linux/irqchip/arm-gic.h>
3457 #include <linux/err.h>
3458 #include <linux/regulator/machine.h>
3459@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
3460 tmp |= pm_data->wake_disable_mask;
3461 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3462
3463- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3464- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3465+ pax_open_kernel();
3466+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3467+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3468+ pax_close_kernel();
3469
3470 register_syscore_ops(&exynos_pm_syscore_ops);
3471 suspend_set_ops(&exynos_suspend_ops);
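KERNEXEC keeps syscore_ops structures like this one in read-only memory, so the two pointer assignments are rewritten as a pax_open_kernel()/pax_close_kernel() bracket around writes through a void ** cast. A runnable user-space analogue of that bracket, using mprotect() in place of the kernel primitives:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct ops { void (*handler)(void); };

    static void old_handler(void) { puts("old"); }
    static void new_handler(void) { puts("new"); }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        struct ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        ops->handler = old_handler;
        mprotect(ops, pg, PROT_READ);               /* table is now read-only */

        mprotect(ops, pg, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        ops->handler = new_handler;                 /* the guarded write  */
        mprotect(ops, pg, PROT_READ);               /* pax_close_kernel() */

        ops->handler();                             /* prints "new" */
        return 0;
    }

The point of the idiom is that the window in which the table is writable is as short as possible and explicit in the source.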
3472diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3473index 7f352de..6dc0929 100644
3474--- a/arch/arm/mach-keystone/keystone.c
3475+++ b/arch/arm/mach-keystone/keystone.c
3476@@ -27,7 +27,7 @@
3477
3478 #include "keystone.h"
3479
3480-static struct notifier_block platform_nb;
3481+static notifier_block_no_const platform_nb;
3482 static unsigned long keystone_dma_pfn_offset __read_mostly;
3483
3484 static int keystone_platform_notifier(struct notifier_block *nb,
3485diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3486index ccef880..5dfad80 100644
3487--- a/arch/arm/mach-mvebu/coherency.c
3488+++ b/arch/arm/mach-mvebu/coherency.c
3489@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3490
3491 /*
3492 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3493- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3494+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3495 * is needed as a workaround for a deadlock issue between the PCIe
3496 * interface and the cache controller.
3497 */
3498@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3499 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3500
3501 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3502- mtype = MT_UNCACHED;
3503+ mtype = MT_UNCACHED_RW;
3504
3505 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3506 }
3507diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3508index b6443a4..20a0b74 100644
3509--- a/arch/arm/mach-omap2/board-n8x0.c
3510+++ b/arch/arm/mach-omap2/board-n8x0.c
3511@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3512 }
3513 #endif
3514
3515-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3516+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3517 .late_init = n8x0_menelaus_late_init,
3518 };
3519
3520diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3521index 79f49d9..70bf184 100644
3522--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3523+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3525 void (*resume)(void);
3526 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3527 void (*hotplug_restart)(void);
3528-};
3529+} __no_const;
3530
3531 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3532 static struct powerdomain *mpuss_pd;
3533@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3534 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3535 {}
3536
3537-struct cpu_pm_ops omap_pm_ops = {
3538+static struct cpu_pm_ops omap_pm_ops __read_only = {
3539 .finish_suspend = default_finish_suspend,
3540 .resume = dummy_cpu_resume,
3541 .scu_prepare = dummy_scu_prepare,
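Tagging an ops structure __no_const exempts its type from grsecurity's constify plugin (some members really are assigned at runtime), while __read_only pins the object itself into a write-protected section. For the common case the plugin's effect is equivalent to declaring the table const so its pointers land in .rodata, as in this sketch:

    struct pm_ops {
        void (*suspend)(void);
        void (*resume)(void);
    };

    static void do_suspend(void) { }
    static void do_resume(void)  { }

    /* const => placed in .rodata, so the pointers cannot be redirected
     * at runtime; types that must stay writable are tagged __no_const */
    static const struct pm_ops pm_ops = {
        .suspend = do_suspend,
        .resume  = do_resume,
    };

    int main(void)
    {
        pm_ops.suspend();
        pm_ops.resume();
        return 0;
    }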
3542diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3543index 5305ec7..6d74045 100644
3544--- a/arch/arm/mach-omap2/omap-smp.c
3545+++ b/arch/arm/mach-omap2/omap-smp.c
3546@@ -19,6 +19,7 @@
3547 #include <linux/device.h>
3548 #include <linux/smp.h>
3549 #include <linux/io.h>
3550+#include <linux/irq.h>
3551 #include <linux/irqchip/arm-gic.h>
3552
3553 #include <asm/smp_scu.h>
3554diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3555index f961c46..4a453dc 100644
3556--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3557+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3558@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3559 return NOTIFY_OK;
3560 }
3561
3562-static struct notifier_block __refdata irq_hotplug_notifier = {
3563+static struct notifier_block irq_hotplug_notifier = {
3564 .notifier_call = irq_cpu_hotplug_notify,
3565 };
3566
3567diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3568index be9541e..821805f 100644
3569--- a/arch/arm/mach-omap2/omap_device.c
3570+++ b/arch/arm/mach-omap2/omap_device.c
3571@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3572 struct platform_device __init *omap_device_build(const char *pdev_name,
3573 int pdev_id,
3574 struct omap_hwmod *oh,
3575- void *pdata, int pdata_len)
3576+ const void *pdata, int pdata_len)
3577 {
3578 struct omap_hwmod *ohs[] = { oh };
3579
3580@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3581 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3582 int pdev_id,
3583 struct omap_hwmod **ohs,
3584- int oh_cnt, void *pdata,
3585+ int oh_cnt, const void *pdata,
3586 int pdata_len)
3587 {
3588 int ret = -ENOMEM;
3589diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3590index 78c02b3..c94109a 100644
3591--- a/arch/arm/mach-omap2/omap_device.h
3592+++ b/arch/arm/mach-omap2/omap_device.h
3593@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3594 /* Core code interface */
3595
3596 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3597- struct omap_hwmod *oh, void *pdata,
3598+ struct omap_hwmod *oh, const void *pdata,
3599 int pdata_len);
3600
3601 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3602 struct omap_hwmod **oh, int oh_cnt,
3603- void *pdata, int pdata_len);
3604+ const void *pdata, int pdata_len);
3605
3606 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3607 struct omap_hwmod **ohs, int oh_cnt);
3608diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3609index 9025fff..3555702 100644
3610--- a/arch/arm/mach-omap2/omap_hwmod.c
3611+++ b/arch/arm/mach-omap2/omap_hwmod.c
3612@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3613 int (*init_clkdm)(struct omap_hwmod *oh);
3614 void (*update_context_lost)(struct omap_hwmod *oh);
3615 int (*get_context_lost)(struct omap_hwmod *oh);
3616-};
3617+} __no_const;
3618
3619 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3620-static struct omap_hwmod_soc_ops soc_ops;
3621+static struct omap_hwmod_soc_ops soc_ops __read_only;
3622
3623 /* omap_hwmod_list contains all registered struct omap_hwmods */
3624 static LIST_HEAD(omap_hwmod_list);
3625diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3626index 95fee54..cfa9cf1 100644
3627--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3628+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3629@@ -10,6 +10,7 @@
3630
3631 #include <linux/kernel.h>
3632 #include <linux/init.h>
3633+#include <asm/pgtable.h>
3634
3635 #include "powerdomain.h"
3636
3637@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3638
3639 void __init am43xx_powerdomains_init(void)
3640 {
3641- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3642+ pax_open_kernel();
3643+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3644+ pax_close_kernel();
3645 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3646 pwrdm_register_pwrdms(powerdomains_am43xx);
3647 pwrdm_complete_init();
3648diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3649index ff0a68c..b312aa0 100644
3650--- a/arch/arm/mach-omap2/wd_timer.c
3651+++ b/arch/arm/mach-omap2/wd_timer.c
3652@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3653 struct omap_hwmod *oh;
3654 char *oh_name = "wd_timer2";
3655 char *dev_name = "omap_wdt";
3656- struct omap_wd_timer_platform_data pdata;
3657+ static struct omap_wd_timer_platform_data pdata = {
3658+ .read_reset_sources = prm_read_reset_sources
3659+ };
3660
3661 if (!cpu_class_is_omap2() || of_have_populated_dt())
3662 return 0;
3663@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3664 return -EINVAL;
3665 }
3666
3667- pdata.read_reset_sources = prm_read_reset_sources;
3668-
3669 pdev = omap_device_build(dev_name, id, oh, &pdata,
3670 sizeof(struct omap_wd_timer_platform_data));
3671 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3672diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3673index 4f25a7c..a81be85 100644
3674--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3675+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3676@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3677 bool entered_lp2 = false;
3678
3679 if (tegra_pending_sgi())
3680- ACCESS_ONCE(abort_flag) = true;
3681+ ACCESS_ONCE_RW(abort_flag) = true;
3682
3683 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3684
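PaX's ACCESS_ONCE() const-qualifies its target so that a stray store through it becomes a compile-time error; deliberate stores must use ACCESS_ONCE_RW(), as in the hunk above. A sketch of the split, assuming definitions modeled on the 3.x-era <linux/compiler.h>:

    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* reads only   */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* explicit write */

    static int abort_flag;

    void example(void)
    {
        int seen = ACCESS_ONCE(abort_flag);   /* fine: read              */
        ACCESS_ONCE_RW(abort_flag) = 1;       /* fine: intentional write */
        /* ACCESS_ONCE(abort_flag) = 1;          rejected: const lvalue  */
        (void)seen;
    }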
3685diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3686index ab95f53..4b977a7 100644
3687--- a/arch/arm/mach-tegra/irq.c
3688+++ b/arch/arm/mach-tegra/irq.c
3689@@ -20,6 +20,7 @@
3690 #include <linux/cpu_pm.h>
3691 #include <linux/interrupt.h>
3692 #include <linux/io.h>
3693+#include <linux/irq.h>
3694 #include <linux/irqchip/arm-gic.h>
3695 #include <linux/irq.h>
3696 #include <linux/kernel.h>
3697diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3698index 2cb587b..6ddfebf 100644
3699--- a/arch/arm/mach-ux500/pm.c
3700+++ b/arch/arm/mach-ux500/pm.c
3701@@ -10,6 +10,7 @@
3702 */
3703
3704 #include <linux/kernel.h>
3705+#include <linux/irq.h>
3706 #include <linux/irqchip/arm-gic.h>
3707 #include <linux/delay.h>
3708 #include <linux/io.h>
3709diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3710index 2dea8b5..6499da2 100644
3711--- a/arch/arm/mach-ux500/setup.h
3712+++ b/arch/arm/mach-ux500/setup.h
3713@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3714 .type = MT_DEVICE, \
3715 }
3716
3717-#define __MEM_DEV_DESC(x, sz) { \
3718- .virtual = IO_ADDRESS(x), \
3719- .pfn = __phys_to_pfn(x), \
3720- .length = sz, \
3721- .type = MT_MEMORY_RWX, \
3722-}
3723-
3724 extern struct smp_operations ux500_smp_ops;
3725 extern void ux500_cpu_die(unsigned int cpu);
3726
3727diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3728index 52d768f..5f93180 100644
3729--- a/arch/arm/mach-zynq/platsmp.c
3730+++ b/arch/arm/mach-zynq/platsmp.c
3731@@ -24,6 +24,7 @@
3732 #include <linux/io.h>
3733 #include <asm/cacheflush.h>
3734 #include <asm/smp_scu.h>
3735+#include <linux/irq.h>
3736 #include <linux/irqchip/arm-gic.h>
3737 #include "common.h"
3738
3739diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3740index c43c714..4f8f7b9 100644
3741--- a/arch/arm/mm/Kconfig
3742+++ b/arch/arm/mm/Kconfig
3743@@ -446,6 +446,7 @@ config CPU_32v5
3744
3745 config CPU_32v6
3746 bool
3747+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3748 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3749
3750 config CPU_32v6K
3751@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3752
3753 config CPU_USE_DOMAINS
3754 bool
3755+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3756 help
3757 This option enables or disables the use of domain switching
3758 via the set_fs() function.
3759@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3760
3761 config KUSER_HELPERS
3762 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3763- depends on MMU
3764+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3765 default y
3766 help
3767 Warning: disabling this option may break user programs.
3768@@ -812,7 +814,7 @@ config KUSER_HELPERS
3769 See Documentation/arm/kernel_user_helpers.txt for details.
3770
3771 However, the fixed address nature of these helpers can be used
3772- by ROP (return orientated programming) authors when creating
3773+ by ROP (Return Oriented Programming) authors when creating
3774 exploits.
3775
3776 If all of the binaries and libraries which run on your platform
3777diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3778index 2c0c541..4585df9 100644
3779--- a/arch/arm/mm/alignment.c
3780+++ b/arch/arm/mm/alignment.c
3781@@ -216,10 +216,12 @@ union offset_union {
3782 #define __get16_unaligned_check(ins,val,addr) \
3783 do { \
3784 unsigned int err = 0, v, a = addr; \
3785+ pax_open_userland(); \
3786 __get8_unaligned_check(ins,v,a,err); \
3787 val = v << ((BE) ? 8 : 0); \
3788 __get8_unaligned_check(ins,v,a,err); \
3789 val |= v << ((BE) ? 0 : 8); \
3790+ pax_close_userland(); \
3791 if (err) \
3792 goto fault; \
3793 } while (0)
3794@@ -233,6 +235,7 @@ union offset_union {
3795 #define __get32_unaligned_check(ins,val,addr) \
3796 do { \
3797 unsigned int err = 0, v, a = addr; \
3798+ pax_open_userland(); \
3799 __get8_unaligned_check(ins,v,a,err); \
3800 val = v << ((BE) ? 24 : 0); \
3801 __get8_unaligned_check(ins,v,a,err); \
3802@@ -241,6 +244,7 @@ union offset_union {
3803 val |= v << ((BE) ? 8 : 16); \
3804 __get8_unaligned_check(ins,v,a,err); \
3805 val |= v << ((BE) ? 0 : 24); \
3806+ pax_close_userland(); \
3807 if (err) \
3808 goto fault; \
3809 } while (0)
3810@@ -254,6 +258,7 @@ union offset_union {
3811 #define __put16_unaligned_check(ins,val,addr) \
3812 do { \
3813 unsigned int err = 0, v = val, a = addr; \
3814+ pax_open_userland(); \
3815 __asm__( FIRST_BYTE_16 \
3816 ARM( "1: "ins" %1, [%2], #1\n" ) \
3817 THUMB( "1: "ins" %1, [%2]\n" ) \
3818@@ -273,6 +278,7 @@ union offset_union {
3819 " .popsection\n" \
3820 : "=r" (err), "=&r" (v), "=&r" (a) \
3821 : "0" (err), "1" (v), "2" (a)); \
3822+ pax_close_userland(); \
3823 if (err) \
3824 goto fault; \
3825 } while (0)
3826@@ -286,6 +292,7 @@ union offset_union {
3827 #define __put32_unaligned_check(ins,val,addr) \
3828 do { \
3829 unsigned int err = 0, v = val, a = addr; \
3830+ pax_open_userland(); \
3831 __asm__( FIRST_BYTE_32 \
3832 ARM( "1: "ins" %1, [%2], #1\n" ) \
3833 THUMB( "1: "ins" %1, [%2]\n" ) \
3834@@ -315,6 +322,7 @@ union offset_union {
3835 " .popsection\n" \
3836 : "=r" (err), "=&r" (v), "=&r" (a) \
3837 : "0" (err), "1" (v), "2" (a)); \
3838+ pax_close_userland(); \
3839 if (err) \
3840 goto fault; \
3841 } while (0)
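The alignment fixup handler dereferences the faulting userland address a byte at a time, so under PAX_MEMORY_UDEREF every accessor is bracketed with pax_open_userland()/pax_close_userland() to make that deliberate userland access legal. Stripped of the fixup tables and the bracketing, the 16-bit accessor computes:

    #include <stdint.h>

    /* the byte-assembly that __get16_unaligned_check() performs,
     * matching its "(BE) ? 8 : 0" shift selection */
    static uint16_t get16_unaligned_le(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));   /* little-endian */
    }

    static uint16_t get16_unaligned_be(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);   /* big-endian */
    }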
3842diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3843index 5e65ca8..879e7b3 100644
3844--- a/arch/arm/mm/cache-l2x0.c
3845+++ b/arch/arm/mm/cache-l2x0.c
3846@@ -42,7 +42,7 @@ struct l2c_init_data {
3847 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3848 void (*save)(void __iomem *);
3849 struct outer_cache_fns outer_cache;
3850-};
3851+} __do_const;
3852
3853 #define CACHE_LINE_SIZE 32
3854
3855diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3856index 845769e..4278fd7 100644
3857--- a/arch/arm/mm/context.c
3858+++ b/arch/arm/mm/context.c
3859@@ -43,7 +43,7 @@
3860 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3861
3862 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3863-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3864+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3865 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3866
3867 static DEFINE_PER_CPU(atomic64_t, active_asids);
3868@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3869 {
3870 static u32 cur_idx = 1;
3871 u64 asid = atomic64_read(&mm->context.id);
3872- u64 generation = atomic64_read(&asid_generation);
3873+ u64 generation = atomic64_read_unchecked(&asid_generation);
3874
3875 if (asid != 0) {
3876 /*
3877@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3878 */
3879 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3880 if (asid == NUM_USER_ASIDS) {
3881- generation = atomic64_add_return(ASID_FIRST_VERSION,
3882+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3883 &asid_generation);
3884 flush_context(cpu);
3885 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3886@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3887 cpu_set_reserved_ttbr0();
3888
3889 asid = atomic64_read(&mm->context.id);
3890- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3891+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3892 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3893 goto switch_mm_fastpath;
3894
3895 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3896 /* Check that our ASID belongs to the current generation. */
3897 asid = atomic64_read(&mm->context.id);
3898- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3899+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3900 asid = new_context(mm, cpu);
3901 atomic64_set(&mm->context.id, asid);
3902 }
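asid_generation may wrap by design, so it is converted to atomic64_unchecked_t: under PAX_REFCOUNT the regular atomics trap on signed overflow, and the _unchecked variants opt legitimate wrapping counters out. A single-threaded sketch of the policy split (the real primitives are atomic, and the kernel reports and saturates rather than aborting):

    #include <stdint.h>
    #include <stdlib.h>

    static int64_t add_return_checked(int64_t i, int64_t *v)
    {
        int64_t sum;
        if (__builtin_add_overflow(*v, i, &sum))
            abort();            /* PAX_REFCOUNT: overflow is fatal */
        return *v = sum;
    }

    static int64_t add_return_unchecked(int64_t i, int64_t *v)
    {
        /* wrap-around is expected here (e.g. ASID generations), so do
         * the arithmetic in unsigned to keep it well-defined */
        return *v = (int64_t)((uint64_t)*v + (uint64_t)i);
    }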
3903diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3904index a982dc3..2d9f5f7 100644
3905--- a/arch/arm/mm/fault.c
3906+++ b/arch/arm/mm/fault.c
3907@@ -25,6 +25,7 @@
3908 #include <asm/system_misc.h>
3909 #include <asm/system_info.h>
3910 #include <asm/tlbflush.h>
3911+#include <asm/sections.h>
3912
3913 #include "fault.h"
3914
3915@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3916 if (fixup_exception(regs))
3917 return;
3918
3919+#ifdef CONFIG_PAX_MEMORY_UDEREF
3920+ if (addr < TASK_SIZE) {
3921+ if (current->signal->curr_ip)
3922+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3923+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3924+ else
3925+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3926+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3927+ }
3928+#endif
3929+
3930+#ifdef CONFIG_PAX_KERNEXEC
3931+ if ((fsr & FSR_WRITE) &&
3932+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3933+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3934+ {
3935+ if (current->signal->curr_ip)
3936+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3937+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3938+ else
3939+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3940+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3941+ }
3942+#endif
3943+
3944 /*
3945 * No handler, we'll have to terminate things with extreme prejudice.
3946 */
3947@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3948 }
3949 #endif
3950
3951+#ifdef CONFIG_PAX_PAGEEXEC
3952+ if (fsr & FSR_LNX_PF) {
3953+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3954+ do_group_exit(SIGKILL);
3955+ }
3956+#endif
3957+
3958 tsk->thread.address = addr;
3959 tsk->thread.error_code = fsr;
3960 tsk->thread.trap_no = 14;
3961@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3962 }
3963 #endif /* CONFIG_MMU */
3964
3965+#ifdef CONFIG_PAX_PAGEEXEC
3966+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3967+{
3968+ long i;
3969+
3970+ printk(KERN_ERR "PAX: bytes at PC: ");
3971+ for (i = 0; i < 20; i++) {
3972+ unsigned char c;
3973+ if (get_user(c, (__force unsigned char __user *)pc+i))
3974+ printk(KERN_CONT "?? ");
3975+ else
3976+ printk(KERN_CONT "%02x ", c);
3977+ }
3978+ printk("\n");
3979+
3980+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3981+ for (i = -1; i < 20; i++) {
3982+ unsigned long c;
3983+ if (get_user(c, (__force unsigned long __user *)sp+i))
3984+ printk(KERN_CONT "???????? ");
3985+ else
3986+ printk(KERN_CONT "%08lx ", c);
3987+ }
3988+ printk("\n");
3989+}
3990+#endif
3991+
3992 /*
3993 * First Level Translation Fault Handler
3994 *
3995@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3996 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3997 struct siginfo info;
3998
3999+#ifdef CONFIG_PAX_MEMORY_UDEREF
4000+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4001+ if (current->signal->curr_ip)
4002+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4003+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4004+ else
4005+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4006+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4007+ goto die;
4008+ }
4009+#endif
4010+
4011 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4012 return;
4013
4014+die:
4015 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4016 inf->name, fsr, addr);
4017
4018@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4019 ifsr_info[nr].name = name;
4020 }
4021
4022+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4023+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4024+
4025 asmlinkage void __exception
4026 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4027 {
4028 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4029 struct siginfo info;
4030+ unsigned long pc = instruction_pointer(regs);
4031+
4032+ if (user_mode(regs)) {
4033+ unsigned long sigpage = current->mm->context.sigpage;
4034+
4035+ if (sigpage <= pc && pc < sigpage + 7*4) {
4036+ if (pc < sigpage + 3*4)
4037+ sys_sigreturn(regs);
4038+ else
4039+ sys_rt_sigreturn(regs);
4040+ return;
4041+ }
4042+ if (pc == 0xffff0f60UL) {
4043+ /*
4044+ * PaX: __kuser_cmpxchg64 emulation
4045+ */
4046+ // TODO
4047+ //regs->ARM_pc = regs->ARM_lr;
4048+ //return;
4049+ }
4050+ if (pc == 0xffff0fa0UL) {
4051+ /*
4052+ * PaX: __kuser_memory_barrier emulation
4053+ */
4054+ // dmb(); implied by the exception
4055+ regs->ARM_pc = regs->ARM_lr;
4056+ return;
4057+ }
4058+ if (pc == 0xffff0fc0UL) {
4059+ /*
4060+ * PaX: __kuser_cmpxchg emulation
4061+ */
4062+ // TODO
4063+ //long new;
4064+ //int op;
4065+
4066+ //op = FUTEX_OP_SET << 28;
4067+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4068+ //regs->ARM_r0 = old != new;
4069+ //regs->ARM_pc = regs->ARM_lr;
4070+ //return;
4071+ }
4072+ if (pc == 0xffff0fe0UL) {
4073+ /*
4074+ * PaX: __kuser_get_tls emulation
4075+ */
4076+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4077+ regs->ARM_pc = regs->ARM_lr;
4078+ return;
4079+ }
4080+ }
4081+
4082+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4083+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4084+ if (current->signal->curr_ip)
4085+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4086+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4087+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4088+ else
4089+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4090+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4091+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4092+ goto die;
4093+ }
4094+#endif
4095+
4096+#ifdef CONFIG_PAX_REFCOUNT
4097+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4098+#ifdef CONFIG_THUMB2_KERNEL
4099+ unsigned short bkpt;
4100+
4101+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4102+#else
4103+ unsigned int bkpt;
4104+
4105+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4106+#endif
4107+ current->thread.error_code = ifsr;
4108+ current->thread.trap_no = 0;
4109+ pax_report_refcount_overflow(regs);
4110+ fixup_exception(regs);
4111+ return;
4112+ }
4113+ }
4114+#endif
4115
4116 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4117 return;
4118
4119+die:
4120 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4121 inf->name, ifsr, addr);
4122
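pax_report_insns(), added earlier in this file, logs the instruction bytes around the faulting PC before the offender is killed; do_PrefetchAbort() gains the same style of reporting plus emulation of the fixed-address kuser helpers. A user-space stand-in for the PC dump (the kernel version must fetch each byte with get_user(), since the page may not be mapped at all):

    #include <stdio.h>

    static void report_insns(const unsigned char *pc)
    {
        fputs("PAX: bytes at PC: ", stderr);
        for (int i = 0; i < 20; i++)
            fprintf(stderr, "%02x ", pc[i]);
        fputc('\n', stderr);
    }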
4123diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4124index cf08bdf..772656c 100644
4125--- a/arch/arm/mm/fault.h
4126+++ b/arch/arm/mm/fault.h
4127@@ -3,6 +3,7 @@
4128
4129 /*
4130 * Fault status register encodings. We steal bit 31 for our own purposes.
4131+ * Set when the FSR value is from an instruction fault.
4132 */
4133 #define FSR_LNX_PF (1 << 31)
4134 #define FSR_WRITE (1 << 11)
4135@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4136 }
4137 #endif
4138
4139+/* valid for LPAE and !LPAE */
4140+static inline int is_xn_fault(unsigned int fsr)
4141+{
4142+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4143+}
4144+
4145+static inline int is_domain_fault(unsigned int fsr)
4146+{
4147+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4148+}
4149+
4150 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4151 unsigned long search_exception_table(unsigned long addr);
4152
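The two helpers classify a fault purely from the FSR status field. A self-contained check of the masks, assuming the short-descriptor (!LPAE) encoding of fsr_fs(), which folds FSR bit 10 into bits 3:0:

    #include <assert.h>

    static int fsr_fs(unsigned int fsr)
    {
        return (fsr & 0xf) | ((fsr >> 6) & 0x10);  /* bits 3:0 plus bit 10 */
    }

    static int is_xn_fault(unsigned int fsr)
    {
        return (fsr_fs(fsr) & 0x3c) == 0xc;        /* matches fs 0x0c..0x0f */
    }

    static int is_domain_fault(unsigned int fsr)
    {
        return (fsr_fs(fsr) & 0xD) == 0x9;         /* matches fs 0x9 and 0xb */
    }

    int main(void)
    {
        assert(is_domain_fault(0x9));    /* section domain fault */
        assert(is_domain_fault(0xb));    /* page domain fault    */
        assert(!is_domain_fault(0xd));   /* permission fault     */
        (void)is_xn_fault;
        return 0;
    }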
4153diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4154index 2495c8c..415b7fc 100644
4155--- a/arch/arm/mm/init.c
4156+++ b/arch/arm/mm/init.c
4157@@ -758,7 +758,46 @@ void free_tcmmem(void)
4158 {
4159 #ifdef CONFIG_HAVE_TCM
4160 extern char __tcm_start, __tcm_end;
4161+#endif
4162
4163+#ifdef CONFIG_PAX_KERNEXEC
4164+ unsigned long addr;
4165+ pgd_t *pgd;
4166+ pud_t *pud;
4167+ pmd_t *pmd;
4168+ int cpu_arch = cpu_architecture();
4169+ unsigned int cr = get_cr();
4170+
4171+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4172+ /* make pages tables, etc before .text NX */
4173+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4174+ pgd = pgd_offset_k(addr);
4175+ pud = pud_offset(pgd, addr);
4176+ pmd = pmd_offset(pud, addr);
4177+ __section_update(pmd, addr, PMD_SECT_XN);
4178+ }
4179+ /* make init NX */
4180+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4181+ pgd = pgd_offset_k(addr);
4182+ pud = pud_offset(pgd, addr);
4183+ pmd = pmd_offset(pud, addr);
4184+ __section_update(pmd, addr, PMD_SECT_XN);
4185+ }
4186+ /* make kernel code/rodata RX */
4187+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4188+ pgd = pgd_offset_k(addr);
4189+ pud = pud_offset(pgd, addr);
4190+ pmd = pmd_offset(pud, addr);
4191+#ifdef CONFIG_ARM_LPAE
4192+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4193+#else
4194+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4195+#endif
4196+ }
4197+ }
4198+#endif
4199+
4200+#ifdef CONFIG_HAVE_TCM
4201 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4202 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4203 #endif
4204diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4205index d1e5ad7..84dcbf2 100644
4206--- a/arch/arm/mm/ioremap.c
4207+++ b/arch/arm/mm/ioremap.c
4208@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4209 unsigned int mtype;
4210
4211 if (cached)
4212- mtype = MT_MEMORY_RWX;
4213+ mtype = MT_MEMORY_RX;
4214 else
4215- mtype = MT_MEMORY_RWX_NONCACHED;
4216+ mtype = MT_MEMORY_RX_NONCACHED;
4217
4218 return __arm_ioremap_caller(phys_addr, size, mtype,
4219 __builtin_return_address(0));
4220diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4221index 5e85ed3..b10a7ed 100644
4222--- a/arch/arm/mm/mmap.c
4223+++ b/arch/arm/mm/mmap.c
4224@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4225 struct vm_area_struct *vma;
4226 int do_align = 0;
4227 int aliasing = cache_is_vipt_aliasing();
4228+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4229 struct vm_unmapped_area_info info;
4230
4231 /*
4232@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4233 if (len > TASK_SIZE)
4234 return -ENOMEM;
4235
4236+#ifdef CONFIG_PAX_RANDMMAP
4237+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4238+#endif
4239+
4240 if (addr) {
4241 if (do_align)
4242 addr = COLOUR_ALIGN(addr, pgoff);
4243@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 addr = PAGE_ALIGN(addr);
4245
4246 vma = find_vma(mm, addr);
4247- if (TASK_SIZE - len >= addr &&
4248- (!vma || addr + len <= vma->vm_start))
4249+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4250 return addr;
4251 }
4252
4253@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4254 info.high_limit = TASK_SIZE;
4255 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4256 info.align_offset = pgoff << PAGE_SHIFT;
4257+ info.threadstack_offset = offset;
4258 return vm_unmapped_area(&info);
4259 }
4260
4261@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4262 unsigned long addr = addr0;
4263 int do_align = 0;
4264 int aliasing = cache_is_vipt_aliasing();
4265+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4266 struct vm_unmapped_area_info info;
4267
4268 /*
4269@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4270 return addr;
4271 }
4272
4273+#ifdef CONFIG_PAX_RANDMMAP
4274+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4275+#endif
4276+
4277 /* requesting a specific address */
4278 if (addr) {
4279 if (do_align)
4280@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4281 else
4282 addr = PAGE_ALIGN(addr);
4283 vma = find_vma(mm, addr);
4284- if (TASK_SIZE - len >= addr &&
4285- (!vma || addr + len <= vma->vm_start))
4286+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4287 return addr;
4288 }
4289
4290@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4291 info.high_limit = mm->mmap_base;
4292 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4293 info.align_offset = pgoff << PAGE_SHIFT;
4294+ info.threadstack_offset = offset;
4295 addr = vm_unmapped_area(&info);
4296
4297 /*
4298@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4299 {
4300 unsigned long random_factor = 0UL;
4301
4302+#ifdef CONFIG_PAX_RANDMMAP
4303+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4304+#endif
4305+
4306 /* 8 bits of randomness in 20 address space bits */
4307 if ((current->flags & PF_RANDOMIZE) &&
4308 !(current->personality & ADDR_NO_RANDOMIZE))
4309@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4310
4311 if (mmap_is_legacy()) {
4312 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4313+
4314+#ifdef CONFIG_PAX_RANDMMAP
4315+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4316+ mm->mmap_base += mm->delta_mmap;
4317+#endif
4318+
4319 mm->get_unmapped_area = arch_get_unmapped_area;
4320 } else {
4321 mm->mmap_base = mmap_base(random_factor);
4322+
4323+#ifdef CONFIG_PAX_RANDMMAP
4324+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4325+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4326+#endif
4327+
4328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4329 }
4330 }
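With MF_PAX_RANDMMAP set, the hunks above skip the caller-supplied address hint, widen the gap check via check_heap_stack_gap(), and shift the mmap base by delta_mmap (and delta_stack for the top-down layout) on top of the stock randomization. The stock factor quoted in the code ("8 bits of randomness in 20 address space bits") works out as below; a sketch with an illustrative base address and libc rand() standing in for the kernel RNG:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        /* 8 random bits shifted by PAGE_SHIFT: the base moves within a
         * 1 MiB window in 4 KiB steps, i.e. bits 12..19 of the address */
        unsigned long random_factor =
            ((unsigned long)rand() & 0xff) << PAGE_SHIFT;
        unsigned long task_unmapped_base = 0x40000000UL;  /* illustrative */
        printf("legacy mmap base: %#lx\n", task_unmapped_base + random_factor);
        return 0;
    }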
4331diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4332index 4e6ef89..21c27f2 100644
4333--- a/arch/arm/mm/mmu.c
4334+++ b/arch/arm/mm/mmu.c
4335@@ -41,6 +41,22 @@
4336 #include "mm.h"
4337 #include "tcm.h"
4338
4339+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4340+void modify_domain(unsigned int dom, unsigned int type)
4341+{
4342+ struct thread_info *thread = current_thread_info();
4343+ unsigned int domain = thread->cpu_domain;
4344+ /*
4345+ * DOMAIN_MANAGER might be defined to some other value,
4346+ * use the arch-defined constant
4347+ */
4348+ domain &= ~domain_val(dom, 3);
4349+ thread->cpu_domain = domain | domain_val(dom, type);
4350+ set_domain(thread->cpu_domain);
4351+}
4352+EXPORT_SYMBOL(modify_domain);
4353+#endif
4354+
4355 /*
4356 * empty_zero_page is a special page that is used for
4357 * zero-initialized data and COW.
4358@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4359 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4360 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4361
4362-static struct mem_type mem_types[] = {
4363+#ifdef CONFIG_PAX_KERNEXEC
4364+#define L_PTE_KERNEXEC L_PTE_RDONLY
4365+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4366+#else
4367+#define L_PTE_KERNEXEC L_PTE_DIRTY
4368+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4369+#endif
4370+
4371+static struct mem_type mem_types[] __read_only = {
4372 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4373 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4374 L_PTE_SHARED,
4375@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4376 .prot_sect = PROT_SECT_DEVICE,
4377 .domain = DOMAIN_IO,
4378 },
4379- [MT_UNCACHED] = {
4380+ [MT_UNCACHED_RW] = {
4381 .prot_pte = PROT_PTE_DEVICE,
4382 .prot_l1 = PMD_TYPE_TABLE,
4383 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4384 .domain = DOMAIN_IO,
4385 },
4386- [MT_CACHECLEAN] = {
4387- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4388+ [MT_CACHECLEAN_RO] = {
4389+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4390 .domain = DOMAIN_KERNEL,
4391 },
4392 #ifndef CONFIG_ARM_LPAE
4393- [MT_MINICLEAN] = {
4394- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4395+ [MT_MINICLEAN_RO] = {
4396+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4397 .domain = DOMAIN_KERNEL,
4398 },
4399 #endif
4400@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4401 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4402 L_PTE_RDONLY,
4403 .prot_l1 = PMD_TYPE_TABLE,
4404- .domain = DOMAIN_USER,
4405+ .domain = DOMAIN_VECTORS,
4406 },
4407 [MT_HIGH_VECTORS] = {
4408 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4409 L_PTE_USER | L_PTE_RDONLY,
4410 .prot_l1 = PMD_TYPE_TABLE,
4411- .domain = DOMAIN_USER,
4412+ .domain = DOMAIN_VECTORS,
4413 },
4414- [MT_MEMORY_RWX] = {
4415+ [__MT_MEMORY_RWX] = {
4416 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4417 .prot_l1 = PMD_TYPE_TABLE,
4418 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4419@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4420 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4421 .domain = DOMAIN_KERNEL,
4422 },
4423- [MT_ROM] = {
4424- .prot_sect = PMD_TYPE_SECT,
4425+ [MT_MEMORY_RX] = {
4426+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4427+ .prot_l1 = PMD_TYPE_TABLE,
4428+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4429+ .domain = DOMAIN_KERNEL,
4430+ },
4431+ [MT_ROM_RX] = {
4432+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4433 .domain = DOMAIN_KERNEL,
4434 },
4435- [MT_MEMORY_RWX_NONCACHED] = {
4436+ [MT_MEMORY_RW_NONCACHED] = {
4437 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4438 L_PTE_MT_BUFFERABLE,
4439 .prot_l1 = PMD_TYPE_TABLE,
4440 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4441 .domain = DOMAIN_KERNEL,
4442 },
4443+ [MT_MEMORY_RX_NONCACHED] = {
4444+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4445+ L_PTE_MT_BUFFERABLE,
4446+ .prot_l1 = PMD_TYPE_TABLE,
4447+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4448+ .domain = DOMAIN_KERNEL,
4449+ },
4450 [MT_MEMORY_RW_DTCM] = {
4451 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4452 L_PTE_XN,
4453@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4454 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4455 .domain = DOMAIN_KERNEL,
4456 },
4457- [MT_MEMORY_RWX_ITCM] = {
4458- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4459+ [MT_MEMORY_RX_ITCM] = {
4460+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4461 .prot_l1 = PMD_TYPE_TABLE,
4462+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4463 .domain = DOMAIN_KERNEL,
4464 },
4465 [MT_MEMORY_RW_SO] = {
4466@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4467 * Mark cache clean areas and XIP ROM read only
4468 * from SVC mode and no access from userspace.
4469 */
4470- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4472- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4473+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4474+#ifdef CONFIG_PAX_KERNEXEC
4475+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4477+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4478+#endif
4479+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4480+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481 #endif
4482
4483 /*
4484@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4485 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4486 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4487 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4488- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4489- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4490+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4491+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4492 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4493 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4494+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4495+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4496 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4497- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4498- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4499+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4500+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4501+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4502+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4503 }
4504 }
4505
4506@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4507 if (cpu_arch >= CPU_ARCH_ARMv6) {
4508 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4509 /* Non-cacheable Normal is XCB = 001 */
4510- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4511+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4512+ PMD_SECT_BUFFERED;
4513+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4514 PMD_SECT_BUFFERED;
4515 } else {
4516 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4517- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4518+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4519+ PMD_SECT_TEX(1);
4520+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4521 PMD_SECT_TEX(1);
4522 }
4523 } else {
4524- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4525+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4526+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4527 }
4528
4529 #ifdef CONFIG_ARM_LPAE
4530@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4531 user_pgprot |= PTE_EXT_PXN;
4532 #endif
4533
4534+ user_pgprot |= __supported_pte_mask;
4535+
4536 for (i = 0; i < 16; i++) {
4537 pteval_t v = pgprot_val(protection_map[i]);
4538 protection_map[i] = __pgprot(v | user_pgprot);
4539@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4540
4541 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4542 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4543- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4544- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4545+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4546+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4547 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4548 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4549+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4550+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4551 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4552- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4553- mem_types[MT_ROM].prot_sect |= cp->pmd;
4554+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4555+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4556+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4557
4558 switch (cp->pmd) {
4559 case PMD_SECT_WT:
4560- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4561+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4562 break;
4563 case PMD_SECT_WB:
4564 case PMD_SECT_WBWA:
4565- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4566+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4567 break;
4568 }
4569 pr_info("Memory policy: %sData cache %s\n",
4570@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4571 return;
4572 }
4573
4574- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4575+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4576 md->virtual >= PAGE_OFFSET &&
4577 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4578 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4579@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4580 * called function. This means you can't use any function or debugging
4581 * method which may touch any device, otherwise the kernel _will_ crash.
4582 */
4583+
4584+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4585+
4586 static void __init devicemaps_init(const struct machine_desc *mdesc)
4587 {
4588 struct map_desc map;
4589 unsigned long addr;
4590- void *vectors;
4591
4592- /*
4593- * Allocate the vector page early.
4594- */
4595- vectors = early_alloc(PAGE_SIZE * 2);
4596-
4597- early_trap_init(vectors);
4598+ early_trap_init(&vectors);
4599
4600 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4601 pmd_clear(pmd_off_k(addr));
4602@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4603 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4604 map.virtual = MODULES_VADDR;
4605 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4606- map.type = MT_ROM;
4607+ map.type = MT_ROM_RX;
4608 create_mapping(&map);
4609 #endif
4610
4611@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4612 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4613 map.virtual = FLUSH_BASE;
4614 map.length = SZ_1M;
4615- map.type = MT_CACHECLEAN;
4616+ map.type = MT_CACHECLEAN_RO;
4617 create_mapping(&map);
4618 #endif
4619 #ifdef FLUSH_BASE_MINICACHE
4620 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4621 map.virtual = FLUSH_BASE_MINICACHE;
4622 map.length = SZ_1M;
4623- map.type = MT_MINICLEAN;
4624+ map.type = MT_MINICLEAN_RO;
4625 create_mapping(&map);
4626 #endif
4627
4628@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4629 * location (0xffff0000). If we aren't using high-vectors, also
4630 * create a mapping at the low-vectors virtual address.
4631 */
4632- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4633+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4634 map.virtual = 0xffff0000;
4635 map.length = PAGE_SIZE;
4636 #ifdef CONFIG_KUSER_HELPERS
4637@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4638 static void __init map_lowmem(void)
4639 {
4640 struct memblock_region *reg;
4641+#ifndef CONFIG_PAX_KERNEXEC
4642 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4643 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4644+#endif
4645
4646 /* Map all the lowmem memory banks. */
4647 for_each_memblock(memory, reg) {
4648@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4649 if (start >= end)
4650 break;
4651
4652+#ifdef CONFIG_PAX_KERNEXEC
4653+ map.pfn = __phys_to_pfn(start);
4654+ map.virtual = __phys_to_virt(start);
4655+ map.length = end - start;
4656+
4657+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4658+ struct map_desc kernel;
4659+ struct map_desc initmap;
4660+
4661+ /* when freeing initmem we will make this RW */
4662+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4663+ initmap.virtual = (unsigned long)__init_begin;
4664+ initmap.length = _sdata - __init_begin;
4665+ initmap.type = __MT_MEMORY_RWX;
4666+ create_mapping(&initmap);
4667+
4668+ /* when freeing initmem we will make this RX */
4669+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4670+ kernel.virtual = (unsigned long)_stext;
4671+ kernel.length = __init_begin - _stext;
4672+ kernel.type = __MT_MEMORY_RWX;
4673+ create_mapping(&kernel);
4674+
4675+ if (map.virtual < (unsigned long)_stext) {
4676+ map.length = (unsigned long)_stext - map.virtual;
4677+ map.type = __MT_MEMORY_RWX;
4678+ create_mapping(&map);
4679+ }
4680+
4681+ map.pfn = __phys_to_pfn(__pa(_sdata));
4682+ map.virtual = (unsigned long)_sdata;
4683+ map.length = end - __pa(_sdata);
4684+ }
4685+
4686+ map.type = MT_MEMORY_RW;
4687+ create_mapping(&map);
4688+#else
4689 if (end < kernel_x_start) {
4690 map.pfn = __phys_to_pfn(start);
4691 map.virtual = __phys_to_virt(start);
4692 map.length = end - start;
4693- map.type = MT_MEMORY_RWX;
4694+ map.type = __MT_MEMORY_RWX;
4695
4696 create_mapping(&map);
4697 } else if (start >= kernel_x_end) {
4698@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4699 map.pfn = __phys_to_pfn(kernel_x_start);
4700 map.virtual = __phys_to_virt(kernel_x_start);
4701 map.length = kernel_x_end - kernel_x_start;
4702- map.type = MT_MEMORY_RWX;
4703+ map.type = __MT_MEMORY_RWX;
4704
4705 create_mapping(&map);
4706
4707@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4708 create_mapping(&map);
4709 }
4710 }
4711+#endif
4712 }
4713 }
4714
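The mem_types rework is the core of KERNEXEC's W^X policy: every memory type that used to be RWX is split into RW and RX variants, and the remaining __MT_MEMORY_RWX is reserved for the kernel image during boot, until free_tcmmem() (patched earlier) tightens it into RX text/rodata and NX init and data. The invariant reduces to a one-line predicate:

    #include <assert.h>

    #define PROT_W 0x2
    #define PROT_X 0x4

    /* no mapping may be both writable and executable */
    static int wx_ok(unsigned int prot)
    {
        return (prot & (PROT_W | PROT_X)) != (PROT_W | PROT_X);
    }

    int main(void)
    {
        assert(wx_ok(PROT_W));            /* MT_MEMORY_RW          */
        assert(wx_ok(PROT_X));            /* MT_MEMORY_RX          */
        assert(!wx_ok(PROT_W | PROT_X));  /* the removed RWX types */
        return 0;
    }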
4715diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4716index e1268f9..a9755a7 100644
4717--- a/arch/arm/net/bpf_jit_32.c
4718+++ b/arch/arm/net/bpf_jit_32.c
4719@@ -20,6 +20,7 @@
4720 #include <asm/cacheflush.h>
4721 #include <asm/hwcap.h>
4722 #include <asm/opcodes.h>
4723+#include <asm/pgtable.h>
4724
4725 #include "bpf_jit_32.h"
4726
4727@@ -71,7 +72,11 @@ struct jit_ctx {
4728 #endif
4729 };
4730
4731+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4732+int bpf_jit_enable __read_only;
4733+#else
4734 int bpf_jit_enable __read_mostly;
4735+#endif
4736
4737 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4738 {
4739@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4740 {
4741 u32 *ptr;
4742 /* We are guaranteed to have aligned memory. */
4743+ pax_open_kernel();
4744 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4745 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4746+ pax_close_kernel();
4747 }
4748
4749 static void build_prologue(struct jit_ctx *ctx)
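jit_fill_hole() now writes its UDF padding inside a pax_open_kernel() bracket because, with GRKERNSEC_BPF_HARDEN, JIT buffers are not left writable. The same write-then-seal discipline expressed with POSIX primitives:

    #include <string.h>
    #include <sys/mman.h>

    /* user-space analogue of the JIT's W^X discipline: the buffer is
     * filled while RW, then flipped to RX before it is ever executed */
    static void *emit_code(const unsigned char *code, size_t len)
    {
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return NULL;
        memcpy(buf, code, len);                     /* write while writable */
        if (mprotect(buf, len, PROT_READ | PROT_EXEC)) {
            munmap(buf, len);
            return NULL;
        }
        return buf;                 /* now executable, no longer writable */
    }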
4750diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4751index 5b217f4..c23f40e 100644
4752--- a/arch/arm/plat-iop/setup.c
4753+++ b/arch/arm/plat-iop/setup.c
4754@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4755 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4756 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4757 .length = IOP3XX_PERIPHERAL_SIZE,
4758- .type = MT_UNCACHED,
4759+ .type = MT_UNCACHED_RW,
4760 },
4761 };
4762
4763diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4764index a5bc92d..0bb4730 100644
4765--- a/arch/arm/plat-omap/sram.c
4766+++ b/arch/arm/plat-omap/sram.c
4767@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4768 * Looks like we need to preserve some bootloader code at the
4769 * beginning of SRAM for jumping to flash for reboot to work...
4770 */
4771+ pax_open_kernel();
4772 memset_io(omap_sram_base + omap_sram_skip, 0,
4773 omap_sram_size - omap_sram_skip);
4774+ pax_close_kernel();
4775 }
4776diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4777index ce6d763..cfea917 100644
4778--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4779+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4780@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4781 int (*started)(unsigned ch);
4782 int (*flush)(unsigned ch);
4783 int (*stop)(unsigned ch);
4784-};
4785+} __no_const;
4786
4787 extern void *samsung_dmadev_get_ops(void);
4788 extern void *s3c_dma_get_ops(void);
4789diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4790index a5abb00..9cbca9a 100644
4791--- a/arch/arm64/include/asm/barrier.h
4792+++ b/arch/arm64/include/asm/barrier.h
4793@@ -44,7 +44,7 @@
4794 do { \
4795 compiletime_assert_atomic_type(*p); \
4796 barrier(); \
4797- ACCESS_ONCE(*p) = (v); \
4798+ ACCESS_ONCE_RW(*p) = (v); \
4799 } while (0)
4800
4801 #define smp_load_acquire(p) \
4802diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4803index 09da25b..3ea0d64 100644
4804--- a/arch/arm64/include/asm/percpu.h
4805+++ b/arch/arm64/include/asm/percpu.h
4806@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4807 {
4808 switch (size) {
4809 case 1:
4810- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4811+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4812 break;
4813 case 2:
4814- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4815+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4816 break;
4817 case 4:
4818- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4819+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4820 break;
4821 case 8:
4822- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4823+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4824 break;
4825 default:
4826 BUILD_BUG();
4827diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4828index 3bf8f4e..5dd5491 100644
4829--- a/arch/arm64/include/asm/uaccess.h
4830+++ b/arch/arm64/include/asm/uaccess.h
4831@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4832 flag; \
4833 })
4834
4835+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4836 #define access_ok(type, addr, size) __range_ok(addr, size)
4837 #define user_addr_max get_fs
4838
4839diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4840index c3a58a1..78fbf54 100644
4841--- a/arch/avr32/include/asm/cache.h
4842+++ b/arch/avr32/include/asm/cache.h
4843@@ -1,8 +1,10 @@
4844 #ifndef __ASM_AVR32_CACHE_H
4845 #define __ASM_AVR32_CACHE_H
4846
4847+#include <linux/const.h>
4848+
4849 #define L1_CACHE_SHIFT 5
4850-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4851+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4852
4853 /*
4854 * Memory returned by kmalloc() may be used for DMA, so we must make
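This and the following cache.h hunks replace a plain 1 with _AC(1,UL) so that L1_CACHE_BYTES is an unsigned long in C code while remaining usable from assembly, which is exactly what <linux/const.h> provides (abridged):

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X             /* assembler: no C suffixes allowed */
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)    /* C: paste the UL suffix on */
    #endif

    #define L1_CACHE_SHIFT 5
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)  /* 32UL in C */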
4855diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4856index d232888..87c8df1 100644
4857--- a/arch/avr32/include/asm/elf.h
4858+++ b/arch/avr32/include/asm/elf.h
4859@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4860 the loader. We need to make sure that it is out of the way of the program
4861 that it will "exec", and that there is sufficient room for the brk. */
4862
4863-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4864+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4865
4866+#ifdef CONFIG_PAX_ASLR
4867+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4868+
4869+#define PAX_DELTA_MMAP_LEN 15
4870+#define PAX_DELTA_STACK_LEN 15
4871+#endif
4872
4873 /* This yields a mask that user programs can use to figure out what
4874 instruction set this CPU supports. This could be done in user space,
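The PAX_DELTA_*_LEN values are bit counts: the loader randomizes that many page-granular bits of the mmap and stack bases. Illustrative arithmetic for the 15-bit setting above, with libc rand() standing in for the kernel RNG:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT         12
    #define PAX_DELTA_MMAP_LEN 15    /* from the hunk above */

    int main(void)
    {
        /* 15 random bits shifted by PAGE_SHIFT randomize a 128 MiB window */
        unsigned long delta_mmap =
            ((unsigned long)rand() & ((1UL << PAX_DELTA_MMAP_LEN) - 1))
                << PAGE_SHIFT;
        printf("mmap delta: %#lx (max %#lx)\n",
               delta_mmap, ((1UL << PAX_DELTA_MMAP_LEN) - 1) << PAGE_SHIFT);
        return 0;
    }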
4875diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4876index 479330b..53717a8 100644
4877--- a/arch/avr32/include/asm/kmap_types.h
4878+++ b/arch/avr32/include/asm/kmap_types.h
4879@@ -2,9 +2,9 @@
4880 #define __ASM_AVR32_KMAP_TYPES_H
4881
4882 #ifdef CONFIG_DEBUG_HIGHMEM
4883-# define KM_TYPE_NR 29
4884+# define KM_TYPE_NR 30
4885 #else
4886-# define KM_TYPE_NR 14
4887+# define KM_TYPE_NR 15
4888 #endif
4889
4890 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4891diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4892index d223a8b..69c5210 100644
4893--- a/arch/avr32/mm/fault.c
4894+++ b/arch/avr32/mm/fault.c
4895@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4896
4897 int exception_trace = 1;
4898
4899+#ifdef CONFIG_PAX_PAGEEXEC
4900+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4901+{
4902+ unsigned long i;
4903+
4904+ printk(KERN_ERR "PAX: bytes at PC: ");
4905+ for (i = 0; i < 20; i++) {
4906+ unsigned char c;
4907+ if (get_user(c, (unsigned char *)pc+i))
4908+ printk(KERN_CONT "???????? ");
4909+ else
4910+ printk(KERN_CONT "%02x ", c);
4911+ }
4912+ printk("\n");
4913+}
4914+#endif
4915+
4916 /*
4917 * This routine handles page faults. It determines the address and the
4918 * problem, and then passes it off to one of the appropriate routines.
4919@@ -178,6 +195,16 @@ bad_area:
4920 up_read(&mm->mmap_sem);
4921
4922 if (user_mode(regs)) {
4923+
4924+#ifdef CONFIG_PAX_PAGEEXEC
4925+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4926+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4927+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4928+ do_group_exit(SIGKILL);
4929+ }
4930+ }
4931+#endif
4932+
4933 if (exception_trace && printk_ratelimit())
4934 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4935 "sp %08lx ecr %lu\n",
4936diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4937index 568885a..f8008df 100644
4938--- a/arch/blackfin/include/asm/cache.h
4939+++ b/arch/blackfin/include/asm/cache.h
4940@@ -7,6 +7,7 @@
4941 #ifndef __ARCH_BLACKFIN_CACHE_H
4942 #define __ARCH_BLACKFIN_CACHE_H
4943
4944+#include <linux/const.h>
4945 #include <linux/linkage.h> /* for asmlinkage */
4946
4947 /*
4948@@ -14,7 +15,7 @@
4949 * Blackfin loads 32 bytes for cache
4950 */
4951 #define L1_CACHE_SHIFT 5
4952-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4953+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4954 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4955
4956 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4957diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4958index aea2718..3639a60 100644
4959--- a/arch/cris/include/arch-v10/arch/cache.h
4960+++ b/arch/cris/include/arch-v10/arch/cache.h
4961@@ -1,8 +1,9 @@
4962 #ifndef _ASM_ARCH_CACHE_H
4963 #define _ASM_ARCH_CACHE_H
4964
4965+#include <linux/const.h>
4966 /* Etrax 100LX have 32-byte cache-lines. */
4967-#define L1_CACHE_BYTES 32
4968 #define L1_CACHE_SHIFT 5
4969+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4970
4971 #endif /* _ASM_ARCH_CACHE_H */
4972diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4973index 7caf25d..ee65ac5 100644
4974--- a/arch/cris/include/arch-v32/arch/cache.h
4975+++ b/arch/cris/include/arch-v32/arch/cache.h
4976@@ -1,11 +1,12 @@
4977 #ifndef _ASM_CRIS_ARCH_CACHE_H
4978 #define _ASM_CRIS_ARCH_CACHE_H
4979
4980+#include <linux/const.h>
4981 #include <arch/hwregs/dma.h>
4982
4983 /* A cache-line is 32 bytes. */
4984-#define L1_CACHE_BYTES 32
4985 #define L1_CACHE_SHIFT 5
4986+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4987
4988 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4989
4990diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4991index 102190a..5334cea 100644
4992--- a/arch/frv/include/asm/atomic.h
4993+++ b/arch/frv/include/asm/atomic.h
4994@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
4995 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4996 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4997
4998+#define atomic64_read_unchecked(v) atomic64_read(v)
4999+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5000+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5001+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5002+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5003+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5004+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5005+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5006+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5007+
5008 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5009 {
5010 int c, old;
5011diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5012index 2797163..c2a401df9 100644
5013--- a/arch/frv/include/asm/cache.h
5014+++ b/arch/frv/include/asm/cache.h
5015@@ -12,10 +12,11 @@
5016 #ifndef __ASM_CACHE_H
5017 #define __ASM_CACHE_H
5018
5019+#include <linux/const.h>
5020
5021 /* bytes per L1 cache line */
5022 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5023-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5024+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5025
5026 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5027 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5028diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5029index 43901f2..0d8b865 100644
5030--- a/arch/frv/include/asm/kmap_types.h
5031+++ b/arch/frv/include/asm/kmap_types.h
5032@@ -2,6 +2,6 @@
5033 #ifndef _ASM_KMAP_TYPES_H
5034 #define _ASM_KMAP_TYPES_H
5035
5036-#define KM_TYPE_NR 17
5037+#define KM_TYPE_NR 18
5038
5039 #endif
5040diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5041index 836f147..4cf23f5 100644
5042--- a/arch/frv/mm/elf-fdpic.c
5043+++ b/arch/frv/mm/elf-fdpic.c
5044@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 {
5046 struct vm_area_struct *vma;
5047 struct vm_unmapped_area_info info;
5048+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5049
5050 if (len > TASK_SIZE)
5051 return -ENOMEM;
5052@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5053 if (addr) {
5054 addr = PAGE_ALIGN(addr);
5055 vma = find_vma(current->mm, addr);
5056- if (TASK_SIZE - len >= addr &&
5057- (!vma || addr + len <= vma->vm_start))
5058+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5059 goto success;
5060 }
5061
5062@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5063 info.high_limit = (current->mm->start_stack - 0x00200000);
5064 info.align_mask = 0;
5065 info.align_offset = 0;
5066+ info.threadstack_offset = offset;
5067 addr = vm_unmapped_area(&info);
5068 if (!(addr & ~PAGE_MASK))
5069 goto success;
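
[Annotation] The elf-fdpic hunk replaces the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), which additionally keeps a configurable gap (plus the per-thread random offset from gr_rand_threadstack_offset()) between new mappings and a stack VMA. The real helper is added to mm/mmap.c later in this patch; the following is only a semantic sketch, with a stand-in constant where grsecurity consults its heap_stack_gap sysctl:

    static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                           unsigned long addr, unsigned long len,
                                           unsigned long offset)
    {
        const unsigned long gap = 64UL << 10;   /* stand-in for the sysctl */

        if (!vma)
            return 1;                           /* nothing above us */
        if (addr + len > vma->vm_start)
            return 0;                           /* overlaps the next VMA */
        if (vma->vm_flags & VM_GROWSDOWN)       /* next VMA is a stack */
            return addr + len + gap + offset <= vma->vm_start;
        return 1;
    }
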
5070diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5071index 69952c1..4fa2908 100644
5072--- a/arch/hexagon/include/asm/cache.h
5073+++ b/arch/hexagon/include/asm/cache.h
5074@@ -21,9 +21,11 @@
5075 #ifndef __ASM_CACHE_H
5076 #define __ASM_CACHE_H
5077
5078+#include <linux/const.h>
5079+
5080 /* Bytes per L1 cache line */
5081-#define L1_CACHE_SHIFT (5)
5082-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5083+#define L1_CACHE_SHIFT 5
5084+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5085
5086 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5087
5088diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5089index 074e52b..76afdac 100644
5090--- a/arch/ia64/Kconfig
5091+++ b/arch/ia64/Kconfig
5092@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5093 config KEXEC
5094 bool "kexec system call"
5095 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5096+ depends on !GRKERNSEC_KMEM
5097 help
5098 kexec is a system call that implements the ability to shutdown your
5099 current kernel, and to start another kernel. It is like a reboot
5100diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5101index 970d0bd..e750b9b 100644
5102--- a/arch/ia64/Makefile
5103+++ b/arch/ia64/Makefile
5104@@ -98,5 +98,6 @@ endef
5105 archprepare: make_nr_irqs_h FORCE
5106 PHONY += make_nr_irqs_h FORCE
5107
5108+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5109 make_nr_irqs_h: FORCE
5110 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5111diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5112index 0bf0350..2ad1957 100644
5113--- a/arch/ia64/include/asm/atomic.h
5114+++ b/arch/ia64/include/asm/atomic.h
5115@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5116 #define atomic64_inc(v) atomic64_add(1, (v))
5117 #define atomic64_dec(v) atomic64_sub(1, (v))
5118
5119+#define atomic64_read_unchecked(v) atomic64_read(v)
5120+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5121+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5122+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5123+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5124+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5125+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5126+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5127+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5128+
5129 #endif /* _ASM_IA64_ATOMIC_H */
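
[Annotation] As with frv above, ia64 gets no PAX_REFCOUNT instrumentation, so these *_unchecked atomic64 names are plain aliases. On instrumented architectures (x86 and mips in this patch) the checked operations trap on signed overflow while the _unchecked twins keep wrapping semantics; defining the aliases here lets generic code use one spelling everywhere:

    /* usage sketch: a statistics counter that may legitimately wrap */
    static inline void stat_bump(atomic64_unchecked_t *ctr)
    {
        atomic64_inc_unchecked(ctr);    /* on ia64 this is atomic64_inc() */
    }
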
5130diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5131index f6769eb..1cdb590 100644
5132--- a/arch/ia64/include/asm/barrier.h
5133+++ b/arch/ia64/include/asm/barrier.h
5134@@ -66,7 +66,7 @@
5135 do { \
5136 compiletime_assert_atomic_type(*p); \
5137 barrier(); \
5138- ACCESS_ONCE(*p) = (v); \
5139+ ACCESS_ONCE_RW(*p) = (v); \
5140 } while (0)
5141
5142 #define smp_load_acquire(p) \
5143diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5144index 988254a..e1ee885 100644
5145--- a/arch/ia64/include/asm/cache.h
5146+++ b/arch/ia64/include/asm/cache.h
5147@@ -1,6 +1,7 @@
5148 #ifndef _ASM_IA64_CACHE_H
5149 #define _ASM_IA64_CACHE_H
5150
5151+#include <linux/const.h>
5152
5153 /*
5154 * Copyright (C) 1998-2000 Hewlett-Packard Co
5155@@ -9,7 +10,7 @@
5156
5157 /* Bytes per L1 (data) cache line. */
5158 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5159-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5160+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5161
5162 #ifdef CONFIG_SMP
5163 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5164diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5165index 5a83c5c..4d7f553 100644
5166--- a/arch/ia64/include/asm/elf.h
5167+++ b/arch/ia64/include/asm/elf.h
5168@@ -42,6 +42,13 @@
5169 */
5170 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5171
5172+#ifdef CONFIG_PAX_ASLR
5173+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5174+
5175+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5176+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5177+#endif
5178+
5179 #define PT_IA_64_UNWIND 0x70000001
5180
5181 /* IA-64 relocations: */
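
[Annotation] PAX_ELF_ET_DYN_BASE pins the load base for ET_DYN (PIE) binaries, and the PAX_DELTA_*_LEN values are the number of page-granular address bits that PAX_ASLR randomises: 16 bits for 32-bit personalities, otherwise 3*PAGE_SHIFT - 13, which with ia64's default 16KB pages (PAGE_SHIFT = 14) comes to 29 bits. A paraphrase (not the literal code) of how the binfmt_elf changes elsewhere in this patch consume these values:

    static void pax_pick_deltas_sketch(struct mm_struct *mm)
    {
        mm->delta_mmap  = (pax_get_random_long() &
                           ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
        mm->delta_stack = (pax_get_random_long() &
                           ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
    }
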
5182diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5183index 5767cdf..7462574 100644
5184--- a/arch/ia64/include/asm/pgalloc.h
5185+++ b/arch/ia64/include/asm/pgalloc.h
5186@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5187 pgd_val(*pgd_entry) = __pa(pud);
5188 }
5189
5190+static inline void
5191+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5192+{
5193+ pgd_populate(mm, pgd_entry, pud);
5194+}
5195+
5196 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5197 {
5198 return quicklist_alloc(0, GFP_KERNEL, NULL);
5199@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5200 pud_val(*pud_entry) = __pa(pmd);
5201 }
5202
5203+static inline void
5204+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5205+{
5206+ pud_populate(mm, pud_entry, pmd);
5207+}
5208+
5209 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5210 {
5211 return quicklist_alloc(0, GFP_KERNEL, NULL);
5212diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5213index 7935115..c0eca6a 100644
5214--- a/arch/ia64/include/asm/pgtable.h
5215+++ b/arch/ia64/include/asm/pgtable.h
5216@@ -12,7 +12,7 @@
5217 * David Mosberger-Tang <davidm@hpl.hp.com>
5218 */
5219
5220-
5221+#include <linux/const.h>
5222 #include <asm/mman.h>
5223 #include <asm/page.h>
5224 #include <asm/processor.h>
5225@@ -142,6 +142,17 @@
5226 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5227 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5228 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5229+
5230+#ifdef CONFIG_PAX_PAGEEXEC
5231+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5232+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5233+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5234+#else
5235+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5236+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5237+# define PAGE_COPY_NOEXEC PAGE_COPY
5238+#endif
5239+
5240 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5241 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5242 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5243diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5244index 45698cd..e8e2dbc 100644
5245--- a/arch/ia64/include/asm/spinlock.h
5246+++ b/arch/ia64/include/asm/spinlock.h
5247@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5248 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5249
5250 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5251- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5252+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5253 }
5254
5255 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5256diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5257index 103bedc..0210597 100644
5258--- a/arch/ia64/include/asm/uaccess.h
5259+++ b/arch/ia64/include/asm/uaccess.h
5260@@ -70,6 +70,7 @@
5261 && ((segment).seg == KERNEL_DS.seg \
5262 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5263 })
5264+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5265 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5266
5267 /*
5268@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5269 static inline unsigned long
5270 __copy_to_user (void __user *to, const void *from, unsigned long count)
5271 {
5272+ if (count > INT_MAX)
5273+ return count;
5274+
5275+ if (!__builtin_constant_p(count))
5276+ check_object_size(from, count, true);
5277+
5278 return __copy_user(to, (__force void __user *) from, count);
5279 }
5280
5281 static inline unsigned long
5282 __copy_from_user (void *to, const void __user *from, unsigned long count)
5283 {
5284+ if (count > INT_MAX)
5285+ return count;
5286+
5287+ if (!__builtin_constant_p(count))
5288+ check_object_size(to, count, false);
5289+
5290 return __copy_user((__force void __user *) to, from, count);
5291 }
5292
5293@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5294 ({ \
5295 void __user *__cu_to = (to); \
5296 const void *__cu_from = (from); \
5297- long __cu_len = (n); \
5298+ unsigned long __cu_len = (n); \
5299 \
5300- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5301+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5302+ if (!__builtin_constant_p(n)) \
5303+ check_object_size(__cu_from, __cu_len, true); \
5304 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5305+ } \
5306 __cu_len; \
5307 })
5308
5309@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5310 ({ \
5311 void *__cu_to = (to); \
5312 const void __user *__cu_from = (from); \
5313- long __cu_len = (n); \
5314+ unsigned long __cu_len = (n); \
5315 \
5316 __chk_user_ptr(__cu_from); \
5317- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5318+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5319+ if (!__builtin_constant_p(n)) \
5320+ check_object_size(__cu_to, __cu_len, false); \
5321 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5322+ } \
5323 __cu_len; \
5324 })
5325
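[Annotation] Two hardenings in the uaccess hunk: lengths above INT_MAX are refused outright, so a negative value sign-extended into unsigned long fails fast instead of defeating the range check, and non-constant copies go through check_object_size() (PAX_USERCOPY), which verifies that the kernel side of the copy stays inside one slab object or the current stack frame. Compile-time-constant sizes are skipped because those can be checked at build time. A hypothetical driver fragment showing what the runtime check catches:

    struct pkt { char data[64]; };      /* hypothetical structure */

    static long pkt_read(struct pkt *p, void __user *dst, size_t len)
    {
        /* if a caller passes len > 64, check_object_size() reports the
         * overread instead of letting adjacent heap memory leak out */
        return __copy_to_user(dst, p->data, len) ? -EFAULT : 0;
    }
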
5326diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5327index 29754aa..06d2838 100644
5328--- a/arch/ia64/kernel/module.c
5329+++ b/arch/ia64/kernel/module.c
5330@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5331 }
5332
5333 static inline int
5334+in_init_rx (const struct module *mod, uint64_t addr)
5335+{
5336+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5337+}
5338+
5339+static inline int
5340+in_init_rw (const struct module *mod, uint64_t addr)
5341+{
5342+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5343+}
5344+
5345+static inline int
5346 in_init (const struct module *mod, uint64_t addr)
5347 {
5348- return addr - (uint64_t) mod->module_init < mod->init_size;
5349+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5350+}
5351+
5352+static inline int
5353+in_core_rx (const struct module *mod, uint64_t addr)
5354+{
5355+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5356+}
5357+
5358+static inline int
5359+in_core_rw (const struct module *mod, uint64_t addr)
5360+{
5361+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5362 }
5363
5364 static inline int
5365 in_core (const struct module *mod, uint64_t addr)
5366 {
5367- return addr - (uint64_t) mod->module_core < mod->core_size;
5368+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5369 }
5370
5371 static inline int
5372@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5373 break;
5374
5375 case RV_BDREL:
5376- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5377+ if (in_init_rx(mod, val))
5378+ val -= (uint64_t) mod->module_init_rx;
5379+ else if (in_init_rw(mod, val))
5380+ val -= (uint64_t) mod->module_init_rw;
5381+ else if (in_core_rx(mod, val))
5382+ val -= (uint64_t) mod->module_core_rx;
5383+ else if (in_core_rw(mod, val))
5384+ val -= (uint64_t) mod->module_core_rw;
5385 break;
5386
5387 case RV_LTV:
5388@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5389 * addresses have been selected...
5390 */
5391 uint64_t gp;
5392- if (mod->core_size > MAX_LTOFF)
5393+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5394 /*
5395 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5396 * at the end of the module.
5397 */
5398- gp = mod->core_size - MAX_LTOFF / 2;
5399+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5400 else
5401- gp = mod->core_size / 2;
5402- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5403+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5404+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5405 mod->arch.gp = gp;
5406 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5407 }
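
[Annotation] Under PaX's KERNEXEC/MPROTECT layout, module memory is split into an RX region (code, never writable) and an RW region (data, never executable), so every address classification and the small-data pointer (gp) placement above must account for two disjoint ranges per module. The range tests use the standard overflow-safe unsigned idiom:

    /* true iff base <= addr < base + size; if addr < base the
     * subtraction wraps to a huge value and the comparison fails */
    static inline int in_range(unsigned long addr, unsigned long base,
                               unsigned long size)
    {
        return addr - base < size;
    }
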
5408diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5409index c39c3cd..3c77738 100644
5410--- a/arch/ia64/kernel/palinfo.c
5411+++ b/arch/ia64/kernel/palinfo.c
5412@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5413 return NOTIFY_OK;
5414 }
5415
5416-static struct notifier_block __refdata palinfo_cpu_notifier =
5417+static struct notifier_block palinfo_cpu_notifier =
5418 {
5419 .notifier_call = palinfo_cpu_callback,
5420 .priority = 0,
5421diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5422index 41e33f8..65180b2a 100644
5423--- a/arch/ia64/kernel/sys_ia64.c
5424+++ b/arch/ia64/kernel/sys_ia64.c
5425@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5426 unsigned long align_mask = 0;
5427 struct mm_struct *mm = current->mm;
5428 struct vm_unmapped_area_info info;
5429+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5430
5431 if (len > RGN_MAP_LIMIT)
5432 return -ENOMEM;
5433@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5434 if (REGION_NUMBER(addr) == RGN_HPAGE)
5435 addr = 0;
5436 #endif
5437+
5438+#ifdef CONFIG_PAX_RANDMMAP
5439+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5440+ addr = mm->free_area_cache;
5441+ else
5442+#endif
5443+
5444 if (!addr)
5445 addr = TASK_UNMAPPED_BASE;
5446
5447@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5448 info.high_limit = TASK_SIZE;
5449 info.align_mask = align_mask;
5450 info.align_offset = 0;
5451+ info.threadstack_offset = offset;
5452 return vm_unmapped_area(&info);
5453 }
5454
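[Annotation] With PAX_RANDMMAP active, the caller's address hint is deliberately overridden with mm->free_area_cache so userland cannot sidestep randomisation by passing a fixed hint, and the random thread-stack offset is handed to the allocator for the gap check. A paraphrase of where that randomised base comes from (set up at exec time elsewhere in this patch):

    static void pick_mmap_base_sketch(struct mm_struct *mm)
    {
        mm->mmap_base = TASK_UNMAPPED_BASE;
        if (mm->pax_flags & MF_PAX_RANDMMAP)
            mm->mmap_base += mm->delta_mmap;    /* chosen at ELF load */
        mm->free_area_cache = mm->mmap_base;
    }
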
5455diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5456index 84f8a52..7c76178 100644
5457--- a/arch/ia64/kernel/vmlinux.lds.S
5458+++ b/arch/ia64/kernel/vmlinux.lds.S
5459@@ -192,7 +192,7 @@ SECTIONS {
5460 /* Per-cpu data: */
5461 . = ALIGN(PERCPU_PAGE_SIZE);
5462 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5463- __phys_per_cpu_start = __per_cpu_load;
5464+ __phys_per_cpu_start = per_cpu_load;
5465 /*
5466 * ensure percpu data fits
5467 * into percpu page size
5468diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5469index ba5ba7a..36e9d3a 100644
5470--- a/arch/ia64/mm/fault.c
5471+++ b/arch/ia64/mm/fault.c
5472@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5473 return pte_present(pte);
5474 }
5475
5476+#ifdef CONFIG_PAX_PAGEEXEC
5477+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5478+{
5479+ unsigned long i;
5480+
5481+ printk(KERN_ERR "PAX: bytes at PC: ");
5482+ for (i = 0; i < 8; i++) {
5483+ unsigned int c;
5484+ if (get_user(c, (unsigned int *)pc+i))
5485+ printk(KERN_CONT "???????? ");
5486+ else
5487+ printk(KERN_CONT "%08x ", c);
5488+ }
5489+ printk("\n");
5490+}
5491+#endif
5492+
5493 # define VM_READ_BIT 0
5494 # define VM_WRITE_BIT 1
5495 # define VM_EXEC_BIT 2
5496@@ -151,8 +168,21 @@ retry:
5497 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5498 goto bad_area;
5499
5500- if ((vma->vm_flags & mask) != mask)
5501+ if ((vma->vm_flags & mask) != mask) {
5502+
5503+#ifdef CONFIG_PAX_PAGEEXEC
5504+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5505+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5506+ goto bad_area;
5507+
5508+ up_read(&mm->mmap_sem);
5509+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5510+ do_group_exit(SIGKILL);
5511+ }
5512+#endif
5513+
5514 goto bad_area;
5515+ }
5516
5517 /*
5518 * If for any reason at all we couldn't handle the fault, make
5519diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5520index 76069c1..c2aa816 100644
5521--- a/arch/ia64/mm/hugetlbpage.c
5522+++ b/arch/ia64/mm/hugetlbpage.c
5523@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5524 unsigned long pgoff, unsigned long flags)
5525 {
5526 struct vm_unmapped_area_info info;
5527+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5528
5529 if (len > RGN_MAP_LIMIT)
5530 return -ENOMEM;
5531@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5532 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5533 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5534 info.align_offset = 0;
5535+ info.threadstack_offset = offset;
5536 return vm_unmapped_area(&info);
5537 }
5538
5539diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5540index 6b33457..88b5124 100644
5541--- a/arch/ia64/mm/init.c
5542+++ b/arch/ia64/mm/init.c
5543@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5544 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5545 vma->vm_end = vma->vm_start + PAGE_SIZE;
5546 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5547+
5548+#ifdef CONFIG_PAX_PAGEEXEC
5549+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5550+ vma->vm_flags &= ~VM_EXEC;
5551+
5552+#ifdef CONFIG_PAX_MPROTECT
5553+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5554+ vma->vm_flags &= ~VM_MAYEXEC;
5555+#endif
5556+
5557+ }
5558+#endif
5559+
5560 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5561 down_write(&current->mm->mmap_sem);
5562 if (insert_vm_struct(current->mm, vma)) {
5563@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5564 gate_vma.vm_start = FIXADDR_USER_START;
5565 gate_vma.vm_end = FIXADDR_USER_END;
5566 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5567- gate_vma.vm_page_prot = __P101;
5568+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5569
5570 return 0;
5571 }
5572diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5573index 40b3ee98..8c2c112 100644
5574--- a/arch/m32r/include/asm/cache.h
5575+++ b/arch/m32r/include/asm/cache.h
5576@@ -1,8 +1,10 @@
5577 #ifndef _ASM_M32R_CACHE_H
5578 #define _ASM_M32R_CACHE_H
5579
5580+#include <linux/const.h>
5581+
5582 /* L1 cache line size */
5583 #define L1_CACHE_SHIFT 4
5584-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5585+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5586
5587 #endif /* _ASM_M32R_CACHE_H */
5588diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5589index 82abd15..d95ae5d 100644
5590--- a/arch/m32r/lib/usercopy.c
5591+++ b/arch/m32r/lib/usercopy.c
5592@@ -14,6 +14,9 @@
5593 unsigned long
5594 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5595 {
5596+ if ((long)n < 0)
5597+ return n;
5598+
5599 prefetch(from);
5600 if (access_ok(VERIFY_WRITE, to, n))
5601 __copy_user(to,from,n);
5602@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5603 unsigned long
5604 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5605 {
5606+ if ((long)n < 0)
5607+ return n;
5608+
5609 prefetchw(to);
5610 if (access_ok(VERIFY_READ, from, n))
5611 __copy_user_zeroing(to,from,n);
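
[Annotation] The m32r guard catches lengths whose sign bit is set: a negative int sign-extended into the unsigned long count becomes enormous, and returning n untouched follows the uaccess convention of "that many bytes were not copied". A hypothetical caller bug it defuses:

    static void caller_bug(void __user *dst, const void *src)
    {
        int user_len = -4;                         /* unvalidated length */
        unsigned long n = (unsigned long)user_len; /* 0xfffffffc */

        /* (long)n < 0, so the copy is refused before access_ok()
         * is even consulted on a wrapping range */
        __generic_copy_to_user(dst, src, n);
    }
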
5612diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5613index 0395c51..5f26031 100644
5614--- a/arch/m68k/include/asm/cache.h
5615+++ b/arch/m68k/include/asm/cache.h
5616@@ -4,9 +4,11 @@
5617 #ifndef __ARCH_M68K_CACHE_H
5618 #define __ARCH_M68K_CACHE_H
5619
5620+#include <linux/const.h>
5621+
5622 /* bytes per L1 cache line */
5623 #define L1_CACHE_SHIFT 4
5624-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5625+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5626
5627 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5628
5629diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5630index d703d8e..a8e2d70 100644
5631--- a/arch/metag/include/asm/barrier.h
5632+++ b/arch/metag/include/asm/barrier.h
5633@@ -90,7 +90,7 @@ static inline void fence(void)
5634 do { \
5635 compiletime_assert_atomic_type(*p); \
5636 smp_mb(); \
5637- ACCESS_ONCE(*p) = (v); \
5638+ ACCESS_ONCE_RW(*p) = (v); \
5639 } while (0)
5640
5641 #define smp_load_acquire(p) \
5642diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5643index 3c32075..ae0ae75 100644
5644--- a/arch/metag/mm/hugetlbpage.c
5645+++ b/arch/metag/mm/hugetlbpage.c
5646@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5647 info.high_limit = TASK_SIZE;
5648 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5649 info.align_offset = 0;
5650+ info.threadstack_offset = 0;
5651 return vm_unmapped_area(&info);
5652 }
5653
5654diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5655index 4efe96a..60e8699 100644
5656--- a/arch/microblaze/include/asm/cache.h
5657+++ b/arch/microblaze/include/asm/cache.h
5658@@ -13,11 +13,12 @@
5659 #ifndef _ASM_MICROBLAZE_CACHE_H
5660 #define _ASM_MICROBLAZE_CACHE_H
5661
5662+#include <linux/const.h>
5663 #include <asm/registers.h>
5664
5665 #define L1_CACHE_SHIFT 5
5666 /* word-granular cache in microblaze */
5667-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5668+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5669
5670 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5671
5672diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5673index 843713c..b6a87b9 100644
5674--- a/arch/mips/Kconfig
5675+++ b/arch/mips/Kconfig
5676@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
5677
5678 config KEXEC
5679 bool "Kexec system call"
5680+ depends on !GRKERNSEC_KMEM
5681 help
5682 kexec is a system call that implements the ability to shutdown your
5683 current kernel, and to start another kernel. It is like a reboot
5684diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5685index 3778655..1dff0a9 100644
5686--- a/arch/mips/cavium-octeon/dma-octeon.c
5687+++ b/arch/mips/cavium-octeon/dma-octeon.c
5688@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5689 if (dma_release_from_coherent(dev, order, vaddr))
5690 return;
5691
5692- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5693+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5694 }
5695
5696 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5697diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5698index 857da84..3f4458b 100644
5699--- a/arch/mips/include/asm/atomic.h
5700+++ b/arch/mips/include/asm/atomic.h
5701@@ -22,15 +22,39 @@
5702 #include <asm/cmpxchg.h>
5703 #include <asm/war.h>
5704
5705+#ifdef CONFIG_GENERIC_ATOMIC64
5706+#include <asm-generic/atomic64.h>
5707+#endif
5708+
5709 #define ATOMIC_INIT(i) { (i) }
5710
5711+#ifdef CONFIG_64BIT
5712+#define _ASM_EXTABLE(from, to) \
5713+" .section __ex_table,\"a\"\n" \
5714+" .dword " #from ", " #to"\n" \
5715+" .previous\n"
5716+#else
5717+#define _ASM_EXTABLE(from, to) \
5718+" .section __ex_table,\"a\"\n" \
5719+" .word " #from ", " #to"\n" \
5720+" .previous\n"
5721+#endif
5722+
5723 /*
5724 * atomic_read - read atomic variable
5725 * @v: pointer of type atomic_t
5726 *
5727 * Atomically reads the value of @v.
5728 */
5729-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5730+static inline int atomic_read(const atomic_t *v)
5731+{
5732+ return ACCESS_ONCE(v->counter);
5733+}
5734+
5735+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5736+{
5737+ return ACCESS_ONCE(v->counter);
5738+}
5739
5740 /*
5741 * atomic_set - set atomic variable
5742@@ -39,47 +63,77 @@
5743 *
5744 * Atomically sets the value of @v to @i.
5745 */
5746-#define atomic_set(v, i) ((v)->counter = (i))
5747+static inline void atomic_set(atomic_t *v, int i)
5748+{
5749+ v->counter = i;
5750+}
5751
5752-#define ATOMIC_OP(op, c_op, asm_op) \
5753-static __inline__ void atomic_##op(int i, atomic_t * v) \
5754+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5755+{
5756+ v->counter = i;
5757+}
5758+
5759+#ifdef CONFIG_PAX_REFCOUNT
5760+#define __OVERFLOW_POST \
5761+ " b 4f \n" \
5762+ " .set noreorder \n" \
5763+ "3: b 5f \n" \
5764+ " move %0, %1 \n" \
5765+ " .set reorder \n"
5766+#define __OVERFLOW_EXTABLE \
5767+ "3:\n" \
5768+ _ASM_EXTABLE(2b, 3b)
5769+#else
5770+#define __OVERFLOW_POST
5771+#define __OVERFLOW_EXTABLE
5772+#endif
5773+
5774+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5775+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5776 { \
5777 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5778 int temp; \
5779 \
5780 __asm__ __volatile__( \
5781- " .set arch=r4000 \n" \
5782- "1: ll %0, %1 # atomic_" #op " \n" \
5783- " " #asm_op " %0, %2 \n" \
5784+ " .set mips3 \n" \
5785+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5786+ "2: " #asm_op " %0, %2 \n" \
5787 " sc %0, %1 \n" \
5788 " beqzl %0, 1b \n" \
5789+ extable \
5790 " .set mips0 \n" \
5791 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5792 : "Ir" (i)); \
5793 } else if (kernel_uses_llsc) { \
5794 int temp; \
5795 \
5796- do { \
5797- __asm__ __volatile__( \
5798- " .set arch=r4000 \n" \
5799- " ll %0, %1 # atomic_" #op "\n" \
5800- " " #asm_op " %0, %2 \n" \
5801- " sc %0, %1 \n" \
5802- " .set mips0 \n" \
5803- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5804- : "Ir" (i)); \
5805- } while (unlikely(!temp)); \
5806+ __asm__ __volatile__( \
5807+ " .set mips3 \n" \
5808+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5809+ "2: " #asm_op " %0, %2 \n" \
5810+ " sc %0, %1 \n" \
5811+ " beqz %0, 1b \n" \
5812+ extable \
5813+ " .set mips0 \n" \
5814+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5815+ : "Ir" (i)); \
5816 } else { \
5817 unsigned long flags; \
5818 \
5819 raw_local_irq_save(flags); \
5820- v->counter c_op i; \
5821+ __asm__ __volatile__( \
5822+ "2: " #asm_op " %0, %1 \n" \
5823+ extable \
5824+ : "+r" (v->counter) : "Ir" (i)); \
5825 raw_local_irq_restore(flags); \
5826 } \
5827 }
5828
5829-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5830-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5831+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE) \
5832+	__ATOMIC_OP(op, _unchecked, asm_op##u, )
5833+
5834+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5835+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5836 { \
5837 int result; \
5838 \
5839@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5840 int temp; \
5841 \
5842 __asm__ __volatile__( \
5843- " .set arch=r4000 \n" \
5844- "1: ll %1, %2 # atomic_" #op "_return \n" \
5845- " " #asm_op " %0, %1, %3 \n" \
5846+ " .set mips3 \n" \
5847+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5848+ "2: " #asm_op " %0, %1, %3 \n" \
5849 " sc %0, %2 \n" \
5850 " beqzl %0, 1b \n" \
5851- " " #asm_op " %0, %1, %3 \n" \
5852+ post_op \
5853+ extable \
5854+ "4: " #asm_op " %0, %1, %3 \n" \
5855+ "5: \n" \
5856 " .set mips0 \n" \
5857 : "=&r" (result), "=&r" (temp), \
5858 "+" GCC_OFF12_ASM() (v->counter) \
5859@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5860 } else if (kernel_uses_llsc) { \
5861 int temp; \
5862 \
5863- do { \
5864- __asm__ __volatile__( \
5865- " .set arch=r4000 \n" \
5866- " ll %1, %2 # atomic_" #op "_return \n" \
5867- " " #asm_op " %0, %1, %3 \n" \
5868- " sc %0, %2 \n" \
5869- " .set mips0 \n" \
5870- : "=&r" (result), "=&r" (temp), \
5871- "+" GCC_OFF12_ASM() (v->counter) \
5872- : "Ir" (i)); \
5873- } while (unlikely(!result)); \
5874+ __asm__ __volatile__( \
5875+ " .set mips3 \n" \
5876+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5877+ "2: " #asm_op " %0, %1, %3 \n" \
5878+ " sc %0, %2 \n" \
5879+ post_op \
5880+ extable \
5881+ "4: " #asm_op " %0, %1, %3 \n" \
5882+ "5: \n" \
5883+ " .set mips0 \n" \
5884+ : "=&r" (result), "=&r" (temp), \
5885+ "+" GCC_OFF12_ASM() (v->counter) \
5886+ : "Ir" (i)); \
5887 \
5888-			result = temp; result c_op i;			      \
5889 } else { \
5890 unsigned long flags; \
5891 \
5892 raw_local_irq_save(flags); \
5893- result = v->counter; \
5894- result c_op i; \
5895- v->counter = result; \
5896+ __asm__ __volatile__( \
5897+ " lw %0, %1 \n" \
5898+ "2: " #asm_op " %0, %1, %2 \n" \
5899+ " sw %0, %1 \n" \
5900+ "3: \n" \
5901+ extable \
5902+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
5903+ : "Ir" (i)); \
5904 raw_local_irq_restore(flags); \
5905 } \
5906 \
5907@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5908 return result; \
5909 }
5910
5911-#define ATOMIC_OPS(op, c_op, asm_op) \
5912- ATOMIC_OP(op, c_op, asm_op) \
5913- ATOMIC_OP_RETURN(op, c_op, asm_op)
5914+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) \
5915+	__ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , )
5916
5917-ATOMIC_OPS(add, +=, addu)
5918-ATOMIC_OPS(sub, -=, subu)
5919+#define ATOMIC_OPS(op, asm_op) \
5920+ ATOMIC_OP(op, asm_op) \
5921+ ATOMIC_OP_RETURN(op, asm_op)
5922+
5923+ATOMIC_OPS(add, add)
5924+ATOMIC_OPS(sub, sub)
5925
5926 #undef ATOMIC_OPS
5927 #undef ATOMIC_OP_RETURN
5928+#undef __ATOMIC_OP_RETURN
5929 #undef ATOMIC_OP
5930+#undef __ATOMIC_OP
5931
5932 /*
5933 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5934@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5935 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5936 * The function returns the old value of @v minus @i.
5937 */
5938-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5939+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5940 {
5941 int result;
5942
5943@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5944 return result;
5945 }
5946
5947-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5948-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5949+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5950+{
5951+ return cmpxchg(&v->counter, old, new);
5952+}
5953+
5954+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5955+ int new)
5956+{
5957+ return cmpxchg(&(v->counter), old, new);
5958+}
5959+
5960+static inline int atomic_xchg(atomic_t *v, int new)
5961+{
5962+ return xchg(&v->counter, new);
5963+}
5964+
5965+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5966+{
5967+ return xchg(&(v->counter), new);
5968+}
5969
5970 /**
5971 * __atomic_add_unless - add unless the number is a given value
5972@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5973
5974 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5975 #define atomic_inc_return(v) atomic_add_return(1, (v))
5976+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5977+{
5978+ return atomic_add_return_unchecked(1, v);
5979+}
5980
5981 /*
5982 * atomic_sub_and_test - subtract value from variable and test result
5983@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5984 * other cases.
5985 */
5986 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5987+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5988+{
5989+ return atomic_add_return_unchecked(1, v) == 0;
5990+}
5991
5992 /*
5993 * atomic_dec_and_test - decrement by 1 and test
5994@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5995 * Atomically increments @v by 1.
5996 */
5997 #define atomic_inc(v) atomic_add(1, (v))
5998+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5999+{
6000+ atomic_add_unchecked(1, v);
6001+}
6002
6003 /*
6004 * atomic_dec - decrement and test
6005@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6006 * Atomically decrements @v by 1.
6007 */
6008 #define atomic_dec(v) atomic_sub(1, (v))
6009+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6010+{
6011+ atomic_sub_unchecked(1, v);
6012+}
6013
6014 /*
6015 * atomic_add_negative - add and test if negative
6016@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6017 * @v: pointer of type atomic64_t
6018 *
6019 */
6020-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6021+static inline long atomic64_read(const atomic64_t *v)
6022+{
6023+ return ACCESS_ONCE(v->counter);
6024+}
6025+
6026+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6027+{
6028+ return ACCESS_ONCE(v->counter);
6029+}
6030
6031 /*
6032 * atomic64_set - set atomic variable
6033 * @v: pointer of type atomic64_t
6034 * @i: required value
6035 */
6036-#define atomic64_set(v, i) ((v)->counter = (i))
6037+static inline void atomic64_set(atomic64_t *v, long i)
6038+{
6039+ v->counter = i;
6040+}
6041
6042-#define ATOMIC64_OP(op, c_op, asm_op) \
6043-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6044+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6045+{
6046+ v->counter = i;
6047+}
6048+
6049+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6050+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6051 { \
6052 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6053 long temp; \
6054 \
6055 __asm__ __volatile__( \
6056- " .set arch=r4000 \n" \
6057- "1: lld %0, %1 # atomic64_" #op " \n" \
6058- " " #asm_op " %0, %2 \n" \
6059+ " .set mips3 \n" \
6060+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6061+ "2: " #asm_op " %0, %2 \n" \
6062 " scd %0, %1 \n" \
6063 " beqzl %0, 1b \n" \
6064+ extable \
6065 " .set mips0 \n" \
6066 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6067 : "Ir" (i)); \
6068 } else if (kernel_uses_llsc) { \
6069 long temp; \
6070 \
6071- do { \
6072- __asm__ __volatile__( \
6073- " .set arch=r4000 \n" \
6074- " lld %0, %1 # atomic64_" #op "\n" \
6075- " " #asm_op " %0, %2 \n" \
6076- " scd %0, %1 \n" \
6077- " .set mips0 \n" \
6078- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6079- : "Ir" (i)); \
6080- } while (unlikely(!temp)); \
6081+ __asm__ __volatile__( \
6082+ " .set mips3 \n" \
6083+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6084+ "2: " #asm_op " %0, %2 \n" \
6085+ " scd %0, %1 \n" \
6086+ " beqz %0, 1b \n" \
6087+ extable \
6088+ " .set mips0 \n" \
6089+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6090+ : "Ir" (i)); \
6091 } else { \
6092 unsigned long flags; \
6093 \
6094 raw_local_irq_save(flags); \
6095- v->counter c_op i; \
6096+ __asm__ __volatile__( \
6097+ "2: " #asm_op " %0, %1 \n" \
6098+ extable \
6099+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
6100 raw_local_irq_restore(flags); \
6101 } \
6102 }
6103
6104-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6105-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6106+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE) \
6107+	__ATOMIC64_OP(op, _unchecked, asm_op##u, )
6108+
6109+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6110+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6111 { \
6112 long result; \
6113 \
6114@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6115 long temp; \
6116 \
6117 __asm__ __volatile__( \
6118- " .set arch=r4000 \n" \
6119+ " .set mips3 \n" \
6120 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6121- " " #asm_op " %0, %1, %3 \n" \
6122+ "2: " #asm_op " %0, %1, %3 \n" \
6123 " scd %0, %2 \n" \
6124 " beqzl %0, 1b \n" \
6125- " " #asm_op " %0, %1, %3 \n" \
6126+ post_op \
6127+ extable \
6128+ "4: " #asm_op " %0, %1, %3 \n" \
6129+ "5: \n" \
6130 " .set mips0 \n" \
6131 : "=&r" (result), "=&r" (temp), \
6132 "+" GCC_OFF12_ASM() (v->counter) \
6133@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6134 } else if (kernel_uses_llsc) { \
6135 long temp; \
6136 \
6137- do { \
6138- __asm__ __volatile__( \
6139- " .set arch=r4000 \n" \
6140- " lld %1, %2 # atomic64_" #op "_return\n" \
6141- " " #asm_op " %0, %1, %3 \n" \
6142- " scd %0, %2 \n" \
6143- " .set mips0 \n" \
6144- : "=&r" (result), "=&r" (temp), \
6145- "=" GCC_OFF12_ASM() (v->counter) \
6146- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6147- : "memory"); \
6148- } while (unlikely(!result)); \
6149+ __asm__ __volatile__( \
6150+ " .set mips3 \n" \
6151+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6152+ "2: " #asm_op " %0, %1, %3 \n" \
6153+ " scd %0, %2 \n" \
6154+ " beqz %0, 1b \n" \
6155+ post_op \
6156+ extable \
6157+ "4: " #asm_op " %0, %1, %3 \n" \
6158+ "5: \n" \
6159+ " .set mips0 \n" \
6160+ : "=&r" (result), "=&r" (temp), \
6161+ "=" GCC_OFF12_ASM() (v->counter) \
6162+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6163+ : "memory"); \
6164 \
6165-			result = temp; result c_op i;			      \
6166 } else { \
6167 unsigned long flags; \
6168 \
6169 raw_local_irq_save(flags); \
6170- result = v->counter; \
6171- result c_op i; \
6172- v->counter = result; \
6173+ __asm__ __volatile__( \
6174+ " ld %0, %1 \n" \
6175+ "2: " #asm_op " %0, %1, %2 \n" \
6176+ " sd %0, %1 \n" \
6177+ "3: \n" \
6178+ extable \
6179+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
6180+ : "Ir" (i)); \
6181 raw_local_irq_restore(flags); \
6182 } \
6183 \
6184@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6185 return result; \
6186 }
6187
6188-#define ATOMIC64_OPS(op, c_op, asm_op) \
6189- ATOMIC64_OP(op, c_op, asm_op) \
6190- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6191+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) \
6192+	__ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , )
6193
6194-ATOMIC64_OPS(add, +=, daddu)
6195-ATOMIC64_OPS(sub, -=, dsubu)
6196+#define ATOMIC64_OPS(op, asm_op) \
6197+ ATOMIC64_OP(op, asm_op) \
6198+ ATOMIC64_OP_RETURN(op, asm_op)
6199+
6200+ATOMIC64_OPS(add, dadd)
6201+ATOMIC64_OPS(sub, dsub)
6202
6203 #undef ATOMIC64_OPS
6204 #undef ATOMIC64_OP_RETURN
6205+#undef __ATOMIC64_OP_RETURN
6206 #undef ATOMIC64_OP
6207+#undef __ATOMIC64_OP
6208+#undef __OVERFLOW_EXTABLE
6209+#undef __OVERFLOW_POST
6210
6211 /*
6212 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6213@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6214 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6215 * The function returns the old value of @v minus @i.
6216 */
6217-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6218+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6219 {
6220 long result;
6221
6222@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6223 return result;
6224 }
6225
6226-#define atomic64_cmpxchg(v, o, n) \
6227- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6228-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6229+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6230+{
6231+ return cmpxchg(&v->counter, old, new);
6232+}
6233+
6234+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6235+ long new)
6236+{
6237+ return cmpxchg(&(v->counter), old, new);
6238+}
6239+
6240+static inline long atomic64_xchg(atomic64_t *v, long new)
6241+{
6242+ return xchg(&v->counter, new);
6243+}
6244+
6245+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6246+{
6247+ return xchg(&(v->counter), new);
6248+}
6249
6250 /**
6251 * atomic64_add_unless - add unless the number is a given value
6252@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6253
6254 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6255 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6256+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6257
6258 /*
6259 * atomic64_sub_and_test - subtract value from variable and test result
6260@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6261 * other cases.
6262 */
6263 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6264+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6265
6266 /*
6267 * atomic64_dec_and_test - decrement by 1 and test
6268@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6269 * Atomically increments @v by 1.
6270 */
6271 #define atomic64_inc(v) atomic64_add(1, (v))
6272+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6273
6274 /*
6275 * atomic64_dec - decrement and test
6276@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6277 * Atomically decrements @v by 1.
6278 */
6279 #define atomic64_dec(v) atomic64_sub(1, (v))
6280+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6281
6282 /*
6283 * atomic64_add_negative - add and test if negative
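
[Annotation] The MIPS atomic rewrite is the PAX_REFCOUNT core for this architecture: the checked operations are generated with the trapping instructions (add/dadd/sub/dsub raise an integer-overflow exception on signed wrap) while the generated *_unchecked twins keep the wrapping addu/daddu forms, and the new numbered labels plus _ASM_EXTABLE entries let the overflow trap handler report the event and resume safely with the old value. Roughly, a checked atomic_add_return() on an LL/SC CPU expands to (sketch, labels as in the macros above; the move sits in the branch delay slot under .set noreorder):

    1:  ll    %1, v->counter    # load-linked old value
    2:  add   %0, %1, i         # traps on signed overflow
        sc    %0, v->counter    # store-conditional
        beqz  %0, 1b            # retry if the SC failed
        b     4f                # normal path
    3:  b     5f                # overflow fixup lands here...
        move  %0, %1            # ...and returns the old value
    4:  add   %0, %1, i         # compute the return value
    5:

The .set arch=r4000 to .set mips3 change merely names the same ISA level differently for the assembler.
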
6284diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6285index 2b8bbbc..4556df6 100644
6286--- a/arch/mips/include/asm/barrier.h
6287+++ b/arch/mips/include/asm/barrier.h
6288@@ -133,7 +133,7 @@
6289 do { \
6290 compiletime_assert_atomic_type(*p); \
6291 smp_mb(); \
6292- ACCESS_ONCE(*p) = (v); \
6293+ ACCESS_ONCE_RW(*p) = (v); \
6294 } while (0)
6295
6296 #define smp_load_acquire(p) \
6297diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6298index b4db69f..8f3b093 100644
6299--- a/arch/mips/include/asm/cache.h
6300+++ b/arch/mips/include/asm/cache.h
6301@@ -9,10 +9,11 @@
6302 #ifndef _ASM_CACHE_H
6303 #define _ASM_CACHE_H
6304
6305+#include <linux/const.h>
6306 #include <kmalloc.h>
6307
6308 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6309-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6310+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6311
6312 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6313 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6314diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6315index eb4d95d..f2f7f93 100644
6316--- a/arch/mips/include/asm/elf.h
6317+++ b/arch/mips/include/asm/elf.h
6318@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6319 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6320 #endif
6321
6322+#ifdef CONFIG_PAX_ASLR
6323+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6324+
6325+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6326+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6327+#endif
6328+
6329 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6330 struct linux_binprm;
6331 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6332 int uses_interp);
6333
6334-struct mm_struct;
6335-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6336-#define arch_randomize_brk arch_randomize_brk
6337-
6338 struct arch_elf_state {
6339 int fp_abi;
6340 int interp_fp_abi;
6341diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6342index c1f6afa..38cc6e9 100644
6343--- a/arch/mips/include/asm/exec.h
6344+++ b/arch/mips/include/asm/exec.h
6345@@ -12,6 +12,6 @@
6346 #ifndef _ASM_EXEC_H
6347 #define _ASM_EXEC_H
6348
6349-extern unsigned long arch_align_stack(unsigned long sp);
6350+#define arch_align_stack(x) ((x) & ~0xfUL)
6351
6352 #endif /* _ASM_EXEC_H */
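
[Annotation] Turning arch_align_stack() into a pure alignment mask removes the extra 0..PAGE_SIZE random shove at exec time; under this patch stack randomisation is handled by PaX itself (RANDUSTACK/delta_stack), so the per-arch helper, deleted from arch/mips/kernel/process.c further down, becomes redundant. For reference, the helper being replaced:

    unsigned long arch_align_stack(unsigned long sp)
    {
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
            sp -= get_random_int() & ~PAGE_MASK;

        return sp & ALMASK;     /* keep only the ABI stack alignment */
    }
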
6353diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6354index 9e8ef59..1139d6b 100644
6355--- a/arch/mips/include/asm/hw_irq.h
6356+++ b/arch/mips/include/asm/hw_irq.h
6357@@ -10,7 +10,7 @@
6358
6359 #include <linux/atomic.h>
6360
6361-extern atomic_t irq_err_count;
6362+extern atomic_unchecked_t irq_err_count;
6363
6364 /*
6365 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6366diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6367index 46dfc3c..a16b13a 100644
6368--- a/arch/mips/include/asm/local.h
6369+++ b/arch/mips/include/asm/local.h
6370@@ -12,15 +12,25 @@ typedef struct
6371 atomic_long_t a;
6372 } local_t;
6373
6374+typedef struct {
6375+ atomic_long_unchecked_t a;
6376+} local_unchecked_t;
6377+
6378 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6379
6380 #define local_read(l) atomic_long_read(&(l)->a)
6381+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6382 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6383+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6384
6385 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6386+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6387 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6388+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6389 #define local_inc(l) atomic_long_inc(&(l)->a)
6390+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6391 #define local_dec(l) atomic_long_dec(&(l)->a)
6392+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6393
6394 /*
6395 * Same as above, but return the result value
6396@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6397 return result;
6398 }
6399
6400+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6401+{
6402+ unsigned long result;
6403+
6404+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6405+ unsigned long temp;
6406+
6407+ __asm__ __volatile__(
6408+ " .set mips3 \n"
6409+ "1:" __LL "%1, %2 # local_add_return \n"
6410+ " addu %0, %1, %3 \n"
6411+ __SC "%0, %2 \n"
6412+ " beqzl %0, 1b \n"
6413+ " addu %0, %1, %3 \n"
6414+ " .set mips0 \n"
6415+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6416+ : "Ir" (i), "m" (l->a.counter)
6417+ : "memory");
6418+ } else if (kernel_uses_llsc) {
6419+ unsigned long temp;
6420+
6421+ __asm__ __volatile__(
6422+ " .set mips3 \n"
6423+ "1:" __LL "%1, %2 # local_add_return \n"
6424+ " addu %0, %1, %3 \n"
6425+ __SC "%0, %2 \n"
6426+ " beqz %0, 1b \n"
6427+ " addu %0, %1, %3 \n"
6428+ " .set mips0 \n"
6429+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6430+ : "Ir" (i), "m" (l->a.counter)
6431+ : "memory");
6432+ } else {
6433+ unsigned long flags;
6434+
6435+ local_irq_save(flags);
6436+ result = l->a.counter;
6437+ result += i;
6438+ l->a.counter = result;
6439+ local_irq_restore(flags);
6440+ }
6441+
6442+ return result;
6443+}
6444+
6445 static __inline__ long local_sub_return(long i, local_t * l)
6446 {
6447 unsigned long result;
6448@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6449
6450 #define local_cmpxchg(l, o, n) \
6451 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6452+#define local_cmpxchg_unchecked(l, o, n) \
6453+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6454 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6455
6456 /**
6457diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6458index 154b70a..426ae3d 100644
6459--- a/arch/mips/include/asm/page.h
6460+++ b/arch/mips/include/asm/page.h
6461@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6462 #ifdef CONFIG_CPU_MIPS32
6463 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6464 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6465- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6466+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6467 #else
6468 typedef struct { unsigned long long pte; } pte_t;
6469 #define pte_val(x) ((x).pte)
6470diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6471index b336037..5b874cc 100644
6472--- a/arch/mips/include/asm/pgalloc.h
6473+++ b/arch/mips/include/asm/pgalloc.h
6474@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6475 {
6476 set_pud(pud, __pud((unsigned long)pmd));
6477 }
6478+
6479+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6480+{
6481+ pud_populate(mm, pud, pmd);
6482+}
6483 #endif
6484
6485 /*
6486diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6487index 845016d..3303268 100644
6488--- a/arch/mips/include/asm/pgtable.h
6489+++ b/arch/mips/include/asm/pgtable.h
6490@@ -20,6 +20,9 @@
6491 #include <asm/io.h>
6492 #include <asm/pgtable-bits.h>
6493
6494+#define ktla_ktva(addr) (addr)
6495+#define ktva_ktla(addr) (addr)
6496+
6497 struct mm_struct;
6498 struct vm_area_struct;
6499
6500diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6501index e4440f9..8fb0005 100644
6502--- a/arch/mips/include/asm/thread_info.h
6503+++ b/arch/mips/include/asm/thread_info.h
6504@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6505 #define TIF_SECCOMP 4 /* secure computing */
6506 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6507 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6508+/* li takes a 32bit immediate */
6509+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6510+
6511 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6512 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6513 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6514@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
6515 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6516 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6517 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6518+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6519
6520 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6521 _TIF_SYSCALL_AUDIT | \
6522- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6523+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6524+ _TIF_GRSEC_SETXID)
6525
6526 /* work to do in syscall_trace_leave() */
6527 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6528- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6529+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6530
6531 /* work to do on interrupt/exception return */
6532 #define _TIF_WORK_MASK \
6533@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
6534 /* work to do on any return to u-space */
6535 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6536 _TIF_WORK_SYSCALL_EXIT | \
6537- _TIF_SYSCALL_TRACEPOINT)
6538+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6539
6540 /*
6541 * We stash processor id into a COP0 register to retrieve it fast
6542diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6543index bf8b324..cec5705 100644
6544--- a/arch/mips/include/asm/uaccess.h
6545+++ b/arch/mips/include/asm/uaccess.h
6546@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6547 __ok == 0; \
6548 })
6549
6550+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6551 #define access_ok(type, addr, size) \
6552 likely(__access_ok((addr), (size), __access_mask))
6553
6554diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6555index 1188e00..41cf144 100644
6556--- a/arch/mips/kernel/binfmt_elfn32.c
6557+++ b/arch/mips/kernel/binfmt_elfn32.c
6558@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6559 #undef ELF_ET_DYN_BASE
6560 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6561
6562+#ifdef CONFIG_PAX_ASLR
6563+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6564+
6565+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6566+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6567+#endif
6568+
6569 #include <asm/processor.h>
6570 #include <linux/module.h>
6571 #include <linux/elfcore.h>
6572diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6573index 9287678..f870e47 100644
6574--- a/arch/mips/kernel/binfmt_elfo32.c
6575+++ b/arch/mips/kernel/binfmt_elfo32.c
6576@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6577 #undef ELF_ET_DYN_BASE
6578 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6579
6580+#ifdef CONFIG_PAX_ASLR
6581+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6582+
6583+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6584+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6585+#endif
6586+
6587 #include <asm/processor.h>
6588
6589 #include <linux/module.h>
6590diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6591index a74ec3a..4f06f18 100644
6592--- a/arch/mips/kernel/i8259.c
6593+++ b/arch/mips/kernel/i8259.c
6594@@ -202,7 +202,7 @@ spurious_8259A_irq:
6595 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6596 spurious_irq_mask |= irqmask;
6597 }
6598- atomic_inc(&irq_err_count);
6599+ atomic_inc_unchecked(&irq_err_count);
6600 /*
6601 * Theoretically we do not have to handle this IRQ,
6602 * but in Linux this does not cause problems and is
6603diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6604index 44a1f79..2bd6aa3 100644
6605--- a/arch/mips/kernel/irq-gt641xx.c
6606+++ b/arch/mips/kernel/irq-gt641xx.c
6607@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6608 }
6609 }
6610
6611- atomic_inc(&irq_err_count);
6612+ atomic_inc_unchecked(&irq_err_count);
6613 }
6614
6615 void __init gt641xx_irq_init(void)
6616diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6617index d2bfbc2..a8eacd2 100644
6618--- a/arch/mips/kernel/irq.c
6619+++ b/arch/mips/kernel/irq.c
6620@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6621 printk("unexpected IRQ # %d\n", irq);
6622 }
6623
6624-atomic_t irq_err_count;
6625+atomic_unchecked_t irq_err_count;
6626
6627 int arch_show_interrupts(struct seq_file *p, int prec)
6628 {
6629- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6630+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6631 return 0;
6632 }
6633
6634 asmlinkage void spurious_interrupt(void)
6635 {
6636- atomic_inc(&irq_err_count);
6637+ atomic_inc_unchecked(&irq_err_count);
6638 }
6639
6640 void __init init_IRQ(void)
6641@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6642 #endif
6643 }
6644
6645+
6646 #ifdef DEBUG_STACKOVERFLOW
6647+extern void gr_handle_kernel_exploit(void);
6648+
6649 static inline void check_stack_overflow(void)
6650 {
6651 unsigned long sp;
6652@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6653 printk("do_IRQ: stack overflow: %ld\n",
6654 sp - sizeof(struct thread_info));
6655 dump_stack();
6656+ gr_handle_kernel_exploit();
6657 }
6658 }
6659 #else
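
irq_err_count is a pure statistics counter, so wrap-around is harmless; converting it to atomic_unchecked_t keeps PAX_REFCOUNT's overflow trap from firing on it. A user-space sketch of the distinction (type and function names model the kernel's, the bodies are illustrative; the kernel detects the overflow inside the LL/SC sequence itself rather than with a separate check):

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef struct { volatile int counter; } atomic_t;           /* overflow traps */
	typedef struct { volatile int counter; } atomic_unchecked_t; /* overflow wraps */

	static void atomic_inc(atomic_t *v)
	{
		if (v->counter == INT_MAX) {   /* PAX_REFCOUNT would trap here */
			fprintf(stderr, "refcount overflow\n");
			abort();
		}
		__atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
	}

	static void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* statistics counter: wrapping is benign, no trap */
		__atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
	}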
6660diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6661index 0614717..002fa43 100644
6662--- a/arch/mips/kernel/pm-cps.c
6663+++ b/arch/mips/kernel/pm-cps.c
6664@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6665 nc_core_ready_count = nc_addr;
6666
6667 /* Ensure ready_count is zero-initialised before the assembly runs */
6668- ACCESS_ONCE(*nc_core_ready_count) = 0;
6669+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6670 coupled_barrier(&per_cpu(pm_barrier, core), online);
6671
6672 /* Run the generated entry code */
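
ACCESS_ONCE_RW marks a deliberate volatile store: under the constification plugin the plain ACCESS_ONCE() form yields a const-qualified lvalue, so every intentional write has to opt in explicitly and stays greppable. A sketch modeled on the definitions grsecurity adds to compiler.h (assumed here for illustration):

	#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

	static int ready;

	void producer(void) { ACCESS_ONCE_RW(ready) = 1; }  /* store: _RW form */
	int  consumer(void) { return ACCESS_ONCE(ready); }  /* load: const form */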
6673diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6674index 85bff5d..39bc202 100644
6675--- a/arch/mips/kernel/process.c
6676+++ b/arch/mips/kernel/process.c
6677@@ -534,18 +534,6 @@ out:
6678 return pc;
6679 }
6680
6681-/*
6682- * Don't forget that the stack pointer must be aligned on a 8 bytes
6683- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6684- */
6685-unsigned long arch_align_stack(unsigned long sp)
6686-{
6687- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6688- sp -= get_random_int() & ~PAGE_MASK;
6689-
6690- return sp & ALMASK;
6691-}
6692-
6693 static void arch_dump_stack(void *info)
6694 {
6695 struct pt_regs *regs;
6696diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6697index 5104528..950bbdc 100644
6698--- a/arch/mips/kernel/ptrace.c
6699+++ b/arch/mips/kernel/ptrace.c
6700@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6701 return ret;
6702 }
6703
6704+#ifdef CONFIG_GRKERNSEC_SETXID
6705+extern void gr_delayed_cred_worker(void);
6706+#endif
6707+
6708 /*
6709 * Notification of system call entry/exit
6710 * - triggered by current->work.syscall_trace
6711@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6712 tracehook_report_syscall_entry(regs))
6713 ret = -1;
6714
6715+#ifdef CONFIG_GRKERNSEC_SETXID
6716+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6717+ gr_delayed_cred_worker();
6718+#endif
6719+
6720 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6721 trace_sys_enter(regs, regs->regs[2]);
6722
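
The flag is consumed with test-and-clear semantics, so the delayed credential update runs at most once per setxid, at the thread's next syscall entry. A user-space model of the handoff (the C11 atomics stand in for set_/test_and_clear_thread_flag; names mirror the patch):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int setxid_pending;

	static void mark_setxid(void)          /* setuid path: flag the task */
	{
		atomic_store(&setxid_pending, 1);
	}

	static void syscall_entry_hook(void)   /* next syscall entry: consume */
	{
		if (atomic_exchange(&setxid_pending, 0))
			puts("gr_delayed_cred_worker() would run here");
	}

	int main(void)
	{
		mark_setxid();
		syscall_entry_hook();   /* fires once */
		syscall_entry_hook();   /* flag already cleared: no-op */
		return 0;
	}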
6723diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6724index 07fc524..b9d7f28 100644
6725--- a/arch/mips/kernel/reset.c
6726+++ b/arch/mips/kernel/reset.c
6727@@ -13,6 +13,7 @@
6728 #include <linux/reboot.h>
6729
6730 #include <asm/reboot.h>
6731+#include <asm/bug.h>
6732
6733 /*
6734 * Urgs ... Too many MIPS machines to handle this in a generic way.
6735@@ -29,16 +30,19 @@ void machine_restart(char *command)
6736 {
6737 if (_machine_restart)
6738 _machine_restart(command);
6739+ BUG();
6740 }
6741
6742 void machine_halt(void)
6743 {
6744 if (_machine_halt)
6745 _machine_halt();
6746+ BUG();
6747 }
6748
6749 void machine_power_off(void)
6750 {
6751 if (pm_power_off)
6752 pm_power_off();
6753+ BUG();
6754 }
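
The _machine_* hooks may be NULL on boards without firmware support, and callers treat these functions as never returning; the added BUG() turns a silent fall-through back into the caller into a loud, diagnosable stop. A stand-alone sketch of the same contract (illustrative stand-ins, not the kernel's definitions):

	#include <stdio.h>
	#include <stdlib.h>

	#define BUG() do { \
		fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); \
		abort(); \
	} while (0)

	static void (*machine_hook)(void);   /* may be NULL on this board */

	void machine_halt(void)
	{
		if (machine_hook)
			machine_hook();
		/* hook missing or returned: fail loudly instead of falling
		 * through into code that assumes we never came back */
		BUG();
	}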
6755diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6756index 2242bdd..b284048 100644
6757--- a/arch/mips/kernel/sync-r4k.c
6758+++ b/arch/mips/kernel/sync-r4k.c
6759@@ -18,8 +18,8 @@
6760 #include <asm/mipsregs.h>
6761
6762 static atomic_t count_start_flag = ATOMIC_INIT(0);
6763-static atomic_t count_count_start = ATOMIC_INIT(0);
6764-static atomic_t count_count_stop = ATOMIC_INIT(0);
6765+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6766+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6767 static atomic_t count_reference = ATOMIC_INIT(0);
6768
6769 #define COUNTON 100
6770@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6771
6772 for (i = 0; i < NR_LOOPS; i++) {
6773 /* slaves loop on '!= 2' */
6774- while (atomic_read(&count_count_start) != 1)
6775+ while (atomic_read_unchecked(&count_count_start) != 1)
6776 mb();
6777- atomic_set(&count_count_stop, 0);
6778+ atomic_set_unchecked(&count_count_stop, 0);
6779 smp_wmb();
6780
6781 /* this lets the slaves write their count register */
6782- atomic_inc(&count_count_start);
6783+ atomic_inc_unchecked(&count_count_start);
6784
6785 /*
6786 * Everyone initialises count in the last loop:
6787@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6788 /*
6789 * Wait for all slaves to leave the synchronization point:
6790 */
6791- while (atomic_read(&count_count_stop) != 1)
6792+ while (atomic_read_unchecked(&count_count_stop) != 1)
6793 mb();
6794- atomic_set(&count_count_start, 0);
6795+ atomic_set_unchecked(&count_count_start, 0);
6796 smp_wmb();
6797- atomic_inc(&count_count_stop);
6798+ atomic_inc_unchecked(&count_count_stop);
6799 }
6800 /* Arrange for an interrupt in a short while */
6801 write_c0_compare(read_c0_count() + COUNTON);
6802@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6803 initcount = atomic_read(&count_reference);
6804
6805 for (i = 0; i < NR_LOOPS; i++) {
6806- atomic_inc(&count_count_start);
6807- while (atomic_read(&count_count_start) != 2)
6808+ atomic_inc_unchecked(&count_count_start);
6809+ while (atomic_read_unchecked(&count_count_start) != 2)
6810 mb();
6811
6812 /*
6813@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6814 if (i == NR_LOOPS-1)
6815 write_c0_count(initcount);
6816
6817- atomic_inc(&count_count_stop);
6818- while (atomic_read(&count_count_stop) != 2)
6819+ atomic_inc_unchecked(&count_count_stop);
6820+ while (atomic_read_unchecked(&count_count_stop) != 2)
6821 mb();
6822 }
6823 /* Arrange for an interrupt in a short while */
6824diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6825index c3b41e2..46c32e9 100644
6826--- a/arch/mips/kernel/traps.c
6827+++ b/arch/mips/kernel/traps.c
6828@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6829 siginfo_t info;
6830
6831 prev_state = exception_enter();
6832- die_if_kernel("Integer overflow", regs);
6833+ if (unlikely(!user_mode(regs))) {
6834+
6835+#ifdef CONFIG_PAX_REFCOUNT
6836+ if (fixup_exception(regs)) {
6837+ pax_report_refcount_overflow(regs);
6838+ exception_exit(prev_state);
6839+ return;
6840+ }
6841+#endif
6842+
6843+ die("Integer overflow", regs);
6844+ }
6845
6846 info.si_code = FPE_INTOVF;
6847 info.si_signo = SIGFPE;
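
Under PAX_REFCOUNT the checked atomics deliberately raise MIPS's integer-overflow exception, so do_ov must first try fixup_exception() and report a refcount overflow before treating a kernel-mode trap as fatal. A user-space analogue of the detect-and-saturate idea (a sketch, not the kernel path):

	#include <limits.h>
	#include <stdio.h>

	static int checked_inc(int *v)
	{
		int out;

		if (__builtin_add_overflow(*v, 1, &out)) { /* would trap in-kernel */
			fprintf(stderr, "refcount overflow detected\n");
			return *v;                         /* saturate */
		}
		return *v = out;
	}

	int main(void)
	{
		int c = INT_MAX - 1;
		checked_inc(&c);      /* ok: reaches INT_MAX */
		checked_inc(&c);      /* caught: stays at INT_MAX */
		printf("%d\n", c);
		return 0;
	}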
6848diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6849index 270bbd4..c01932a 100644
6850--- a/arch/mips/kvm/mips.c
6851+++ b/arch/mips/kvm/mips.c
6852@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6853 return r;
6854 }
6855
6856-int kvm_arch_init(void *opaque)
6857+int kvm_arch_init(const void *opaque)
6858 {
6859 if (kvm_mips_callbacks) {
6860 kvm_err("kvm: module already exists\n");
6861diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6862index 70ab5d6..62940fe 100644
6863--- a/arch/mips/mm/fault.c
6864+++ b/arch/mips/mm/fault.c
6865@@ -28,6 +28,23 @@
6866 #include <asm/highmem.h> /* For VMALLOC_END */
6867 #include <linux/kdebug.h>
6868
6869+#ifdef CONFIG_PAX_PAGEEXEC
6870+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6871+{
6872+ unsigned long i;
6873+
6874+ printk(KERN_ERR "PAX: bytes at PC: ");
6875+ for (i = 0; i < 5; i++) {
6876+ unsigned int c;
6877+ if (get_user(c, (unsigned int *)pc+i))
6878+ printk(KERN_CONT "???????? ");
6879+ else
6880+ printk(KERN_CONT "%08x ", c);
6881+ }
6882+ printk("\n");
6883+}
6884+#endif
6885+
6886 /*
6887 * This routine handles page faults. It determines the address,
6888 * and the problem, and then passes it off to one of the appropriate
6889@@ -201,6 +218,14 @@ bad_area:
6890 bad_area_nosemaphore:
6891 /* User mode accesses just cause a SIGSEGV */
6892 if (user_mode(regs)) {
6893+
6894+#ifdef CONFIG_PAX_PAGEEXEC
6895+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6896+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6897+ do_group_exit(SIGKILL);
6898+ }
6899+#endif
6900+
6901 tsk->thread.cp0_badvaddr = address;
6902 tsk->thread.error_code = write;
6903 #if 0
6904diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6905index f1baadd..5472dca 100644
6906--- a/arch/mips/mm/mmap.c
6907+++ b/arch/mips/mm/mmap.c
6908@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6909 struct vm_area_struct *vma;
6910 unsigned long addr = addr0;
6911 int do_color_align;
6912+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6913 struct vm_unmapped_area_info info;
6914
6915 if (unlikely(len > TASK_SIZE))
6916@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6917 do_color_align = 1;
6918
6919 /* requesting a specific address */
6920+
6921+#ifdef CONFIG_PAX_RANDMMAP
6922+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6923+#endif
6924+
6925 if (addr) {
6926 if (do_color_align)
6927 addr = COLOUR_ALIGN(addr, pgoff);
6928@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6929 addr = PAGE_ALIGN(addr);
6930
6931 vma = find_vma(mm, addr);
6932- if (TASK_SIZE - len >= addr &&
6933- (!vma || addr + len <= vma->vm_start))
6934+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6935 return addr;
6936 }
6937
6938 info.length = len;
6939 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6940 info.align_offset = pgoff << PAGE_SHIFT;
6941+ info.threadstack_offset = offset;
6942
6943 if (dir == DOWN) {
6944 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6945@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6946 {
6947 unsigned long random_factor = 0UL;
6948
6949+#ifdef CONFIG_PAX_RANDMMAP
6950+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6951+#endif
6952+
6953 if (current->flags & PF_RANDOMIZE) {
6954 random_factor = get_random_int();
6955 random_factor = random_factor << PAGE_SHIFT;
6956@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6957
6958 if (mmap_is_legacy()) {
6959 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6960+
6961+#ifdef CONFIG_PAX_RANDMMAP
6962+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6963+ mm->mmap_base += mm->delta_mmap;
6964+#endif
6965+
6966 mm->get_unmapped_area = arch_get_unmapped_area;
6967 } else {
6968 mm->mmap_base = mmap_base(random_factor);
6969+
6970+#ifdef CONFIG_PAX_RANDMMAP
6971+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6972+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6973+#endif
6974+
6975 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6976 }
6977 }
6978
6979-static inline unsigned long brk_rnd(void)
6980-{
6981- unsigned long rnd = get_random_int();
6982-
6983- rnd = rnd << PAGE_SHIFT;
6984- /* 8MB for 32bit, 256MB for 64bit */
6985- if (TASK_IS_32BIT_ADDR)
6986- rnd = rnd & 0x7ffffful;
6987- else
6988- rnd = rnd & 0xffffffful;
6989-
6990- return rnd;
6991-}
6992-
6993-unsigned long arch_randomize_brk(struct mm_struct *mm)
6994-{
6995- unsigned long base = mm->brk;
6996- unsigned long ret;
6997-
6998- ret = PAGE_ALIGN(base + brk_rnd());
6999-
7000- if (ret < mm->brk)
7001- return mm->brk;
7002-
7003- return ret;
7004-}
7005-
7006 int __virt_addr_valid(const volatile void *kaddr)
7007 {
7008 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
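
check_heap_stack_gap() replaces the open-coded "addr + len <= vma->vm_start" test and also accounts for the random thread-stack offset requested above. A simplified conceptual form (an assumption-laden sketch; the real helper additionally handles guard pages and grows-down stacks):

	#include <stdbool.h>

	struct vma { unsigned long vm_start, vm_end; };

	static bool check_gap(const struct vma *next, unsigned long addr,
			      unsigned long len, unsigned long offset)
	{
		if (!next)
			return true;    /* nothing mapped above: fits */
		/* require the mapping plus the random offset to stay below
		 * the next VMA, so thread stacks keep an unpredictable gap */
		return addr + len + offset <= next->vm_start;
	}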
7009diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7010index d07e041..bedb72b 100644
7011--- a/arch/mips/pci/pci-octeon.c
7012+++ b/arch/mips/pci/pci-octeon.c
7013@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7014
7015
7016 static struct pci_ops octeon_pci_ops = {
7017- octeon_read_config,
7018- octeon_write_config,
7019+ .read = octeon_read_config,
7020+ .write = octeon_write_config,
7021 };
7022
7023 static struct resource octeon_pci_mem_resource = {
7024diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7025index 5e36c33..eb4a17b 100644
7026--- a/arch/mips/pci/pcie-octeon.c
7027+++ b/arch/mips/pci/pcie-octeon.c
7028@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7029 }
7030
7031 static struct pci_ops octeon_pcie0_ops = {
7032- octeon_pcie0_read_config,
7033- octeon_pcie0_write_config,
7034+ .read = octeon_pcie0_read_config,
7035+ .write = octeon_pcie0_write_config,
7036 };
7037
7038 static struct resource octeon_pcie0_mem_resource = {
7039@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7040 };
7041
7042 static struct pci_ops octeon_pcie1_ops = {
7043- octeon_pcie1_read_config,
7044- octeon_pcie1_write_config,
7045+ .read = octeon_pcie1_read_config,
7046+ .write = octeon_pcie1_write_config,
7047 };
7048
7049 static struct resource octeon_pcie1_mem_resource = {
7050@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7051 };
7052
7053 static struct pci_ops octeon_dummy_ops = {
7054- octeon_dummy_read_config,
7055- octeon_dummy_write_config,
7056+ .read = octeon_dummy_read_config,
7057+ .write = octeon_dummy_write_config,
7058 };
7059
7060 static struct resource octeon_dummy_mem_resource = {
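
Positional initializers tie each function pointer to its field's position, which breaks silently if pci_ops gains or reorders members and defeats structure-layout randomization; designated initializers are immune to both. A minimal illustration with a stand-in struct:

	struct ops {
		int (*read)(int reg);
		int (*write)(int reg, int val);
	};

	static int my_read(int reg)           { return reg; }
	static int my_write(int reg, int val) { return reg + val; }

	/* positional form: meaning depends entirely on field order */
	static struct ops legacy = { my_read, my_write };

	/* designated form: stays correct however the layout changes */
	static struct ops safe = { .read = my_read, .write = my_write };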
7061diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7062index a2358b4..7cead4f 100644
7063--- a/arch/mips/sgi-ip27/ip27-nmi.c
7064+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7065@@ -187,9 +187,9 @@ void
7066 cont_nmi_dump(void)
7067 {
7068 #ifndef REAL_NMI_SIGNAL
7069- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7070+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7071
7072- atomic_inc(&nmied_cpus);
7073+ atomic_inc_unchecked(&nmied_cpus);
7074 #endif
7075 /*
7076 * Only allow 1 cpu to proceed
7077@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7078 udelay(10000);
7079 }
7080 #else
7081- while (atomic_read(&nmied_cpus) != num_online_cpus());
7082+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7083 #endif
7084
7085 /*
7086diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7087index a046b30..6799527 100644
7088--- a/arch/mips/sni/rm200.c
7089+++ b/arch/mips/sni/rm200.c
7090@@ -270,7 +270,7 @@ spurious_8259A_irq:
7091 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7092 spurious_irq_mask |= irqmask;
7093 }
7094- atomic_inc(&irq_err_count);
7095+ atomic_inc_unchecked(&irq_err_count);
7096 /*
7097 * Theoretically we do not have to handle this IRQ,
7098 * but in Linux this does not cause problems and is
7099diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7100index 41e873b..34d33a7 100644
7101--- a/arch/mips/vr41xx/common/icu.c
7102+++ b/arch/mips/vr41xx/common/icu.c
7103@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7104
7105 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7106
7107- atomic_inc(&irq_err_count);
7108+ atomic_inc_unchecked(&irq_err_count);
7109
7110 return -1;
7111 }
7112diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7113index ae0e4ee..e8f0692 100644
7114--- a/arch/mips/vr41xx/common/irq.c
7115+++ b/arch/mips/vr41xx/common/irq.c
7116@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7117 irq_cascade_t *cascade;
7118
7119 if (irq >= NR_IRQS) {
7120- atomic_inc(&irq_err_count);
7121+ atomic_inc_unchecked(&irq_err_count);
7122 return;
7123 }
7124
7125@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7126 ret = cascade->get_irq(irq);
7127 irq = ret;
7128 if (ret < 0)
7129- atomic_inc(&irq_err_count);
7130+ atomic_inc_unchecked(&irq_err_count);
7131 else
7132 irq_dispatch(irq);
7133 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7134diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7135index 967d144..db12197 100644
7136--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7137+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7138@@ -11,12 +11,14 @@
7139 #ifndef _ASM_PROC_CACHE_H
7140 #define _ASM_PROC_CACHE_H
7141
7142+#include <linux/const.h>
7143+
7144 /* L1 cache */
7145
7146 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7147 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7148-#define L1_CACHE_BYTES 16 /* bytes per entry */
7149 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7150+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7151 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7152
7153 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7154diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7155index bcb5df2..84fabd2 100644
7156--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7157+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7158@@ -16,13 +16,15 @@
7159 #ifndef _ASM_PROC_CACHE_H
7160 #define _ASM_PROC_CACHE_H
7161
7162+#include <linux/const.h>
7163+
7164 /*
7165 * L1 cache
7166 */
7167 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7168 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7169-#define L1_CACHE_BYTES 32 /* bytes per entry */
7170 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7171+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7172 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7173
7174 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7175diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7176index 4ce7a01..449202a 100644
7177--- a/arch/openrisc/include/asm/cache.h
7178+++ b/arch/openrisc/include/asm/cache.h
7179@@ -19,11 +19,13 @@
7180 #ifndef __ASM_OPENRISC_CACHE_H
7181 #define __ASM_OPENRISC_CACHE_H
7182
7183+#include <linux/const.h>
7184+
7185 /* FIXME: How can we replace these with values from the CPU...
7186 * they shouldn't be hard-coded!
7187 */
7188
7189-#define L1_CACHE_BYTES 16
7190 #define L1_CACHE_SHIFT 4
7191+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7192
7193 #endif /* __ASM_OPENRISC_CACHE_H */
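
Deriving L1_CACHE_BYTES from L1_CACHE_SHIFT removes any chance of the two constants drifting apart, and _AC() gives the value a UL suffix in C while leaving it a bare number in assembly. The idiom, as defined in <uapi/linux/const.h>:

	#ifdef __ASSEMBLY__
	#define _AC(X, Y)  X             /* assembler: no type suffixes */
	#else
	#define __AC(X, Y) (X##Y)        /* C: paste the suffix on */
	#define _AC(X, Y)  __AC(X, Y)
	#endif

	#define L1_CACHE_SHIFT 4
	#define L1_CACHE_BYTES (_AC(1, UL) << L1_CACHE_SHIFT)  /* 16UL, not int 16 */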
7194diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7195index 226f8ca..9d9b87d 100644
7196--- a/arch/parisc/include/asm/atomic.h
7197+++ b/arch/parisc/include/asm/atomic.h
7198@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7199 return dec;
7200 }
7201
7202+#define atomic64_read_unchecked(v) atomic64_read(v)
7203+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7204+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7205+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7206+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7207+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7208+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7209+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7210+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7211+
7212 #endif /* !CONFIG_64BIT */
7213
7214
7215diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7216index 47f11c7..3420df2 100644
7217--- a/arch/parisc/include/asm/cache.h
7218+++ b/arch/parisc/include/asm/cache.h
7219@@ -5,6 +5,7 @@
7220 #ifndef __ARCH_PARISC_CACHE_H
7221 #define __ARCH_PARISC_CACHE_H
7222
7223+#include <linux/const.h>
7224
7225 /*
7226 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7227@@ -15,13 +16,13 @@
7228 * just ruin performance.
7229 */
7230 #ifdef CONFIG_PA20
7231-#define L1_CACHE_BYTES 64
7232 #define L1_CACHE_SHIFT 6
7233 #else
7234-#define L1_CACHE_BYTES 32
7235 #define L1_CACHE_SHIFT 5
7236 #endif
7237
7238+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7239+
7240 #ifndef __ASSEMBLY__
7241
7242 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7243diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7244index 3391d06..c23a2cc 100644
7245--- a/arch/parisc/include/asm/elf.h
7246+++ b/arch/parisc/include/asm/elf.h
7247@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7248
7249 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7250
7251+#ifdef CONFIG_PAX_ASLR
7252+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7253+
7254+#define PAX_DELTA_MMAP_LEN 16
7255+#define PAX_DELTA_STACK_LEN 16
7256+#endif
7257+
7258 /* This yields a mask that user programs can use to figure out what
7259 instruction set this CPU supports. This could be done in user space,
7260 but it's not easy, and we've already done it here. */
7261diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7262index f213f5b..0af3e8e 100644
7263--- a/arch/parisc/include/asm/pgalloc.h
7264+++ b/arch/parisc/include/asm/pgalloc.h
7265@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7266 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7267 }
7268
7269+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7270+{
7271+ pgd_populate(mm, pgd, pmd);
7272+}
7273+
7274 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7275 {
7276 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7277@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7278 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7279 #define pmd_free(mm, x) do { } while (0)
7280 #define pgd_populate(mm, pmd, pte) BUG()
7281+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7282
7283 #endif
7284
7285diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7286index 22b89d1..ce34230 100644
7287--- a/arch/parisc/include/asm/pgtable.h
7288+++ b/arch/parisc/include/asm/pgtable.h
7289@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7290 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7291 #define PAGE_COPY PAGE_EXECREAD
7292 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7293+
7294+#ifdef CONFIG_PAX_PAGEEXEC
7295+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7296+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7297+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7298+#else
7299+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7300+# define PAGE_COPY_NOEXEC PAGE_COPY
7301+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7302+#endif
7303+
7304 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7305 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7306 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7307diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7308index a5cb070..8604ddc 100644
7309--- a/arch/parisc/include/asm/uaccess.h
7310+++ b/arch/parisc/include/asm/uaccess.h
7311@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7312 const void __user *from,
7313 unsigned long n)
7314 {
7315- int sz = __compiletime_object_size(to);
7316+ size_t sz = __compiletime_object_size(to);
7317 int ret = -EFAULT;
7318
7319- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7320+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7321 ret = __copy_from_user(to, from, n);
7322 else
7323 copy_from_user_overflow();
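
__compiletime_object_size() reports "unknown" as (size_t)-1, so both the variable and the sentinel comparison must be size_t-wide; an int would truncate large object sizes on 64-bit targets. A quick demonstration (LP64 assumed in the comments):

	#include <stddef.h>
	#include <stdio.h>

	int main(void)
	{
		size_t sz = (size_t)-1;   /* "object size unknown" sentinel */

		/* width-matched test, as in the patched code: */
		printf("%d\n", sz == (size_t)-1);           /* prints 1 */

		/* narrowing through int, as before the patch, loses any
		 * object size above INT_MAX on LP64: */
		printf("%d\n", (int)(size_t)0x100000000UL); /* prints 0 on LP64 */
		return 0;
	}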
7324diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7325index 5822e8e..bc5e638 100644
7326--- a/arch/parisc/kernel/module.c
7327+++ b/arch/parisc/kernel/module.c
7328@@ -98,16 +98,38 @@
7329
7330 /* three functions to determine where in the module core
7331 * or init pieces the location is */
7332+static inline int in_init_rx(struct module *me, void *loc)
7333+{
7334+ return (loc >= me->module_init_rx &&
7335+ loc < (me->module_init_rx + me->init_size_rx));
7336+}
7337+
7338+static inline int in_init_rw(struct module *me, void *loc)
7339+{
7340+ return (loc >= me->module_init_rw &&
7341+ loc < (me->module_init_rw + me->init_size_rw));
7342+}
7343+
7344 static inline int in_init(struct module *me, void *loc)
7345 {
7346- return (loc >= me->module_init &&
7347- loc <= (me->module_init + me->init_size));
7348+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7349+}
7350+
7351+static inline int in_core_rx(struct module *me, void *loc)
7352+{
7353+ return (loc >= me->module_core_rx &&
7354+ loc < (me->module_core_rx + me->core_size_rx));
7355+}
7356+
7357+static inline int in_core_rw(struct module *me, void *loc)
7358+{
7359+ return (loc >= me->module_core_rw &&
7360+ loc < (me->module_core_rw + me->core_size_rw));
7361 }
7362
7363 static inline int in_core(struct module *me, void *loc)
7364 {
7365- return (loc >= me->module_core &&
7366- loc <= (me->module_core + me->core_size));
7367+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7368 }
7369
7370 static inline int in_local(struct module *me, void *loc)
7371@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7372 }
7373
7374 /* align things a bit */
7375- me->core_size = ALIGN(me->core_size, 16);
7376- me->arch.got_offset = me->core_size;
7377- me->core_size += gots * sizeof(struct got_entry);
7378+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7379+ me->arch.got_offset = me->core_size_rw;
7380+ me->core_size_rw += gots * sizeof(struct got_entry);
7381
7382- me->core_size = ALIGN(me->core_size, 16);
7383- me->arch.fdesc_offset = me->core_size;
7384- me->core_size += fdescs * sizeof(Elf_Fdesc);
7385+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7386+ me->arch.fdesc_offset = me->core_size_rw;
7387+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7388
7389 me->arch.got_max = gots;
7390 me->arch.fdesc_max = fdescs;
7391@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7392
7393 BUG_ON(value == 0);
7394
7395- got = me->module_core + me->arch.got_offset;
7396+ got = me->module_core_rw + me->arch.got_offset;
7397 for (i = 0; got[i].addr; i++)
7398 if (got[i].addr == value)
7399 goto out;
7400@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7401 #ifdef CONFIG_64BIT
7402 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7403 {
7404- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7405+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7406
7407 if (!value) {
7408 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7409@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7410
7411 /* Create new one */
7412 fdesc->addr = value;
7413- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7414+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7415 return (Elf_Addr)fdesc;
7416 }
7417 #endif /* CONFIG_64BIT */
7418@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7419
7420 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7421 end = table + sechdrs[me->arch.unwind_section].sh_size;
7422- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7423+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7424
7425 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7426 me->arch.unwind_section, table, end, gp);
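
With PAX_KERNEXEC the module core and init regions are each split into an executable read-only half and a writable non-executable half, so membership tests must cover both ranges; note the hunk also tightens the upper bound from <= to <, making the interval half-open. A sketch of the resulting test with a stand-in layout struct (field names follow the patch's spirit, not its exact definitions):

	#include <stdbool.h>
	#include <stddef.h>

	struct module_layout {
		char *core_rx; size_t size_rx;  /* code: executable, read-only */
		char *core_rw; size_t size_rw;  /* data: writable, no-exec */
	};

	static bool in_range(const char *p, const char *base, size_t len)
	{
		return p >= base && p < base + len;  /* half-open, as patched */
	}

	static bool in_core(const struct module_layout *m, const void *loc)
	{
		const char *p = loc;

		return in_range(p, m->core_rx, m->size_rx) ||
		       in_range(p, m->core_rw, m->size_rw);
	}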
7427diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7428index e1ffea2..46ed66e 100644
7429--- a/arch/parisc/kernel/sys_parisc.c
7430+++ b/arch/parisc/kernel/sys_parisc.c
7431@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 unsigned long task_size = TASK_SIZE;
7433 int do_color_align, last_mmap;
7434 struct vm_unmapped_area_info info;
7435+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7436
7437 if (len > task_size)
7438 return -ENOMEM;
7439@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7440 goto found_addr;
7441 }
7442
7443+#ifdef CONFIG_PAX_RANDMMAP
7444+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7445+#endif
7446+
7447 if (addr) {
7448 if (do_color_align && last_mmap)
7449 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7450@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7451 info.high_limit = mmap_upper_limit();
7452 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7453 info.align_offset = shared_align_offset(last_mmap, pgoff);
7454+ info.threadstack_offset = offset;
7455 addr = vm_unmapped_area(&info);
7456
7457 found_addr:
7458@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 unsigned long addr = addr0;
7460 int do_color_align, last_mmap;
7461 struct vm_unmapped_area_info info;
7462+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7463
7464 #ifdef CONFIG_64BIT
7465 /* This should only ever run for 32-bit processes. */
7466@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7467 }
7468
7469 /* requesting a specific address */
7470+#ifdef CONFIG_PAX_RANDMMAP
7471+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7472+#endif
7473+
7474 if (addr) {
7475 if (do_color_align && last_mmap)
7476 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7477@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7478 info.high_limit = mm->mmap_base;
7479 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7480 info.align_offset = shared_align_offset(last_mmap, pgoff);
7481+ info.threadstack_offset = offset;
7482 addr = vm_unmapped_area(&info);
7483 if (!(addr & ~PAGE_MASK))
7484 goto found_addr;
7485@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7486 mm->mmap_legacy_base = mmap_legacy_base();
7487 mm->mmap_base = mmap_upper_limit();
7488
7489+#ifdef CONFIG_PAX_RANDMMAP
7490+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7491+ mm->mmap_legacy_base += mm->delta_mmap;
7492+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7493+ }
7494+#endif
7495+
7496 if (mmap_is_legacy()) {
7497 mm->mmap_base = mm->mmap_legacy_base;
7498 mm->get_unmapped_area = arch_get_unmapped_area;
7499diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7500index 47ee620..1107387 100644
7501--- a/arch/parisc/kernel/traps.c
7502+++ b/arch/parisc/kernel/traps.c
7503@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7504
7505 down_read(&current->mm->mmap_sem);
7506 vma = find_vma(current->mm,regs->iaoq[0]);
7507- if (vma && (regs->iaoq[0] >= vma->vm_start)
7508- && (vma->vm_flags & VM_EXEC)) {
7509-
7510+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7511 fault_address = regs->iaoq[0];
7512 fault_space = regs->iasq[0];
7513
7514diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7515index e5120e6..8ddb5cc 100644
7516--- a/arch/parisc/mm/fault.c
7517+++ b/arch/parisc/mm/fault.c
7518@@ -15,6 +15,7 @@
7519 #include <linux/sched.h>
7520 #include <linux/interrupt.h>
7521 #include <linux/module.h>
7522+#include <linux/unistd.h>
7523
7524 #include <asm/uaccess.h>
7525 #include <asm/traps.h>
7526@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7527 static unsigned long
7528 parisc_acctyp(unsigned long code, unsigned int inst)
7529 {
7530- if (code == 6 || code == 16)
7531+ if (code == 6 || code == 7 || code == 16)
7532 return VM_EXEC;
7533
7534 switch (inst & 0xf0000000) {
7535@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7536 }
7537 #endif
7538
7539+#ifdef CONFIG_PAX_PAGEEXEC
7540+/*
7541+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7542+ *
7543+ * returns 1 when task should be killed
7544+ * 2 when rt_sigreturn trampoline was detected
7545+ * 3 when unpatched PLT trampoline was detected
7546+ */
7547+static int pax_handle_fetch_fault(struct pt_regs *regs)
7548+{
7549+
7550+#ifdef CONFIG_PAX_EMUPLT
7551+ int err;
7552+
7553+ do { /* PaX: unpatched PLT emulation */
7554+ unsigned int bl, depwi;
7555+
7556+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7557+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7558+
7559+ if (err)
7560+ break;
7561+
7562+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7563+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7564+
7565+ err = get_user(ldw, (unsigned int *)addr);
7566+ err |= get_user(bv, (unsigned int *)(addr+4));
7567+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7568+
7569+ if (err)
7570+ break;
7571+
7572+ if (ldw == 0x0E801096U &&
7573+ bv == 0xEAC0C000U &&
7574+ ldw2 == 0x0E881095U)
7575+ {
7576+ unsigned int resolver, map;
7577+
7578+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7579+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7580+ if (err)
7581+ break;
7582+
7583+ regs->gr[20] = instruction_pointer(regs)+8;
7584+ regs->gr[21] = map;
7585+ regs->gr[22] = resolver;
7586+ regs->iaoq[0] = resolver | 3UL;
7587+ regs->iaoq[1] = regs->iaoq[0] + 4;
7588+ return 3;
7589+ }
7590+ }
7591+ } while (0);
7592+#endif
7593+
7594+#ifdef CONFIG_PAX_EMUTRAMP
7595+
7596+#ifndef CONFIG_PAX_EMUSIGRT
7597+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7598+ return 1;
7599+#endif
7600+
7601+ do { /* PaX: rt_sigreturn emulation */
7602+ unsigned int ldi1, ldi2, bel, nop;
7603+
7604+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7605+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7606+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7607+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7608+
7609+ if (err)
7610+ break;
7611+
7612+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7613+ ldi2 == 0x3414015AU &&
7614+ bel == 0xE4008200U &&
7615+ nop == 0x08000240U)
7616+ {
7617+ regs->gr[25] = (ldi1 & 2) >> 1;
7618+ regs->gr[20] = __NR_rt_sigreturn;
7619+ regs->gr[31] = regs->iaoq[1] + 16;
7620+ regs->sr[0] = regs->iasq[1];
7621+ regs->iaoq[0] = 0x100UL;
7622+ regs->iaoq[1] = regs->iaoq[0] + 4;
7623+ regs->iasq[0] = regs->sr[2];
7624+ regs->iasq[1] = regs->sr[2];
7625+ return 2;
7626+ }
7627+ } while (0);
7628+#endif
7629+
7630+ return 1;
7631+}
7632+
7633+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7634+{
7635+ unsigned long i;
7636+
7637+ printk(KERN_ERR "PAX: bytes at PC: ");
7638+ for (i = 0; i < 5; i++) {
7639+ unsigned int c;
7640+ if (get_user(c, (unsigned int *)pc+i))
7641+ printk(KERN_CONT "???????? ");
7642+ else
7643+ printk(KERN_CONT "%08x ", c);
7644+ }
7645+ printk("\n");
7646+}
7647+#endif
7648+
7649 int fixup_exception(struct pt_regs *regs)
7650 {
7651 const struct exception_table_entry *fix;
7652@@ -234,8 +345,33 @@ retry:
7653
7654 good_area:
7655
7656- if ((vma->vm_flags & acc_type) != acc_type)
7657+ if ((vma->vm_flags & acc_type) != acc_type) {
7658+
7659+#ifdef CONFIG_PAX_PAGEEXEC
7660+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7661+ (address & ~3UL) == instruction_pointer(regs))
7662+ {
7663+ up_read(&mm->mmap_sem);
7664+ switch (pax_handle_fetch_fault(regs)) {
7665+
7666+#ifdef CONFIG_PAX_EMUPLT
7667+ case 3:
7668+ return;
7669+#endif
7670+
7671+#ifdef CONFIG_PAX_EMUTRAMP
7672+ case 2:
7673+ return;
7674+#endif
7675+
7676+ }
7677+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7678+ do_group_exit(SIGKILL);
7679+ }
7680+#endif
7681+
7682 goto bad_area;
7683+ }
7684
7685 /*
7686 * If for any reason at all we couldn't handle the fault, make
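
pax_handle_fetch_fault() above works by fetching the instruction words around the faulting PC with get_user() and matching them against the exact unpatched-PLT and rt_sigreturn trampoline encodings before emulating them. A conceptual sketch of the fetch-and-match idiom (the constants below are placeholders, not parisc opcodes):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	/* stand-in for get_user(): 0 on success, nonzero on fault */
	static int fetch_user_word(uint32_t *dst, const uint32_t *uaddr)
	{
		memcpy(dst, uaddr, sizeof(*dst));  /* kernel would fault-check */
		return 0;
	}

	static bool matches_trampoline(const uint32_t *pc)
	{
		uint32_t w0, w1;

		if (fetch_user_word(&w0, pc) || fetch_user_word(&w1, pc + 1))
			return false;   /* unreadable: not our pattern */

		/* placeholder signature, two exact instruction encodings */
		return w0 == 0xDEADBEEFu && w1 == 0xFEEDFACEu;
	}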
7687diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7688index a2a168e..e484682 100644
7689--- a/arch/powerpc/Kconfig
7690+++ b/arch/powerpc/Kconfig
7691@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7692 config KEXEC
7693 bool "kexec system call"
7694 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7695+ depends on !GRKERNSEC_KMEM
7696 help
7697 kexec is a system call that implements the ability to shutdown your
7698 current kernel, and to start another kernel. It is like a reboot
7699diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7700index 512d278..d31fadd 100644
7701--- a/arch/powerpc/include/asm/atomic.h
7702+++ b/arch/powerpc/include/asm/atomic.h
7703@@ -12,6 +12,11 @@
7704
7705 #define ATOMIC_INIT(i) { (i) }
7706
7707+#define _ASM_EXTABLE(from, to) \
7708+" .section __ex_table,\"a\"\n" \
7709+ PPC_LONG" " #from ", " #to"\n" \
7710+" .previous\n"
7711+
7712 static __inline__ int atomic_read(const atomic_t *v)
7713 {
7714 int t;
7715@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7716 return t;
7717 }
7718
7719+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7720+{
7721+ int t;
7722+
7723+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7724+
7725+ return t;
7726+}
7727+
7728 static __inline__ void atomic_set(atomic_t *v, int i)
7729 {
7730 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7731 }
7732
7733-#define ATOMIC_OP(op, asm_op) \
7734-static __inline__ void atomic_##op(int a, atomic_t *v) \
7735+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7736+{
7737+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7738+}
7739+
7740+#ifdef CONFIG_PAX_REFCOUNT
7741+#define __REFCOUNT_OP(op) op##o.
7742+#define __OVERFLOW_PRE \
7743+ " mcrxr cr0\n"
7744+#define __OVERFLOW_POST \
7745+ " bf 4*cr0+so, 3f\n" \
7746+ "2: .long 0x00c00b00\n" \
7747+ "3:\n"
7748+#define __OVERFLOW_EXTABLE \
7749+ "\n4:\n"
7750+ _ASM_EXTABLE(2b, 4b)
7751+#else
7752+#define __REFCOUNT_OP(op) op
7753+#define __OVERFLOW_PRE
7754+#define __OVERFLOW_POST
7755+#define __OVERFLOW_EXTABLE
7756+#endif
7757+
7758+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7759+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7760 { \
7761 int t; \
7762 \
7763 __asm__ __volatile__( \
7764-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7765+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7766+ pre_op \
7767 #asm_op " %0,%2,%0\n" \
7768+ post_op \
7769 PPC405_ERR77(0,%3) \
7770 " stwcx. %0,0,%3 \n" \
7771 " bne- 1b\n" \
7772+ extable \
7773 : "=&r" (t), "+m" (v->counter) \
7774 : "r" (a), "r" (&v->counter) \
7775 : "cc"); \
7776 } \
7777
7778-#define ATOMIC_OP_RETURN(op, asm_op) \
7779-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7780+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7781+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7782+
7783+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7784+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7785 { \
7786 int t; \
7787 \
7788 __asm__ __volatile__( \
7789 PPC_ATOMIC_ENTRY_BARRIER \
7790-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7791+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7792+ pre_op \
7793 #asm_op " %0,%1,%0\n" \
7794+ post_op \
7795 PPC405_ERR77(0,%2) \
7796 " stwcx. %0,0,%2 \n" \
7797 " bne- 1b\n" \
7798+ extable \
7799 PPC_ATOMIC_EXIT_BARRIER \
7800 : "=&r" (t) \
7801 : "r" (a), "r" (&v->counter) \
7802@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7803 return t; \
7804 }
7805
7806+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7807+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7808+
7809 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7810
7811 ATOMIC_OPS(add, add)
7812@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7813
7814 #undef ATOMIC_OPS
7815 #undef ATOMIC_OP_RETURN
7816+#undef __ATOMIC_OP_RETURN
7817 #undef ATOMIC_OP
7818+#undef __ATOMIC_OP
7819
7820 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7821
7822-static __inline__ void atomic_inc(atomic_t *v)
7823-{
7824- int t;
7825+/*
7826+ * atomic_inc - increment atomic variable
7827+ * @v: pointer of type atomic_t
7828+ *
7829+ * Atomically increments @v by 1
7830+ */
7831+#define atomic_inc(v) atomic_add(1, (v))
7832+#define atomic_inc_return(v) atomic_add_return(1, (v))
7833
7834- __asm__ __volatile__(
7835-"1: lwarx %0,0,%2 # atomic_inc\n\
7836- addic %0,%0,1\n"
7837- PPC405_ERR77(0,%2)
7838-" stwcx. %0,0,%2 \n\
7839- bne- 1b"
7840- : "=&r" (t), "+m" (v->counter)
7841- : "r" (&v->counter)
7842- : "cc", "xer");
7843+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7844+{
7845+ atomic_add_unchecked(1, v);
7846 }
7847
7848-static __inline__ int atomic_inc_return(atomic_t *v)
7849+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7850 {
7851- int t;
7852-
7853- __asm__ __volatile__(
7854- PPC_ATOMIC_ENTRY_BARRIER
7855-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7856- addic %0,%0,1\n"
7857- PPC405_ERR77(0,%1)
7858-" stwcx. %0,0,%1 \n\
7859- bne- 1b"
7860- PPC_ATOMIC_EXIT_BARRIER
7861- : "=&r" (t)
7862- : "r" (&v->counter)
7863- : "cc", "xer", "memory");
7864-
7865- return t;
7866+ return atomic_add_return_unchecked(1, v);
7867 }
7868
7869 /*
7870@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7871 */
7872 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7873
7874-static __inline__ void atomic_dec(atomic_t *v)
7875+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7876 {
7877- int t;
7878-
7879- __asm__ __volatile__(
7880-"1: lwarx %0,0,%2 # atomic_dec\n\
7881- addic %0,%0,-1\n"
7882- PPC405_ERR77(0,%2)\
7883-" stwcx. %0,0,%2\n\
7884- bne- 1b"
7885- : "=&r" (t), "+m" (v->counter)
7886- : "r" (&v->counter)
7887- : "cc", "xer");
7888+ return atomic_add_return_unchecked(1, v) == 0;
7889 }
7890
7891-static __inline__ int atomic_dec_return(atomic_t *v)
7892+/*
7893+ * atomic_dec - decrement atomic variable
7894+ * @v: pointer of type atomic_t
7895+ *
7896+ * Atomically decrements @v by 1
7897+ */
7898+#define atomic_dec(v) atomic_sub(1, (v))
7899+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7900+
7901+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7902 {
7903- int t;
7904-
7905- __asm__ __volatile__(
7906- PPC_ATOMIC_ENTRY_BARRIER
7907-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7908- addic %0,%0,-1\n"
7909- PPC405_ERR77(0,%1)
7910-" stwcx. %0,0,%1\n\
7911- bne- 1b"
7912- PPC_ATOMIC_EXIT_BARRIER
7913- : "=&r" (t)
7914- : "r" (&v->counter)
7915- : "cc", "xer", "memory");
7916-
7917- return t;
7918+ atomic_sub_unchecked(1, v);
7919 }
7920
7921 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7922 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7923
7924+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7925+{
7926+ return cmpxchg(&(v->counter), old, new);
7927+}
7928+
7929+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7930+{
7931+ return xchg(&(v->counter), new);
7932+}
7933+
7934 /**
7935 * __atomic_add_unless - add unless the number is a given value
7936 * @v: pointer of type atomic_t
7937@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7938 PPC_ATOMIC_ENTRY_BARRIER
7939 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7940 cmpw 0,%0,%3 \n\
7941- beq- 2f \n\
7942- add %0,%2,%0 \n"
7943+ beq- 2f \n"
7944+
7945+#ifdef CONFIG_PAX_REFCOUNT
7946+" mcrxr cr0\n"
7947+" addo. %0,%2,%0\n"
7948+" bf 4*cr0+so, 4f\n"
7949+"3:.long " "0x00c00b00""\n"
7950+"4:\n"
7951+#else
7952+ "add %0,%2,%0 \n"
7953+#endif
7954+
7955 PPC405_ERR77(0,%2)
7956 " stwcx. %0,0,%1 \n\
7957 bne- 1b \n"
7958+"5:"
7959+
7960+#ifdef CONFIG_PAX_REFCOUNT
7961+ _ASM_EXTABLE(3b, 5b)
7962+#endif
7963+
7964 PPC_ATOMIC_EXIT_BARRIER
7965 " subf %0,%2,%0 \n\
7966 2:"
7967@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7968 }
7969 #define atomic_dec_if_positive atomic_dec_if_positive
7970
7971+#define smp_mb__before_atomic_dec() smp_mb()
7972+#define smp_mb__after_atomic_dec() smp_mb()
7973+#define smp_mb__before_atomic_inc() smp_mb()
7974+#define smp_mb__after_atomic_inc() smp_mb()
7975+
7976 #ifdef __powerpc64__
7977
7978 #define ATOMIC64_INIT(i) { (i) }
7979@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7980 return t;
7981 }
7982
7983+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7984+{
7985+ long t;
7986+
7987+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7988+
7989+ return t;
7990+}
7991+
7992 static __inline__ void atomic64_set(atomic64_t *v, long i)
7993 {
7994 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7995 }
7996
7997-#define ATOMIC64_OP(op, asm_op) \
7998-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7999+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8000+{
8001+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8002+}
8003+
8004+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
8005+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8006 { \
8007 long t; \
8008 \
8009 __asm__ __volatile__( \
8010 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8011+ pre_op \
8012 #asm_op " %0,%2,%0\n" \
8013+ post_op \
8014 " stdcx. %0,0,%3 \n" \
8015 " bne- 1b\n" \
8016+ extable \
8017 : "=&r" (t), "+m" (v->counter) \
8018 : "r" (a), "r" (&v->counter) \
8019 : "cc"); \
8020 }
8021
8022-#define ATOMIC64_OP_RETURN(op, asm_op) \
8023-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8024+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8025+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8026+
8027+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8028+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8029 { \
8030 long t; \
8031 \
8032 __asm__ __volatile__( \
8033 PPC_ATOMIC_ENTRY_BARRIER \
8034 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8035+ pre_op \
8036 #asm_op " %0,%1,%0\n" \
8037+ post_op \
8038 " stdcx. %0,0,%2 \n" \
8039 " bne- 1b\n" \
8040+ extable \
8041 PPC_ATOMIC_EXIT_BARRIER \
8042 : "=&r" (t) \
8043 : "r" (a), "r" (&v->counter) \
8044@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8045 return t; \
8046 }
8047
8048+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8049+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8050+
8051 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8052
8053 ATOMIC64_OPS(add, add)
8054@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8055
8056 #undef ATOMIC64_OPS
8057 #undef ATOMIC64_OP_RETURN
8058+#undef __ATOMIC64_OP_RETURN
8059 #undef ATOMIC64_OP
8060+#undef __ATOMIC64_OP
8061+#undef __OVERFLOW_EXTABLE
8062+#undef __OVERFLOW_POST
8063+#undef __OVERFLOW_PRE
8064+#undef __REFCOUNT_OP
8065
8066 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8067
8068-static __inline__ void atomic64_inc(atomic64_t *v)
8069-{
8070- long t;
8071+/*
8072+ * atomic64_inc - increment atomic variable
8073+ * @v: pointer of type atomic64_t
8074+ *
8075+ * Atomically increments @v by 1
8076+ */
8077+#define atomic64_inc(v) atomic64_add(1, (v))
8078+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8079
8080- __asm__ __volatile__(
8081-"1: ldarx %0,0,%2 # atomic64_inc\n\
8082- addic %0,%0,1\n\
8083- stdcx. %0,0,%2 \n\
8084- bne- 1b"
8085- : "=&r" (t), "+m" (v->counter)
8086- : "r" (&v->counter)
8087- : "cc", "xer");
8088+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8089+{
8090+ atomic64_add_unchecked(1, v);
8091 }
8092
8093-static __inline__ long atomic64_inc_return(atomic64_t *v)
8094+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8095 {
8096- long t;
8097-
8098- __asm__ __volatile__(
8099- PPC_ATOMIC_ENTRY_BARRIER
8100-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8101- addic %0,%0,1\n\
8102- stdcx. %0,0,%1 \n\
8103- bne- 1b"
8104- PPC_ATOMIC_EXIT_BARRIER
8105- : "=&r" (t)
8106- : "r" (&v->counter)
8107- : "cc", "xer", "memory");
8108-
8109- return t;
8110+ return atomic64_add_return_unchecked(1, v);
8111 }
8112
8113 /*
8114@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8115 */
8116 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8117
8118-static __inline__ void atomic64_dec(atomic64_t *v)
8119+/*
8120+ * atomic64_dec - decrement atomic variable
8121+ * @v: pointer of type atomic64_t
8122+ *
8123+ * Atomically decrements @v by 1
8124+ */
8125+#define atomic64_dec(v) atomic64_sub(1, (v))
8126+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8127+
8128+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8129 {
8130- long t;
8131-
8132- __asm__ __volatile__(
8133-"1: ldarx %0,0,%2 # atomic64_dec\n\
8134- addic %0,%0,-1\n\
8135- stdcx. %0,0,%2\n\
8136- bne- 1b"
8137- : "=&r" (t), "+m" (v->counter)
8138- : "r" (&v->counter)
8139- : "cc", "xer");
8140-}
8141-
8142-static __inline__ long atomic64_dec_return(atomic64_t *v)
8143-{
8144- long t;
8145-
8146- __asm__ __volatile__(
8147- PPC_ATOMIC_ENTRY_BARRIER
8148-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8149- addic %0,%0,-1\n\
8150- stdcx. %0,0,%1\n\
8151- bne- 1b"
8152- PPC_ATOMIC_EXIT_BARRIER
8153- : "=&r" (t)
8154- : "r" (&v->counter)
8155- : "cc", "xer", "memory");
8156-
8157- return t;
8158+ atomic64_sub_unchecked(1, v);
8159 }
8160
8161 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8162@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8163 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8164 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8165
8166+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8167+{
8168+ return cmpxchg(&(v->counter), old, new);
8169+}
8170+
8171+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8172+{
8173+ return xchg(&(v->counter), new);
8174+}
8175+
8176 /**
8177 * atomic64_add_unless - add unless the number is a given value
8178 * @v: pointer of type atomic64_t
8179@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8180
8181 __asm__ __volatile__ (
8182 PPC_ATOMIC_ENTRY_BARRIER
8183-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8184+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8185 cmpd 0,%0,%3 \n\
8186- beq- 2f \n\
8187- add %0,%2,%0 \n"
8188+ beq- 2f \n"
8189+
8190+#ifdef CONFIG_PAX_REFCOUNT
8191+" mcrxr cr0\n"
8192+" addo. %0,%2,%0\n"
8193+" bf 4*cr0+so, 4f\n"
8194+"3:.long " "0x00c00b00""\n"
8195+"4:\n"
8196+#else
8197+ "add %0,%2,%0 \n"
8198+#endif
8199+
8200 " stdcx. %0,0,%1 \n\
8201 bne- 1b \n"
8202 PPC_ATOMIC_EXIT_BARRIER
8203+"5:"
8204+
8205+#ifdef CONFIG_PAX_REFCOUNT
8206+ _ASM_EXTABLE(3b, 5b)
8207+#endif
8208+
8209 " subf %0,%2,%0 \n\
8210 2:"
8211 : "=&r" (t)
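
The checked sequences above work in three steps: mcrxr moves XER's summary-overflow/overflow bits into cr0 and clears them, addo. re-sets them if the add overflows, and "bf 4*cr0+so" skips the trap word when no overflow occurred; otherwise the ".long 0x00c00b00" encoding traps, and the _ASM_EXTABLE pair routes the program-check handler to the recovery label. A conceptual sketch of what such a table entry records (absolute-address form with a linear search for brevity; the kernel keeps the table sorted and binary-searches it, and some architectures store relative offsets):

	#include <stddef.h>

	struct exception_table_entry {
		unsigned long insn;   /* address of the insn that may trap */
		unsigned long fixup;  /* where execution resumes afterwards */
	};

	static unsigned long
	search_extable(const struct exception_table_entry *tbl,
		       size_t n, unsigned long faulting_pc)
	{
		for (size_t i = 0; i < n; i++)
			if (tbl[i].insn == faulting_pc)
				return tbl[i].fixup;
		return 0;   /* no entry: the trap is fatal */
	}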
8212diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8213index a3bf5be..e03ba81 100644
8214--- a/arch/powerpc/include/asm/barrier.h
8215+++ b/arch/powerpc/include/asm/barrier.h
8216@@ -76,7 +76,7 @@
8217 do { \
8218 compiletime_assert_atomic_type(*p); \
8219 smp_lwsync(); \
8220- ACCESS_ONCE(*p) = (v); \
8221+ ACCESS_ONCE_RW(*p) = (v); \
8222 } while (0)
8223
8224 #define smp_load_acquire(p) \
8225diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8226index 34a05a1..a1f2c67 100644
8227--- a/arch/powerpc/include/asm/cache.h
8228+++ b/arch/powerpc/include/asm/cache.h
8229@@ -4,6 +4,7 @@
8230 #ifdef __KERNEL__
8231
8232 #include <asm/reg.h>
8233+#include <linux/const.h>
8234
8235 /* bytes per L1 cache line */
8236 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8237@@ -23,7 +24,7 @@
8238 #define L1_CACHE_SHIFT 7
8239 #endif
8240
8241-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8242+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8243
8244 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8245
8246diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8247index 57d289a..b36c98c 100644
8248--- a/arch/powerpc/include/asm/elf.h
8249+++ b/arch/powerpc/include/asm/elf.h
8250@@ -30,6 +30,18 @@
8251
8252 #define ELF_ET_DYN_BASE 0x20000000
8253
8254+#ifdef CONFIG_PAX_ASLR
8255+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8256+
8257+#ifdef __powerpc64__
8258+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8259+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8260+#else
8261+#define PAX_DELTA_MMAP_LEN 15
8262+#define PAX_DELTA_STACK_LEN 15
8263+#endif
8264+#endif
8265+
8266 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8267
8268 /*
8269@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8270 (0x7ff >> (PAGE_SHIFT - 12)) : \
8271 (0x3ffff >> (PAGE_SHIFT - 12)))
8272
8273-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8274-#define arch_randomize_brk arch_randomize_brk
8275-
8276-
8277 #ifdef CONFIG_SPU_BASE
8278 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8279 #define NT_SPU 1
8280diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8281index 8196e9c..d83a9f3 100644
8282--- a/arch/powerpc/include/asm/exec.h
8283+++ b/arch/powerpc/include/asm/exec.h
8284@@ -4,6 +4,6 @@
8285 #ifndef _ASM_POWERPC_EXEC_H
8286 #define _ASM_POWERPC_EXEC_H
8287
8288-extern unsigned long arch_align_stack(unsigned long sp);
8289+#define arch_align_stack(x) ((x) & ~0xfUL)
8290
8291 #endif /* _ASM_POWERPC_EXEC_H */
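
Replacing the helper with ((x) & ~0xfUL) drops the per-exec stack-pointer jitter (PaX randomizes the stack base elsewhere) while keeping the 16-byte alignment the ABI requires. A quick check of the macro's semantics:

	#include <assert.h>

	int main(void)
	{
		unsigned long sp = 0x7fffffffe9b3UL;
		unsigned long aligned = sp & ~0xfUL;

		assert((aligned & 0xf) == 0);           /* 16-byte aligned */
		assert(aligned <= sp && sp - aligned < 16);  /* rounds down */
		return 0;
	}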
8292diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8293index 5acabbd..7ea14fa 100644
8294--- a/arch/powerpc/include/asm/kmap_types.h
8295+++ b/arch/powerpc/include/asm/kmap_types.h
8296@@ -10,7 +10,7 @@
8297 * 2 of the License, or (at your option) any later version.
8298 */
8299
8300-#define KM_TYPE_NR 16
8301+#define KM_TYPE_NR 17
8302
8303 #endif /* __KERNEL__ */
8304 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8305diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8306index b8da913..c02b593 100644
8307--- a/arch/powerpc/include/asm/local.h
8308+++ b/arch/powerpc/include/asm/local.h
8309@@ -9,21 +9,65 @@ typedef struct
8310 atomic_long_t a;
8311 } local_t;
8312
8313+typedef struct
8314+{
8315+ atomic_long_unchecked_t a;
8316+} local_unchecked_t;
8317+
8318 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8319
8320 #define local_read(l) atomic_long_read(&(l)->a)
8321+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8322 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8323+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8324
8325 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8326+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8327 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8328+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8329 #define local_inc(l) atomic_long_inc(&(l)->a)
8330+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8331 #define local_dec(l) atomic_long_dec(&(l)->a)
8332+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8333
8334 static __inline__ long local_add_return(long a, local_t *l)
8335 {
8336 long t;
8337
8338 __asm__ __volatile__(
8339+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8340+
8341+#ifdef CONFIG_PAX_REFCOUNT
8342+" mcrxr cr0\n"
8343+" addo. %0,%1,%0\n"
8344+" bf 4*cr0+so, 3f\n"
8345+"2:.long " "0x00c00b00""\n"
8346+#else
8347+" add %0,%1,%0\n"
8348+#endif
8349+
8350+"3:\n"
8351+ PPC405_ERR77(0,%2)
8352+ PPC_STLCX "%0,0,%2 \n\
8353+ bne- 1b"
8354+
8355+#ifdef CONFIG_PAX_REFCOUNT
8356+"\n4:\n"
8357+ _ASM_EXTABLE(2b, 4b)
8358+#endif
8359+
8360+ : "=&r" (t)
8361+ : "r" (a), "r" (&(l->a.counter))
8362+ : "cc", "memory");
8363+
8364+ return t;
8365+}
8366+
8367+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8368+{
8369+ long t;
8370+
8371+ __asm__ __volatile__(
8372 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8373 add %0,%1,%0\n"
8374 PPC405_ERR77(0,%2)
8375@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8376
8377 #define local_cmpxchg(l, o, n) \
8378 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8379+#define local_cmpxchg_unchecked(l, o, n) \
8380+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8381 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8382
8383 /**
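
The PAX_REFCOUNT branch in local_add_return() uses addo. to set the overflow condition and a reserved word (0x00c00b00) that traps as an illegal instruction when it does, with the exception table entry steering the trap handler back past the store. A portable model of the same policy, using the GCC/Clang __builtin_add_overflow builtin instead of PowerPC asm:

#include <stdio.h>
#include <limits.h>

static long checked_add_return(long a, long *counter)
{
	long t;

	if (__builtin_add_overflow(*counter, a, &t)) {
		/* kernel path: trap, pax_report_refcount_overflow(),
		 * resume past the store so the counter never wraps */
		return *counter;
	}
	*counter = t;
	return t;
}

int main(void)
{
	long c = LONG_MAX;

	checked_add_return(1, &c);
	printf("%ld\n", c);	/* still LONG_MAX, not wrapped to LONG_MIN */
	return 0;
}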
8384diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8385index 8565c25..2865190 100644
8386--- a/arch/powerpc/include/asm/mman.h
8387+++ b/arch/powerpc/include/asm/mman.h
8388@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8389 }
8390 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8391
8392-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8393+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8394 {
8395 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8396 }
8397diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8398index 69c0598..2c56964 100644
8399--- a/arch/powerpc/include/asm/page.h
8400+++ b/arch/powerpc/include/asm/page.h
8401@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8402 * and needs to be executable. This means the whole heap ends
8403 * up being executable.
8404 */
8405-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8406- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8407+#define VM_DATA_DEFAULT_FLAGS32 \
8408+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8409+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8410
8411 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8412 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8413@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8414 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8415 #endif
8416
8417+#define ktla_ktva(addr) (addr)
8418+#define ktva_ktla(addr) (addr)
8419+
8420 #ifndef CONFIG_PPC_BOOK3S_64
8421 /*
8422 * Use the top bit of the higher-level page table entries to indicate whether
8423diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8424index d908a46..3753f71 100644
8425--- a/arch/powerpc/include/asm/page_64.h
8426+++ b/arch/powerpc/include/asm/page_64.h
8427@@ -172,15 +172,18 @@ do { \
8428 * stack by default, so in the absence of a PT_GNU_STACK program header
8429 * we turn execute permission off.
8430 */
8431-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8432- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8433+#define VM_STACK_DEFAULT_FLAGS32 \
8434+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8435+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8436
8437 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8438 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8439
8440+#ifndef CONFIG_PAX_PAGEEXEC
8441 #define VM_STACK_DEFAULT_FLAGS \
8442 (is_32bit_task() ? \
8443 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8444+#endif
8445
8446 #include <asm-generic/getorder.h>
8447
8448diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8449index 4b0be20..c15a27d 100644
8450--- a/arch/powerpc/include/asm/pgalloc-64.h
8451+++ b/arch/powerpc/include/asm/pgalloc-64.h
8452@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8453 #ifndef CONFIG_PPC_64K_PAGES
8454
8455 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8456+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8457
8458 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8459 {
8460@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8461 pud_set(pud, (unsigned long)pmd);
8462 }
8463
8464+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8465+{
8466+ pud_populate(mm, pud, pmd);
8467+}
8468+
8469 #define pmd_populate(mm, pmd, pte_page) \
8470 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8471 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8472@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8473 #endif
8474
8475 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8476+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8477
8478 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8479 pte_t *pte)
8480diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8481index a8805fe..6d69617 100644
8482--- a/arch/powerpc/include/asm/pgtable.h
8483+++ b/arch/powerpc/include/asm/pgtable.h
8484@@ -2,6 +2,7 @@
8485 #define _ASM_POWERPC_PGTABLE_H
8486 #ifdef __KERNEL__
8487
8488+#include <linux/const.h>
8489 #ifndef __ASSEMBLY__
8490 #include <linux/mmdebug.h>
8491 #include <linux/mmzone.h>
8492diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8493index 4aad413..85d86bf 100644
8494--- a/arch/powerpc/include/asm/pte-hash32.h
8495+++ b/arch/powerpc/include/asm/pte-hash32.h
8496@@ -21,6 +21,7 @@
8497 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8498 #define _PAGE_USER 0x004 /* usermode access allowed */
8499 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8500+#define _PAGE_EXEC _PAGE_GUARDED
8501 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8502 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8503 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8504diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8505index 1c874fb..e8480a4 100644
8506--- a/arch/powerpc/include/asm/reg.h
8507+++ b/arch/powerpc/include/asm/reg.h
8508@@ -253,6 +253,7 @@
8509 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8510 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8511 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8512+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8513 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8514 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8515 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8516diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8517index 5a6614a..d89995d1 100644
8518--- a/arch/powerpc/include/asm/smp.h
8519+++ b/arch/powerpc/include/asm/smp.h
8520@@ -51,7 +51,7 @@ struct smp_ops_t {
8521 int (*cpu_disable)(void);
8522 void (*cpu_die)(unsigned int nr);
8523 int (*cpu_bootable)(unsigned int nr);
8524-};
8525+} __no_const;
8526
8527 extern void smp_send_debugger_break(void);
8528 extern void start_secondary_resume(void);
8529diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8530index 4dbe072..b803275 100644
8531--- a/arch/powerpc/include/asm/spinlock.h
8532+++ b/arch/powerpc/include/asm/spinlock.h
8533@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8534 __asm__ __volatile__(
8535 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8536 __DO_SIGN_EXTEND
8537-" addic. %0,%0,1\n\
8538- ble- 2f\n"
8539+
8540+#ifdef CONFIG_PAX_REFCOUNT
8541+" mcrxr cr0\n"
8542+" addico. %0,%0,1\n"
8543+" bf 4*cr0+so, 3f\n"
8544+"2:.long " "0x00c00b00""\n"
8545+#else
8546+" addic. %0,%0,1\n"
8547+#endif
8548+
8549+"3:\n"
8550+ "ble- 4f\n"
8551 PPC405_ERR77(0,%1)
8552 " stwcx. %0,0,%1\n\
8553 bne- 1b\n"
8554 PPC_ACQUIRE_BARRIER
8555-"2:" : "=&r" (tmp)
8556+"4:"
8557+
8558+#ifdef CONFIG_PAX_REFCOUNT
8559+ _ASM_EXTABLE(2b,4b)
8560+#endif
8561+
8562+ : "=&r" (tmp)
8563 : "r" (&rw->lock)
8564 : "cr0", "xer", "memory");
8565
8566@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8567 __asm__ __volatile__(
8568 "# read_unlock\n\t"
8569 PPC_RELEASE_BARRIER
8570-"1: lwarx %0,0,%1\n\
8571- addic %0,%0,-1\n"
8572+"1: lwarx %0,0,%1\n"
8573+
8574+#ifdef CONFIG_PAX_REFCOUNT
8575+" mcrxr cr0\n"
8576+" addico. %0,%0,-1\n"
8577+" bf 4*cr0+so, 3f\n"
8578+"2:.long " "0x00c00b00""\n"
8579+#else
8580+" addic. %0,%0,-1\n"
8581+#endif
8582+
8583+"3:\n"
8584 PPC405_ERR77(0,%1)
8585 " stwcx. %0,0,%1\n\
8586 bne- 1b"
8587+
8588+#ifdef CONFIG_PAX_REFCOUNT
8589+"\n4:\n"
8590+ _ASM_EXTABLE(2b, 4b)
8591+#endif
8592+
8593 : "=&r"(tmp)
8594 : "r"(&rw->lock)
8595 : "cr0", "xer", "memory");
8596diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8597index 0be6c68..9c3c6ee 100644
8598--- a/arch/powerpc/include/asm/thread_info.h
8599+++ b/arch/powerpc/include/asm/thread_info.h
8600@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8601 #if defined(CONFIG_PPC64)
8602 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8603 #endif
8604+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8605+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8606
8607 /* as above, but as bit values */
8608 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8609@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8610 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8611 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8612 #define _TIF_NOHZ (1<<TIF_NOHZ)
8613+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8614 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8615 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8616- _TIF_NOHZ)
8617+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8618
8619 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8620 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
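
The new comment's constraint is real: PowerPC entry code tests these flags with andi., whose immediate field is 16 bits wide, so any flag folded into _TIF_SYSCALL_T_OR_A must live in bits 0-15. A trivial check of why bit 6 qualifies and, say, bit 18 would not:

#include <stdio.h>

int main(void)
{
	/* andi. immediates span 0x0000..0xffff */
	printf("bit 6:  %s\n", (1u << 6)  <= 0xffffu ? "fits" : "too high");
	printf("bit 18: %s\n", (1u << 18) <= 0xffffu ? "fits" : "too high");
	return 0;
}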
8621diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8622index a0c071d..49cdc7f 100644
8623--- a/arch/powerpc/include/asm/uaccess.h
8624+++ b/arch/powerpc/include/asm/uaccess.h
8625@@ -58,6 +58,7 @@
8626
8627 #endif
8628
8629+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8630 #define access_ok(type, addr, size) \
8631 (__chk_user_ptr(addr), \
8632 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8633@@ -318,52 +319,6 @@ do { \
8634 extern unsigned long __copy_tofrom_user(void __user *to,
8635 const void __user *from, unsigned long size);
8636
8637-#ifndef __powerpc64__
8638-
8639-static inline unsigned long copy_from_user(void *to,
8640- const void __user *from, unsigned long n)
8641-{
8642- unsigned long over;
8643-
8644- if (access_ok(VERIFY_READ, from, n))
8645- return __copy_tofrom_user((__force void __user *)to, from, n);
8646- if ((unsigned long)from < TASK_SIZE) {
8647- over = (unsigned long)from + n - TASK_SIZE;
8648- return __copy_tofrom_user((__force void __user *)to, from,
8649- n - over) + over;
8650- }
8651- return n;
8652-}
8653-
8654-static inline unsigned long copy_to_user(void __user *to,
8655- const void *from, unsigned long n)
8656-{
8657- unsigned long over;
8658-
8659- if (access_ok(VERIFY_WRITE, to, n))
8660- return __copy_tofrom_user(to, (__force void __user *)from, n);
8661- if ((unsigned long)to < TASK_SIZE) {
8662- over = (unsigned long)to + n - TASK_SIZE;
8663- return __copy_tofrom_user(to, (__force void __user *)from,
8664- n - over) + over;
8665- }
8666- return n;
8667-}
8668-
8669-#else /* __powerpc64__ */
8670-
8671-#define __copy_in_user(to, from, size) \
8672- __copy_tofrom_user((to), (from), (size))
8673-
8674-extern unsigned long copy_from_user(void *to, const void __user *from,
8675- unsigned long n);
8676-extern unsigned long copy_to_user(void __user *to, const void *from,
8677- unsigned long n);
8678-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8679- unsigned long n);
8680-
8681-#endif /* __powerpc64__ */
8682-
8683 static inline unsigned long __copy_from_user_inatomic(void *to,
8684 const void __user *from, unsigned long n)
8685 {
8686@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8687 if (ret == 0)
8688 return 0;
8689 }
8690+
8691+ if (!__builtin_constant_p(n))
8692+ check_object_size(to, n, false);
8693+
8694 return __copy_tofrom_user((__force void __user *)to, from, n);
8695 }
8696
8697@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8698 if (ret == 0)
8699 return 0;
8700 }
8701+
8702+ if (!__builtin_constant_p(n))
8703+ check_object_size(from, n, true);
8704+
8705 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8706 }
8707
8708@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8709 return __copy_to_user_inatomic(to, from, size);
8710 }
8711
8712+#ifndef __powerpc64__
8713+
8714+static inline unsigned long __must_check copy_from_user(void *to,
8715+ const void __user *from, unsigned long n)
8716+{
8717+ unsigned long over;
8718+
8719+ if ((long)n < 0)
8720+ return n;
8721+
8722+ if (access_ok(VERIFY_READ, from, n)) {
8723+ if (!__builtin_constant_p(n))
8724+ check_object_size(to, n, false);
8725+ return __copy_tofrom_user((__force void __user *)to, from, n);
8726+ }
8727+ if ((unsigned long)from < TASK_SIZE) {
8728+ over = (unsigned long)from + n - TASK_SIZE;
8729+ if (!__builtin_constant_p(n - over))
8730+ check_object_size(to, n - over, false);
8731+ return __copy_tofrom_user((__force void __user *)to, from,
8732+ n - over) + over;
8733+ }
8734+ return n;
8735+}
8736+
8737+static inline unsigned long __must_check copy_to_user(void __user *to,
8738+ const void *from, unsigned long n)
8739+{
8740+ unsigned long over;
8741+
8742+ if ((long)n < 0)
8743+ return n;
8744+
8745+ if (access_ok(VERIFY_WRITE, to, n)) {
8746+ if (!__builtin_constant_p(n))
8747+ check_object_size(from, n, true);
8748+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8749+ }
8750+ if ((unsigned long)to < TASK_SIZE) {
8751+ over = (unsigned long)to + n - TASK_SIZE;
8752+ if (!__builtin_constant_p(n - over))
8753+ check_object_size(from, n - over, true);
8754+ return __copy_tofrom_user(to, (__force void __user *)from,
8755+ n - over) + over;
8756+ }
8757+ return n;
8758+}
8759+
8760+#else /* __powerpc64__ */
8761+
8762+#define __copy_in_user(to, from, size) \
8763+ __copy_tofrom_user((to), (from), (size))
8764+
8765+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8766+{
8767+ if ((long)n < 0 || n > INT_MAX)
8768+ return n;
8769+
8770+ if (!__builtin_constant_p(n))
8771+ check_object_size(to, n, false);
8772+
8773+ if (likely(access_ok(VERIFY_READ, from, n)))
8774+ n = __copy_from_user(to, from, n);
8775+ else
8776+ memset(to, 0, n);
8777+ return n;
8778+}
8779+
8780+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8781+{
8782+ if ((long)n < 0 || n > INT_MAX)
8783+ return n;
8784+
8785+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8786+ if (!__builtin_constant_p(n))
8787+ check_object_size(from, n, true);
8788+ n = __copy_to_user(to, from, n);
8789+ }
8790+ return n;
8791+}
8792+
8793+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8794+ unsigned long n);
8795+
8796+#endif /* __powerpc64__ */
8797+
8798 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8799
8800 static inline unsigned long clear_user(void __user *addr, unsigned long size)
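
The relocated copy_from_user()/copy_to_user() above are moved below the __copy_* primitives so they can call them, and gain three hardening steps: a sign-bit length check that catches underflowed size_t values, a check_object_size() call (the PAX_USERCOPY bounds check) when the size is not a compile-time constant, and, on the 64-bit read path, zeroing the destination when access_ok() fails. A userspace model with stub helpers (the stubs are stand-ins, not kernel APIs):

#include <stdio.h>
#include <string.h>

static int access_ok_stub(const void *p, unsigned long n)
{
	return p != NULL;	/* stand-in for the real range check */
}

static void check_object_size_stub(const void *p, unsigned long n, int to_user)
{
	/* kernel: verifies [p, p+n) stays inside one slab/stack object */
	(void)p; (void)n; (void)to_user;
}

static unsigned long copy_from_user_model(void *to, const void *from,
					  unsigned long n)
{
	if ((long)n < 0)			/* underflowed length: refuse */
		return n;
	if (!__builtin_constant_p(n))
		check_object_size_stub(to, n, 0);
	if (access_ok_stub(from, n)) {
		memcpy(to, from, n);		/* kernel: __copy_tofrom_user */
		return 0;
	}
	memset(to, 0, n);			/* never leak stale bytes */
	return n;
}

int main(void)
{
	char dst[8];

	printf("left over: %lu\n", copy_from_user_model(dst, "abcdefg", 8));
	return 0;
}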
8801diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8802index 502cf69..53936a1 100644
8803--- a/arch/powerpc/kernel/Makefile
8804+++ b/arch/powerpc/kernel/Makefile
8805@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8806 CFLAGS_btext.o += -fPIC
8807 endif
8808
8809+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8810+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8811+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8812+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8813+
8814 ifdef CONFIG_FUNCTION_TRACER
8815 # Do not trace early boot code
8816 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8817@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8818 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8819 endif
8820
8821+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8822+
8823 obj-y := cputable.o ptrace.o syscalls.o \
8824 irq.o align.o signal_32.o pmc.o vdso.o \
8825 process.o systbl.o idle.o \
8826diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8827index 3e68d1c..72a5ee6 100644
8828--- a/arch/powerpc/kernel/exceptions-64e.S
8829+++ b/arch/powerpc/kernel/exceptions-64e.S
8830@@ -1010,6 +1010,7 @@ storage_fault_common:
8831 std r14,_DAR(r1)
8832 std r15,_DSISR(r1)
8833 addi r3,r1,STACK_FRAME_OVERHEAD
8834+ bl save_nvgprs
8835 mr r4,r14
8836 mr r5,r15
8837 ld r14,PACA_EXGEN+EX_R14(r13)
8838@@ -1018,8 +1019,7 @@ storage_fault_common:
8839 cmpdi r3,0
8840 bne- 1f
8841 b ret_from_except_lite
8842-1: bl save_nvgprs
8843- mr r5,r3
8844+1: mr r5,r3
8845 addi r3,r1,STACK_FRAME_OVERHEAD
8846 ld r4,_DAR(r1)
8847 bl bad_page_fault
8848diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8849index c2df815..bae3d12 100644
8850--- a/arch/powerpc/kernel/exceptions-64s.S
8851+++ b/arch/powerpc/kernel/exceptions-64s.S
8852@@ -1599,10 +1599,10 @@ handle_page_fault:
8853 11: ld r4,_DAR(r1)
8854 ld r5,_DSISR(r1)
8855 addi r3,r1,STACK_FRAME_OVERHEAD
8856+ bl save_nvgprs
8857 bl do_page_fault
8858 cmpdi r3,0
8859 beq+ 12f
8860- bl save_nvgprs
8861 mr r5,r3
8862 addi r3,r1,STACK_FRAME_OVERHEAD
8863 lwz r4,_DAR(r1)
8864diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8865index 4509603..cdb491f 100644
8866--- a/arch/powerpc/kernel/irq.c
8867+++ b/arch/powerpc/kernel/irq.c
8868@@ -460,6 +460,8 @@ void migrate_irqs(void)
8869 }
8870 #endif
8871
8872+extern void gr_handle_kernel_exploit(void);
8873+
8874 static inline void check_stack_overflow(void)
8875 {
8876 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8877@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8878 pr_err("do_IRQ: stack overflow: %ld\n",
8879 sp - sizeof(struct thread_info));
8880 dump_stack();
8881+ gr_handle_kernel_exploit();
8882 }
8883 #endif
8884 }
8885diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8886index c94d2e0..992a9ce 100644
8887--- a/arch/powerpc/kernel/module_32.c
8888+++ b/arch/powerpc/kernel/module_32.c
8889@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8890 me->arch.core_plt_section = i;
8891 }
8892 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8893- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8894+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8895 return -ENOEXEC;
8896 }
8897
8898@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8899
8900 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8901 /* Init, or core PLT? */
8902- if (location >= mod->module_core
8903- && location < mod->module_core + mod->core_size)
8904+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8905+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8906 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8907- else
8908+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8909+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8910 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8911+ else {
8912+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8913+ return ~0UL;
8914+ }
8915
8916 /* Find this entry, or if that fails, the next avail. entry */
8917 while (entry->jump[0]) {
8918@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8919 }
8920 #ifdef CONFIG_DYNAMIC_FTRACE
8921 module->arch.tramp =
8922- do_plt_call(module->module_core,
8923+ do_plt_call(module->module_core_rx,
8924 (unsigned long)ftrace_caller,
8925 sechdrs, module);
8926 #endif
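
do_plt_call() now has to cope with the module image being split into rx (code) and rw (data) halves, so the single core/init range test becomes four, and a location outside all of them is rejected instead of silently treated as an init relocation. A sketch of that classification (addresses are hypothetical):

#include <stdio.h>
#include <stdbool.h>

struct region { unsigned long base, size; };

static bool in_region(unsigned long loc, struct region r)
{
	return loc >= r.base && loc < r.base + r.size;
}

int main(void)
{
	struct region core_rx = { 0x1000, 0x800 };
	struct region core_rw = { 0x2000, 0x400 };
	unsigned long loc = 0x2600;		/* past both regions */

	if (in_region(loc, core_rx) || in_region(loc, core_rw))
		printf("core PLT\n");
	else
		printf("invalid R_PPC_REL24 entry\n");	/* new failure path */
	return 0;
}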
8927diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8928index b4cc7be..1fe8bb3 100644
8929--- a/arch/powerpc/kernel/process.c
8930+++ b/arch/powerpc/kernel/process.c
8931@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8932 * Lookup NIP late so we have the best chance of getting the
8933 * above info out without failing
8934 */
8935- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8936- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8937+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8938+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8939 #endif
8940 show_stack(current, (unsigned long *) regs->gpr[1]);
8941 if (!user_mode(regs))
8942@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8943 newsp = stack[0];
8944 ip = stack[STACK_FRAME_LR_SAVE];
8945 if (!firstframe || ip != lr) {
8946- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8947+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8948 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8949 if ((ip == rth) && curr_frame >= 0) {
8950- printk(" (%pS)",
8951+ printk(" (%pA)",
8952 (void *)current->ret_stack[curr_frame].ret);
8953 curr_frame--;
8954 }
8955@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8956 struct pt_regs *regs = (struct pt_regs *)
8957 (sp + STACK_FRAME_OVERHEAD);
8958 lr = regs->link;
8959- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8960+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8961 regs->trap, (void *)regs->nip, (void *)lr);
8962 firstframe = 1;
8963 }
8964@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8965 mtspr(SPRN_CTRLT, ctrl);
8966 }
8967 #endif /* CONFIG_PPC64 */
8968-
8969-unsigned long arch_align_stack(unsigned long sp)
8970-{
8971- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8972- sp -= get_random_int() & ~PAGE_MASK;
8973- return sp & ~0xf;
8974-}
8975-
8976-static inline unsigned long brk_rnd(void)
8977-{
8978- unsigned long rnd = 0;
8979-
8980- /* 8MB for 32bit, 1GB for 64bit */
8981- if (is_32bit_task())
8982- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8983- else
8984- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8985-
8986- return rnd << PAGE_SHIFT;
8987-}
8988-
8989-unsigned long arch_randomize_brk(struct mm_struct *mm)
8990-{
8991- unsigned long base = mm->brk;
8992- unsigned long ret;
8993-
8994-#ifdef CONFIG_PPC_STD_MMU_64
8995- /*
8996- * If we are using 1TB segments and we are allowed to randomise
8997- * the heap, we can put it above 1TB so it is backed by a 1TB
8998- * segment. Otherwise the heap will be in the bottom 1TB
8999- * which always uses 256MB segments and this may result in a
9000- * performance penalty.
9001- */
9002- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9003- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9004-#endif
9005-
9006- ret = PAGE_ALIGN(base + brk_rnd());
9007-
9008- if (ret < mm->brk)
9009- return mm->brk;
9010-
9011- return ret;
9012-}
9013-
9014diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9015index f21897b..28c0428 100644
9016--- a/arch/powerpc/kernel/ptrace.c
9017+++ b/arch/powerpc/kernel/ptrace.c
9018@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9019 return ret;
9020 }
9021
9022+#ifdef CONFIG_GRKERNSEC_SETXID
9023+extern void gr_delayed_cred_worker(void);
9024+#endif
9025+
9026 /*
9027 * We must return the syscall number to actually look up in the table.
9028 * This can be -1L to skip running any syscall at all.
9029@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9030
9031 secure_computing_strict(regs->gpr[0]);
9032
9033+#ifdef CONFIG_GRKERNSEC_SETXID
9034+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9035+ gr_delayed_cred_worker();
9036+#endif
9037+
9038 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9039 tracehook_report_syscall_entry(regs))
9040 /*
9041@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9042 {
9043 int step;
9044
9045+#ifdef CONFIG_GRKERNSEC_SETXID
9046+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9047+ gr_delayed_cred_worker();
9048+#endif
9049+
9050 audit_syscall_exit(regs);
9051
9052 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9053diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9054index b171001..4ac7ac5 100644
9055--- a/arch/powerpc/kernel/signal_32.c
9056+++ b/arch/powerpc/kernel/signal_32.c
9057@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9058 /* Save user registers on the stack */
9059 frame = &rt_sf->uc.uc_mcontext;
9060 addr = frame;
9061- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9062+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9063 sigret = 0;
9064 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9065 } else {
9066diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9067index 2cb0c94..c0c0bc9 100644
9068--- a/arch/powerpc/kernel/signal_64.c
9069+++ b/arch/powerpc/kernel/signal_64.c
9070@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9071 current->thread.fp_state.fpscr = 0;
9072
9073 /* Set up to return from userspace. */
9074- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9075+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9076 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9077 } else {
9078 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9079diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9080index e6595b7..24bde6e 100644
9081--- a/arch/powerpc/kernel/traps.c
9082+++ b/arch/powerpc/kernel/traps.c
9083@@ -36,6 +36,7 @@
9084 #include <linux/debugfs.h>
9085 #include <linux/ratelimit.h>
9086 #include <linux/context_tracking.h>
9087+#include <linux/uaccess.h>
9088
9089 #include <asm/emulated_ops.h>
9090 #include <asm/pgtable.h>
9091@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9092 return flags;
9093 }
9094
9095+extern void gr_handle_kernel_exploit(void);
9096+
9097 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9098 int signr)
9099 {
9100@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9101 panic("Fatal exception in interrupt");
9102 if (panic_on_oops)
9103 panic("Fatal exception");
9104+
9105+ gr_handle_kernel_exploit();
9106+
9107 do_exit(signr);
9108 }
9109
9110@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9111 enum ctx_state prev_state = exception_enter();
9112 unsigned int reason = get_reason(regs);
9113
9114+#ifdef CONFIG_PAX_REFCOUNT
9115+ unsigned int bkpt;
9116+ const struct exception_table_entry *entry;
9117+
9118+ if (reason & REASON_ILLEGAL) {
9119+ /* Check if PaX bad instruction */
9120+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9121+ current->thread.trap_nr = 0;
9122+ pax_report_refcount_overflow(regs);
9123+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9124+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9125+ regs->nip = entry->fixup;
9126+ return;
9127+ }
9128+ /* fixup_exception() could not handle */
9129+ goto bail;
9130+ }
9131+ }
9132+#endif
9133+
9134 /* We can now get here via a FP Unavailable exception if the core
9135 * has no FPU, in that case the reason flags will be 0 */
9136
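
The recovery sequence added to program_check_exception() reads the faulting word, matches it against the PaX trap opcode, and, since powerpc lacks fixup_exception(), walks the exception table by hand to find where to resume. A compact model of that flow:

#include <stdio.h>
#include <stddef.h>

struct extable_entry { unsigned long insn, fixup; };

/* hypothetical table: trap word at 0x1008 resumes at 0x1010 */
static const struct extable_entry extable[] = {
	{ 0x1008, 0x1010 },
};

static const struct extable_entry *search_extable(unsigned long nip)
{
	for (size_t i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
		if (extable[i].insn == nip)
			return &extable[i];
	return NULL;
}

int main(void)
{
	unsigned long nip = 0x1008;
	unsigned int insn = 0x00c00b00;	/* the PaX breakpoint word */
	const struct extable_entry *e;

	if (insn == 0xc00b00 && (e = search_extable(nip)) != NULL)
		printf("report overflow, resume at 0x%lx\n", e->fixup);
	else
		printf("no fixup: fall through to oops\n");
	return 0;
}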
9137diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9138index 305eb0d..accc5b40 100644
9139--- a/arch/powerpc/kernel/vdso.c
9140+++ b/arch/powerpc/kernel/vdso.c
9141@@ -34,6 +34,7 @@
9142 #include <asm/vdso.h>
9143 #include <asm/vdso_datapage.h>
9144 #include <asm/setup.h>
9145+#include <asm/mman.h>
9146
9147 #undef DEBUG
9148
9149@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9150 vdso_base = VDSO32_MBASE;
9151 #endif
9152
9153- current->mm->context.vdso_base = 0;
9154+ current->mm->context.vdso_base = ~0UL;
9155
9156 /* vDSO has a problem and was disabled, just don't "enable" it for the
9157 * process
9158@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9159 vdso_base = get_unmapped_area(NULL, vdso_base,
9160 (vdso_pages << PAGE_SHIFT) +
9161 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9162- 0, 0);
9163+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9164 if (IS_ERR_VALUE(vdso_base)) {
9165 rc = vdso_base;
9166 goto fail_mmapsem;
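
Two vdso.c changes work together: ~0UL replaces 0 as the "no vDSO" sentinel (the signal paths above now test != ~0UL), and the get_unmapped_area() call passes real MAP_ flags so the PaX mmap logic sees an executable private mapping. The sentinel pattern in isolation:

#include <stdio.h>

#define VDSO_DISABLED	(~0UL)

int main(void)
{
	unsigned long vdso_base = VDSO_DISABLED;

	if (vdso_base != VDSO_DISABLED)
		printf("signal trampoline in vDSO at 0x%lx\n", vdso_base);
	else
		printf("fall back to the on-stack trampoline\n");
	return 0;
}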
9167diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9168index c45eaab..5f41b57 100644
9169--- a/arch/powerpc/kvm/powerpc.c
9170+++ b/arch/powerpc/kvm/powerpc.c
9171@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9172 }
9173 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9174
9175-int kvm_arch_init(void *opaque)
9176+int kvm_arch_init(const void *opaque)
9177 {
9178 return 0;
9179 }
9180diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9181index 5eea6f3..5d10396 100644
9182--- a/arch/powerpc/lib/usercopy_64.c
9183+++ b/arch/powerpc/lib/usercopy_64.c
9184@@ -9,22 +9,6 @@
9185 #include <linux/module.h>
9186 #include <asm/uaccess.h>
9187
9188-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9189-{
9190- if (likely(access_ok(VERIFY_READ, from, n)))
9191- n = __copy_from_user(to, from, n);
9192- else
9193- memset(to, 0, n);
9194- return n;
9195-}
9196-
9197-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9198-{
9199- if (likely(access_ok(VERIFY_WRITE, to, n)))
9200- n = __copy_to_user(to, from, n);
9201- return n;
9202-}
9203-
9204 unsigned long copy_in_user(void __user *to, const void __user *from,
9205 unsigned long n)
9206 {
9207@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9208 return n;
9209 }
9210
9211-EXPORT_SYMBOL(copy_from_user);
9212-EXPORT_SYMBOL(copy_to_user);
9213 EXPORT_SYMBOL(copy_in_user);
9214
9215diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9216index 6154b0a..4de2b19 100644
9217--- a/arch/powerpc/mm/fault.c
9218+++ b/arch/powerpc/mm/fault.c
9219@@ -33,6 +33,10 @@
9220 #include <linux/ratelimit.h>
9221 #include <linux/context_tracking.h>
9222 #include <linux/hugetlb.h>
9223+#include <linux/slab.h>
9224+#include <linux/pagemap.h>
9225+#include <linux/compiler.h>
9226+#include <linux/unistd.h>
9227
9228 #include <asm/firmware.h>
9229 #include <asm/page.h>
9230@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9231 }
9232 #endif
9233
9234+#ifdef CONFIG_PAX_PAGEEXEC
9235+/*
9236+ * PaX: decide what to do with offenders (regs->nip = fault address)
9237+ *
9238+ * returns 1 when task should be killed
9239+ */
9240+static int pax_handle_fetch_fault(struct pt_regs *regs)
9241+{
9242+ return 1;
9243+}
9244+
9245+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9246+{
9247+ unsigned long i;
9248+
9249+ printk(KERN_ERR "PAX: bytes at PC: ");
9250+ for (i = 0; i < 5; i++) {
9251+ unsigned int c;
9252+ if (get_user(c, (unsigned int __user *)pc+i))
9253+ printk(KERN_CONT "???????? ");
9254+ else
9255+ printk(KERN_CONT "%08x ", c);
9256+ }
9257+ printk("\n");
9258+}
9259+#endif
9260+
9261 /*
9262 * Check whether the instruction at regs->nip is a store using
9263 * an update addressing form which will update r1.
9264@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9265 * indicate errors in DSISR but can validly be set in SRR1.
9266 */
9267 if (trap == 0x400)
9268- error_code &= 0x48200000;
9269+ error_code &= 0x58200000;
9270 else
9271 is_write = error_code & DSISR_ISSTORE;
9272 #else
9273@@ -383,7 +414,7 @@ good_area:
9274 * "undefined". Of those that can be set, this is the only
9275 * one which seems bad.
9276 */
9277- if (error_code & 0x10000000)
9278+ if (error_code & DSISR_GUARDED)
9279 /* Guarded storage error. */
9280 goto bad_area;
9281 #endif /* CONFIG_8xx */
9282@@ -398,7 +429,7 @@ good_area:
9283 * processors use the same I/D cache coherency mechanism
9284 * as embedded.
9285 */
9286- if (error_code & DSISR_PROTFAULT)
9287+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9288 goto bad_area;
9289 #endif /* CONFIG_PPC_STD_MMU */
9290
9291@@ -490,6 +521,23 @@ bad_area:
9292 bad_area_nosemaphore:
9293 /* User mode accesses cause a SIGSEGV */
9294 if (user_mode(regs)) {
9295+
9296+#ifdef CONFIG_PAX_PAGEEXEC
9297+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9298+#ifdef CONFIG_PPC_STD_MMU
9299+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9300+#else
9301+ if (is_exec && regs->nip == address) {
9302+#endif
9303+ switch (pax_handle_fetch_fault(regs)) {
9304+ }
9305+
9306+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9307+ do_group_exit(SIGKILL);
9308+ }
9309+ }
9310+#endif
9311+
9312 _exception(SIGSEGV, regs, code, address);
9313 goto bail;
9314 }
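
The PAGEEXEC block decides whether a fault was really an instruction fetch from non-executable memory: on classic hash MMUs that shows up as DSISR_PROTFAULT or the newly decoded DSISR_GUARDED bit, on other MMUs as the fault address equaling the NIP. A model of the classic-MMU test:

#include <stdio.h>
#include <stdbool.h>

#define DSISR_GUARDED	0x10000000	/* fetch from guarded storage */
#define DSISR_PROTFAULT	0x08000000	/* protection fault */

static bool pageexec_violation(bool is_exec, unsigned long error_code)
{
	return is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED));
}

int main(void)
{
	printf("%d\n", pageexec_violation(true, DSISR_GUARDED));	/* 1: kill */
	printf("%d\n", pageexec_violation(false, DSISR_PROTFAULT));	/* 0 */
	return 0;
}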
9315diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9316index cb8bdbe..cde4bc7 100644
9317--- a/arch/powerpc/mm/mmap.c
9318+++ b/arch/powerpc/mm/mmap.c
9319@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9320 return sysctl_legacy_va_layout;
9321 }
9322
9323-static unsigned long mmap_rnd(void)
9324+static unsigned long mmap_rnd(struct mm_struct *mm)
9325 {
9326 unsigned long rnd = 0;
9327
9328+#ifdef CONFIG_PAX_RANDMMAP
9329+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9330+#endif
9331+
9332 if (current->flags & PF_RANDOMIZE) {
9333 /* 8MB for 32bit, 1GB for 64bit */
9334 if (is_32bit_task())
9335@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9336 return rnd << PAGE_SHIFT;
9337 }
9338
9339-static inline unsigned long mmap_base(void)
9340+static inline unsigned long mmap_base(struct mm_struct *mm)
9341 {
9342 unsigned long gap = rlimit(RLIMIT_STACK);
9343
9344@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9345 else if (gap > MAX_GAP)
9346 gap = MAX_GAP;
9347
9348- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9349+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9350 }
9351
9352 /*
9353@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9354 */
9355 if (mmap_is_legacy()) {
9356 mm->mmap_base = TASK_UNMAPPED_BASE;
9357+
9358+#ifdef CONFIG_PAX_RANDMMAP
9359+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9360+ mm->mmap_base += mm->delta_mmap;
9361+#endif
9362+
9363 mm->get_unmapped_area = arch_get_unmapped_area;
9364 } else {
9365- mm->mmap_base = mmap_base();
9366+ mm->mmap_base = mmap_base(mm);
9367+
9368+#ifdef CONFIG_PAX_RANDMMAP
9369+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9370+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9371+#endif
9372+
9373 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9374 }
9375 }
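
The arch_pick_mmap_layout() change applies the PaX deltas in opposite directions: a bottom-up (legacy) base moves up by delta_mmap, a top-down base moves down by delta_mmap + delta_stack so the stack gap survives. A model with a stand-in mm_struct (field names follow the patch; the struct itself is illustrative):

#include <stdio.h>

struct mm_model {
	unsigned long mmap_base;
	unsigned long delta_mmap, delta_stack;
	int randmmap;			/* stands in for MF_PAX_RANDMMAP */
};

static void pick_layout(struct mm_model *mm, int legacy,
			unsigned long legacy_base, unsigned long topdown_base)
{
	if (legacy) {
		mm->mmap_base = legacy_base;
		if (mm->randmmap)
			mm->mmap_base += mm->delta_mmap;
	} else {
		mm->mmap_base = topdown_base;
		if (mm->randmmap)
			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
	}
}

int main(void)
{
	struct mm_model mm = { 0, 0x1000000, 0x200000, 1 };

	pick_layout(&mm, 1, 0x10000000UL, 0);
	printf("legacy base:   0x%lx\n", mm.mmap_base);
	pick_layout(&mm, 0, 0, 0x7ffff0000000UL);
	printf("top-down base: 0x%lx\n", mm.mmap_base);
	return 0;
}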
9376diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9377index ded0ea1..f213a9b 100644
9378--- a/arch/powerpc/mm/slice.c
9379+++ b/arch/powerpc/mm/slice.c
9380@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9381 if ((mm->task_size - len) < addr)
9382 return 0;
9383 vma = find_vma(mm, addr);
9384- return (!vma || (addr + len) <= vma->vm_start);
9385+ return check_heap_stack_gap(vma, addr, len, 0);
9386 }
9387
9388 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9389@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9390 info.align_offset = 0;
9391
9392 addr = TASK_UNMAPPED_BASE;
9393+
9394+#ifdef CONFIG_PAX_RANDMMAP
9395+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9396+ addr += mm->delta_mmap;
9397+#endif
9398+
9399 while (addr < TASK_SIZE) {
9400 info.low_limit = addr;
9401 if (!slice_scan_available(addr, available, 1, &addr))
9402@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9403 if (fixed && addr > (mm->task_size - len))
9404 return -ENOMEM;
9405
9406+#ifdef CONFIG_PAX_RANDMMAP
9407+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9408+ addr = 0;
9409+#endif
9410+
9411 /* If hint, make sure it matches our alignment restrictions */
9412 if (!fixed && addr) {
9413 addr = _ALIGN_UP(addr, 1ul << pshift);
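
check_heap_stack_gap() replaces the bare "no vma, or the mapping ends below it" test throughout these allocators; its real definition lives in the mm portion of this patch. A hedged sketch of the semantics the call sites assume, including the extra distance kept below growing-down stack vmas (the gap size here is an assumed default):

#include <stdio.h>
#include <stdbool.h>

#define VM_GROWSDOWN	0x0100
static unsigned long sysctl_heap_stack_gap = 64UL * 1024;

struct vma_model { unsigned long vm_start, vm_flags; };

static bool check_heap_stack_gap_model(const struct vma_model *vma,
				       unsigned long addr, unsigned long len,
				       unsigned long offset)
{
	if (!vma)
		return true;
	if (addr + len + offset > vma->vm_start)
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep distance from stacks */
		return addr + len + offset + sysctl_heap_stack_gap
			<= vma->vm_start;
	return true;
}

int main(void)
{
	struct vma_model stack = { 0x800000, VM_GROWSDOWN };

	printf("%d\n", check_heap_stack_gap_model(&stack, 0x700000, 0x1000, 0)); /* 1 */
	printf("%d\n", check_heap_stack_gap_model(&stack, 0x7f8000, 0x1000, 0)); /* 0 */
	return 0;
}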
9414diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9415index f223875..94170e4 100644
9416--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9417+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9418@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9419 }
9420
9421 static struct pci_ops scc_pciex_pci_ops = {
9422- scc_pciex_read_config,
9423- scc_pciex_write_config,
9424+ .read = scc_pciex_read_config,
9425+ .write = scc_pciex_write_config,
9426 };
9427
9428 static void pciex_clear_intr_all(unsigned int __iomem *base)
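
The pci_ops hunk swaps positional initializers for designated ones, a style grsecurity applies to ops structures, likely in service of its constify plugin. Positional initializers bind by field order, designated ones by name, so the struct definition can grow or be reordered without the callbacks silently shifting. In miniature:

struct ops_model {
	int (*read)(void);
	int (*write)(void);
};

static int model_read(void)  { return 0; }
static int model_write(void) { return 1; }

/* survives a reordering of struct ops_model, where the positional form
 * { model_read, model_write } would silently misassign */
static struct ops_model ops = {
	.read  = model_read,
	.write = model_write,
};

int main(void) { return ops.read() + ops.write(); }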
9429diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9430index d966bbe..372124a 100644
9431--- a/arch/powerpc/platforms/cell/spufs/file.c
9432+++ b/arch/powerpc/platforms/cell/spufs/file.c
9433@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9434 return VM_FAULT_NOPAGE;
9435 }
9436
9437-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9438+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9439 unsigned long address,
9440- void *buf, int len, int write)
9441+ void *buf, size_t len, int write)
9442 {
9443 struct spu_context *ctx = vma->vm_file->private_data;
9444 unsigned long offset = address - vma->vm_start;
9445diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9446index fa934fe..c296056 100644
9447--- a/arch/s390/include/asm/atomic.h
9448+++ b/arch/s390/include/asm/atomic.h
9449@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9450 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9451 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9452
9453+#define atomic64_read_unchecked(v) atomic64_read(v)
9454+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9455+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9456+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9457+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9458+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9459+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9460+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9461+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9462+
9463 #endif /* __ARCH_S390_ATOMIC__ */
9464diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9465index 8d72471..5322500 100644
9466--- a/arch/s390/include/asm/barrier.h
9467+++ b/arch/s390/include/asm/barrier.h
9468@@ -42,7 +42,7 @@
9469 do { \
9470 compiletime_assert_atomic_type(*p); \
9471 barrier(); \
9472- ACCESS_ONCE(*p) = (v); \
9473+ ACCESS_ONCE_RW(*p) = (v); \
9474 } while (0)
9475
9476 #define smp_load_acquire(p) \
9477diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9478index 4d7ccac..d03d0ad 100644
9479--- a/arch/s390/include/asm/cache.h
9480+++ b/arch/s390/include/asm/cache.h
9481@@ -9,8 +9,10 @@
9482 #ifndef __ARCH_S390_CACHE_H
9483 #define __ARCH_S390_CACHE_H
9484
9485-#define L1_CACHE_BYTES 256
9486+#include <linux/const.h>
9487+
9488 #define L1_CACHE_SHIFT 8
9489+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9490 #define NET_SKB_PAD 32
9491
9492 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9493diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9494index f6e43d3..5f57681 100644
9495--- a/arch/s390/include/asm/elf.h
9496+++ b/arch/s390/include/asm/elf.h
9497@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9498 the loader. We need to make sure that it is out of the way of the program
9499 that it will "exec", and that there is sufficient room for the brk. */
9500
9501-extern unsigned long randomize_et_dyn(unsigned long base);
9502-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9503+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9504+
9505+#ifdef CONFIG_PAX_ASLR
9506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9507+
9508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9510+#endif
9511
9512 /* This yields a mask that user programs can use to figure out what
9513 instruction set this CPU supports. */
9514@@ -223,9 +229,6 @@ struct linux_binprm;
9515 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9516 int arch_setup_additional_pages(struct linux_binprm *, int);
9517
9518-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9519-#define arch_randomize_brk arch_randomize_brk
9520-
9521 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9522
9523 #endif
9524diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9525index c4a93d6..4d2a9b4 100644
9526--- a/arch/s390/include/asm/exec.h
9527+++ b/arch/s390/include/asm/exec.h
9528@@ -7,6 +7,6 @@
9529 #ifndef __ASM_EXEC_H
9530 #define __ASM_EXEC_H
9531
9532-extern unsigned long arch_align_stack(unsigned long sp);
9533+#define arch_align_stack(x) ((x) & ~0xfUL)
9534
9535 #endif /* __ASM_EXEC_H */
9536diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9537index cd4c68e..6764641 100644
9538--- a/arch/s390/include/asm/uaccess.h
9539+++ b/arch/s390/include/asm/uaccess.h
9540@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9541 __range_ok((unsigned long)(addr), (size)); \
9542 })
9543
9544+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9545 #define access_ok(type, addr, size) __access_ok(addr, size)
9546
9547 /*
9548@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9549 copy_to_user(void __user *to, const void *from, unsigned long n)
9550 {
9551 might_fault();
9552+
9553+ if ((long)n < 0)
9554+ return n;
9555+
9556 return __copy_to_user(to, from, n);
9557 }
9558
9559@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9560 static inline unsigned long __must_check
9561 copy_from_user(void *to, const void __user *from, unsigned long n)
9562 {
9563- unsigned int sz = __compiletime_object_size(to);
9564+ size_t sz = __compiletime_object_size(to);
9565
9566 might_fault();
9567- if (unlikely(sz != -1 && sz < n)) {
9568+
9569+ if ((long)n < 0)
9570+ return n;
9571+
9572+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9573 copy_from_user_overflow();
9574 return n;
9575 }
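
On s390 the same sign-bit length guard is paired with __compiletime_object_size(), whose "unknown" result is all-ones; the hunk widens the variable to size_t so the sz != (size_t)-1 comparison is clean on 64-bit. The underlying GCC/Clang builtin behaves like this:

#include <stdio.h>

int main(int argc, char **argv)
{
	char buf[16];

	(void)argc;
	/* known object: its size; unknown pointer: (size_t)-1 for type 0 */
	printf("%zu\n", __builtin_object_size(buf, 0));			/* 16 */
	printf("%d\n", __builtin_object_size(argv[0], 0) == (size_t)-1); /* 1 */
	return 0;
}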
9576diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9577index 409d152..d90d368 100644
9578--- a/arch/s390/kernel/module.c
9579+++ b/arch/s390/kernel/module.c
9580@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9581
9582 /* Increase core size by size of got & plt and set start
9583 offsets for got and plt. */
9584- me->core_size = ALIGN(me->core_size, 4);
9585- me->arch.got_offset = me->core_size;
9586- me->core_size += me->arch.got_size;
9587- me->arch.plt_offset = me->core_size;
9588- me->core_size += me->arch.plt_size;
9589+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9590+ me->arch.got_offset = me->core_size_rw;
9591+ me->core_size_rw += me->arch.got_size;
9592+ me->arch.plt_offset = me->core_size_rx;
9593+ me->core_size_rx += me->arch.plt_size;
9594 return 0;
9595 }
9596
9597@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9598 if (info->got_initialized == 0) {
9599 Elf_Addr *gotent;
9600
9601- gotent = me->module_core + me->arch.got_offset +
9602+ gotent = me->module_core_rw + me->arch.got_offset +
9603 info->got_offset;
9604 *gotent = val;
9605 info->got_initialized = 1;
9606@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9607 rc = apply_rela_bits(loc, val, 0, 64, 0);
9608 else if (r_type == R_390_GOTENT ||
9609 r_type == R_390_GOTPLTENT) {
9610- val += (Elf_Addr) me->module_core - loc;
9611+ val += (Elf_Addr) me->module_core_rw - loc;
9612 rc = apply_rela_bits(loc, val, 1, 32, 1);
9613 }
9614 break;
9615@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9616 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9617 if (info->plt_initialized == 0) {
9618 unsigned int *ip;
9619- ip = me->module_core + me->arch.plt_offset +
9620+ ip = me->module_core_rx + me->arch.plt_offset +
9621 info->plt_offset;
9622 #ifndef CONFIG_64BIT
9623 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9624@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9625 val - loc + 0xffffUL < 0x1ffffeUL) ||
9626 (r_type == R_390_PLT32DBL &&
9627 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9628- val = (Elf_Addr) me->module_core +
9629+ val = (Elf_Addr) me->module_core_rx +
9630 me->arch.plt_offset +
9631 info->plt_offset;
9632 val += rela->r_addend - loc;
9633@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9634 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9635 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9636 val = val + rela->r_addend -
9637- ((Elf_Addr) me->module_core + me->arch.got_offset);
9638+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9639 if (r_type == R_390_GOTOFF16)
9640 rc = apply_rela_bits(loc, val, 0, 16, 0);
9641 else if (r_type == R_390_GOTOFF32)
9642@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9643 break;
9644 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9645 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9646- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9647+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9648 rela->r_addend - loc;
9649 if (r_type == R_390_GOTPC)
9650 rc = apply_rela_bits(loc, val, 1, 32, 0);
9651diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9652index aa7a839..6c2a916 100644
9653--- a/arch/s390/kernel/process.c
9654+++ b/arch/s390/kernel/process.c
9655@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
9656 }
9657 return 0;
9658 }
9659-
9660-unsigned long arch_align_stack(unsigned long sp)
9661-{
9662- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9663- sp -= get_random_int() & ~PAGE_MASK;
9664- return sp & ~0xf;
9665-}
9666-
9667-static inline unsigned long brk_rnd(void)
9668-{
9669- /* 8MB for 32bit, 1GB for 64bit */
9670- if (is_32bit_task())
9671- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9672- else
9673- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9674-}
9675-
9676-unsigned long arch_randomize_brk(struct mm_struct *mm)
9677-{
9678- unsigned long ret;
9679-
9680- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9681- return (ret > mm->brk) ? ret : mm->brk;
9682-}
9683-
9684-unsigned long randomize_et_dyn(unsigned long base)
9685-{
9686- unsigned long ret;
9687-
9688- if (!(current->flags & PF_RANDOMIZE))
9689- return base;
9690- ret = PAGE_ALIGN(base + brk_rnd());
9691- return (ret > base) ? ret : base;
9692-}
9693diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9694index 9b436c2..54fbf0a 100644
9695--- a/arch/s390/mm/mmap.c
9696+++ b/arch/s390/mm/mmap.c
9697@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9698 */
9699 if (mmap_is_legacy()) {
9700 mm->mmap_base = mmap_base_legacy();
9701+
9702+#ifdef CONFIG_PAX_RANDMMAP
9703+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9704+ mm->mmap_base += mm->delta_mmap;
9705+#endif
9706+
9707 mm->get_unmapped_area = arch_get_unmapped_area;
9708 } else {
9709 mm->mmap_base = mmap_base();
9710+
9711+#ifdef CONFIG_PAX_RANDMMAP
9712+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9713+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9714+#endif
9715+
9716 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9717 }
9718 }
9719@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9720 */
9721 if (mmap_is_legacy()) {
9722 mm->mmap_base = mmap_base_legacy();
9723+
9724+#ifdef CONFIG_PAX_RANDMMAP
9725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9726+ mm->mmap_base += mm->delta_mmap;
9727+#endif
9728+
9729 mm->get_unmapped_area = s390_get_unmapped_area;
9730 } else {
9731 mm->mmap_base = mmap_base();
9732+
9733+#ifdef CONFIG_PAX_RANDMMAP
9734+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9735+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9736+#endif
9737+
9738 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9739 }
9740 }
9741diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9742index ae3d59f..f65f075 100644
9743--- a/arch/score/include/asm/cache.h
9744+++ b/arch/score/include/asm/cache.h
9745@@ -1,7 +1,9 @@
9746 #ifndef _ASM_SCORE_CACHE_H
9747 #define _ASM_SCORE_CACHE_H
9748
9749+#include <linux/const.h>
9750+
9751 #define L1_CACHE_SHIFT 4
9752-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9753+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9754
9755 #endif /* _ASM_SCORE_CACHE_H */
9756diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9757index f9f3cd5..58ff438 100644
9758--- a/arch/score/include/asm/exec.h
9759+++ b/arch/score/include/asm/exec.h
9760@@ -1,6 +1,6 @@
9761 #ifndef _ASM_SCORE_EXEC_H
9762 #define _ASM_SCORE_EXEC_H
9763
9764-extern unsigned long arch_align_stack(unsigned long sp);
9765+#define arch_align_stack(x) (x)
9766
9767 #endif /* _ASM_SCORE_EXEC_H */
9768diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9769index a1519ad3..e8ac1ff 100644
9770--- a/arch/score/kernel/process.c
9771+++ b/arch/score/kernel/process.c
9772@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9773
9774 return task_pt_regs(task)->cp0_epc;
9775 }
9776-
9777-unsigned long arch_align_stack(unsigned long sp)
9778-{
9779- return sp;
9780-}
9781diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9782index ef9e555..331bd29 100644
9783--- a/arch/sh/include/asm/cache.h
9784+++ b/arch/sh/include/asm/cache.h
9785@@ -9,10 +9,11 @@
9786 #define __ASM_SH_CACHE_H
9787 #ifdef __KERNEL__
9788
9789+#include <linux/const.h>
9790 #include <linux/init.h>
9791 #include <cpu/cache.h>
9792
9793-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9794+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9795
9796 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9797
9798diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9799index 6777177..cb5e44f 100644
9800--- a/arch/sh/mm/mmap.c
9801+++ b/arch/sh/mm/mmap.c
9802@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9803 struct mm_struct *mm = current->mm;
9804 struct vm_area_struct *vma;
9805 int do_colour_align;
9806+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9807 struct vm_unmapped_area_info info;
9808
9809 if (flags & MAP_FIXED) {
9810@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9811 if (filp || (flags & MAP_SHARED))
9812 do_colour_align = 1;
9813
9814+#ifdef CONFIG_PAX_RANDMMAP
9815+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9816+#endif
9817+
9818 if (addr) {
9819 if (do_colour_align)
9820 addr = COLOUR_ALIGN(addr, pgoff);
9821@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9822 addr = PAGE_ALIGN(addr);
9823
9824 vma = find_vma(mm, addr);
9825- if (TASK_SIZE - len >= addr &&
9826- (!vma || addr + len <= vma->vm_start))
9827+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9828 return addr;
9829 }
9830
9831 info.flags = 0;
9832 info.length = len;
9833- info.low_limit = TASK_UNMAPPED_BASE;
9834+ info.low_limit = mm->mmap_base;
9835 info.high_limit = TASK_SIZE;
9836 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9837 info.align_offset = pgoff << PAGE_SHIFT;
9838@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9839 struct mm_struct *mm = current->mm;
9840 unsigned long addr = addr0;
9841 int do_colour_align;
9842+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9843 struct vm_unmapped_area_info info;
9844
9845 if (flags & MAP_FIXED) {
9846@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9847 if (filp || (flags & MAP_SHARED))
9848 do_colour_align = 1;
9849
9850+#ifdef CONFIG_PAX_RANDMMAP
9851+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9852+#endif
9853+
9854 /* requesting a specific address */
9855 if (addr) {
9856 if (do_colour_align)
9857@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9858 addr = PAGE_ALIGN(addr);
9859
9860 vma = find_vma(mm, addr);
9861- if (TASK_SIZE - len >= addr &&
9862- (!vma || addr + len <= vma->vm_start))
9863+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9864 return addr;
9865 }
9866
9867@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9868 VM_BUG_ON(addr != -ENOMEM);
9869 info.flags = 0;
9870 info.low_limit = TASK_UNMAPPED_BASE;
9871+
9872+#ifdef CONFIG_PAX_RANDMMAP
9873+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9874+ info.low_limit += mm->delta_mmap;
9875+#endif
9876+
9877 info.high_limit = TASK_SIZE;
9878 addr = vm_unmapped_area(&info);
9879 }
9880diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9881index 4082749..fd97781 100644
9882--- a/arch/sparc/include/asm/atomic_64.h
9883+++ b/arch/sparc/include/asm/atomic_64.h
9884@@ -15,18 +15,38 @@
9885 #define ATOMIC64_INIT(i) { (i) }
9886
9887 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9888+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9889+{
9890+ return ACCESS_ONCE(v->counter);
9891+}
9892 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9893+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9894+{
9895+ return ACCESS_ONCE(v->counter);
9896+}
9897
9898 #define atomic_set(v, i) (((v)->counter) = i)
9899+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9900+{
9901+ v->counter = i;
9902+}
9903 #define atomic64_set(v, i) (((v)->counter) = i)
9904+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9905+{
9906+ v->counter = i;
9907+}
9908
9909-#define ATOMIC_OP(op) \
9910-void atomic_##op(int, atomic_t *); \
9911-void atomic64_##op(long, atomic64_t *);
9912+#define __ATOMIC_OP(op, suffix) \
9913+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9914+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9915
9916-#define ATOMIC_OP_RETURN(op) \
9917-int atomic_##op##_return(int, atomic_t *); \
9918-long atomic64_##op##_return(long, atomic64_t *);
9919+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9920+
9921+#define __ATOMIC_OP_RETURN(op, suffix) \
9922+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9923+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9924+
9925+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9926
9927 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9928
9929@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9930
9931 #undef ATOMIC_OPS
9932 #undef ATOMIC_OP_RETURN
9933+#undef __ATOMIC_OP_RETURN
9934 #undef ATOMIC_OP
9935+#undef __ATOMIC_OP
9936
9937 #define atomic_dec_return(v) atomic_sub_return(1, v)
9938 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9939
9940 #define atomic_inc_return(v) atomic_add_return(1, v)
9941+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9942+{
9943+ return atomic_add_return_unchecked(1, v);
9944+}
9945 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9946+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9947+{
9948+ return atomic64_add_return_unchecked(1, v);
9949+}
9950
9951 /*
9952 * atomic_inc_and_test - increment and test
9953@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9954 * other cases.
9955 */
9956 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9957+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9958+{
9959+ return atomic_inc_return_unchecked(v) == 0;
9960+}
9961 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9962
9963 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9964@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9965 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9966
9967 #define atomic_inc(v) atomic_add(1, v)
9968+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9969+{
9970+ atomic_add_unchecked(1, v);
9971+}
9972 #define atomic64_inc(v) atomic64_add(1, v)
9973+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9974+{
9975+ atomic64_add_unchecked(1, v);
9976+}
9977
9978 #define atomic_dec(v) atomic_sub(1, v)
9979+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9980+{
9981+ atomic_sub_unchecked(1, v);
9982+}
9983 #define atomic64_dec(v) atomic64_sub(1, v)
9984+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9985+{
9986+ atomic64_sub_unchecked(1, v);
9987+}
9988
9989 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9990 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9991
9992 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9993+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9994+{
9995+ return cmpxchg(&v->counter, old, new);
9996+}
9997 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9998+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9999+{
10000+ return xchg(&v->counter, new);
10001+}
10002
10003 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10004 {
10005- int c, old;
10006+ int c, old, new;
10007 c = atomic_read(v);
10008 for (;;) {
10009- if (unlikely(c == (u)))
10010+ if (unlikely(c == u))
10011 break;
10012- old = atomic_cmpxchg((v), c, c + (a));
10013+
10014+ asm volatile("addcc %2, %0, %0\n"
10015+
10016+#ifdef CONFIG_PAX_REFCOUNT
10017+ "tvs %%icc, 6\n"
10018+#endif
10019+
10020+ : "=r" (new)
10021+ : "0" (c), "ir" (a)
10022+ : "cc");
10023+
10024+ old = atomic_cmpxchg(v, c, new);
10025 if (likely(old == c))
10026 break;
10027 c = old;
10028@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10029 #define atomic64_cmpxchg(v, o, n) \
10030 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10031 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10032+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10033+{
10034+ return xchg(&v->counter, new);
10035+}
10036
10037 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10038 {
10039- long c, old;
10040+ long c, old, new;
10041 c = atomic64_read(v);
10042 for (;;) {
10043- if (unlikely(c == (u)))
10044+ if (unlikely(c == u))
10045 break;
10046- old = atomic64_cmpxchg((v), c, c + (a));
10047+
10048+ asm volatile("addcc %2, %0, %0\n"
10049+
10050+#ifdef CONFIG_PAX_REFCOUNT
10051+ "tvs %%xcc, 6\n"
10052+#endif
10053+
10054+ : "=r" (new)
10055+ : "0" (c), "ir" (a)
10056+ : "cc");
10057+
10058+ old = atomic64_cmpxchg(v, c, new);
10059 if (likely(old == c))
10060 break;
10061 c = old;
10062 }
10063- return c != (u);
10064+ return c != u;
10065 }
10066
10067 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
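
The hunk above splits every atomic operation into a checked flavour (the default, which the companion atomic_64.S change makes trap on signed overflow) and an *_unchecked flavour that keeps plain wrap-around semantics for counters where overflow is harmless. A minimal userspace sketch of the split, with __builtin_add_overflow standing in for the hardware trap; all names here are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { int counter; } atomic_model_t;

/* checked: refuse to wrap, as the REFCOUNT trap handler would
 * kill the offending task */
static void atomic_model_add(int i, atomic_model_t *v)
{
	int new;

	if (__builtin_add_overflow(v->counter, i, &new)) {
		fprintf(stderr, "refcount overflow\n");
		abort();
	}
	v->counter = new;
}

/* unchecked: two's-complement wrap-around, fine for statistics */
static void atomic_model_add_unchecked(int i, atomic_model_t *v)
{
	v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}

int main(void)
{
	atomic_model_t stat = { INT_MAX }, ref = { INT_MAX };

	atomic_model_add_unchecked(1, &stat);
	printf("unchecked wrapped to %d\n", stat.counter);
	atomic_model_add(1, &ref);	/* aborts */
	return 0;
}
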
10068diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10069index 7664894..45a974b 100644
10070--- a/arch/sparc/include/asm/barrier_64.h
10071+++ b/arch/sparc/include/asm/barrier_64.h
10072@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10073 do { \
10074 compiletime_assert_atomic_type(*p); \
10075 barrier(); \
10076- ACCESS_ONCE(*p) = (v); \
10077+ ACCESS_ONCE_RW(*p) = (v); \
10078 } while (0)
10079
10080 #define smp_load_acquire(p) \
10081diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10082index 5bb6991..5c2132e 100644
10083--- a/arch/sparc/include/asm/cache.h
10084+++ b/arch/sparc/include/asm/cache.h
10085@@ -7,10 +7,12 @@
10086 #ifndef _SPARC_CACHE_H
10087 #define _SPARC_CACHE_H
10088
10089+#include <linux/const.h>
10090+
10091 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10092
10093 #define L1_CACHE_SHIFT 5
10094-#define L1_CACHE_BYTES 32
10095+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10096
10097 #ifdef CONFIG_SPARC32
10098 #define SMP_CACHE_BYTES_SHIFT 5
10099diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10100index a24e41f..47677ff 100644
10101--- a/arch/sparc/include/asm/elf_32.h
10102+++ b/arch/sparc/include/asm/elf_32.h
10103@@ -114,6 +114,13 @@ typedef struct {
10104
10105 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10106
10107+#ifdef CONFIG_PAX_ASLR
10108+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10109+
10110+#define PAX_DELTA_MMAP_LEN 16
10111+#define PAX_DELTA_STACK_LEN 16
10112+#endif
10113+
10114 /* This yields a mask that user programs can use to figure out what
10115 instruction set this cpu supports. This can NOT be done in userspace
10116 on Sparc. */
10117diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10118index 370ca1e..d4f4a98 100644
10119--- a/arch/sparc/include/asm/elf_64.h
10120+++ b/arch/sparc/include/asm/elf_64.h
10121@@ -189,6 +189,13 @@ typedef struct {
10122 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10123 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10124
10125+#ifdef CONFIG_PAX_ASLR
10126+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10127+
10128+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10129+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10130+#endif
10131+
10132 extern unsigned long sparc64_elf_hwcap;
10133 #define ELF_HWCAP sparc64_elf_hwcap
10134
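
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits mixed into the mmap and stack bases at execve time. A rough sketch of the address-space span those bit counts buy, assuming the 8 KB pages (PAGE_SHIFT = 13) sparc64 uses; the real delta is drawn from kernel randomness elsewhere in this patch:

#include <stdio.h>

#define PAGE_SHIFT 13	/* sparc64 uses 8 KB pages */

static unsigned long delta_span(unsigned int len_bits)
{
	/* len_bits random bits, scaled up to whole pages */
	return (1UL << len_bits) << PAGE_SHIFT;
}

int main(void)
{
	printf("compat task: %lu MiB of mmap randomisation\n",
	       delta_span(14) >> 20);
	printf("64-bit task: %lu GiB\n", delta_span(28) >> 30);
	return 0;
}
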
10135diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10136index a3890da..f6a408e 100644
10137--- a/arch/sparc/include/asm/pgalloc_32.h
10138+++ b/arch/sparc/include/asm/pgalloc_32.h
10139@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10140 }
10141
10142 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10143+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10144
10145 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10146 unsigned long address)
10147diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10148index 5e31871..13469c6 100644
10149--- a/arch/sparc/include/asm/pgalloc_64.h
10150+++ b/arch/sparc/include/asm/pgalloc_64.h
10151@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10152 }
10153
10154 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10155+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10156
10157 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10158 {
10159@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10160 }
10161
10162 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10163+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10164
10165 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10166 {
10167diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10168index 59ba6f6..4518128 100644
10169--- a/arch/sparc/include/asm/pgtable.h
10170+++ b/arch/sparc/include/asm/pgtable.h
10171@@ -5,4 +5,8 @@
10172 #else
10173 #include <asm/pgtable_32.h>
10174 #endif
10175+
10176+#define ktla_ktva(addr) (addr)
10177+#define ktva_ktla(addr) (addr)
10178+
10179 #endif
10180diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10181index b9b91ae..950b91e 100644
10182--- a/arch/sparc/include/asm/pgtable_32.h
10183+++ b/arch/sparc/include/asm/pgtable_32.h
10184@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10185 #define PAGE_SHARED SRMMU_PAGE_SHARED
10186 #define PAGE_COPY SRMMU_PAGE_COPY
10187 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10188+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10189+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10190+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10191 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10192
10193 /* Top-level page directory - dummy used by init-mm.
10194@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10195
10196 /* xwr */
10197 #define __P000 PAGE_NONE
10198-#define __P001 PAGE_READONLY
10199-#define __P010 PAGE_COPY
10200-#define __P011 PAGE_COPY
10201+#define __P001 PAGE_READONLY_NOEXEC
10202+#define __P010 PAGE_COPY_NOEXEC
10203+#define __P011 PAGE_COPY_NOEXEC
10204 #define __P100 PAGE_READONLY
10205 #define __P101 PAGE_READONLY
10206 #define __P110 PAGE_COPY
10207 #define __P111 PAGE_COPY
10208
10209 #define __S000 PAGE_NONE
10210-#define __S001 PAGE_READONLY
10211-#define __S010 PAGE_SHARED
10212-#define __S011 PAGE_SHARED
10213+#define __S001 PAGE_READONLY_NOEXEC
10214+#define __S010 PAGE_SHARED_NOEXEC
10215+#define __S011 PAGE_SHARED_NOEXEC
10216 #define __S100 PAGE_READONLY
10217 #define __S101 PAGE_READONLY
10218 #define __S110 PAGE_SHARED
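
The __Pxxx/__Sxxx tables above are indexed by the low three mmap protection bits (the "xwr" comment: read = bit 0, write = bit 1, exec = bit 2), for private and shared mappings respectively. With the _NOEXEC variants installed, only mappings that actually request PROT_EXEC receive an executable pte. A small userspace sketch of the lookup, names illustrative:

#include <stdio.h>

#define PROT_READ  0x1
#define PROT_WRITE 0x2
#define PROT_EXEC  0x4

/* mirrors the private (__P) column after this hunk */
static const char *prot_map_private[8] = {
	"PAGE_NONE",		/* 000 */
	"PAGE_READONLY_NOEXEC",	/* 001 */
	"PAGE_COPY_NOEXEC",	/* 010 */
	"PAGE_COPY_NOEXEC",	/* 011 */
	"PAGE_READONLY",	/* 100 */
	"PAGE_READONLY",	/* 101 */
	"PAGE_COPY",		/* 110 */
	"PAGE_COPY",		/* 111 */
};

int main(void)
{
	printf("private rw- -> %s\n",
	       prot_map_private[(PROT_READ | PROT_WRITE) & 7]);
	printf("private r-x -> %s\n",
	       prot_map_private[(PROT_READ | PROT_EXEC) & 7]);
	return 0;
}
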
10219diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10220index 79da178..c2eede8 100644
10221--- a/arch/sparc/include/asm/pgtsrmmu.h
10222+++ b/arch/sparc/include/asm/pgtsrmmu.h
10223@@ -115,6 +115,11 @@
10224 SRMMU_EXEC | SRMMU_REF)
10225 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10226 SRMMU_EXEC | SRMMU_REF)
10227+
10228+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10229+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10230+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10231+
10232 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10233 SRMMU_DIRTY | SRMMU_REF)
10234
10235diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10236index 29d64b1..4272fe8 100644
10237--- a/arch/sparc/include/asm/setup.h
10238+++ b/arch/sparc/include/asm/setup.h
10239@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10240 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10241
10242 /* init_64.c */
10243-extern atomic_t dcpage_flushes;
10244-extern atomic_t dcpage_flushes_xcall;
10245+extern atomic_unchecked_t dcpage_flushes;
10246+extern atomic_unchecked_t dcpage_flushes_xcall;
10247
10248 extern int sysctl_tsb_ratio;
10249 #endif
10250diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10251index 9689176..63c18ea 100644
10252--- a/arch/sparc/include/asm/spinlock_64.h
10253+++ b/arch/sparc/include/asm/spinlock_64.h
10254@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10255
10256 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10257
10258-static void inline arch_read_lock(arch_rwlock_t *lock)
10259+static inline void arch_read_lock(arch_rwlock_t *lock)
10260 {
10261 unsigned long tmp1, tmp2;
10262
10263 __asm__ __volatile__ (
10264 "1: ldsw [%2], %0\n"
10265 " brlz,pn %0, 2f\n"
10266-"4: add %0, 1, %1\n"
10267+"4: addcc %0, 1, %1\n"
10268+
10269+#ifdef CONFIG_PAX_REFCOUNT
10270+" tvs %%icc, 6\n"
10271+#endif
10272+
10273 " cas [%2], %0, %1\n"
10274 " cmp %0, %1\n"
10275 " bne,pn %%icc, 1b\n"
10276@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10277 " .previous"
10278 : "=&r" (tmp1), "=&r" (tmp2)
10279 : "r" (lock)
10280- : "memory");
10281+ : "memory", "cc");
10282 }
10283
10284-static int inline arch_read_trylock(arch_rwlock_t *lock)
10285+static inline int arch_read_trylock(arch_rwlock_t *lock)
10286 {
10287 int tmp1, tmp2;
10288
10289@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10290 "1: ldsw [%2], %0\n"
10291 " brlz,a,pn %0, 2f\n"
10292 " mov 0, %0\n"
10293-" add %0, 1, %1\n"
10294+" addcc %0, 1, %1\n"
10295+
10296+#ifdef CONFIG_PAX_REFCOUNT
10297+" tvs %%icc, 6\n"
10298+#endif
10299+
10300 " cas [%2], %0, %1\n"
10301 " cmp %0, %1\n"
10302 " bne,pn %%icc, 1b\n"
10303@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10304 return tmp1;
10305 }
10306
10307-static void inline arch_read_unlock(arch_rwlock_t *lock)
10308+static inline void arch_read_unlock(arch_rwlock_t *lock)
10309 {
10310 unsigned long tmp1, tmp2;
10311
10312 __asm__ __volatile__(
10313 "1: lduw [%2], %0\n"
10314-" sub %0, 1, %1\n"
10315+" subcc %0, 1, %1\n"
10316+
10317+#ifdef CONFIG_PAX_REFCOUNT
10318+" tvs %%icc, 6\n"
10319+#endif
10320+
10321 " cas [%2], %0, %1\n"
10322 " cmp %0, %1\n"
10323 " bne,pn %%xcc, 1b\n"
10324@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10325 : "memory");
10326 }
10327
10328-static void inline arch_write_lock(arch_rwlock_t *lock)
10329+static inline void arch_write_lock(arch_rwlock_t *lock)
10330 {
10331 unsigned long mask, tmp1, tmp2;
10332
10333@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10334 : "memory");
10335 }
10336
10337-static void inline arch_write_unlock(arch_rwlock_t *lock)
10338+static inline void arch_write_unlock(arch_rwlock_t *lock)
10339 {
10340 __asm__ __volatile__(
10341 " stw %%g0, [%0]"
10342@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10343 : "memory");
10344 }
10345
10346-static int inline arch_write_trylock(arch_rwlock_t *lock)
10347+static inline int arch_write_trylock(arch_rwlock_t *lock)
10348 {
10349 unsigned long mask, tmp1, tmp2, result;
10350
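
In the sparc64 rwlock, the lock word is a signed reader count; negative means a writer holds it. Replacing add with addcc plus tvs %icc, 6 makes an overflow of the reader count (which would spill into the writer sign bit) trap instead of silently corrupting the lock. A userspace model of the hardened read-lock path, with an abort standing in for the trap:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static atomic_int lock;	/* >= 0: reader count, < 0: writer */

static void read_lock_model(void)
{
	int old = atomic_load(&lock);

	for (;;) {
		while (old < 0)			/* writer active: spin */
			old = atomic_load(&lock);
		if (old == INT_MAX) {		/* addcc would set V; tvs traps */
			fprintf(stderr, "reader count overflow\n");
			abort();
		}
		if (atomic_compare_exchange_weak(&lock, &old, old + 1))
			return;			/* cas succeeded */
	}
}

int main(void)
{
	read_lock_model();
	printf("readers: %d\n", atomic_load(&lock));
	return 0;
}
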
10351diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10352index 025c984..a216504 100644
10353--- a/arch/sparc/include/asm/thread_info_32.h
10354+++ b/arch/sparc/include/asm/thread_info_32.h
10355@@ -49,6 +49,8 @@ struct thread_info {
10356 unsigned long w_saved;
10357
10358 struct restart_block restart_block;
10359+
10360+ unsigned long lowest_stack;
10361 };
10362
10363 /*
10364diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10365index 798f027..b009941 100644
10366--- a/arch/sparc/include/asm/thread_info_64.h
10367+++ b/arch/sparc/include/asm/thread_info_64.h
10368@@ -63,6 +63,8 @@ struct thread_info {
10369 struct pt_regs *kern_una_regs;
10370 unsigned int kern_una_insn;
10371
10372+ unsigned long lowest_stack;
10373+
10374 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10375 __attribute__ ((aligned(64)));
10376 };
10377@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10378 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10379 /* flag bit 4 is available */
10380 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10381-/* flag bit 6 is available */
10382+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10383 #define TIF_32BIT 7 /* 32-bit binary */
10384 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10385 #define TIF_SECCOMP 9 /* secure computing */
10386 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10387 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10388+
10389 /* NOTE: Thread flags >= 12 should be ones we have no interest
10390 * in using in assembly, else we can't use the mask as
10391 * an immediate value in instructions such as andcc.
10392@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10393 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10394 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10395 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10396+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10397
10398 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10399 _TIF_DO_NOTIFY_RESUME_MASK | \
10400 _TIF_NEED_RESCHED)
10401 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10402
10403+#define _TIF_WORK_SYSCALL \
10404+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10405+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10406+
10407 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10408
10409 /*
10410diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10411index bd56c28..4b63d83 100644
10412--- a/arch/sparc/include/asm/uaccess.h
10413+++ b/arch/sparc/include/asm/uaccess.h
10414@@ -1,5 +1,6 @@
10415 #ifndef ___ASM_SPARC_UACCESS_H
10416 #define ___ASM_SPARC_UACCESS_H
10417+
10418 #if defined(__sparc__) && defined(__arch64__)
10419 #include <asm/uaccess_64.h>
10420 #else
10421diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10422index 9634d08..f55fe4f 100644
10423--- a/arch/sparc/include/asm/uaccess_32.h
10424+++ b/arch/sparc/include/asm/uaccess_32.h
10425@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10426
10427 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10428 {
10429- if (n && __access_ok((unsigned long) to, n))
10430+ if ((long)n < 0)
10431+ return n;
10432+
10433+ if (n && __access_ok((unsigned long) to, n)) {
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(from, n, true);
10436 return __copy_user(to, (__force void __user *) from, n);
10437- else
10438+ } else
10439 return n;
10440 }
10441
10442 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10443 {
10444+ if ((long)n < 0)
10445+ return n;
10446+
10447+ if (!__builtin_constant_p(n))
10448+ check_object_size(from, n, true);
10449+
10450 return __copy_user(to, (__force void __user *) from, n);
10451 }
10452
10453 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10454 {
10455- if (n && __access_ok((unsigned long) from, n))
10456+ if ((long)n < 0)
10457+ return n;
10458+
10459+ if (n && __access_ok((unsigned long) from, n)) {
10460+ if (!__builtin_constant_p(n))
10461+ check_object_size(to, n, false);
10462 return __copy_user((__force void __user *) to, from, n);
10463- else
10464+ } else
10465 return n;
10466 }
10467
10468 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10469 {
10470+ if ((long)n < 0)
10471+ return n;
10472+
10473 return __copy_user((__force void __user *) to, from, n);
10474 }
10475
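
The (long)n < 0 guards above catch size_t lengths that went "negative" through an underflowed subtraction before reaching copy_{to,from}_user; returning n reports the whole copy as failed instead of moving gigabytes across the user/kernel boundary. A minimal sketch of the failure mode; copy_guard is an illustrative stand-in, not the kernel routine:

#include <stdio.h>
#include <string.h>

static unsigned long copy_guard(void *to, const void *from,
				unsigned long n)
{
	if ((long)n < 0)	/* underflowed size_t: refuse, and
				 * report every byte as uncopied */
		return n;
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char dst[16], src[16] = "payload";
	unsigned long body = 4, hdr = 8;
	unsigned long n = body - hdr;	/* underflows to a huge value */

	if (copy_guard(dst, src, n))
		printf("rejected bogus length %lu\n", n);
	return 0;
}
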
10476diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10477index c990a5e..f17b9c1 100644
10478--- a/arch/sparc/include/asm/uaccess_64.h
10479+++ b/arch/sparc/include/asm/uaccess_64.h
10480@@ -10,6 +10,7 @@
10481 #include <linux/compiler.h>
10482 #include <linux/string.h>
10483 #include <linux/thread_info.h>
10484+#include <linux/kernel.h>
10485 #include <asm/asi.h>
10486 #include <asm/spitfire.h>
10487 #include <asm-generic/uaccess-unaligned.h>
10488@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10489 static inline unsigned long __must_check
10490 copy_from_user(void *to, const void __user *from, unsigned long size)
10491 {
10492- unsigned long ret = ___copy_from_user(to, from, size);
10493+ unsigned long ret;
10494
10495+ if ((long)size < 0 || size > INT_MAX)
10496+ return size;
10497+
10498+ if (!__builtin_constant_p(size))
10499+ check_object_size(to, size, false);
10500+
10501+ ret = ___copy_from_user(to, from, size);
10502 if (unlikely(ret))
10503 ret = copy_from_user_fixup(to, from, size);
10504
10505@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10506 static inline unsigned long __must_check
10507 copy_to_user(void __user *to, const void *from, unsigned long size)
10508 {
10509- unsigned long ret = ___copy_to_user(to, from, size);
10510+ unsigned long ret;
10511
10512+ if ((long)size < 0 || size > INT_MAX)
10513+ return size;
10514+
10515+ if (!__builtin_constant_p(size))
10516+ check_object_size(from, size, true);
10517+
10518+ ret = ___copy_to_user(to, from, size);
10519 if (unlikely(ret))
10520 ret = copy_to_user_fixup(to, from, size);
10521 return ret;
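
check_object_size() is the runtime half of the usercopy hardening: for sizes the compiler cannot prove constant, it verifies that the kernel-side buffer really spans n bytes (slab object bounds, stack frame, and so on). A rough userspace stand-in for the property it enforces, assuming a fixed-size destination field; this is a sketch of the check, not the kernel's implementation:

#include <assert.h>
#include <string.h>

struct obj { char name[16]; long priv; };

static void checked_copy(struct obj *o, const void *src, size_t n)
{
	/* a runtime n must not reach past the field being filled */
	assert(n <= sizeof(o->name));
	memcpy(o->name, src, n);
}

int main(void)
{
	struct obj o;

	checked_copy(&o, "hi", 3);	/* ok */
	/* checked_copy(&o, src, 24) would abort: the copy would
	 * spill into o.priv, the class of bug USERCOPY stops */
	return 0;
}
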
10522diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10523index 7cf9c6e..6206648 100644
10524--- a/arch/sparc/kernel/Makefile
10525+++ b/arch/sparc/kernel/Makefile
10526@@ -4,7 +4,7 @@
10527 #
10528
10529 asflags-y := -ansi
10530-ccflags-y := -Werror
10531+#ccflags-y := -Werror
10532
10533 extra-y := head_$(BITS).o
10534
10535diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10536index 50e7b62..79fae35 100644
10537--- a/arch/sparc/kernel/process_32.c
10538+++ b/arch/sparc/kernel/process_32.c
10539@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10540
10541 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10542 r->psr, r->pc, r->npc, r->y, print_tainted());
10543- printk("PC: <%pS>\n", (void *) r->pc);
10544+ printk("PC: <%pA>\n", (void *) r->pc);
10545 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10546 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10547 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10548 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10549 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10550 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10551- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10552+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10553
10554 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10555 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10556@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10557 rw = (struct reg_window32 *) fp;
10558 pc = rw->ins[7];
10559 printk("[%08lx : ", pc);
10560- printk("%pS ] ", (void *) pc);
10561+ printk("%pA ] ", (void *) pc);
10562 fp = rw->ins[6];
10563 } while (++count < 16);
10564 printk("\n");
10565diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10566index 46a5964..a35c62c 100644
10567--- a/arch/sparc/kernel/process_64.c
10568+++ b/arch/sparc/kernel/process_64.c
10569@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10570 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10571 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10572 if (regs->tstate & TSTATE_PRIV)
10573- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10574+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10575 }
10576
10577 void show_regs(struct pt_regs *regs)
10578@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10579
10580 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10581 regs->tpc, regs->tnpc, regs->y, print_tainted());
10582- printk("TPC: <%pS>\n", (void *) regs->tpc);
10583+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10584 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10585 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10586 regs->u_regs[3]);
10587@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10588 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10589 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10590 regs->u_regs[15]);
10591- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10592+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10593 show_regwindow(regs);
10594 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10595 }
10596@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10597 ((tp && tp->task) ? tp->task->pid : -1));
10598
10599 if (gp->tstate & TSTATE_PRIV) {
10600- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10601+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10602 (void *) gp->tpc,
10603 (void *) gp->o7,
10604 (void *) gp->i7,
10605diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10606index 79cc0d1..ec62734 100644
10607--- a/arch/sparc/kernel/prom_common.c
10608+++ b/arch/sparc/kernel/prom_common.c
10609@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10610
10611 unsigned int prom_early_allocated __initdata;
10612
10613-static struct of_pdt_ops prom_sparc_ops __initdata = {
10614+static struct of_pdt_ops prom_sparc_ops __initconst = {
10615 .nextprop = prom_common_nextprop,
10616 .getproplen = prom_getproplen,
10617 .getproperty = prom_getproperty,
10618diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10619index 9ddc492..27a5619 100644
10620--- a/arch/sparc/kernel/ptrace_64.c
10621+++ b/arch/sparc/kernel/ptrace_64.c
10622@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10623 return ret;
10624 }
10625
10626+#ifdef CONFIG_GRKERNSEC_SETXID
10627+extern void gr_delayed_cred_worker(void);
10628+#endif
10629+
10630 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10631 {
10632 int ret = 0;
10633@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10634 if (test_thread_flag(TIF_NOHZ))
10635 user_exit();
10636
10637+#ifdef CONFIG_GRKERNSEC_SETXID
10638+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10639+ gr_delayed_cred_worker();
10640+#endif
10641+
10642 if (test_thread_flag(TIF_SYSCALL_TRACE))
10643 ret = tracehook_report_syscall_entry(regs);
10644
10645@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10646 if (test_thread_flag(TIF_NOHZ))
10647 user_exit();
10648
10649+#ifdef CONFIG_GRKERNSEC_SETXID
10650+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10651+ gr_delayed_cred_worker();
10652+#endif
10653+
10654 audit_syscall_exit(regs);
10655
10656 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10657diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10658index da6f1a7..e5dea8f 100644
10659--- a/arch/sparc/kernel/smp_64.c
10660+++ b/arch/sparc/kernel/smp_64.c
10661@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10662 return;
10663
10664 #ifdef CONFIG_DEBUG_DCFLUSH
10665- atomic_inc(&dcpage_flushes);
10666+ atomic_inc_unchecked(&dcpage_flushes);
10667 #endif
10668
10669 this_cpu = get_cpu();
10670@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10671 xcall_deliver(data0, __pa(pg_addr),
10672 (u64) pg_addr, cpumask_of(cpu));
10673 #ifdef CONFIG_DEBUG_DCFLUSH
10674- atomic_inc(&dcpage_flushes_xcall);
10675+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10676 #endif
10677 }
10678 }
10679@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10680 preempt_disable();
10681
10682 #ifdef CONFIG_DEBUG_DCFLUSH
10683- atomic_inc(&dcpage_flushes);
10684+ atomic_inc_unchecked(&dcpage_flushes);
10685 #endif
10686 data0 = 0;
10687 pg_addr = page_address(page);
10688@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10689 xcall_deliver(data0, __pa(pg_addr),
10690 (u64) pg_addr, cpu_online_mask);
10691 #ifdef CONFIG_DEBUG_DCFLUSH
10692- atomic_inc(&dcpage_flushes_xcall);
10693+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10694 #endif
10695 }
10696 __local_flush_dcache_page(page);
10697diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10698index 646988d..b88905f 100644
10699--- a/arch/sparc/kernel/sys_sparc_32.c
10700+++ b/arch/sparc/kernel/sys_sparc_32.c
10701@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10702 if (len > TASK_SIZE - PAGE_SIZE)
10703 return -ENOMEM;
10704 if (!addr)
10705- addr = TASK_UNMAPPED_BASE;
10706+ addr = current->mm->mmap_base;
10707
10708 info.flags = 0;
10709 info.length = len;
10710diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10711index 30e7ddb..266a3b0 100644
10712--- a/arch/sparc/kernel/sys_sparc_64.c
10713+++ b/arch/sparc/kernel/sys_sparc_64.c
10714@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10715 struct vm_area_struct * vma;
10716 unsigned long task_size = TASK_SIZE;
10717 int do_color_align;
10718+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10719 struct vm_unmapped_area_info info;
10720
10721 if (flags & MAP_FIXED) {
10722 /* We do not accept a shared mapping if it would violate
10723 * cache aliasing constraints.
10724 */
10725- if ((flags & MAP_SHARED) &&
10726+ if ((filp || (flags & MAP_SHARED)) &&
10727 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10728 return -EINVAL;
10729 return addr;
10730@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10731 if (filp || (flags & MAP_SHARED))
10732 do_color_align = 1;
10733
10734+#ifdef CONFIG_PAX_RANDMMAP
10735+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10736+#endif
10737+
10738 if (addr) {
10739 if (do_color_align)
10740 addr = COLOR_ALIGN(addr, pgoff);
10741@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10742 addr = PAGE_ALIGN(addr);
10743
10744 vma = find_vma(mm, addr);
10745- if (task_size - len >= addr &&
10746- (!vma || addr + len <= vma->vm_start))
10747+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10748 return addr;
10749 }
10750
10751 info.flags = 0;
10752 info.length = len;
10753- info.low_limit = TASK_UNMAPPED_BASE;
10754+ info.low_limit = mm->mmap_base;
10755 info.high_limit = min(task_size, VA_EXCLUDE_START);
10756 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10757 info.align_offset = pgoff << PAGE_SHIFT;
10758+ info.threadstack_offset = offset;
10759 addr = vm_unmapped_area(&info);
10760
10761 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10762 VM_BUG_ON(addr != -ENOMEM);
10763 info.low_limit = VA_EXCLUDE_END;
10764+
10765+#ifdef CONFIG_PAX_RANDMMAP
10766+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10767+ info.low_limit += mm->delta_mmap;
10768+#endif
10769+
10770 info.high_limit = task_size;
10771 addr = vm_unmapped_area(&info);
10772 }
10773@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10774 unsigned long task_size = STACK_TOP32;
10775 unsigned long addr = addr0;
10776 int do_color_align;
10777+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10778 struct vm_unmapped_area_info info;
10779
10780 /* This should only ever run for 32-bit processes. */
10781@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10782 /* We do not accept a shared mapping if it would violate
10783 * cache aliasing constraints.
10784 */
10785- if ((flags & MAP_SHARED) &&
10786+ if ((filp || (flags & MAP_SHARED)) &&
10787 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10788 return -EINVAL;
10789 return addr;
10790@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10791 if (filp || (flags & MAP_SHARED))
10792 do_color_align = 1;
10793
10794+#ifdef CONFIG_PAX_RANDMMAP
10795+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10796+#endif
10797+
10798 /* requesting a specific address */
10799 if (addr) {
10800 if (do_color_align)
10801@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10802 addr = PAGE_ALIGN(addr);
10803
10804 vma = find_vma(mm, addr);
10805- if (task_size - len >= addr &&
10806- (!vma || addr + len <= vma->vm_start))
10807+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10808 return addr;
10809 }
10810
10811@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10812 info.high_limit = mm->mmap_base;
10813 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10814 info.align_offset = pgoff << PAGE_SHIFT;
10815+ info.threadstack_offset = offset;
10816 addr = vm_unmapped_area(&info);
10817
10818 /*
10819@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10820 VM_BUG_ON(addr != -ENOMEM);
10821 info.flags = 0;
10822 info.low_limit = TASK_UNMAPPED_BASE;
10823+
10824+#ifdef CONFIG_PAX_RANDMMAP
10825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10826+ info.low_limit += mm->delta_mmap;
10827+#endif
10828+
10829 info.high_limit = STACK_TOP32;
10830 addr = vm_unmapped_area(&info);
10831 }
10832@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10833 EXPORT_SYMBOL(get_fb_unmapped_area);
10834
10835 /* Essentially the same as PowerPC. */
10836-static unsigned long mmap_rnd(void)
10837+static unsigned long mmap_rnd(struct mm_struct *mm)
10838 {
10839 unsigned long rnd = 0UL;
10840
10841+#ifdef CONFIG_PAX_RANDMMAP
10842+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10843+#endif
10844+
10845 if (current->flags & PF_RANDOMIZE) {
10846 unsigned long val = get_random_int();
10847 if (test_thread_flag(TIF_32BIT))
10848@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10849
10850 void arch_pick_mmap_layout(struct mm_struct *mm)
10851 {
10852- unsigned long random_factor = mmap_rnd();
10853+ unsigned long random_factor = mmap_rnd(mm);
10854 unsigned long gap;
10855
10856 /*
10857@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10858 gap == RLIM_INFINITY ||
10859 sysctl_legacy_va_layout) {
10860 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10861+
10862+#ifdef CONFIG_PAX_RANDMMAP
10863+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10864+ mm->mmap_base += mm->delta_mmap;
10865+#endif
10866+
10867 mm->get_unmapped_area = arch_get_unmapped_area;
10868 } else {
10869 /* We know it's 32-bit */
10870@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10871 gap = (task_size / 6 * 5);
10872
10873 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10874+
10875+#ifdef CONFIG_PAX_RANDMMAP
10876+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10877+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10878+#endif
10879+
10880 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10881 }
10882 }
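
arch_pick_mmap_layout() above applies the execve-time deltas in opposite directions: the legacy bottom-up base moves up by delta_mmap, while the top-down base is pulled down by delta_mmap + delta_stack so it cannot collide with the randomised stack. A sketch with made-up delta values (the real ones come from mm->delta_mmap and mm->delta_stack, and the kernel additionally page-aligns the result):

#include <stdio.h>

#define TASK_SIZE		(1UL << 43)
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)

int main(void)
{
	/* illustrative deltas; in the kernel these are random */
	unsigned long delta_mmap  = 0x12345UL << 13;
	unsigned long delta_stack = 0x321UL << 13;
	unsigned long gap = 8UL << 20;		/* stack rlimit */

	printf("bottom-up base: %#lx\n",
	       TASK_UNMAPPED_BASE + delta_mmap);
	printf("top-down base:  %#lx\n",
	       (TASK_SIZE - gap) - (delta_mmap + delta_stack));
	return 0;
}
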
10883diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10884index bb00089..e0ea580 100644
10885--- a/arch/sparc/kernel/syscalls.S
10886+++ b/arch/sparc/kernel/syscalls.S
10887@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10888 #endif
10889 .align 32
10890 1: ldx [%g6 + TI_FLAGS], %l5
10891- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10892+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10893 be,pt %icc, rtrap
10894 nop
10895 call syscall_trace_leave
10896@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10897
10898 srl %i3, 0, %o3 ! IEU0
10899 srl %i2, 0, %o2 ! IEU0 Group
10900- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10901+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10902 bne,pn %icc, linux_syscall_trace32 ! CTI
10903 mov %i0, %l5 ! IEU1
10904 5: call %l7 ! CTI Group brk forced
10905@@ -218,7 +218,7 @@ linux_sparc_syscall:
10906
10907 mov %i3, %o3 ! IEU1
10908 mov %i4, %o4 ! IEU0 Group
10909- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10910+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10911 bne,pn %icc, linux_syscall_trace ! CTI Group
10912 mov %i0, %l5 ! IEU0
10913 2: call %l7 ! CTI Group brk forced
10914@@ -233,7 +233,7 @@ ret_sys_call:
10915
10916 cmp %o0, -ERESTART_RESTARTBLOCK
10917 bgeu,pn %xcc, 1f
10918- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10919+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10920 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10921
10922 2:
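
Folding the flag list into _TIF_WORK_SYSCALL keeps all four assembly entry paths in sync when TIF_GRSEC_SETXID is added, and the combined mask still fits the 13-bit signed immediate that andcc accepts. Spelled out below; values mirror thread_info_64.h above, with TIF_SYSCALL_TRACE = 0 taken from the full header rather than this hunk:

#include <stdio.h>

enum {
	TIF_SYSCALL_TRACE	= 0,
	TIF_GRSEC_SETXID	= 6,
	TIF_NOHZ		= 8,
	TIF_SECCOMP		= 9,
	TIF_SYSCALL_AUDIT	= 10,
	TIF_SYSCALL_TRACEPOINT	= 11,
};

#define _TIF_WORK_SYSCALL \
	((1 << TIF_SYSCALL_TRACE) | (1 << TIF_SECCOMP) | \
	 (1 << TIF_SYSCALL_AUDIT) | (1 << TIF_SYSCALL_TRACEPOINT) | \
	 (1 << TIF_NOHZ) | (1 << TIF_GRSEC_SETXID))

int main(void)
{
	/* 0xf41: comfortably inside a 13-bit signed immediate */
	printf("_TIF_WORK_SYSCALL = %#x\n", _TIF_WORK_SYSCALL);
	return 0;
}
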
10923diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10924index 6fd386c5..6907d81 100644
10925--- a/arch/sparc/kernel/traps_32.c
10926+++ b/arch/sparc/kernel/traps_32.c
10927@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10928 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10929 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10930
10931+extern void gr_handle_kernel_exploit(void);
10932+
10933 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10934 {
10935 static int die_counter;
10936@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10937 count++ < 30 &&
10938 (((unsigned long) rw) >= PAGE_OFFSET) &&
10939 !(((unsigned long) rw) & 0x7)) {
10940- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10941+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10942 (void *) rw->ins[7]);
10943 rw = (struct reg_window32 *)rw->ins[6];
10944 }
10945 }
10946 printk("Instruction DUMP:");
10947 instruction_dump ((unsigned long *) regs->pc);
10948- if(regs->psr & PSR_PS)
10949+ if(regs->psr & PSR_PS) {
10950+ gr_handle_kernel_exploit();
10951 do_exit(SIGKILL);
10952+ }
10953 do_exit(SIGSEGV);
10954 }
10955
10956diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10957index 981a769..d906eda 100644
10958--- a/arch/sparc/kernel/traps_64.c
10959+++ b/arch/sparc/kernel/traps_64.c
10960@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10961 i + 1,
10962 p->trapstack[i].tstate, p->trapstack[i].tpc,
10963 p->trapstack[i].tnpc, p->trapstack[i].tt);
10964- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10965+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10966 }
10967 }
10968
10969@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10970
10971 lvl -= 0x100;
10972 if (regs->tstate & TSTATE_PRIV) {
10973+
10974+#ifdef CONFIG_PAX_REFCOUNT
10975+ if (lvl == 6)
10976+ pax_report_refcount_overflow(regs);
10977+#endif
10978+
10979 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10980 die_if_kernel(buffer, regs);
10981 }
10982@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10983 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10984 {
10985 char buffer[32];
10986-
10987+
10988 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10989 0, lvl, SIGTRAP) == NOTIFY_STOP)
10990 return;
10991
10992+#ifdef CONFIG_PAX_REFCOUNT
10993+ if (lvl == 6)
10994+ pax_report_refcount_overflow(regs);
10995+#endif
10996+
10997 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10998
10999 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11000@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11001 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11002 printk("%s" "ERROR(%d): ",
11003 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11004- printk("TPC<%pS>\n", (void *) regs->tpc);
11005+ printk("TPC<%pA>\n", (void *) regs->tpc);
11006 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11007 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11008 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11009@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11010 smp_processor_id(),
11011 (type & 0x1) ? 'I' : 'D',
11012 regs->tpc);
11013- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11014+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11015 panic("Irrecoverable Cheetah+ parity error.");
11016 }
11017
11018@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11019 smp_processor_id(),
11020 (type & 0x1) ? 'I' : 'D',
11021 regs->tpc);
11022- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11023+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11024 }
11025
11026 struct sun4v_error_entry {
11027@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11028 /*0x38*/u64 reserved_5;
11029 };
11030
11031-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11032-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11033+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11034+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11035
11036 static const char *sun4v_err_type_to_str(u8 type)
11037 {
11038@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11039 }
11040
11041 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11042- int cpu, const char *pfx, atomic_t *ocnt)
11043+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11044 {
11045 u64 *raw_ptr = (u64 *) ent;
11046 u32 attrs;
11047@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11048
11049 show_regs(regs);
11050
11051- if ((cnt = atomic_read(ocnt)) != 0) {
11052- atomic_set(ocnt, 0);
11053+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11054+ atomic_set_unchecked(ocnt, 0);
11055 wmb();
11056 printk("%s: Queue overflowed %d times.\n",
11057 pfx, cnt);
11058@@ -2048,7 +2059,7 @@ out:
11059 */
11060 void sun4v_resum_overflow(struct pt_regs *regs)
11061 {
11062- atomic_inc(&sun4v_resum_oflow_cnt);
11063+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11064 }
11065
11066 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11067@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11068 /* XXX Actually even this can make not that much sense. Perhaps
11069 * XXX we should just pull the plug and panic directly from here?
11070 */
11071- atomic_inc(&sun4v_nonresum_oflow_cnt);
11072+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11073 }
11074
11075 static void sun4v_tlb_error(struct pt_regs *regs)
11076@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11077
11078 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11079 regs->tpc, tl);
11080- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11081+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11082 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11083- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11084+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11085 (void *) regs->u_regs[UREG_I7]);
11086 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11087 "pte[%lx] error[%lx]\n",
11088@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11089
11090 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11091 regs->tpc, tl);
11092- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11093+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11094 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11095- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11096+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11097 (void *) regs->u_regs[UREG_I7]);
11098 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11099 "pte[%lx] error[%lx]\n",
11100@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11101 fp = (unsigned long)sf->fp + STACK_BIAS;
11102 }
11103
11104- printk(" [%016lx] %pS\n", pc, (void *) pc);
11105+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11106 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11107 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11108 int index = tsk->curr_ret_stack;
11109 if (tsk->ret_stack && index >= graph) {
11110 pc = tsk->ret_stack[index - graph].ret;
11111- printk(" [%016lx] %pS\n", pc, (void *) pc);
11112+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11113 graph++;
11114 }
11115 }
11116@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11117 return (struct reg_window *) (fp + STACK_BIAS);
11118 }
11119
11120+extern void gr_handle_kernel_exploit(void);
11121+
11122 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11123 {
11124 static int die_counter;
11125@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11126 while (rw &&
11127 count++ < 30 &&
11128 kstack_valid(tp, (unsigned long) rw)) {
11129- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11130+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11131 (void *) rw->ins[7]);
11132
11133 rw = kernel_stack_up(rw);
11134@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11135 }
11136 user_instruction_dump ((unsigned int __user *) regs->tpc);
11137 }
11138- if (regs->tstate & TSTATE_PRIV)
11139+ if (regs->tstate & TSTATE_PRIV) {
11140+ gr_handle_kernel_exploit();
11141 do_exit(SIGKILL);
11142+ }
11143 do_exit(SIGSEGV);
11144 }
11145 EXPORT_SYMBOL(die_if_kernel);
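
The lvl == 6 checks tie the pieces together: tvs %icc, 6 in the hardened atomics raises software trap 0x106, bad_trap() subtracts 0x100, and a kernel-mode hit is routed to pax_report_refcount_overflow() instead of a generic bad-trap death. A compact, illustrative model of that dispatch:

#include <stdio.h>

static void pax_report_refcount_overflow_model(void)
{
	printf("PAX: refcount overflow detected\n");
}

static void bad_trap_model(long lvl, int kernel_mode)
{
	lvl -= 0x100;
	if (kernel_mode && lvl == 6)	/* the tvs %icc, 6 path */
		pax_report_refcount_overflow_model();
	else
		printf("Kernel bad sw trap %lx\n", lvl);
}

int main(void)
{
	bad_trap_model(0x106, 1);	/* PAX_REFCOUNT overflow */
	return 0;
}
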
11146diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11147index 62098a8..547ab2c 100644
11148--- a/arch/sparc/kernel/unaligned_64.c
11149+++ b/arch/sparc/kernel/unaligned_64.c
11150@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11151 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11152
11153 if (__ratelimit(&ratelimit)) {
11154- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11155+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11156 regs->tpc, (void *) regs->tpc);
11157 }
11158 }
11159diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11160index 3269b02..64f5231 100644
11161--- a/arch/sparc/lib/Makefile
11162+++ b/arch/sparc/lib/Makefile
11163@@ -2,7 +2,7 @@
11164 #
11165
11166 asflags-y := -ansi -DST_DIV0=0x02
11167-ccflags-y := -Werror
11168+#ccflags-y := -Werror
11169
11170 lib-$(CONFIG_SPARC32) += ashrdi3.o
11171 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11172diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11173index 05dac43..76f8ed4 100644
11174--- a/arch/sparc/lib/atomic_64.S
11175+++ b/arch/sparc/lib/atomic_64.S
11176@@ -15,11 +15,22 @@
11177 * a value and does the barriers.
11178 */
11179
11180-#define ATOMIC_OP(op) \
11181-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11182+#ifdef CONFIG_PAX_REFCOUNT
11183+#define __REFCOUNT_OP(op) op##cc
11184+#define __OVERFLOW_IOP tvs %icc, 6;
11185+#define __OVERFLOW_XOP tvs %xcc, 6;
11186+#else
11187+#define __REFCOUNT_OP(op) op
11188+#define __OVERFLOW_IOP
11189+#define __OVERFLOW_XOP
11190+#endif
11191+
11192+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11193+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11194 BACKOFF_SETUP(%o2); \
11195 1: lduw [%o1], %g1; \
11196- op %g1, %o0, %g7; \
11197+ asm_op %g1, %o0, %g7; \
11198+ post_op \
11199 cas [%o1], %g1, %g7; \
11200 cmp %g1, %g7; \
11201 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11202@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11203 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11204 ENDPROC(atomic_##op); \
11205
11206-#define ATOMIC_OP_RETURN(op) \
11207-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11208+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11209+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11210+
11211+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11212+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11213 BACKOFF_SETUP(%o2); \
11214 1: lduw [%o1], %g1; \
11215- op %g1, %o0, %g7; \
11216+ asm_op %g1, %o0, %g7; \
11217+ post_op \
11218 cas [%o1], %g1, %g7; \
11219 cmp %g1, %g7; \
11220 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11221@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11222 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11223 ENDPROC(atomic_##op##_return);
11224
11225+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11226+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11227+
11228 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11229
11230 ATOMIC_OPS(add)
11231@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11232
11233 #undef ATOMIC_OPS
11234 #undef ATOMIC_OP_RETURN
11235+#undef __ATOMIC_OP_RETURN
11236 #undef ATOMIC_OP
11237+#undef __ATOMIC_OP
11238
11239-#define ATOMIC64_OP(op) \
11240-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11241+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11242+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11243 BACKOFF_SETUP(%o2); \
11244 1: ldx [%o1], %g1; \
11245- op %g1, %o0, %g7; \
11246+ asm_op %g1, %o0, %g7; \
11247+ post_op \
11248 casx [%o1], %g1, %g7; \
11249 cmp %g1, %g7; \
11250 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11251@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11252 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11253 ENDPROC(atomic64_##op); \
11254
11255-#define ATOMIC64_OP_RETURN(op) \
11256-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11257+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11258+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11259+
11260+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11261+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11262 BACKOFF_SETUP(%o2); \
11263 1: ldx [%o1], %g1; \
11264- op %g1, %o0, %g7; \
11265+ asm_op %g1, %o0, %g7; \
11266+ post_op \
11267 casx [%o1], %g1, %g7; \
11268 cmp %g1, %g7; \
11269 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11270@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11271 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11272 ENDPROC(atomic64_##op##_return);
11273
11274+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11275+	__ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11276+
11277 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11278
11279 ATOMIC64_OPS(add)
11280@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11281
11282 #undef ATOMIC64_OPS
11283 #undef ATOMIC64_OP_RETURN
11284+#undef __ATOMIC64_OP_RETURN
11285 #undef ATOMIC64_OP
11286+#undef __ATOMIC64_OP
11287+#undef __OVERFLOW_XOP
11288+#undef __OVERFLOW_IOP
11289+#undef __REFCOUNT_OP
11290
11291 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11292 BACKOFF_SETUP(%o2)
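
Each ATOMIC_OP/ATOMIC64_OP invocation above now stamps out two entry points, the trapping default and the _unchecked twin, selected by the suffix argument. The same token-pasting shape in plain C, with a trivial body standing in for the cas/backoff loop; the types and names are local stubs, not the kernel's:

#include <stdio.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

#define __ATOMIC_OP(op, suffix)					\
static void atomic_##op##suffix(int i, atomic##suffix##_t *v)	\
{								\
	v->counter += i;	/* stand-in for the cas loop */	\
}

#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)

ATOMIC_OP(add)	/* emits atomic_add() and atomic_add_unchecked() */

int main(void)
{
	atomic_t a = { 1 };
	atomic_unchecked_t b = { 1 };

	atomic_add(2, &a);
	atomic_add_unchecked(2, &b);
	printf("%d %d\n", a.counter, b.counter);
	return 0;
}
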
11293diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11294index 1d649a9..fbc5bfc 100644
11295--- a/arch/sparc/lib/ksyms.c
11296+++ b/arch/sparc/lib/ksyms.c
11297@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11298 /* Atomic counter implementation. */
11299 #define ATOMIC_OP(op) \
11300 EXPORT_SYMBOL(atomic_##op); \
11301-EXPORT_SYMBOL(atomic64_##op);
11302+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11303+EXPORT_SYMBOL(atomic64_##op); \
11304+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11305
11306 #define ATOMIC_OP_RETURN(op) \
11307 EXPORT_SYMBOL(atomic_##op##_return); \
11308@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11309 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11310
11311 ATOMIC_OPS(add)
11312+EXPORT_SYMBOL(atomic_add_return_unchecked);
11313+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11314 ATOMIC_OPS(sub)
11315
11316 #undef ATOMIC_OPS
11317diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11318index 30c3ecc..736f015 100644
11319--- a/arch/sparc/mm/Makefile
11320+++ b/arch/sparc/mm/Makefile
11321@@ -2,7 +2,7 @@
11322 #
11323
11324 asflags-y := -ansi
11325-ccflags-y := -Werror
11326+#ccflags-y := -Werror
11327
11328 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11329 obj-y += fault_$(BITS).o
11330diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11331index 70d8171..274c6c0 100644
11332--- a/arch/sparc/mm/fault_32.c
11333+++ b/arch/sparc/mm/fault_32.c
11334@@ -21,6 +21,9 @@
11335 #include <linux/perf_event.h>
11336 #include <linux/interrupt.h>
11337 #include <linux/kdebug.h>
11338+#include <linux/slab.h>
11339+#include <linux/pagemap.h>
11340+#include <linux/compiler.h>
11341
11342 #include <asm/page.h>
11343 #include <asm/pgtable.h>
11344@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11345 return safe_compute_effective_address(regs, insn);
11346 }
11347
11348+#ifdef CONFIG_PAX_PAGEEXEC
11349+#ifdef CONFIG_PAX_DLRESOLVE
11350+static void pax_emuplt_close(struct vm_area_struct *vma)
11351+{
11352+ vma->vm_mm->call_dl_resolve = 0UL;
11353+}
11354+
11355+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11356+{
11357+ unsigned int *kaddr;
11358+
11359+ vmf->page = alloc_page(GFP_HIGHUSER);
11360+ if (!vmf->page)
11361+ return VM_FAULT_OOM;
11362+
11363+ kaddr = kmap(vmf->page);
11364+ memset(kaddr, 0, PAGE_SIZE);
11365+ kaddr[0] = 0x9DE3BFA8U; /* save */
11366+ flush_dcache_page(vmf->page);
11367+ kunmap(vmf->page);
11368+ return VM_FAULT_MAJOR;
11369+}
11370+
11371+static const struct vm_operations_struct pax_vm_ops = {
11372+ .close = pax_emuplt_close,
11373+ .fault = pax_emuplt_fault
11374+};
11375+
11376+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11377+{
11378+ int ret;
11379+
11380+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11381+ vma->vm_mm = current->mm;
11382+ vma->vm_start = addr;
11383+ vma->vm_end = addr + PAGE_SIZE;
11384+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11385+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11386+ vma->vm_ops = &pax_vm_ops;
11387+
11388+ ret = insert_vm_struct(current->mm, vma);
11389+ if (ret)
11390+ return ret;
11391+
11392+ ++current->mm->total_vm;
11393+ return 0;
11394+}
11395+#endif
11396+
11397+/*
11398+ * PaX: decide what to do with offenders (regs->pc = fault address)
11399+ *
11400+ * returns 1 when task should be killed
11401+ * 2 when patched PLT trampoline was detected
11402+ * 3 when unpatched PLT trampoline was detected
11403+ */
11404+static int pax_handle_fetch_fault(struct pt_regs *regs)
11405+{
11406+
11407+#ifdef CONFIG_PAX_EMUPLT
11408+ int err;
11409+
11410+ do { /* PaX: patched PLT emulation #1 */
11411+ unsigned int sethi1, sethi2, jmpl;
11412+
11413+ err = get_user(sethi1, (unsigned int *)regs->pc);
11414+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11415+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11416+
11417+ if (err)
11418+ break;
11419+
11420+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11421+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11422+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11423+ {
11424+ unsigned int addr;
11425+
11426+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11427+ addr = regs->u_regs[UREG_G1];
11428+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11429+ regs->pc = addr;
11430+ regs->npc = addr+4;
11431+ return 2;
11432+ }
11433+ } while (0);
11434+
11435+ do { /* PaX: patched PLT emulation #2 */
11436+ unsigned int ba;
11437+
11438+ err = get_user(ba, (unsigned int *)regs->pc);
11439+
11440+ if (err)
11441+ break;
11442+
11443+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11444+ unsigned int addr;
11445+
11446+ if ((ba & 0xFFC00000U) == 0x30800000U)
11447+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11448+ else
11449+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11450+ regs->pc = addr;
11451+ regs->npc = addr+4;
11452+ return 2;
11453+ }
11454+ } while (0);
11455+
11456+ do { /* PaX: patched PLT emulation #3 */
11457+ unsigned int sethi, bajmpl, nop;
11458+
11459+ err = get_user(sethi, (unsigned int *)regs->pc);
11460+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11461+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11462+
11463+ if (err)
11464+ break;
11465+
11466+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11467+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11468+ nop == 0x01000000U)
11469+ {
11470+ unsigned int addr;
11471+
11472+ addr = (sethi & 0x003FFFFFU) << 10;
11473+ regs->u_regs[UREG_G1] = addr;
11474+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11475+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11476+ else
11477+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11478+ regs->pc = addr;
11479+ regs->npc = addr+4;
11480+ return 2;
11481+ }
11482+ } while (0);
11483+
11484+ do { /* PaX: unpatched PLT emulation step 1 */
11485+ unsigned int sethi, ba, nop;
11486+
11487+ err = get_user(sethi, (unsigned int *)regs->pc);
11488+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11489+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11490+
11491+ if (err)
11492+ break;
11493+
11494+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11495+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11496+ nop == 0x01000000U)
11497+ {
11498+ unsigned int addr, save, call;
11499+
11500+ if ((ba & 0xFFC00000U) == 0x30800000U)
11501+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11502+ else
11503+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11504+
11505+ err = get_user(save, (unsigned int *)addr);
11506+ err |= get_user(call, (unsigned int *)(addr+4));
11507+ err |= get_user(nop, (unsigned int *)(addr+8));
11508+ if (err)
11509+ break;
11510+
11511+#ifdef CONFIG_PAX_DLRESOLVE
11512+ if (save == 0x9DE3BFA8U &&
11513+ (call & 0xC0000000U) == 0x40000000U &&
11514+ nop == 0x01000000U)
11515+ {
11516+ struct vm_area_struct *vma;
11517+ unsigned long call_dl_resolve;
11518+
11519+ down_read(&current->mm->mmap_sem);
11520+ call_dl_resolve = current->mm->call_dl_resolve;
11521+ up_read(&current->mm->mmap_sem);
11522+ if (likely(call_dl_resolve))
11523+ goto emulate;
11524+
11525+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11526+
11527+ down_write(&current->mm->mmap_sem);
11528+ if (current->mm->call_dl_resolve) {
11529+ call_dl_resolve = current->mm->call_dl_resolve;
11530+ up_write(&current->mm->mmap_sem);
11531+ if (vma)
11532+ kmem_cache_free(vm_area_cachep, vma);
11533+ goto emulate;
11534+ }
11535+
11536+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11537+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11538+ up_write(&current->mm->mmap_sem);
11539+ if (vma)
11540+ kmem_cache_free(vm_area_cachep, vma);
11541+ return 1;
11542+ }
11543+
11544+ if (pax_insert_vma(vma, call_dl_resolve)) {
11545+ up_write(&current->mm->mmap_sem);
11546+ kmem_cache_free(vm_area_cachep, vma);
11547+ return 1;
11548+ }
11549+
11550+ current->mm->call_dl_resolve = call_dl_resolve;
11551+ up_write(&current->mm->mmap_sem);
11552+
11553+emulate:
11554+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11555+ regs->pc = call_dl_resolve;
11556+ regs->npc = addr+4;
11557+ return 3;
11558+ }
11559+#endif
11560+
11561+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11562+ if ((save & 0xFFC00000U) == 0x05000000U &&
11563+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11564+ nop == 0x01000000U)
11565+ {
11566+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11567+ regs->u_regs[UREG_G2] = addr + 4;
11568+ addr = (save & 0x003FFFFFU) << 10;
11569+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11570+ regs->pc = addr;
11571+ regs->npc = addr+4;
11572+ return 3;
11573+ }
11574+ }
11575+ } while (0);
11576+
11577+ do { /* PaX: unpatched PLT emulation step 2 */
11578+ unsigned int save, call, nop;
11579+
11580+ err = get_user(save, (unsigned int *)(regs->pc-4));
11581+ err |= get_user(call, (unsigned int *)regs->pc);
11582+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11583+ if (err)
11584+ break;
11585+
11586+ if (save == 0x9DE3BFA8U &&
11587+ (call & 0xC0000000U) == 0x40000000U &&
11588+ nop == 0x01000000U)
11589+ {
11590+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11591+
11592+ regs->u_regs[UREG_RETPC] = regs->pc;
11593+ regs->pc = dl_resolve;
11594+ regs->npc = dl_resolve+4;
11595+ return 3;
11596+ }
11597+ } while (0);
11598+#endif
11599+
11600+ return 1;
11601+}
11602+
11603+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11604+{
11605+ unsigned long i;
11606+
11607+ printk(KERN_ERR "PAX: bytes at PC: ");
11608+ for (i = 0; i < 8; i++) {
11609+ unsigned int c;
11610+ if (get_user(c, (unsigned int *)pc+i))
11611+ printk(KERN_CONT "???????? ");
11612+ else
11613+ printk(KERN_CONT "%08x ", c);
11614+ }
11615+ printk("\n");
11616+}
11617+#endif
11618+
11619 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11620 int text_fault)
11621 {
11622@@ -226,6 +500,24 @@ good_area:
11623 if (!(vma->vm_flags & VM_WRITE))
11624 goto bad_area;
11625 } else {
11626+
11627+#ifdef CONFIG_PAX_PAGEEXEC
11628+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11629+ up_read(&mm->mmap_sem);
11630+ switch (pax_handle_fetch_fault(regs)) {
11631+
11632+#ifdef CONFIG_PAX_EMUPLT
11633+ case 2:
11634+ case 3:
11635+ return;
11636+#endif
11637+
11638+ }
11639+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11640+ do_group_exit(SIGKILL);
11641+ }
11642+#endif
11643+
11644 /* Allow reads even for write-only mappings */
11645 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11646 goto bad_area;
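
Every PLT-emulation branch in the fault_32.c code above widens a signed immediate with the same mask/xor/add idiom, e.g. (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U) for the 13-bit simm13 field of a jmpl. A stand-alone sketch of why that arithmetic sign-extends (illustration only, not patch code; the sample instruction words are fabricated jmpl %g1+simm13 encodings):

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend the simm13 field (bits 12:0): force the high bits,
     * flip the sign bit, then add it back. */
    static uint32_t sext_simm13(uint32_t insn)
    {
        return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
    }

    int main(void)
    {
        printf("%d\n", (int32_t)sext_simm13(0x81C06008U)); /* jmpl %g1+8 -> 8  */
        printf("%d\n", (int32_t)sext_simm13(0x81C07FFCU)); /* jmpl %g1-4 -> -4 */
        return 0;
    }

For a positive immediate the forced high bits cancel in the addition; for a negative one the flipped sign bit carries through, leaving the correct two's-complement value.
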
11647diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11648index 4798232..f76e3aa 100644
11649--- a/arch/sparc/mm/fault_64.c
11650+++ b/arch/sparc/mm/fault_64.c
11651@@ -22,6 +22,9 @@
11652 #include <linux/kdebug.h>
11653 #include <linux/percpu.h>
11654 #include <linux/context_tracking.h>
11655+#include <linux/slab.h>
11656+#include <linux/pagemap.h>
11657+#include <linux/compiler.h>
11658
11659 #include <asm/page.h>
11660 #include <asm/pgtable.h>
11661@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11662 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11663 regs->tpc);
11664 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11665- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11666+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11667 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11668 dump_stack();
11669 unhandled_fault(regs->tpc, current, regs);
11670@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11671 show_regs(regs);
11672 }
11673
11674+#ifdef CONFIG_PAX_PAGEEXEC
11675+#ifdef CONFIG_PAX_DLRESOLVE
11676+static void pax_emuplt_close(struct vm_area_struct *vma)
11677+{
11678+ vma->vm_mm->call_dl_resolve = 0UL;
11679+}
11680+
11681+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11682+{
11683+ unsigned int *kaddr;
11684+
11685+ vmf->page = alloc_page(GFP_HIGHUSER);
11686+ if (!vmf->page)
11687+ return VM_FAULT_OOM;
11688+
11689+ kaddr = kmap(vmf->page);
11690+ memset(kaddr, 0, PAGE_SIZE);
11691+ kaddr[0] = 0x9DE3BFA8U; /* save */
11692+ flush_dcache_page(vmf->page);
11693+ kunmap(vmf->page);
11694+ return VM_FAULT_MAJOR;
11695+}
11696+
11697+static const struct vm_operations_struct pax_vm_ops = {
11698+ .close = pax_emuplt_close,
11699+ .fault = pax_emuplt_fault
11700+};
11701+
11702+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11703+{
11704+ int ret;
11705+
11706+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11707+ vma->vm_mm = current->mm;
11708+ vma->vm_start = addr;
11709+ vma->vm_end = addr + PAGE_SIZE;
11710+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11711+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11712+ vma->vm_ops = &pax_vm_ops;
11713+
11714+ ret = insert_vm_struct(current->mm, vma);
11715+ if (ret)
11716+ return ret;
11717+
11718+ ++current->mm->total_vm;
11719+ return 0;
11720+}
11721+#endif
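
The synthetic page installed by pax_emuplt_fault() above starts with the constant 0x9DE3BFA8, the same word the emulation paths later match as a register-window save. A quick stand-alone decode (not patch code) confirming it is the SPARC instruction "save %sp, -88, %sp":

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t insn = 0x9DE3BFA8U;
        uint32_t rd   = (insn >> 25) & 0x1F;          /* 14 = %sp (%o6) */
        uint32_t op3  = (insn >> 19) & 0x3F;          /* 0x3C = save   */
        uint32_t rs1  = (insn >> 14) & 0x1F;          /* 14 = %sp      */
        int32_t  simm = ((int32_t)(insn << 19)) >> 19; /* -88          */

        printf("save r%u = r%u + %d (op3=%#x)\n", rd, rs1, simm, op3);
        return 0;
    }
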
11722+
11723+/*
11724+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11725+ *
11726+ * returns 1 when task should be killed
11727+ * 2 when patched PLT trampoline was detected
11728+ * 3 when unpatched PLT trampoline was detected
11729+ */
11730+static int pax_handle_fetch_fault(struct pt_regs *regs)
11731+{
11732+
11733+#ifdef CONFIG_PAX_EMUPLT
11734+ int err;
11735+
11736+ do { /* PaX: patched PLT emulation #1 */
11737+ unsigned int sethi1, sethi2, jmpl;
11738+
11739+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11740+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11741+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11742+
11743+ if (err)
11744+ break;
11745+
11746+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11747+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11748+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11749+ {
11750+ unsigned long addr;
11751+
11752+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11753+ addr = regs->u_regs[UREG_G1];
11754+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11755+
11756+ if (test_thread_flag(TIF_32BIT))
11757+ addr &= 0xFFFFFFFFUL;
11758+
11759+ regs->tpc = addr;
11760+ regs->tnpc = addr+4;
11761+ return 2;
11762+ }
11763+ } while (0);
11764+
11765+ do { /* PaX: patched PLT emulation #2 */
11766+ unsigned int ba;
11767+
11768+ err = get_user(ba, (unsigned int *)regs->tpc);
11769+
11770+ if (err)
11771+ break;
11772+
11773+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11774+ unsigned long addr;
11775+
11776+ if ((ba & 0xFFC00000U) == 0x30800000U)
11777+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11778+ else
11779+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11780+
11781+ if (test_thread_flag(TIF_32BIT))
11782+ addr &= 0xFFFFFFFFUL;
11783+
11784+ regs->tpc = addr;
11785+ regs->tnpc = addr+4;
11786+ return 2;
11787+ }
11788+ } while (0);
11789+
11790+ do { /* PaX: patched PLT emulation #3 */
11791+ unsigned int sethi, bajmpl, nop;
11792+
11793+ err = get_user(sethi, (unsigned int *)regs->tpc);
11794+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11795+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11796+
11797+ if (err)
11798+ break;
11799+
11800+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11801+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11802+ nop == 0x01000000U)
11803+ {
11804+ unsigned long addr;
11805+
11806+ addr = (sethi & 0x003FFFFFU) << 10;
11807+ regs->u_regs[UREG_G1] = addr;
11808+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11809+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11810+ else
11811+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11812+
11813+ if (test_thread_flag(TIF_32BIT))
11814+ addr &= 0xFFFFFFFFUL;
11815+
11816+ regs->tpc = addr;
11817+ regs->tnpc = addr+4;
11818+ return 2;
11819+ }
11820+ } while (0);
11821+
11822+ do { /* PaX: patched PLT emulation #4 */
11823+ unsigned int sethi, mov1, call, mov2;
11824+
11825+ err = get_user(sethi, (unsigned int *)regs->tpc);
11826+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11827+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11828+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11829+
11830+ if (err)
11831+ break;
11832+
11833+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11834+ mov1 == 0x8210000FU &&
11835+ (call & 0xC0000000U) == 0x40000000U &&
11836+ mov2 == 0x9E100001U)
11837+ {
11838+ unsigned long addr;
11839+
11840+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11841+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11842+
11843+ if (test_thread_flag(TIF_32BIT))
11844+ addr &= 0xFFFFFFFFUL;
11845+
11846+ regs->tpc = addr;
11847+ regs->tnpc = addr+4;
11848+ return 2;
11849+ }
11850+ } while (0);
11851+
11852+ do { /* PaX: patched PLT emulation #5 */
11853+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11854+
11855+ err = get_user(sethi, (unsigned int *)regs->tpc);
11856+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11857+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11858+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11859+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11860+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11861+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11862+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11863+
11864+ if (err)
11865+ break;
11866+
11867+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11868+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11869+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11870+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11871+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11872+ sllx == 0x83287020U &&
11873+ jmpl == 0x81C04005U &&
11874+ nop == 0x01000000U)
11875+ {
11876+ unsigned long addr;
11877+
11878+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11879+ regs->u_regs[UREG_G1] <<= 32;
11880+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11881+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11882+ regs->tpc = addr;
11883+ regs->tnpc = addr+4;
11884+ return 2;
11885+ }
11886+ } while (0);
11887+
11888+ do { /* PaX: patched PLT emulation #6 */
11889+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11890+
11891+ err = get_user(sethi, (unsigned int *)regs->tpc);
11892+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11893+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11894+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11895+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11896+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11897+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11898+
11899+ if (err)
11900+ break;
11901+
11902+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11903+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11904+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11905+ sllx == 0x83287020U &&
11906+ (or & 0xFFFFE000U) == 0x8A116000U &&
11907+ jmpl == 0x81C04005U &&
11908+ nop == 0x01000000U)
11909+ {
11910+ unsigned long addr;
11911+
11912+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11913+ regs->u_regs[UREG_G1] <<= 32;
11914+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11915+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11916+ regs->tpc = addr;
11917+ regs->tnpc = addr+4;
11918+ return 2;
11919+ }
11920+ } while (0);
11921+
11922+ do { /* PaX: unpatched PLT emulation step 1 */
11923+ unsigned int sethi, ba, nop;
11924+
11925+ err = get_user(sethi, (unsigned int *)regs->tpc);
11926+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11927+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11928+
11929+ if (err)
11930+ break;
11931+
11932+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11933+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11934+ nop == 0x01000000U)
11935+ {
11936+ unsigned long addr;
11937+ unsigned int save, call;
11938+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11939+
11940+ if ((ba & 0xFFC00000U) == 0x30800000U)
11941+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11942+ else
11943+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11944+
11945+ if (test_thread_flag(TIF_32BIT))
11946+ addr &= 0xFFFFFFFFUL;
11947+
11948+ err = get_user(save, (unsigned int *)addr);
11949+ err |= get_user(call, (unsigned int *)(addr+4));
11950+ err |= get_user(nop, (unsigned int *)(addr+8));
11951+ if (err)
11952+ break;
11953+
11954+#ifdef CONFIG_PAX_DLRESOLVE
11955+ if (save == 0x9DE3BFA8U &&
11956+ (call & 0xC0000000U) == 0x40000000U &&
11957+ nop == 0x01000000U)
11958+ {
11959+ struct vm_area_struct *vma;
11960+ unsigned long call_dl_resolve;
11961+
11962+ down_read(&current->mm->mmap_sem);
11963+ call_dl_resolve = current->mm->call_dl_resolve;
11964+ up_read(&current->mm->mmap_sem);
11965+ if (likely(call_dl_resolve))
11966+ goto emulate;
11967+
11968+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11969+
11970+ down_write(&current->mm->mmap_sem);
11971+ if (current->mm->call_dl_resolve) {
11972+ call_dl_resolve = current->mm->call_dl_resolve;
11973+ up_write(&current->mm->mmap_sem);
11974+ if (vma)
11975+ kmem_cache_free(vm_area_cachep, vma);
11976+ goto emulate;
11977+ }
11978+
11979+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11980+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11981+ up_write(&current->mm->mmap_sem);
11982+ if (vma)
11983+ kmem_cache_free(vm_area_cachep, vma);
11984+ return 1;
11985+ }
11986+
11987+ if (pax_insert_vma(vma, call_dl_resolve)) {
11988+ up_write(&current->mm->mmap_sem);
11989+ kmem_cache_free(vm_area_cachep, vma);
11990+ return 1;
11991+ }
11992+
11993+ current->mm->call_dl_resolve = call_dl_resolve;
11994+ up_write(&current->mm->mmap_sem);
11995+
11996+emulate:
11997+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11998+ regs->tpc = call_dl_resolve;
11999+ regs->tnpc = addr+4;
12000+ return 3;
12001+ }
12002+#endif
12003+
12004+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12005+ if ((save & 0xFFC00000U) == 0x05000000U &&
12006+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12007+ nop == 0x01000000U)
12008+ {
12009+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12010+ regs->u_regs[UREG_G2] = addr + 4;
12011+ addr = (save & 0x003FFFFFU) << 10;
12012+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12013+
12014+ if (test_thread_flag(TIF_32BIT))
12015+ addr &= 0xFFFFFFFFUL;
12016+
12017+ regs->tpc = addr;
12018+ regs->tnpc = addr+4;
12019+ return 3;
12020+ }
12021+
12022+ /* PaX: 64-bit PLT stub */
12023+ err = get_user(sethi1, (unsigned int *)addr);
12024+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12025+ err |= get_user(or1, (unsigned int *)(addr+8));
12026+ err |= get_user(or2, (unsigned int *)(addr+12));
12027+ err |= get_user(sllx, (unsigned int *)(addr+16));
12028+ err |= get_user(add, (unsigned int *)(addr+20));
12029+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12030+ err |= get_user(nop, (unsigned int *)(addr+28));
12031+ if (err)
12032+ break;
12033+
12034+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12035+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12036+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12037+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12038+ sllx == 0x89293020U &&
12039+ add == 0x8A010005U &&
12040+ jmpl == 0x89C14000U &&
12041+ nop == 0x01000000U)
12042+ {
12043+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12044+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12045+ regs->u_regs[UREG_G4] <<= 32;
12046+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12047+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12048+ regs->u_regs[UREG_G4] = addr + 24;
12049+ addr = regs->u_regs[UREG_G5];
12050+ regs->tpc = addr;
12051+ regs->tnpc = addr+4;
12052+ return 3;
12053+ }
12054+ }
12055+ } while (0);
12056+
12057+#ifdef CONFIG_PAX_DLRESOLVE
12058+ do { /* PaX: unpatched PLT emulation step 2 */
12059+ unsigned int save, call, nop;
12060+
12061+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12062+ err |= get_user(call, (unsigned int *)regs->tpc);
12063+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12064+ if (err)
12065+ break;
12066+
12067+ if (save == 0x9DE3BFA8U &&
12068+ (call & 0xC0000000U) == 0x40000000U &&
12069+ nop == 0x01000000U)
12070+ {
12071+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12072+
12073+ if (test_thread_flag(TIF_32BIT))
12074+ dl_resolve &= 0xFFFFFFFFUL;
12075+
12076+ regs->u_regs[UREG_RETPC] = regs->tpc;
12077+ regs->tpc = dl_resolve;
12078+ regs->tnpc = dl_resolve+4;
12079+ return 3;
12080+ }
12081+ } while (0);
12082+#endif
12083+
12084+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12085+ unsigned int sethi, ba, nop;
12086+
12087+ err = get_user(sethi, (unsigned int *)regs->tpc);
12088+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12089+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12090+
12091+ if (err)
12092+ break;
12093+
12094+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12095+ (ba & 0xFFF00000U) == 0x30600000U &&
12096+ nop == 0x01000000U)
12097+ {
12098+ unsigned long addr;
12099+
12100+ addr = (sethi & 0x003FFFFFU) << 10;
12101+ regs->u_regs[UREG_G1] = addr;
12102+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12103+
12104+ if (test_thread_flag(TIF_32BIT))
12105+ addr &= 0xFFFFFFFFUL;
12106+
12107+ regs->tpc = addr;
12108+ regs->tnpc = addr+4;
12109+ return 2;
12110+ }
12111+ } while (0);
12112+
12113+#endif
12114+
12115+ return 1;
12116+}
12117+
12118+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12119+{
12120+ unsigned long i;
12121+
12122+ printk(KERN_ERR "PAX: bytes at PC: ");
12123+ for (i = 0; i < 8; i++) {
12124+ unsigned int c;
12125+ if (get_user(c, (unsigned int *)pc+i))
12126+ printk(KERN_CONT "???????? ");
12127+ else
12128+ printk(KERN_CONT "%08x ", c);
12129+ }
12130+ printk("\n");
12131+}
12132+#endif
12133+
12134 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12135 {
12136 enum ctx_state prev_state = exception_enter();
12137@@ -353,6 +816,29 @@ retry:
12138 if (!vma)
12139 goto bad_area;
12140
12141+#ifdef CONFIG_PAX_PAGEEXEC
12142+ /* PaX: detect ITLB misses on non-exec pages */
12143+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12144+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12145+ {
12146+ if (address != regs->tpc)
12147+ goto good_area;
12148+
12149+ up_read(&mm->mmap_sem);
12150+ switch (pax_handle_fetch_fault(regs)) {
12151+
12152+#ifdef CONFIG_PAX_EMUPLT
12153+ case 2:
12154+ case 3:
12155+ return;
12156+#endif
12157+
12158+ }
12159+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12160+ do_group_exit(SIGKILL);
12161+ }
12162+#endif
12163+
12164 /* Pure DTLB misses do not tell us whether the fault causing
12165 * load/store/atomic was a write or not, it only says that there
12166 * was no match. So in such a case we (carefully) read the
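
Patched PLT emulations #5 and #6 above rebuild a full 64-bit target from two sethi/or pairs joined by an sllx. A condensed model of that register arithmetic, as a sketch only (the function name and immediate values are made up; the masks are the ones matched above):

    #include <stdio.h>
    #include <stdint.h>

    /* Model of emulation #5/#6:
     *   %g1 = (imm22_hi << 10 | imm10_hi) << 32    (sethi + or + sllx 32)
     *   %g5 =  imm22_lo << 10 | imm10_lo           (sethi + or)
     *   target = %g1 + %g5                         (jmpl %g1+%g5)        */
    static uint64_t plt64_target(uint32_t imm22_hi, uint32_t imm10_hi,
                                 uint32_t imm22_lo, uint32_t imm10_lo)
    {
        uint64_t g1 = ((uint64_t)((imm22_hi << 10) | imm10_hi)) << 32;
        uint64_t g5 = (imm22_lo << 10) | imm10_lo;
        return g1 + g5;
    }

    int main(void)
    {
        /* fabricated immediates encoding the address 0x100402340 */
        printf("%#llx\n", (unsigned long long)
               plt64_target(0x0, 0x1, 0x1008, 0x340));
        return 0;
    }
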
12167diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12168index d329537..2c3746a 100644
12169--- a/arch/sparc/mm/hugetlbpage.c
12170+++ b/arch/sparc/mm/hugetlbpage.c
12171@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12172 unsigned long addr,
12173 unsigned long len,
12174 unsigned long pgoff,
12175- unsigned long flags)
12176+ unsigned long flags,
12177+ unsigned long offset)
12178 {
12179+ struct mm_struct *mm = current->mm;
12180 unsigned long task_size = TASK_SIZE;
12181 struct vm_unmapped_area_info info;
12182
12183@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12184
12185 info.flags = 0;
12186 info.length = len;
12187- info.low_limit = TASK_UNMAPPED_BASE;
12188+ info.low_limit = mm->mmap_base;
12189 info.high_limit = min(task_size, VA_EXCLUDE_START);
12190 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12191 info.align_offset = 0;
12192+ info.threadstack_offset = offset;
12193 addr = vm_unmapped_area(&info);
12194
12195 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12196 VM_BUG_ON(addr != -ENOMEM);
12197 info.low_limit = VA_EXCLUDE_END;
12198+
12199+#ifdef CONFIG_PAX_RANDMMAP
12200+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12201+ info.low_limit += mm->delta_mmap;
12202+#endif
12203+
12204 info.high_limit = task_size;
12205 addr = vm_unmapped_area(&info);
12206 }
12207@@ -55,7 +64,8 @@ static unsigned long
12208 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12209 const unsigned long len,
12210 const unsigned long pgoff,
12211- const unsigned long flags)
12212+ const unsigned long flags,
12213+ const unsigned long offset)
12214 {
12215 struct mm_struct *mm = current->mm;
12216 unsigned long addr = addr0;
12217@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12218 info.high_limit = mm->mmap_base;
12219 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12220 info.align_offset = 0;
12221+ info.threadstack_offset = offset;
12222 addr = vm_unmapped_area(&info);
12223
12224 /*
12225@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12226 VM_BUG_ON(addr != -ENOMEM);
12227 info.flags = 0;
12228 info.low_limit = TASK_UNMAPPED_BASE;
12229+
12230+#ifdef CONFIG_PAX_RANDMMAP
12231+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12232+ info.low_limit += mm->delta_mmap;
12233+#endif
12234+
12235 info.high_limit = STACK_TOP32;
12236 addr = vm_unmapped_area(&info);
12237 }
12238@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12239 struct mm_struct *mm = current->mm;
12240 struct vm_area_struct *vma;
12241 unsigned long task_size = TASK_SIZE;
12242+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12243
12244 if (test_thread_flag(TIF_32BIT))
12245 task_size = STACK_TOP32;
12246@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12247 return addr;
12248 }
12249
12250+#ifdef CONFIG_PAX_RANDMMAP
12251+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12252+#endif
12253+
12254 if (addr) {
12255 addr = ALIGN(addr, HPAGE_SIZE);
12256 vma = find_vma(mm, addr);
12257- if (task_size - len >= addr &&
12258- (!vma || addr + len <= vma->vm_start))
12259+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12260 return addr;
12261 }
12262 if (mm->get_unmapped_area == arch_get_unmapped_area)
12263 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12264- pgoff, flags);
12265+ pgoff, flags, offset);
12266 else
12267 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12268- pgoff, flags);
12269+ pgoff, flags, offset);
12270 }
12271
12272 pte_t *huge_pte_alloc(struct mm_struct *mm,
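
The PAX_RANDMMAP hunk above uses a deliberately brace-less guard: when the #ifdef is compiled in, the pre-existing "if (addr)" becomes the body of the new condition, so caller-supplied address hints are honoured only when randomization is off. A stand-alone demo of the nesting (it mirrors the idiom only, not the kernel code; names are made up):

    #include <stdio.h>

    #define RANDMMAP           /* comment out to model the config being off */

    static int randomized = 1;

    int main(void)
    {
        unsigned long addr = 0x40000000UL;  /* caller's hint */

    #ifdef RANDMMAP
        if (!randomized)
    #endif

        if (addr)
            printf("honouring hint %#lx\n", addr);

        printf("continuing with full search\n");
        return 0;
    }
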
12273diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12274index 3ea267c..93f0659 100644
12275--- a/arch/sparc/mm/init_64.c
12276+++ b/arch/sparc/mm/init_64.c
12277@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12278 int num_kernel_image_mappings;
12279
12280 #ifdef CONFIG_DEBUG_DCFLUSH
12281-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12282+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12283 #ifdef CONFIG_SMP
12284-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12285+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12286 #endif
12287 #endif
12288
12289@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12290 {
12291 BUG_ON(tlb_type == hypervisor);
12292 #ifdef CONFIG_DEBUG_DCFLUSH
12293- atomic_inc(&dcpage_flushes);
12294+ atomic_inc_unchecked(&dcpage_flushes);
12295 #endif
12296
12297 #ifdef DCACHE_ALIASING_POSSIBLE
12298@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12299
12300 #ifdef CONFIG_DEBUG_DCFLUSH
12301 seq_printf(m, "DCPageFlushes\t: %d\n",
12302- atomic_read(&dcpage_flushes));
12303+ atomic_read_unchecked(&dcpage_flushes));
12304 #ifdef CONFIG_SMP
12305 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12306- atomic_read(&dcpage_flushes_xcall));
12307+ atomic_read_unchecked(&dcpage_flushes_xcall));
12308 #endif /* CONFIG_SMP */
12309 #endif /* CONFIG_DEBUG_DCFLUSH */
12310 }
12311diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12312index 7cca418..53fc030 100644
12313--- a/arch/tile/Kconfig
12314+++ b/arch/tile/Kconfig
12315@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12316
12317 config KEXEC
12318 bool "kexec system call"
12319+ depends on !GRKERNSEC_KMEM
12320 ---help---
12321 kexec is a system call that implements the ability to shutdown your
12322 current kernel, and to start another kernel. It is like a reboot
12323diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12324index 7b11c5f..755a026 100644
12325--- a/arch/tile/include/asm/atomic_64.h
12326+++ b/arch/tile/include/asm/atomic_64.h
12327@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12328
12329 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12330
12331+#define atomic64_read_unchecked(v) atomic64_read(v)
12332+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12333+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12334+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12335+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12336+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12337+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12338+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12339+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12340+
12341 /* Define this to indicate that cmpxchg is an efficient operation. */
12342 #define __HAVE_ARCH_CMPXCHG
12343
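
The block of *_unchecked defines above is the tile side of a tree-wide split: counters that may legitimately wrap (pure statistics, as in the sparc init_64.c hunk earlier) use the _unchecked API, while ordinary refcounts stay on the checked one. On architectures that do not implement the overflow checks, each variant simply aliases the plain atomic, as here. A minimal single-threaded model of the intent (an assumption-laden sketch, not the kernel types; real versions are atomic and trap rather than print):

    #include <limits.h>
    #include <stdio.h>

    typedef struct { unsigned long counter; } atomic64_t;
    typedef struct { unsigned long counter; } atomic64_unchecked_t;

    /* checked flavour: stand-in for trap-on-overflow behaviour */
    static void atomic64_inc(atomic64_t *v)
    {
        if (v->counter == ULONG_MAX) {
            fprintf(stderr, "refcount overflow\n");  /* real code traps */
            return;
        }
        v->counter++;
    }

    /* unchecked flavour: statistics counter, wrapping is acceptable */
    static void atomic64_inc_unchecked(atomic64_unchecked_t *v)
    {
        v->counter++;               /* unsigned wrap is well defined */
    }

    int main(void)
    {
        atomic64_t ref = { ULONG_MAX };
        atomic64_unchecked_t stat = { ULONG_MAX };

        atomic64_inc(&ref);             /* refused */
        atomic64_inc_unchecked(&stat);  /* wraps to 0 */
        printf("%lu %lu\n", ref.counter, stat.counter);
        return 0;
    }
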
12344diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12345index 6160761..00cac88 100644
12346--- a/arch/tile/include/asm/cache.h
12347+++ b/arch/tile/include/asm/cache.h
12348@@ -15,11 +15,12 @@
12349 #ifndef _ASM_TILE_CACHE_H
12350 #define _ASM_TILE_CACHE_H
12351
12352+#include <linux/const.h>
12353 #include <arch/chip.h>
12354
12355 /* bytes per L1 data cache line */
12356 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12357-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12358+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12359
12360 /* bytes per L2 cache line */
12361 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
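
The recurring cache.h change (here and in the um and unicore32 hunks below) swaps a plain "1 << L1_CACHE_SHIFT" for "_AC(1,UL) << L1_CACHE_SHIFT", so L1_CACHE_BYTES is an unsigned long in C code but stays a bare constant in assembly, where the UL suffix would not parse. _AC comes from include/uapi/linux/const.h; a stand-alone demonstration of the C-side expansion:

    #include <stdio.h>

    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)   /* under __ASSEMBLY__ it is just X */

    #define L1_CACHE_SHIFT 5
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* the constant is unsigned long, so size arithmetic using it
         * cannot go signed or negative on 64-bit */
        printf("%zu %lu\n", sizeof(L1_CACHE_BYTES), L1_CACHE_BYTES);
        return 0;
    }
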
12362diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12363index b6cde32..c0cb736 100644
12364--- a/arch/tile/include/asm/uaccess.h
12365+++ b/arch/tile/include/asm/uaccess.h
12366@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12367 const void __user *from,
12368 unsigned long n)
12369 {
12370- int sz = __compiletime_object_size(to);
12371+ size_t sz = __compiletime_object_size(to);
12372
12373- if (likely(sz == -1 || sz >= n))
12374+ if (likely(sz == (size_t)-1 || sz >= n))
12375 n = _copy_from_user(to, from, n);
12376 else
12377 copy_from_user_overflow();
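
The uaccess.h change above widens sz from int to size_t: __compiletime_object_size() is built on __builtin_object_size(), which reports an unknown size as (size_t)-1, so the comparison is kept in the builtin's own type instead of relying on int truncation. A stand-alone check, assuming GCC or clang (the helper name is made up):

    #include <stdio.h>
    #include <stddef.h>

    __attribute__((noinline))
    static size_t objsize(const void *p)
    {
        return __builtin_object_size(p, 0);  /* unknown here: (size_t)-1 */
    }

    int main(void)
    {
        char buf[16];

        printf("%zu\n", __builtin_object_size(buf, 0)); /* 16 */
        printf("%zu\n", objsize(buf)); /* 18446744073709551615 on LP64 */
        return 0;
    }
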
12378diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12379index 3270e00..a77236e 100644
12380--- a/arch/tile/mm/hugetlbpage.c
12381+++ b/arch/tile/mm/hugetlbpage.c
12382@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12383 info.high_limit = TASK_SIZE;
12384 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12385 info.align_offset = 0;
12386+ info.threadstack_offset = 0;
12387 return vm_unmapped_area(&info);
12388 }
12389
12390@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12391 info.high_limit = current->mm->mmap_base;
12392 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12393 info.align_offset = 0;
12394+ info.threadstack_offset = 0;
12395 addr = vm_unmapped_area(&info);
12396
12397 /*
12398diff --git a/arch/um/Makefile b/arch/um/Makefile
12399index e4b1a96..16162f8 100644
12400--- a/arch/um/Makefile
12401+++ b/arch/um/Makefile
12402@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12403 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12404 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12405
12406+ifdef CONSTIFY_PLUGIN
12407+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12408+endif
12409+
12410 #This will adjust *FLAGS accordingly to the platform.
12411 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12412
12413diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12414index 19e1bdd..3665b77 100644
12415--- a/arch/um/include/asm/cache.h
12416+++ b/arch/um/include/asm/cache.h
12417@@ -1,6 +1,7 @@
12418 #ifndef __UM_CACHE_H
12419 #define __UM_CACHE_H
12420
12421+#include <linux/const.h>
12422
12423 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12424 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12425@@ -12,6 +13,6 @@
12426 # define L1_CACHE_SHIFT 5
12427 #endif
12428
12429-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12430+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12431
12432 #endif
12433diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12434index 2e0a6b1..a64d0f5 100644
12435--- a/arch/um/include/asm/kmap_types.h
12436+++ b/arch/um/include/asm/kmap_types.h
12437@@ -8,6 +8,6 @@
12438
12439 /* No more #include "asm/arch/kmap_types.h" ! */
12440
12441-#define KM_TYPE_NR 14
12442+#define KM_TYPE_NR 15
12443
12444 #endif
12445diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12446index 71c5d13..4c7b9f1 100644
12447--- a/arch/um/include/asm/page.h
12448+++ b/arch/um/include/asm/page.h
12449@@ -14,6 +14,9 @@
12450 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12451 #define PAGE_MASK (~(PAGE_SIZE-1))
12452
12453+#define ktla_ktva(addr) (addr)
12454+#define ktva_ktla(addr) (addr)
12455+
12456 #ifndef __ASSEMBLY__
12457
12458 struct page;
12459diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12460index 0032f92..cd151e0 100644
12461--- a/arch/um/include/asm/pgtable-3level.h
12462+++ b/arch/um/include/asm/pgtable-3level.h
12463@@ -58,6 +58,7 @@
12464 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12465 #define pud_populate(mm, pud, pmd) \
12466 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12467+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12468
12469 #ifdef CONFIG_64BIT
12470 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12471diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12472index f17bca8..48adb87 100644
12473--- a/arch/um/kernel/process.c
12474+++ b/arch/um/kernel/process.c
12475@@ -356,22 +356,6 @@ int singlestepping(void * t)
12476 return 2;
12477 }
12478
12479-/*
12480- * Only x86 and x86_64 have an arch_align_stack().
12481- * All other arches have "#define arch_align_stack(x) (x)"
12482- * in their asm/exec.h
12483- * As this is included in UML from asm-um/system-generic.h,
12484- * we can use it to behave as the subarch does.
12485- */
12486-#ifndef arch_align_stack
12487-unsigned long arch_align_stack(unsigned long sp)
12488-{
12489- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12490- sp -= get_random_int() % 8192;
12491- return sp & ~0xf;
12492-}
12493-#endif
12494-
12495 unsigned long get_wchan(struct task_struct *p)
12496 {
12497 unsigned long stack_page, sp, ip;
12498diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12499index ad8f795..2c7eec6 100644
12500--- a/arch/unicore32/include/asm/cache.h
12501+++ b/arch/unicore32/include/asm/cache.h
12502@@ -12,8 +12,10 @@
12503 #ifndef __UNICORE_CACHE_H__
12504 #define __UNICORE_CACHE_H__
12505
12506-#define L1_CACHE_SHIFT (5)
12507-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12508+#include <linux/const.h>
12509+
12510+#define L1_CACHE_SHIFT 5
12511+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12512
12513 /*
12514 * Memory returned by kmalloc() may be used for DMA, so we must make
12515diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12516index 0dc9d01..98df103 100644
12517--- a/arch/x86/Kconfig
12518+++ b/arch/x86/Kconfig
12519@@ -130,7 +130,7 @@ config X86
12520 select RTC_LIB
12521 select HAVE_DEBUG_STACKOVERFLOW
12522 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12523- select HAVE_CC_STACKPROTECTOR
12524+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12525 select GENERIC_CPU_AUTOPROBE
12526 select HAVE_ARCH_AUDITSYSCALL
12527 select ARCH_SUPPORTS_ATOMIC_RMW
12528@@ -263,7 +263,7 @@ config X86_HT
12529
12530 config X86_32_LAZY_GS
12531 def_bool y
12532- depends on X86_32 && !CC_STACKPROTECTOR
12533+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12534
12535 config ARCH_HWEIGHT_CFLAGS
12536 string
12537@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
12538
12539 menuconfig HYPERVISOR_GUEST
12540 bool "Linux guest support"
12541+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12542 ---help---
12543 Say Y here to enable options for running Linux under various hyper-
12544 visors. This option enables basic hypervisor detection and platform
12545@@ -978,6 +979,7 @@ config VM86
12546
12547 config X86_16BIT
12548 bool "Enable support for 16-bit segments" if EXPERT
12549+ depends on !GRKERNSEC
12550 default y
12551 ---help---
12552 This option is required by programs like Wine to run 16-bit
12553@@ -1151,6 +1153,7 @@ choice
12554
12555 config NOHIGHMEM
12556 bool "off"
12557+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12558 ---help---
12559 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12560 However, the address space of 32-bit x86 processors is only 4
12561@@ -1187,6 +1190,7 @@ config NOHIGHMEM
12562
12563 config HIGHMEM4G
12564 bool "4GB"
12565+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12566 ---help---
12567 Select this if you have a 32-bit processor and between 1 and 4
12568 gigabytes of physical RAM.
12569@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
12570 hex
12571 default 0xB0000000 if VMSPLIT_3G_OPT
12572 default 0x80000000 if VMSPLIT_2G
12573- default 0x78000000 if VMSPLIT_2G_OPT
12574+ default 0x70000000 if VMSPLIT_2G_OPT
12575 default 0x40000000 if VMSPLIT_1G
12576 default 0xC0000000
12577 depends on X86_32
12578@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
12579
12580 config KEXEC
12581 bool "kexec system call"
12582+ depends on !GRKERNSEC_KMEM
12583 ---help---
12584 kexec is a system call that implements the ability to shutdown your
12585 current kernel, and to start another kernel. It is like a reboot
12586@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
12587
12588 config PHYSICAL_ALIGN
12589 hex "Alignment value to which kernel should be aligned"
12590- default "0x200000"
12591+ default "0x1000000"
12592+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12593+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12594 range 0x2000 0x1000000 if X86_32
12595 range 0x200000 0x1000000 if X86_64
12596 ---help---
12597@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
12598 def_bool n
12599 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12600 depends on X86_32 || IA32_EMULATION
12601+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12602 ---help---
12603 Certain buggy versions of glibc will crash if they are
12604 presented with a 32-bit vDSO that is not mapped at the address
12605diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12606index 6983314..54ad7e8 100644
12607--- a/arch/x86/Kconfig.cpu
12608+++ b/arch/x86/Kconfig.cpu
12609@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12610
12611 config X86_F00F_BUG
12612 def_bool y
12613- depends on M586MMX || M586TSC || M586 || M486
12614+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12615
12616 config X86_INVD_BUG
12617 def_bool y
12618@@ -327,7 +327,7 @@ config X86_INVD_BUG
12619
12620 config X86_ALIGNMENT_16
12621 def_bool y
12622- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12623+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12624
12625 config X86_INTEL_USERCOPY
12626 def_bool y
12627@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12628 # generates cmov.
12629 config X86_CMOV
12630 def_bool y
12631- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12632+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12633
12634 config X86_MINIMUM_CPU_FAMILY
12635 int
12636diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12637index 61bd2ad..50b625d 100644
12638--- a/arch/x86/Kconfig.debug
12639+++ b/arch/x86/Kconfig.debug
12640@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12641 config DEBUG_RODATA
12642 bool "Write protect kernel read-only data structures"
12643 default y
12644- depends on DEBUG_KERNEL
12645+ depends on DEBUG_KERNEL && BROKEN
12646 ---help---
12647 Mark the kernel read-only data as write-protected in the pagetables,
12648 in order to catch accidental (and incorrect) writes to such const
12649@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12650
12651 config DEBUG_SET_MODULE_RONX
12652 bool "Set loadable kernel module data as NX and text as RO"
12653- depends on MODULES
12654+ depends on MODULES && BROKEN
12655 ---help---
12656 This option helps catch unintended modifications to loadable
12657 kernel module's text and read-only data. It also prevents execution
12658diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12659index 920e616..ac3d4df 100644
12660--- a/arch/x86/Makefile
12661+++ b/arch/x86/Makefile
12662@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12663 # CPU-specific tuning. Anything which can be shared with UML should go here.
12664 include $(srctree)/arch/x86/Makefile_32.cpu
12665 KBUILD_CFLAGS += $(cflags-y)
12666-
12667- # temporary until string.h is fixed
12668- KBUILD_CFLAGS += -ffreestanding
12669 else
12670 BITS := 64
12671 UTS_MACHINE := x86_64
12672@@ -107,6 +104,9 @@ else
12673 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12674 endif
12675
12676+# temporary until string.h is fixed
12677+KBUILD_CFLAGS += -ffreestanding
12678+
12679 # Make sure compiler does not have buggy stack-protector support.
12680 ifdef CONFIG_CC_STACKPROTECTOR
12681 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12682@@ -180,6 +180,7 @@ archheaders:
12683 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12684
12685 archprepare:
12686+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12687 ifeq ($(CONFIG_KEXEC_FILE),y)
12688 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12689 endif
12690@@ -263,3 +264,9 @@ define archhelp
12691 echo ' FDARGS="..." arguments for the booted kernel'
12692 echo ' FDINITRD=file initrd for the booted kernel'
12693 endef
12694+
12695+define OLD_LD
12696+
12697+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12698+*** Please upgrade your binutils to 2.18 or newer
12699+endef
12700diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12701index 3db07f3..9d81d0f 100644
12702--- a/arch/x86/boot/Makefile
12703+++ b/arch/x86/boot/Makefile
12704@@ -56,6 +56,9 @@ clean-files += cpustr.h
12705 # ---------------------------------------------------------------------------
12706
12707 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12708+ifdef CONSTIFY_PLUGIN
12709+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12710+endif
12711 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12712 GCOV_PROFILE := n
12713
12714diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12715index 878e4b9..20537ab 100644
12716--- a/arch/x86/boot/bitops.h
12717+++ b/arch/x86/boot/bitops.h
12718@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12719 u8 v;
12720 const u32 *p = (const u32 *)addr;
12721
12722- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12723+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12724 return v;
12725 }
12726
12727@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12728
12729 static inline void set_bit(int nr, void *addr)
12730 {
12731- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12732+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12733 }
12734
12735 #endif /* BOOT_BITOPS_H */
12736diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12737index bd49ec6..94c7f58 100644
12738--- a/arch/x86/boot/boot.h
12739+++ b/arch/x86/boot/boot.h
12740@@ -84,7 +84,7 @@ static inline void io_delay(void)
12741 static inline u16 ds(void)
12742 {
12743 u16 seg;
12744- asm("movw %%ds,%0" : "=rm" (seg));
12745+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12746 return seg;
12747 }
12748
12749diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12750index 8bd44e8..6b111e9 100644
12751--- a/arch/x86/boot/compressed/Makefile
12752+++ b/arch/x86/boot/compressed/Makefile
12753@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
12754 KBUILD_CFLAGS += -mno-mmx -mno-sse
12755 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12756 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12757+ifdef CONSTIFY_PLUGIN
12758+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12759+endif
12760
12761 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12762 GCOV_PROFILE := n
12763diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12764index a53440e..c3dbf1e 100644
12765--- a/arch/x86/boot/compressed/efi_stub_32.S
12766+++ b/arch/x86/boot/compressed/efi_stub_32.S
12767@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12768 * parameter 2, ..., param n. To make things easy, we save the return
12769 * address of efi_call_phys in a global variable.
12770 */
12771- popl %ecx
12772- movl %ecx, saved_return_addr(%edx)
12773- /* get the function pointer into ECX*/
12774- popl %ecx
12775- movl %ecx, efi_rt_function_ptr(%edx)
12776+ popl saved_return_addr(%edx)
12777+ popl efi_rt_function_ptr(%edx)
12778
12779 /*
12780 * 3. Call the physical function.
12781 */
12782- call *%ecx
12783+ call *efi_rt_function_ptr(%edx)
12784
12785 /*
12786 * 4. Balance the stack. And because EAX contains the return value,
12787@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12788 1: popl %edx
12789 subl $1b, %edx
12790
12791- movl efi_rt_function_ptr(%edx), %ecx
12792- pushl %ecx
12793+ pushl efi_rt_function_ptr(%edx)
12794
12795 /*
12796 * 10. Push the saved return address onto the stack and return.
12797 */
12798- movl saved_return_addr(%edx), %ecx
12799- pushl %ecx
12800- ret
12801+ jmpl *saved_return_addr(%edx)
12802 ENDPROC(efi_call_phys)
12803 .previous
12804
12805diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12806index 630384a..278e788 100644
12807--- a/arch/x86/boot/compressed/efi_thunk_64.S
12808+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12809@@ -189,8 +189,8 @@ efi_gdt64:
12810 .long 0 /* Filled out by user */
12811 .word 0
12812 .quad 0x0000000000000000 /* NULL descriptor */
12813- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12814- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12815+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12816+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12817 .quad 0x0080890000000000 /* TS descriptor */
12818 .quad 0x0000000000000000 /* TS continued */
12819 efi_gdt64_end:
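
The descriptor tweak above (0x00af9a.. to 0x00af9b.., repeated in head_64.S below) pre-sets the "accessed" bit, which is bit 40 of a segment descriptor. The apparent motivation (the patch itself does not comment on it) is that the CPU otherwise writes that bit into the GDT the first time the selector is loaded, a write that cannot succeed once the GDT sits in read-only memory under KERNEXEC. A stand-alone bit check:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t old_cs = 0x00af9a000000ffffULL;  /* __KERNEL_CS, before */
        uint64_t new_cs = 0x00af9b000000ffffULL;  /* __KERNEL_CS, after  */

        /* bit 40 = "accessed" bit of the segment descriptor */
        printf("accessed: %d -> %d\n",
               (int)((old_cs >> 40) & 1), (int)((new_cs >> 40) & 1));
        return 0;
    }
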
12820diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12821index 1d7fbbc..36ecd58 100644
12822--- a/arch/x86/boot/compressed/head_32.S
12823+++ b/arch/x86/boot/compressed/head_32.S
12824@@ -140,10 +140,10 @@ preferred_addr:
12825 addl %eax, %ebx
12826 notl %eax
12827 andl %eax, %ebx
12828- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12829+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12830 jge 1f
12831 #endif
12832- movl $LOAD_PHYSICAL_ADDR, %ebx
12833+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12834 1:
12835
12836 /* Target address to relocate to for decompression */
12837diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12838index 6b1766c..ad465c9 100644
12839--- a/arch/x86/boot/compressed/head_64.S
12840+++ b/arch/x86/boot/compressed/head_64.S
12841@@ -94,10 +94,10 @@ ENTRY(startup_32)
12842 addl %eax, %ebx
12843 notl %eax
12844 andl %eax, %ebx
12845- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12846+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12847 jge 1f
12848 #endif
12849- movl $LOAD_PHYSICAL_ADDR, %ebx
12850+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12851 1:
12852
12853 /* Target address to relocate to for decompression */
12854@@ -322,10 +322,10 @@ preferred_addr:
12855 addq %rax, %rbp
12856 notq %rax
12857 andq %rax, %rbp
12858- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12859+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12860 jge 1f
12861 #endif
12862- movq $LOAD_PHYSICAL_ADDR, %rbp
12863+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12864 1:
12865
12866 /* Target address to relocate to for decompression */
12867@@ -434,8 +434,8 @@ gdt:
12868 .long gdt
12869 .word 0
12870 .quad 0x0000000000000000 /* NULL descriptor */
12871- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12872- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12873+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12874+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12875 .quad 0x0080890000000000 /* TS descriptor */
12876 .quad 0x0000000000000000 /* TS continued */
12877 gdt_end:
12878diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12879index a950864..c710239 100644
12880--- a/arch/x86/boot/compressed/misc.c
12881+++ b/arch/x86/boot/compressed/misc.c
12882@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12883 * Calculate the delta between where vmlinux was linked to load
12884 * and where it was actually loaded.
12885 */
12886- delta = min_addr - LOAD_PHYSICAL_ADDR;
12887+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12888 if (!delta) {
12889 debug_putstr("No relocation needed... ");
12890 return;
12891@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12892 Elf32_Ehdr ehdr;
12893 Elf32_Phdr *phdrs, *phdr;
12894 #endif
12895- void *dest;
12896+ void *dest, *prev;
12897 int i;
12898
12899 memcpy(&ehdr, output, sizeof(ehdr));
12900@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12901 case PT_LOAD:
12902 #ifdef CONFIG_RELOCATABLE
12903 dest = output;
12904- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12905+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12906 #else
12907 dest = (void *)(phdr->p_paddr);
12908 #endif
12909 memcpy(dest,
12910 output + phdr->p_offset,
12911 phdr->p_filesz);
12912+ if (i)
12913+ memset(prev, 0xff, dest - prev);
12914+ prev = dest + phdr->p_filesz;
12915 break;
12916 default: /* Ignore other PT_* */ break;
12917 }
12918@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12919 error("Destination address too large");
12920 #endif
12921 #ifndef CONFIG_RELOCATABLE
12922- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12923+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12924 error("Wrong destination address");
12925 #endif
12926
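
The parse_elf() hunk above tracks the end of the previous PT_LOAD copy in "prev" and poisons the hole before the next segment with 0xff bytes, so the slack between segments never retains stale decompression output. A reduced model of the loop (a sketch only; the struct and names are made up, and segments are assumed sorted by ascending offset as in practice):

    #include <string.h>

    struct seg { unsigned long off, len; };

    /* copy each segment from "in" to "out", poisoning inter-segment slack */
    static void load_segs(unsigned char *out, const unsigned char *in,
                          const struct seg *s, int n)
    {
        unsigned char *prev = NULL;
        int i;

        for (i = 0; i < n; i++) {
            unsigned char *dest = out + s[i].off;

            memcpy(dest, in + s[i].off, s[i].len);
            if (i)
                memset(prev, 0xff, dest - prev); /* fill the hole */
            prev = dest + s[i].len;
        }
    }
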
12927diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12928index 1fd7d57..0f7d096 100644
12929--- a/arch/x86/boot/cpucheck.c
12930+++ b/arch/x86/boot/cpucheck.c
12931@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12932 u32 ecx = MSR_K7_HWCR;
12933 u32 eax, edx;
12934
12935- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12936+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12937 eax &= ~(1 << 15);
12938- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12939+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12940
12941 get_cpuflags(); /* Make sure it really did something */
12942 err = check_cpuflags();
12943@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12944 u32 ecx = MSR_VIA_FCR;
12945 u32 eax, edx;
12946
12947- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12948+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12949 eax |= (1<<1)|(1<<7);
12950- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12951+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12952
12953 set_bit(X86_FEATURE_CX8, cpu.flags);
12954 err = check_cpuflags();
12955@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12956 u32 eax, edx;
12957 u32 level = 1;
12958
12959- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12960- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12961- asm("cpuid"
12962+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12963+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12964+ asm volatile("cpuid"
12965 : "+a" (level), "=d" (cpu.flags[0])
12966 : : "ecx", "ebx");
12967- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12968+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12969
12970 err = check_cpuflags();
12971 } else if (err == 0x01 &&
12972diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12973index 16ef025..91e033b 100644
12974--- a/arch/x86/boot/header.S
12975+++ b/arch/x86/boot/header.S
12976@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12977 # single linked list of
12978 # struct setup_data
12979
12980-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12981+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12982
12983 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12984+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12985+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12986+#else
12987 #define VO_INIT_SIZE (VO__end - VO__text)
12988+#endif
12989 #if ZO_INIT_SIZE > VO_INIT_SIZE
12990 #define INIT_SIZE ZO_INIT_SIZE
12991 #else
12992diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12993index db75d07..8e6d0af 100644
12994--- a/arch/x86/boot/memory.c
12995+++ b/arch/x86/boot/memory.c
12996@@ -19,7 +19,7 @@
12997
12998 static int detect_memory_e820(void)
12999 {
13000- int count = 0;
13001+ unsigned int count = 0;
13002 struct biosregs ireg, oreg;
13003 struct e820entry *desc = boot_params.e820_map;
13004 static struct e820entry buf; /* static so it is zeroed */
13005diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13006index ba3e100..6501b8f 100644
13007--- a/arch/x86/boot/video-vesa.c
13008+++ b/arch/x86/boot/video-vesa.c
13009@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13010
13011 boot_params.screen_info.vesapm_seg = oreg.es;
13012 boot_params.screen_info.vesapm_off = oreg.di;
13013+ boot_params.screen_info.vesapm_size = oreg.cx;
13014 }
13015
13016 /*
13017diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13018index 43eda28..5ab5fdb 100644
13019--- a/arch/x86/boot/video.c
13020+++ b/arch/x86/boot/video.c
13021@@ -96,7 +96,7 @@ static void store_mode_params(void)
13022 static unsigned int get_entry(void)
13023 {
13024 char entry_buf[4];
13025- int i, len = 0;
13026+ unsigned int i, len = 0;
13027 int key;
13028 unsigned int v;
13029
13030diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13031index 9105655..41779c1 100644
13032--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13033+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13034@@ -8,6 +8,8 @@
13035 * including this sentence is retained in full.
13036 */
13037
13038+#include <asm/alternative-asm.h>
13039+
13040 .extern crypto_ft_tab
13041 .extern crypto_it_tab
13042 .extern crypto_fl_tab
13043@@ -70,6 +72,8 @@
13044 je B192; \
13045 leaq 32(r9),r9;
13046
13047+#define ret pax_force_retaddr; ret
13048+
13049 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13050 movq r1,r2; \
13051 movq r3,r4; \
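
The "#define ret pax_force_retaddr; ret" line above works because the C preprocessor never re-expands a macro name inside its own expansion (C11 6.10.3.4), so every later bare "ret" in this .S file is rewritten exactly once to run pax_force_retaddr first. A stand-alone demo of that rule with an ordinary function (names are made up):

    #include <stdio.h>

    static int step(int x) { return x + 1; }

    /* from here on, each use of "step" expands exactly once */
    #define step(x) step(step(x))

    int main(void)
    {
        printf("%d\n", step(1));  /* the macro adds one extra call: prints 3 */
        return 0;
    }
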
13052diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13053index 477e9d7..c92c7d8 100644
13054--- a/arch/x86/crypto/aesni-intel_asm.S
13055+++ b/arch/x86/crypto/aesni-intel_asm.S
13056@@ -31,6 +31,7 @@
13057
13058 #include <linux/linkage.h>
13059 #include <asm/inst.h>
13060+#include <asm/alternative-asm.h>
13061
13062 #ifdef __x86_64__
13063 .data
13064@@ -205,7 +206,7 @@ enc: .octa 0x2
13065 * num_initial_blocks = b mod 4
13066 * encrypt the initial num_initial_blocks blocks and apply ghash on
13067 * the ciphertext
13068-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13069+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13070 * are clobbered
13071 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13072 */
13073@@ -214,8 +215,8 @@ enc: .octa 0x2
13074 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13075 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13076 mov arg7, %r10 # %r10 = AAD
13077- mov arg8, %r12 # %r12 = aadLen
13078- mov %r12, %r11
13079+ mov arg8, %r15 # %r15 = aadLen
13080+ mov %r15, %r11
13081 pxor %xmm\i, %xmm\i
13082 _get_AAD_loop\num_initial_blocks\operation:
13083 movd (%r10), \TMP1
13084@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13085 psrldq $4, %xmm\i
13086 pxor \TMP1, %xmm\i
13087 add $4, %r10
13088- sub $4, %r12
13089+ sub $4, %r15
13090 jne _get_AAD_loop\num_initial_blocks\operation
13091 cmp $16, %r11
13092 je _get_AAD_loop2_done\num_initial_blocks\operation
13093- mov $16, %r12
13094+ mov $16, %r15
13095 _get_AAD_loop2\num_initial_blocks\operation:
13096 psrldq $4, %xmm\i
13097- sub $4, %r12
13098- cmp %r11, %r12
13099+ sub $4, %r15
13100+ cmp %r11, %r15
13101 jne _get_AAD_loop2\num_initial_blocks\operation
13102 _get_AAD_loop2_done\num_initial_blocks\operation:
13103 movdqa SHUF_MASK(%rip), %xmm14
13104@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13105 * num_initial_blocks = b mod 4
13106 * encrypt the initial num_initial_blocks blocks and apply ghash on
13107 * the ciphertext
13108-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13109+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13110 * are clobbered
13111 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13112 */
13113@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13114 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13115 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13116 mov arg7, %r10 # %r10 = AAD
13117- mov arg8, %r12 # %r12 = aadLen
13118- mov %r12, %r11
13119+ mov arg8, %r15 # %r15 = aadLen
13120+ mov %r15, %r11
13121 pxor %xmm\i, %xmm\i
13122 _get_AAD_loop\num_initial_blocks\operation:
13123 movd (%r10), \TMP1
13124@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13125 psrldq $4, %xmm\i
13126 pxor \TMP1, %xmm\i
13127 add $4, %r10
13128- sub $4, %r12
13129+ sub $4, %r15
13130 jne _get_AAD_loop\num_initial_blocks\operation
13131 cmp $16, %r11
13132 je _get_AAD_loop2_done\num_initial_blocks\operation
13133- mov $16, %r12
13134+ mov $16, %r15
13135 _get_AAD_loop2\num_initial_blocks\operation:
13136 psrldq $4, %xmm\i
13137- sub $4, %r12
13138- cmp %r11, %r12
13139+ sub $4, %r15
13140+ cmp %r11, %r15
13141 jne _get_AAD_loop2\num_initial_blocks\operation
13142 _get_AAD_loop2_done\num_initial_blocks\operation:
13143 movdqa SHUF_MASK(%rip), %xmm14
13144@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13145 *
13146 *****************************************************************************/
13147 ENTRY(aesni_gcm_dec)
13148- push %r12
13149+ push %r15
13150 push %r13
13151 push %r14
13152 mov %rsp, %r14
13153@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13154 */
13155 sub $VARIABLE_OFFSET, %rsp
13156 and $~63, %rsp # align rsp to 64 bytes
13157- mov %arg6, %r12
13158- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13159+ mov %arg6, %r15
13160+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13161 movdqa SHUF_MASK(%rip), %xmm2
13162 PSHUFB_XMM %xmm2, %xmm13
13163
13164@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13165 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13166 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13167 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13168- mov %r13, %r12
13169- and $(3<<4), %r12
13170+ mov %r13, %r15
13171+ and $(3<<4), %r15
13172 jz _initial_num_blocks_is_0_decrypt
13173- cmp $(2<<4), %r12
13174+ cmp $(2<<4), %r15
13175 jb _initial_num_blocks_is_1_decrypt
13176 je _initial_num_blocks_is_2_decrypt
13177 _initial_num_blocks_is_3_decrypt:
13178@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13179 sub $16, %r11
13180 add %r13, %r11
13181 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13182- lea SHIFT_MASK+16(%rip), %r12
13183- sub %r13, %r12
13184+ lea SHIFT_MASK+16(%rip), %r15
13185+ sub %r13, %r15
13186 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13187 # (%r13 is the number of bytes in plaintext mod 16)
13188- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13189+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13190 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 bytes
13191
13192 movdqa %xmm1, %xmm2
13193 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13194- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13195+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13196 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13197 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13198 pand %xmm1, %xmm2
13199@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13200 sub $1, %r13
13201 jne _less_than_8_bytes_left_decrypt
13202 _multiple_of_16_bytes_decrypt:
13203- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13204- shl $3, %r12 # convert into number of bits
13205- movd %r12d, %xmm15 # len(A) in %xmm15
13206+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13207+ shl $3, %r15 # convert into number of bits
13208+ movd %r15d, %xmm15 # len(A) in %xmm15
13209 shl $3, %arg4 # len(C) in bits (*128)
13210 MOVQ_R64_XMM %arg4, %xmm1
13211 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13212@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13213 mov %r14, %rsp
13214 pop %r14
13215 pop %r13
13216- pop %r12
13217+ pop %r15
13218+ pax_force_retaddr
13219 ret
13220 ENDPROC(aesni_gcm_dec)
13221
13222@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13223 * poly = x^128 + x^127 + x^126 + x^121 + 1
13224 ***************************************************************************/
13225 ENTRY(aesni_gcm_enc)
13226- push %r12
13227+ push %r15
13228 push %r13
13229 push %r14
13230 mov %rsp, %r14
13231@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13232 #
13233 sub $VARIABLE_OFFSET, %rsp
13234 and $~63, %rsp
13235- mov %arg6, %r12
13236- movdqu (%r12), %xmm13
13237+ mov %arg6, %r15
13238+ movdqu (%r15), %xmm13
13239 movdqa SHUF_MASK(%rip), %xmm2
13240 PSHUFB_XMM %xmm2, %xmm13
13241
13242@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13243 movdqa %xmm13, HashKey(%rsp)
13244 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13245 and $-16, %r13
13246- mov %r13, %r12
13247+ mov %r13, %r15
13248
13249 # Encrypt first few blocks
13250
13251- and $(3<<4), %r12
13252+ and $(3<<4), %r15
13253 jz _initial_num_blocks_is_0_encrypt
13254- cmp $(2<<4), %r12
13255+ cmp $(2<<4), %r15
13256 jb _initial_num_blocks_is_1_encrypt
13257 je _initial_num_blocks_is_2_encrypt
13258 _initial_num_blocks_is_3_encrypt:
13259@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13260 sub $16, %r11
13261 add %r13, %r11
13262 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13263- lea SHIFT_MASK+16(%rip), %r12
13264- sub %r13, %r12
13265+ lea SHIFT_MASK+16(%rip), %r15
13266+ sub %r13, %r15
13267 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13268 # (%r13 is the number of bytes in plaintext mod 16)
13269- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13270+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13271 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 bytes
13272 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13273- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13274+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13275 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13276 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13277 movdqa SHUF_MASK(%rip), %xmm10
13278@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13279 sub $1, %r13
13280 jne _less_than_8_bytes_left_encrypt
13281 _multiple_of_16_bytes_encrypt:
13282- mov arg8, %r12 # %r12 = addLen (number of bytes)
13283- shl $3, %r12
13284- movd %r12d, %xmm15 # len(A) in %xmm15
13285+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13286+ shl $3, %r15
13287+ movd %r15d, %xmm15 # len(A) in %xmm15
13288 shl $3, %arg4 # len(C) in bits (*128)
13289 MOVQ_R64_XMM %arg4, %xmm1
13290 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13291@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13292 mov %r14, %rsp
13293 pop %r14
13294 pop %r13
13295- pop %r12
13296+ pop %r15
13297+ pax_force_retaddr
13298 ret
13299 ENDPROC(aesni_gcm_enc)
13300
13301@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13302 pxor %xmm1, %xmm0
13303 movaps %xmm0, (TKEYP)
13304 add $0x10, TKEYP
13305+ pax_force_retaddr
13306 ret
13307 ENDPROC(_key_expansion_128)
13308 ENDPROC(_key_expansion_256a)
13309@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13310 shufps $0b01001110, %xmm2, %xmm1
13311 movaps %xmm1, 0x10(TKEYP)
13312 add $0x20, TKEYP
13313+ pax_force_retaddr
13314 ret
13315 ENDPROC(_key_expansion_192a)
13316
13317@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13318
13319 movaps %xmm0, (TKEYP)
13320 add $0x10, TKEYP
13321+ pax_force_retaddr
13322 ret
13323 ENDPROC(_key_expansion_192b)
13324
13325@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13326 pxor %xmm1, %xmm2
13327 movaps %xmm2, (TKEYP)
13328 add $0x10, TKEYP
13329+ pax_force_retaddr
13330 ret
13331 ENDPROC(_key_expansion_256b)
13332
13333@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13334 #ifndef __x86_64__
13335 popl KEYP
13336 #endif
13337+ pax_force_retaddr
13338 ret
13339 ENDPROC(aesni_set_key)
13340
13341@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13342 popl KLEN
13343 popl KEYP
13344 #endif
13345+ pax_force_retaddr
13346 ret
13347 ENDPROC(aesni_enc)
13348
13349@@ -1974,6 +1983,7 @@ _aesni_enc1:
13350 AESENC KEY STATE
13351 movaps 0x70(TKEYP), KEY
13352 AESENCLAST KEY STATE
13353+ pax_force_retaddr
13354 ret
13355 ENDPROC(_aesni_enc1)
13356
13357@@ -2083,6 +2093,7 @@ _aesni_enc4:
13358 AESENCLAST KEY STATE2
13359 AESENCLAST KEY STATE3
13360 AESENCLAST KEY STATE4
13361+ pax_force_retaddr
13362 ret
13363 ENDPROC(_aesni_enc4)
13364
13365@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13366 popl KLEN
13367 popl KEYP
13368 #endif
13369+ pax_force_retaddr
13370 ret
13371 ENDPROC(aesni_dec)
13372
13373@@ -2164,6 +2176,7 @@ _aesni_dec1:
13374 AESDEC KEY STATE
13375 movaps 0x70(TKEYP), KEY
13376 AESDECLAST KEY STATE
13377+ pax_force_retaddr
13378 ret
13379 ENDPROC(_aesni_dec1)
13380
13381@@ -2273,6 +2286,7 @@ _aesni_dec4:
13382 AESDECLAST KEY STATE2
13383 AESDECLAST KEY STATE3
13384 AESDECLAST KEY STATE4
13385+ pax_force_retaddr
13386 ret
13387 ENDPROC(_aesni_dec4)
13388
13389@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13390 popl KEYP
13391 popl LEN
13392 #endif
13393+ pax_force_retaddr
13394 ret
13395 ENDPROC(aesni_ecb_enc)
13396
13397@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13398 popl KEYP
13399 popl LEN
13400 #endif
13401+ pax_force_retaddr
13402 ret
13403 ENDPROC(aesni_ecb_dec)
13404
13405@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13406 popl LEN
13407 popl IVP
13408 #endif
13409+ pax_force_retaddr
13410 ret
13411 ENDPROC(aesni_cbc_enc)
13412
13413@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13414 popl LEN
13415 popl IVP
13416 #endif
13417+ pax_force_retaddr
13418 ret
13419 ENDPROC(aesni_cbc_dec)
13420
13421@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13422 mov $1, TCTR_LOW
13423 MOVQ_R64_XMM TCTR_LOW INC
13424 MOVQ_R64_XMM CTR TCTR_LOW
13425+ pax_force_retaddr
13426 ret
13427 ENDPROC(_aesni_inc_init)
13428
13429@@ -2579,6 +2598,7 @@ _aesni_inc:
13430 .Linc_low:
13431 movaps CTR, IV
13432 PSHUFB_XMM BSWAP_MASK IV
13433+ pax_force_retaddr
13434 ret
13435 ENDPROC(_aesni_inc)
13436
13437@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13438 .Lctr_enc_ret:
13439 movups IV, (IVP)
13440 .Lctr_enc_just_ret:
13441+ pax_force_retaddr
13442 ret
13443 ENDPROC(aesni_ctr_enc)
13444
13445@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13446 pxor INC, STATE4
13447 movdqu STATE4, 0x70(OUTP)
13448
13449+ pax_force_retaddr
13450 ret
13451 ENDPROC(aesni_xts_crypt8)
13452
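Beyond prefixing returns with pax_force_retaddr, the aesni-intel hunks migrate every use of %r12 to %r15: under the KERNEXEC "OR" method defined later in this patch (see the alternative-asm.h hunk), %r12 is permanently reserved to hold the return-address mask, so hand-written assembly may no longer clobber it. A hedged sketch of the reserved-register idea in GNU C — this compiles with GCC on x86-64 only, and the mask value mirrors pax_set_fptr_mask:

/* claim %r12 globally; all generated and hand-written code linked
 * into such a build must then treat the register as off-limits */
register unsigned long kernexec_mask asm("r12");

static unsigned long apply_mask(unsigned long retaddr)
{
	return retaddr | kernexec_mask;	/* mirrors "orq %r12,(%rsp)" */
}

int main(void)
{
	kernexec_mask = 0x8000000000000000UL;	/* pax_set_fptr_mask */
	return apply_mask(0x401000UL) == 0x8000000000401000UL ? 0 : 1;
}

The same rename recurs throughout the crypto files below (to %r14 where %r15 is already taken) for the same reason.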
13453diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13454index 246c670..466e2d6 100644
13455--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13456+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13457@@ -21,6 +21,7 @@
13458 */
13459
13460 #include <linux/linkage.h>
13461+#include <asm/alternative-asm.h>
13462
13463 .file "blowfish-x86_64-asm.S"
13464 .text
13465@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13466 jnz .L__enc_xor;
13467
13468 write_block();
13469+ pax_force_retaddr
13470 ret;
13471 .L__enc_xor:
13472 xor_block();
13473+ pax_force_retaddr
13474 ret;
13475 ENDPROC(__blowfish_enc_blk)
13476
13477@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13478
13479 movq %r11, %rbp;
13480
13481+ pax_force_retaddr
13482 ret;
13483 ENDPROC(blowfish_dec_blk)
13484
13485@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13486
13487 popq %rbx;
13488 popq %rbp;
13489+ pax_force_retaddr
13490 ret;
13491
13492 .L__enc_xor4:
13493@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13494
13495 popq %rbx;
13496 popq %rbp;
13497+ pax_force_retaddr
13498 ret;
13499 ENDPROC(__blowfish_enc_blk_4way)
13500
13501@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13502 popq %rbx;
13503 popq %rbp;
13504
13505+ pax_force_retaddr
13506 ret;
13507 ENDPROC(blowfish_dec_blk_4way)
13508diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13509index ce71f92..1dce7ec 100644
13510--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13511+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13512@@ -16,6 +16,7 @@
13513 */
13514
13515 #include <linux/linkage.h>
13516+#include <asm/alternative-asm.h>
13517
13518 #define CAMELLIA_TABLE_BYTE_LEN 272
13519
13520@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13521 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13522 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13523 %rcx, (%r9));
13524+ pax_force_retaddr
13525 ret;
13526 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13527
13528@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13529 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13530 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13531 %rax, (%r9));
13532+ pax_force_retaddr
13533 ret;
13534 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13535
13536@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13537 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13538 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13539
13540+ pax_force_retaddr
13541 ret;
13542
13543 .align 8
13544@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13545 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13546 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13547
13548+ pax_force_retaddr
13549 ret;
13550
13551 .align 8
13552@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13553 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13554 %xmm8, %rsi);
13555
13556+ pax_force_retaddr
13557 ret;
13558 ENDPROC(camellia_ecb_enc_16way)
13559
13560@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13561 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13562 %xmm8, %rsi);
13563
13564+ pax_force_retaddr
13565 ret;
13566 ENDPROC(camellia_ecb_dec_16way)
13567
13568@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13569 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13570 %xmm8, %rsi);
13571
13572+ pax_force_retaddr
13573 ret;
13574 ENDPROC(camellia_cbc_dec_16way)
13575
13576@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13577 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13578 %xmm8, %rsi);
13579
13580+ pax_force_retaddr
13581 ret;
13582 ENDPROC(camellia_ctr_16way)
13583
13584@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13585 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13586 %xmm8, %rsi);
13587
13588+ pax_force_retaddr
13589 ret;
13590 ENDPROC(camellia_xts_crypt_16way)
13591
13592diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13593index 0e0b886..5a3123c 100644
13594--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13595+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13596@@ -11,6 +11,7 @@
13597 */
13598
13599 #include <linux/linkage.h>
13600+#include <asm/alternative-asm.h>
13601
13602 #define CAMELLIA_TABLE_BYTE_LEN 272
13603
13604@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13605 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13606 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13607 %rcx, (%r9));
13608+ pax_force_retaddr
13609 ret;
13610 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13611
13612@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13613 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13614 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13615 %rax, (%r9));
13616+ pax_force_retaddr
13617 ret;
13618 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13619
13620@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13621 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13622 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13623
13624+ pax_force_retaddr
13625 ret;
13626
13627 .align 8
13628@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13629 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13630 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13631
13632+ pax_force_retaddr
13633 ret;
13634
13635 .align 8
13636@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13637
13638 vzeroupper;
13639
13640+ pax_force_retaddr
13641 ret;
13642 ENDPROC(camellia_ecb_enc_32way)
13643
13644@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13645
13646 vzeroupper;
13647
13648+ pax_force_retaddr
13649 ret;
13650 ENDPROC(camellia_ecb_dec_32way)
13651
13652@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13653
13654 vzeroupper;
13655
13656+ pax_force_retaddr
13657 ret;
13658 ENDPROC(camellia_cbc_dec_32way)
13659
13660@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13661
13662 vzeroupper;
13663
13664+ pax_force_retaddr
13665 ret;
13666 ENDPROC(camellia_ctr_32way)
13667
13668@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13669
13670 vzeroupper;
13671
13672+ pax_force_retaddr
13673 ret;
13674 ENDPROC(camellia_xts_crypt_32way)
13675
13676diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13677index 310319c..db3d7b5 100644
13678--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13679+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13680@@ -21,6 +21,7 @@
13681 */
13682
13683 #include <linux/linkage.h>
13684+#include <asm/alternative-asm.h>
13685
13686 .file "camellia-x86_64-asm_64.S"
13687 .text
13688@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13689 enc_outunpack(mov, RT1);
13690
13691 movq RRBP, %rbp;
13692+ pax_force_retaddr
13693 ret;
13694
13695 .L__enc_xor:
13696 enc_outunpack(xor, RT1);
13697
13698 movq RRBP, %rbp;
13699+ pax_force_retaddr
13700 ret;
13701 ENDPROC(__camellia_enc_blk)
13702
13703@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13704 dec_outunpack();
13705
13706 movq RRBP, %rbp;
13707+ pax_force_retaddr
13708 ret;
13709 ENDPROC(camellia_dec_blk)
13710
13711@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13712
13713 movq RRBP, %rbp;
13714 popq %rbx;
13715+ pax_force_retaddr
13716 ret;
13717
13718 .L__enc2_xor:
13719@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13720
13721 movq RRBP, %rbp;
13722 popq %rbx;
13723+ pax_force_retaddr
13724 ret;
13725 ENDPROC(__camellia_enc_blk_2way)
13726
13727@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13728
13729 movq RRBP, %rbp;
13730 movq RXOR, %rbx;
13731+ pax_force_retaddr
13732 ret;
13733 ENDPROC(camellia_dec_blk_2way)
13734diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13735index c35fd5d..2d8c7db 100644
13736--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13737+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13738@@ -24,6 +24,7 @@
13739 */
13740
13741 #include <linux/linkage.h>
13742+#include <asm/alternative-asm.h>
13743
13744 .file "cast5-avx-x86_64-asm_64.S"
13745
13746@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13747 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13748 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13749
13750+ pax_force_retaddr
13751 ret;
13752 ENDPROC(__cast5_enc_blk16)
13753
13754@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13755 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13756 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13757
13758+ pax_force_retaddr
13759 ret;
13760
13761 .L__skip_dec:
13762@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13763 vmovdqu RR4, (6*4*4)(%r11);
13764 vmovdqu RL4, (7*4*4)(%r11);
13765
13766+ pax_force_retaddr
13767 ret;
13768 ENDPROC(cast5_ecb_enc_16way)
13769
13770@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13771 vmovdqu RR4, (6*4*4)(%r11);
13772 vmovdqu RL4, (7*4*4)(%r11);
13773
13774+ pax_force_retaddr
13775 ret;
13776 ENDPROC(cast5_ecb_dec_16way)
13777
13778@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13779 * %rdx: src
13780 */
13781
13782- pushq %r12;
13783+ pushq %r14;
13784
13785 movq %rsi, %r11;
13786- movq %rdx, %r12;
13787+ movq %rdx, %r14;
13788
13789 vmovdqu (0*16)(%rdx), RL1;
13790 vmovdqu (1*16)(%rdx), RR1;
13791@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13792 call __cast5_dec_blk16;
13793
13794 /* xor with src */
13795- vmovq (%r12), RX;
13796+ vmovq (%r14), RX;
13797 vpshufd $0x4f, RX, RX;
13798 vpxor RX, RR1, RR1;
13799- vpxor 0*16+8(%r12), RL1, RL1;
13800- vpxor 1*16+8(%r12), RR2, RR2;
13801- vpxor 2*16+8(%r12), RL2, RL2;
13802- vpxor 3*16+8(%r12), RR3, RR3;
13803- vpxor 4*16+8(%r12), RL3, RL3;
13804- vpxor 5*16+8(%r12), RR4, RR4;
13805- vpxor 6*16+8(%r12), RL4, RL4;
13806+ vpxor 0*16+8(%r14), RL1, RL1;
13807+ vpxor 1*16+8(%r14), RR2, RR2;
13808+ vpxor 2*16+8(%r14), RL2, RL2;
13809+ vpxor 3*16+8(%r14), RR3, RR3;
13810+ vpxor 4*16+8(%r14), RL3, RL3;
13811+ vpxor 5*16+8(%r14), RR4, RR4;
13812+ vpxor 6*16+8(%r14), RL4, RL4;
13813
13814 vmovdqu RR1, (0*16)(%r11);
13815 vmovdqu RL1, (1*16)(%r11);
13816@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13817 vmovdqu RR4, (6*16)(%r11);
13818 vmovdqu RL4, (7*16)(%r11);
13819
13820- popq %r12;
13821+ popq %r14;
13822
13823+ pax_force_retaddr
13824 ret;
13825 ENDPROC(cast5_cbc_dec_16way)
13826
13827@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13828 * %rcx: iv (big endian, 64bit)
13829 */
13830
13831- pushq %r12;
13832+ pushq %r14;
13833
13834 movq %rsi, %r11;
13835- movq %rdx, %r12;
13836+ movq %rdx, %r14;
13837
13838 vpcmpeqd RTMP, RTMP, RTMP;
13839 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13840@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13841 call __cast5_enc_blk16;
13842
13843 /* dst = src ^ iv */
13844- vpxor (0*16)(%r12), RR1, RR1;
13845- vpxor (1*16)(%r12), RL1, RL1;
13846- vpxor (2*16)(%r12), RR2, RR2;
13847- vpxor (3*16)(%r12), RL2, RL2;
13848- vpxor (4*16)(%r12), RR3, RR3;
13849- vpxor (5*16)(%r12), RL3, RL3;
13850- vpxor (6*16)(%r12), RR4, RR4;
13851- vpxor (7*16)(%r12), RL4, RL4;
13852+ vpxor (0*16)(%r14), RR1, RR1;
13853+ vpxor (1*16)(%r14), RL1, RL1;
13854+ vpxor (2*16)(%r14), RR2, RR2;
13855+ vpxor (3*16)(%r14), RL2, RL2;
13856+ vpxor (4*16)(%r14), RR3, RR3;
13857+ vpxor (5*16)(%r14), RL3, RL3;
13858+ vpxor (6*16)(%r14), RR4, RR4;
13859+ vpxor (7*16)(%r14), RL4, RL4;
13860 vmovdqu RR1, (0*16)(%r11);
13861 vmovdqu RL1, (1*16)(%r11);
13862 vmovdqu RR2, (2*16)(%r11);
13863@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13864 vmovdqu RR4, (6*16)(%r11);
13865 vmovdqu RL4, (7*16)(%r11);
13866
13867- popq %r12;
13868+ popq %r14;
13869
13870+ pax_force_retaddr
13871 ret;
13872 ENDPROC(cast5_ctr_16way)
13873diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13874index e3531f8..e123f35 100644
13875--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13876+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13877@@ -24,6 +24,7 @@
13878 */
13879
13880 #include <linux/linkage.h>
13881+#include <asm/alternative-asm.h>
13882 #include "glue_helper-asm-avx.S"
13883
13884 .file "cast6-avx-x86_64-asm_64.S"
13885@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13886 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13887 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13888
13889+ pax_force_retaddr
13890 ret;
13891 ENDPROC(__cast6_enc_blk8)
13892
13893@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13894 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13895 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13896
13897+ pax_force_retaddr
13898 ret;
13899 ENDPROC(__cast6_dec_blk8)
13900
13901@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13902
13903 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13904
13905+ pax_force_retaddr
13906 ret;
13907 ENDPROC(cast6_ecb_enc_8way)
13908
13909@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13910
13911 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13912
13913+ pax_force_retaddr
13914 ret;
13915 ENDPROC(cast6_ecb_dec_8way)
13916
13917@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13918 * %rdx: src
13919 */
13920
13921- pushq %r12;
13922+ pushq %r14;
13923
13924 movq %rsi, %r11;
13925- movq %rdx, %r12;
13926+ movq %rdx, %r14;
13927
13928 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13929
13930 call __cast6_dec_blk8;
13931
13932- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13933+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13934
13935- popq %r12;
13936+ popq %r14;
13937
13938+ pax_force_retaddr
13939 ret;
13940 ENDPROC(cast6_cbc_dec_8way)
13941
13942@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13943 * %rcx: iv (little endian, 128bit)
13944 */
13945
13946- pushq %r12;
13947+ pushq %r14;
13948
13949 movq %rsi, %r11;
13950- movq %rdx, %r12;
13951+ movq %rdx, %r14;
13952
13953 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13954 RD2, RX, RKR, RKM);
13955
13956 call __cast6_enc_blk8;
13957
13958- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13959+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13960
13961- popq %r12;
13962+ popq %r14;
13963
13964+ pax_force_retaddr
13965 ret;
13966 ENDPROC(cast6_ctr_8way)
13967
13968@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13969 /* dst <= regs xor IVs(in dst) */
13970 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13971
13972+ pax_force_retaddr
13973 ret;
13974 ENDPROC(cast6_xts_enc_8way)
13975
13976@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13977 /* dst <= regs xor IVs(in dst) */
13978 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13979
13980+ pax_force_retaddr
13981 ret;
13982 ENDPROC(cast6_xts_dec_8way)
13983diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13984index 26d49eb..c0a8c84 100644
13985--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13986+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13987@@ -45,6 +45,7 @@
13988
13989 #include <asm/inst.h>
13990 #include <linux/linkage.h>
13991+#include <asm/alternative-asm.h>
13992
13993 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13994
13995@@ -309,6 +310,7 @@ do_return:
13996 popq %rsi
13997 popq %rdi
13998 popq %rbx
13999+ pax_force_retaddr
14000 ret
14001
14002 ################################################################
14003diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14004index 5d1e007..098cb4f 100644
14005--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14006+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14007@@ -18,6 +18,7 @@
14008
14009 #include <linux/linkage.h>
14010 #include <asm/inst.h>
14011+#include <asm/alternative-asm.h>
14012
14013 .data
14014
14015@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14016 psrlq $1, T2
14017 pxor T2, T1
14018 pxor T1, DATA
14019+ pax_force_retaddr
14020 ret
14021 ENDPROC(__clmul_gf128mul_ble)
14022
14023@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14024 call __clmul_gf128mul_ble
14025 PSHUFB_XMM BSWAP DATA
14026 movups DATA, (%rdi)
14027+ pax_force_retaddr
14028 ret
14029 ENDPROC(clmul_ghash_mul)
14030
14031@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14032 PSHUFB_XMM BSWAP DATA
14033 movups DATA, (%rdi)
14034 .Lupdate_just_ret:
14035+ pax_force_retaddr
14036 ret
14037 ENDPROC(clmul_ghash_update)
14038diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14039index 9279e0b..c4b3d2c 100644
14040--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14041+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14042@@ -1,4 +1,5 @@
14043 #include <linux/linkage.h>
14044+#include <asm/alternative-asm.h>
14045
14046 # enter salsa20_encrypt_bytes
14047 ENTRY(salsa20_encrypt_bytes)
14048@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14049 add %r11,%rsp
14050 mov %rdi,%rax
14051 mov %rsi,%rdx
14052+ pax_force_retaddr
14053 ret
14054 # bytesatleast65:
14055 ._bytesatleast65:
14056@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14057 add %r11,%rsp
14058 mov %rdi,%rax
14059 mov %rsi,%rdx
14060+ pax_force_retaddr
14061 ret
14062 ENDPROC(salsa20_keysetup)
14063
14064@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14065 add %r11,%rsp
14066 mov %rdi,%rax
14067 mov %rsi,%rdx
14068+ pax_force_retaddr
14069 ret
14070 ENDPROC(salsa20_ivsetup)
14071diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14072index 2f202f4..d9164d6 100644
14073--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14074+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14075@@ -24,6 +24,7 @@
14076 */
14077
14078 #include <linux/linkage.h>
14079+#include <asm/alternative-asm.h>
14080 #include "glue_helper-asm-avx.S"
14081
14082 .file "serpent-avx-x86_64-asm_64.S"
14083@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14084 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14085 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14086
14087+ pax_force_retaddr
14088 ret;
14089 ENDPROC(__serpent_enc_blk8_avx)
14090
14091@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14092 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14093 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14094
14095+ pax_force_retaddr
14096 ret;
14097 ENDPROC(__serpent_dec_blk8_avx)
14098
14099@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14100
14101 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14102
14103+ pax_force_retaddr
14104 ret;
14105 ENDPROC(serpent_ecb_enc_8way_avx)
14106
14107@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14108
14109 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14110
14111+ pax_force_retaddr
14112 ret;
14113 ENDPROC(serpent_ecb_dec_8way_avx)
14114
14115@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14116
14117 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14118
14119+ pax_force_retaddr
14120 ret;
14121 ENDPROC(serpent_cbc_dec_8way_avx)
14122
14123@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14124
14125 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14126
14127+ pax_force_retaddr
14128 ret;
14129 ENDPROC(serpent_ctr_8way_avx)
14130
14131@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14132 /* dst <= regs xor IVs(in dst) */
14133 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14134
14135+ pax_force_retaddr
14136 ret;
14137 ENDPROC(serpent_xts_enc_8way_avx)
14138
14139@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14140 /* dst <= regs xor IVs(in dst) */
14141 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14142
14143+ pax_force_retaddr
14144 ret;
14145 ENDPROC(serpent_xts_dec_8way_avx)
14146diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14147index b222085..abd483c 100644
14148--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14149+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14150@@ -15,6 +15,7 @@
14151 */
14152
14153 #include <linux/linkage.h>
14154+#include <asm/alternative-asm.h>
14155 #include "glue_helper-asm-avx2.S"
14156
14157 .file "serpent-avx2-asm_64.S"
14158@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14159 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14160 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14161
14162+ pax_force_retaddr
14163 ret;
14164 ENDPROC(__serpent_enc_blk16)
14165
14166@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14167 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14168 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14169
14170+ pax_force_retaddr
14171 ret;
14172 ENDPROC(__serpent_dec_blk16)
14173
14174@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14175
14176 vzeroupper;
14177
14178+ pax_force_retaddr
14179 ret;
14180 ENDPROC(serpent_ecb_enc_16way)
14181
14182@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14183
14184 vzeroupper;
14185
14186+ pax_force_retaddr
14187 ret;
14188 ENDPROC(serpent_ecb_dec_16way)
14189
14190@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14191
14192 vzeroupper;
14193
14194+ pax_force_retaddr
14195 ret;
14196 ENDPROC(serpent_cbc_dec_16way)
14197
14198@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14199
14200 vzeroupper;
14201
14202+ pax_force_retaddr
14203 ret;
14204 ENDPROC(serpent_ctr_16way)
14205
14206@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14207
14208 vzeroupper;
14209
14210+ pax_force_retaddr
14211 ret;
14212 ENDPROC(serpent_xts_enc_16way)
14213
14214@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14215
14216 vzeroupper;
14217
14218+ pax_force_retaddr
14219 ret;
14220 ENDPROC(serpent_xts_dec_16way)
14221diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14222index acc066c..1559cc4 100644
14223--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14224+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14225@@ -25,6 +25,7 @@
14226 */
14227
14228 #include <linux/linkage.h>
14229+#include <asm/alternative-asm.h>
14230
14231 .file "serpent-sse2-x86_64-asm_64.S"
14232 .text
14233@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14234 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14235 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14236
14237+ pax_force_retaddr
14238 ret;
14239
14240 .L__enc_xor8:
14241 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14242 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14243
14244+ pax_force_retaddr
14245 ret;
14246 ENDPROC(__serpent_enc_blk_8way)
14247
14248@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14249 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14250 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14251
14252+ pax_force_retaddr
14253 ret;
14254 ENDPROC(serpent_dec_blk_8way)
14255diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14256index a410950..9dfe7ad 100644
14257--- a/arch/x86/crypto/sha1_ssse3_asm.S
14258+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14259@@ -29,6 +29,7 @@
14260 */
14261
14262 #include <linux/linkage.h>
14263+#include <asm/alternative-asm.h>
14264
14265 #define CTX %rdi // arg1
14266 #define BUF %rsi // arg2
14267@@ -75,9 +76,9 @@
14268
14269 push %rbx
14270 push %rbp
14271- push %r12
14272+ push %r14
14273
14274- mov %rsp, %r12
14275+ mov %rsp, %r14
14276 sub $64, %rsp # allocate workspace
14277 and $~15, %rsp # align stack
14278
14279@@ -99,11 +100,12 @@
14280 xor %rax, %rax
14281 rep stosq
14282
14283- mov %r12, %rsp # deallocate workspace
14284+ mov %r14, %rsp # deallocate workspace
14285
14286- pop %r12
14287+ pop %r14
14288 pop %rbp
14289 pop %rbx
14290+ pax_force_retaddr
14291 ret
14292
14293 ENDPROC(\name)
14294diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14295index 642f156..51a513c 100644
14296--- a/arch/x86/crypto/sha256-avx-asm.S
14297+++ b/arch/x86/crypto/sha256-avx-asm.S
14298@@ -49,6 +49,7 @@
14299
14300 #ifdef CONFIG_AS_AVX
14301 #include <linux/linkage.h>
14302+#include <asm/alternative-asm.h>
14303
14304 ## assume buffers not aligned
14305 #define VMOVDQ vmovdqu
14306@@ -460,6 +461,7 @@ done_hash:
14307 popq %r13
14308 popq %rbp
14309 popq %rbx
14310+ pax_force_retaddr
14311 ret
14312 ENDPROC(sha256_transform_avx)
14313
14314diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14315index 9e86944..3795e6a 100644
14316--- a/arch/x86/crypto/sha256-avx2-asm.S
14317+++ b/arch/x86/crypto/sha256-avx2-asm.S
14318@@ -50,6 +50,7 @@
14319
14320 #ifdef CONFIG_AS_AVX2
14321 #include <linux/linkage.h>
14322+#include <asm/alternative-asm.h>
14323
14324 ## assume buffers not aligned
14325 #define VMOVDQ vmovdqu
14326@@ -720,6 +721,7 @@ done_hash:
14327 popq %r12
14328 popq %rbp
14329 popq %rbx
14330+ pax_force_retaddr
14331 ret
14332 ENDPROC(sha256_transform_rorx)
14333
14334diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14335index f833b74..8c62a9e 100644
14336--- a/arch/x86/crypto/sha256-ssse3-asm.S
14337+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14338@@ -47,6 +47,7 @@
14339 ########################################################################
14340
14341 #include <linux/linkage.h>
14342+#include <asm/alternative-asm.h>
14343
14344 ## assume buffers not aligned
14345 #define MOVDQ movdqu
14346@@ -471,6 +472,7 @@ done_hash:
14347 popq %rbp
14348 popq %rbx
14349
14350+ pax_force_retaddr
14351 ret
14352 ENDPROC(sha256_transform_ssse3)
14353
14354diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14355index 974dde9..a823ff9 100644
14356--- a/arch/x86/crypto/sha512-avx-asm.S
14357+++ b/arch/x86/crypto/sha512-avx-asm.S
14358@@ -49,6 +49,7 @@
14359
14360 #ifdef CONFIG_AS_AVX
14361 #include <linux/linkage.h>
14362+#include <asm/alternative-asm.h>
14363
14364 .text
14365
14366@@ -364,6 +365,7 @@ updateblock:
14367 mov frame_RSPSAVE(%rsp), %rsp
14368
14369 nowork:
14370+ pax_force_retaddr
14371 ret
14372 ENDPROC(sha512_transform_avx)
14373
14374diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14375index 568b961..ed20c37 100644
14376--- a/arch/x86/crypto/sha512-avx2-asm.S
14377+++ b/arch/x86/crypto/sha512-avx2-asm.S
14378@@ -51,6 +51,7 @@
14379
14380 #ifdef CONFIG_AS_AVX2
14381 #include <linux/linkage.h>
14382+#include <asm/alternative-asm.h>
14383
14384 .text
14385
14386@@ -678,6 +679,7 @@ done_hash:
14387
14388 # Restore Stack Pointer
14389 mov frame_RSPSAVE(%rsp), %rsp
14390+ pax_force_retaddr
14391 ret
14392 ENDPROC(sha512_transform_rorx)
14393
14394diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14395index fb56855..6edd768 100644
14396--- a/arch/x86/crypto/sha512-ssse3-asm.S
14397+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14398@@ -48,6 +48,7 @@
14399 ########################################################################
14400
14401 #include <linux/linkage.h>
14402+#include <asm/alternative-asm.h>
14403
14404 .text
14405
14406@@ -363,6 +364,7 @@ updateblock:
14407 mov frame_RSPSAVE(%rsp), %rsp
14408
14409 nowork:
14410+ pax_force_retaddr
14411 ret
14412 ENDPROC(sha512_transform_ssse3)
14413
14414diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14415index 0505813..b067311 100644
14416--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14417+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14418@@ -24,6 +24,7 @@
14419 */
14420
14421 #include <linux/linkage.h>
14422+#include <asm/alternative-asm.h>
14423 #include "glue_helper-asm-avx.S"
14424
14425 .file "twofish-avx-x86_64-asm_64.S"
14426@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14427 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14428 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14429
14430+ pax_force_retaddr
14431 ret;
14432 ENDPROC(__twofish_enc_blk8)
14433
14434@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14435 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14436 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14437
14438+ pax_force_retaddr
14439 ret;
14440 ENDPROC(__twofish_dec_blk8)
14441
14442@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14443
14444 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14445
14446+ pax_force_retaddr
14447 ret;
14448 ENDPROC(twofish_ecb_enc_8way)
14449
14450@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14451
14452 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14453
14454+ pax_force_retaddr
14455 ret;
14456 ENDPROC(twofish_ecb_dec_8way)
14457
14458@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14459 * %rdx: src
14460 */
14461
14462- pushq %r12;
14463+ pushq %r14;
14464
14465 movq %rsi, %r11;
14466- movq %rdx, %r12;
14467+ movq %rdx, %r14;
14468
14469 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14470
14471 call __twofish_dec_blk8;
14472
14473- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14474+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14475
14476- popq %r12;
14477+ popq %r14;
14478
14479+ pax_force_retaddr
14480 ret;
14481 ENDPROC(twofish_cbc_dec_8way)
14482
14483@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14484 * %rcx: iv (little endian, 128bit)
14485 */
14486
14487- pushq %r12;
14488+ pushq %r14;
14489
14490 movq %rsi, %r11;
14491- movq %rdx, %r12;
14492+ movq %rdx, %r14;
14493
14494 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14495 RD2, RX0, RX1, RY0);
14496
14497 call __twofish_enc_blk8;
14498
14499- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14500+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14501
14502- popq %r12;
14503+ popq %r14;
14504
14505+ pax_force_retaddr
14506 ret;
14507 ENDPROC(twofish_ctr_8way)
14508
14509@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14510 /* dst <= regs xor IVs(in dst) */
14511 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14512
14513+ pax_force_retaddr
14514 ret;
14515 ENDPROC(twofish_xts_enc_8way)
14516
14517@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14518 /* dst <= regs xor IVs(in dst) */
14519 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14520
14521+ pax_force_retaddr
14522 ret;
14523 ENDPROC(twofish_xts_dec_8way)
14524diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14525index 1c3b7ce..02f578d 100644
14526--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14527+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14528@@ -21,6 +21,7 @@
14529 */
14530
14531 #include <linux/linkage.h>
14532+#include <asm/alternative-asm.h>
14533
14534 .file "twofish-x86_64-asm-3way.S"
14535 .text
14536@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14537 popq %r13;
14538 popq %r14;
14539 popq %r15;
14540+ pax_force_retaddr
14541 ret;
14542
14543 .L__enc_xor3:
14544@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14545 popq %r13;
14546 popq %r14;
14547 popq %r15;
14548+ pax_force_retaddr
14549 ret;
14550 ENDPROC(__twofish_enc_blk_3way)
14551
14552@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14553 popq %r13;
14554 popq %r14;
14555 popq %r15;
14556+ pax_force_retaddr
14557 ret;
14558 ENDPROC(twofish_dec_blk_3way)
14559diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14560index a039d21..524b8b2 100644
14561--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14562+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14563@@ -22,6 +22,7 @@
14564
14565 #include <linux/linkage.h>
14566 #include <asm/asm-offsets.h>
14567+#include <asm/alternative-asm.h>
14568
14569 #define a_offset 0
14570 #define b_offset 4
14571@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14572
14573 popq R1
14574 movq $1,%rax
14575+ pax_force_retaddr
14576 ret
14577 ENDPROC(twofish_enc_blk)
14578
14579@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14580
14581 popq R1
14582 movq $1,%rax
14583+ pax_force_retaddr
14584 ret
14585 ENDPROC(twofish_dec_blk)
14586diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14587index ae6aad1..719d6d9 100644
14588--- a/arch/x86/ia32/ia32_aout.c
14589+++ b/arch/x86/ia32/ia32_aout.c
14590@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14591 unsigned long dump_start, dump_size;
14592 struct user32 dump;
14593
14594+ memset(&dump, 0, sizeof(dump));
14595+
14596 fs = get_fs();
14597 set_fs(KERNEL_DS);
14598 has_dumped = 1;
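The added memset() zeroes the user32 dump structure before it is filled in and written out. This closes a classic kernel-to-user infoleak: a structure assembled field by field still carries stale stack bytes in its padding and in any member the code misses. A runnable illustration of the pattern (struct layout and values invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sample {
	uint8_t  a;	/* 3 bytes of padding typically follow */
	uint32_t b;
};

int main(void)
{
	unsigned char out[sizeof(struct sample)];
	struct sample s;

	memset(&s, 0, sizeof(s));	/* the fix: padding is now zero */
	s.a = 1;
	s.b = 2;
	memcpy(out, &s, sizeof(s));	/* stand-in for the dump write */
	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}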
14599diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14600index f9e181a..300544c 100644
14601--- a/arch/x86/ia32/ia32_signal.c
14602+++ b/arch/x86/ia32/ia32_signal.c
14603@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14604 if (__get_user(set.sig[0], &frame->sc.oldmask)
14605 || (_COMPAT_NSIG_WORDS > 1
14606 && __copy_from_user((((char *) &set.sig) + 4),
14607- &frame->extramask,
14608+ frame->extramask,
14609 sizeof(frame->extramask))))
14610 goto badframe;
14611
14612@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14613 sp -= frame_size;
14614 /* Align the stack pointer according to the i386 ABI,
14615 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14616- sp = ((sp + 4) & -16ul) - 4;
14617+ sp = ((sp - 12) & -16ul) - 4;
14618 return (void __user *) sp;
14619 }
14620
14621@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14622 } else {
14623 /* Return stub is in 32bit vsyscall page */
14624 if (current->mm->context.vdso)
14625- restorer = current->mm->context.vdso +
14626- selected_vdso32->sym___kernel_sigreturn;
14627+ restorer = (void __force_user *)(current->mm->context.vdso +
14628+ selected_vdso32->sym___kernel_sigreturn);
14629 else
14630- restorer = &frame->retcode;
14631+ restorer = frame->retcode;
14632 }
14633
14634 put_user_try {
14635@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14636 * These are actually not used anymore, but left because some
14637 * gdb versions depend on them as a marker.
14638 */
14639- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14640+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14641 } put_user_catch(err);
14642
14643 if (err)
14644@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14645 0xb8,
14646 __NR_ia32_rt_sigreturn,
14647 0x80cd,
14648- 0,
14649+ 0
14650 };
14651
14652 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14653@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14654
14655 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14656 restorer = ksig->ka.sa.sa_restorer;
14657+ else if (current->mm->context.vdso)
14658+ /* Return stub is in 32bit vsyscall page */
14659+ restorer = (void __force_user *)(current->mm->context.vdso +
14660+ selected_vdso32->sym___kernel_rt_sigreturn);
14661 else
14662- restorer = current->mm->context.vdso +
14663- selected_vdso32->sym___kernel_rt_sigreturn;
14664+ restorer = frame->retcode;
14665 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14666
14667 /*
14668 * Not actually used anymore, but left because some gdb
14669 * versions need it.
14670 */
14671- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14672+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14673 } put_user_catch(err);
14674
14675 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
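Three changes run through the ia32_signal.c hunks: the extramask copy drops a spurious address-of on an array, the restorer now falls back to the vdso stub (or the frame's own retcode) with explicit user-space casts, and get_sigframe() changes its rounding. On the last point, both formulas return an address congruent to 12 mod 16 — so that ((sp + 4) & 15) == 0 holds at handler entry, as the comment requires — but the new one can never land on sp itself; it always stays at least 16 bytes below. A quick check of that arithmetic (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
		unsigned long before = ((sp + 4) & -16ul) - 4;
		unsigned long after  = ((sp - 12) & -16ul) - 4;
		printf("sp=%#lx old=%#lx new=%#lx (new <= sp-16: %d)\n",
		       sp, before, after, after <= sp - 16);
	}
	return 0;
}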
14676diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14677index 82e8a1d..4e998d5 100644
14678--- a/arch/x86/ia32/ia32entry.S
14679+++ b/arch/x86/ia32/ia32entry.S
14680@@ -15,8 +15,10 @@
14681 #include <asm/irqflags.h>
14682 #include <asm/asm.h>
14683 #include <asm/smap.h>
14684+#include <asm/pgtable.h>
14685 #include <linux/linkage.h>
14686 #include <linux/err.h>
14687+#include <asm/alternative-asm.h>
14688
14689 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14690 #include <linux/elf-em.h>
14691@@ -62,12 +64,12 @@
14692 */
14693 .macro LOAD_ARGS32 offset, _r9=0
14694 .if \_r9
14695- movl \offset+16(%rsp),%r9d
14696+ movl \offset+R9(%rsp),%r9d
14697 .endif
14698- movl \offset+40(%rsp),%ecx
14699- movl \offset+48(%rsp),%edx
14700- movl \offset+56(%rsp),%esi
14701- movl \offset+64(%rsp),%edi
14702+ movl \offset+RCX(%rsp),%ecx
14703+ movl \offset+RDX(%rsp),%edx
14704+ movl \offset+RSI(%rsp),%esi
14705+ movl \offset+RDI(%rsp),%edi
14706 movl %eax,%eax /* zero extension */
14707 .endm
14708
14709@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14710 ENDPROC(native_irq_enable_sysexit)
14711 #endif
14712
14713+ .macro pax_enter_kernel_user
14714+ pax_set_fptr_mask
14715+#ifdef CONFIG_PAX_MEMORY_UDEREF
14716+ call pax_enter_kernel_user
14717+#endif
14718+ .endm
14719+
14720+ .macro pax_exit_kernel_user
14721+#ifdef CONFIG_PAX_MEMORY_UDEREF
14722+ call pax_exit_kernel_user
14723+#endif
14724+#ifdef CONFIG_PAX_RANDKSTACK
14725+ pushq %rax
14726+ pushq %r11
14727+ call pax_randomize_kstack
14728+ popq %r11
14729+ popq %rax
14730+#endif
14731+ .endm
14732+
14733+ .macro pax_erase_kstack
14734+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14735+ call pax_erase_kstack
14736+#endif
14737+ .endm
14738+
14739 /*
14740 * 32bit SYSENTER instruction entry.
14741 *
14742@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14743 CFI_REGISTER rsp,rbp
14744 SWAPGS_UNSAFE_STACK
14745 movq PER_CPU_VAR(kernel_stack), %rsp
14746- addq $(KERNEL_STACK_OFFSET),%rsp
14747- /*
14748- * No need to follow this irqs on/off section: the syscall
14749- * disabled irqs, here we enable it straight after entry:
14750- */
14751- ENABLE_INTERRUPTS(CLBR_NONE)
14752 movl %ebp,%ebp /* zero extension */
14753 pushq_cfi $__USER32_DS
14754 /*CFI_REL_OFFSET ss,0*/
14755@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14756 CFI_REL_OFFSET rsp,0
14757 pushfq_cfi
14758 /*CFI_REL_OFFSET rflags,0*/
14759- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14760- CFI_REGISTER rip,r10
14761+ orl $X86_EFLAGS_IF,(%rsp)
14762+ GET_THREAD_INFO(%r11)
14763+ movl TI_sysenter_return(%r11), %r11d
14764+ CFI_REGISTER rip,r11
14765 pushq_cfi $__USER32_CS
14766 /*CFI_REL_OFFSET cs,0*/
14767 movl %eax, %eax
14768- pushq_cfi %r10
14769+ pushq_cfi %r11
14770 CFI_REL_OFFSET rip,0
14771 pushq_cfi %rax
14772 cld
14773 SAVE_ARGS 0,1,0
14774+ pax_enter_kernel_user
14775+
14776+#ifdef CONFIG_PAX_RANDKSTACK
14777+ pax_erase_kstack
14778+#endif
14779+
14780+ /*
14781+ * No need to follow this irqs on/off section: the syscall
14782+ * disabled irqs, here we enable it straight after entry:
14783+ */
14784+ ENABLE_INTERRUPTS(CLBR_NONE)
14785 /* no need to do an access_ok check here because rbp has been
14786 32bit zero extended */
14787+
14788+#ifdef CONFIG_PAX_MEMORY_UDEREF
14789+ addq pax_user_shadow_base,%rbp
14790+ ASM_PAX_OPEN_USERLAND
14791+#endif
14792+
14793 ASM_STAC
14794 1: movl (%rbp),%ebp
14795 _ASM_EXTABLE(1b,ia32_badarg)
14796 ASM_CLAC
14797
14798+#ifdef CONFIG_PAX_MEMORY_UDEREF
14799+ ASM_PAX_CLOSE_USERLAND
14800+#endif
14801+
14802 /*
14803 * Sysenter doesn't filter flags, so we need to clear NT
14804 * ourselves. To save a few cycles, we can check whether
14805@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14806 jnz sysenter_fix_flags
14807 sysenter_flags_fixed:
14808
14809- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14810- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14811+ GET_THREAD_INFO(%r11)
14812+ orl $TS_COMPAT,TI_status(%r11)
14813+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14814 CFI_REMEMBER_STATE
14815 jnz sysenter_tracesys
14816 cmpq $(IA32_NR_syscalls-1),%rax
14817@@ -172,15 +218,18 @@ sysenter_do_call:
14818 sysenter_dispatch:
14819 call *ia32_sys_call_table(,%rax,8)
14820 movq %rax,RAX-ARGOFFSET(%rsp)
14821+ GET_THREAD_INFO(%r11)
14822 DISABLE_INTERRUPTS(CLBR_NONE)
14823 TRACE_IRQS_OFF
14824- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14825+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14826 jnz sysexit_audit
14827 sysexit_from_sys_call:
14828- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14829+ pax_exit_kernel_user
14830+ pax_erase_kstack
14831+ andl $~TS_COMPAT,TI_status(%r11)
14832 /* clear IF, that popfq doesn't enable interrupts early */
14833- andl $~0x200,EFLAGS-R11(%rsp)
14834- movl RIP-R11(%rsp),%edx /* User %eip */
14835+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14836+ movl RIP(%rsp),%edx /* User %eip */
14837 CFI_REGISTER rip,rdx
14838 RESTORE_ARGS 0,24,0,0,0,0
14839 xorq %r8,%r8
14840@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14841 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14842 movl %eax,%edi /* 1st arg: syscall number */
14843 call __audit_syscall_entry
14844+
14845+ pax_erase_kstack
14846+
14847 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14848 cmpq $(IA32_NR_syscalls-1),%rax
14849 ja ia32_badsys
14850@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14851 .endm
14852
14853 .macro auditsys_exit exit
14854- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14855+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14856 jnz ia32_ret_from_sys_call
14857 TRACE_IRQS_ON
14858 ENABLE_INTERRUPTS(CLBR_NONE)
14859@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14860 1: setbe %al /* 1 if error, 0 if not */
14861 movzbl %al,%edi /* zero-extend that into %edi */
14862 call __audit_syscall_exit
14863+ GET_THREAD_INFO(%r11)
14864 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14865 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14866 DISABLE_INTERRUPTS(CLBR_NONE)
14867 TRACE_IRQS_OFF
14868- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14869+ testl %edi,TI_flags(%r11)
14870 jz \exit
14871 CLEAR_RREGS -ARGOFFSET
14872 jmp int_with_check
14873@@ -253,7 +306,7 @@ sysenter_fix_flags:
14874
14875 sysenter_tracesys:
14876 #ifdef CONFIG_AUDITSYSCALL
14877- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14878+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14879 jz sysenter_auditsys
14880 #endif
14881 SAVE_REST
14882@@ -265,6 +318,9 @@ sysenter_tracesys:
14883 RESTORE_REST
14884 cmpq $(IA32_NR_syscalls-1),%rax
14885 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14886+
14887+ pax_erase_kstack
14888+
14889 jmp sysenter_do_call
14890 CFI_ENDPROC
14891 ENDPROC(ia32_sysenter_target)
14892@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14893 ENTRY(ia32_cstar_target)
14894 CFI_STARTPROC32 simple
14895 CFI_SIGNAL_FRAME
14896- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14897+ CFI_DEF_CFA rsp,0
14898 CFI_REGISTER rip,rcx
14899 /*CFI_REGISTER rflags,r11*/
14900 SWAPGS_UNSAFE_STACK
14901 movl %esp,%r8d
14902 CFI_REGISTER rsp,r8
14903 movq PER_CPU_VAR(kernel_stack),%rsp
14904+ SAVE_ARGS 8*6,0,0
14905+ pax_enter_kernel_user
14906+
14907+#ifdef CONFIG_PAX_RANDKSTACK
14908+ pax_erase_kstack
14909+#endif
14910+
14911 /*
14912 * No need to follow this irqs on/off section: the syscall
14913 * disabled irqs and here we enable it straight after entry:
14914 */
14915 ENABLE_INTERRUPTS(CLBR_NONE)
14916- SAVE_ARGS 8,0,0
14917 movl %eax,%eax /* zero extension */
14918 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14919 movq %rcx,RIP-ARGOFFSET(%rsp)
14920@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14921 /* no need to do an access_ok check here because r8 has been
14922 32bit zero extended */
14923 /* hardware stack frame is complete now */
14924+
14925+#ifdef CONFIG_PAX_MEMORY_UDEREF
14926+ ASM_PAX_OPEN_USERLAND
14927+ movq pax_user_shadow_base,%r8
14928+ addq RSP-ARGOFFSET(%rsp),%r8
14929+#endif
14930+
14931 ASM_STAC
14932 1: movl (%r8),%r9d
14933 _ASM_EXTABLE(1b,ia32_badarg)
14934 ASM_CLAC
14935- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14936- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14937+
14938+#ifdef CONFIG_PAX_MEMORY_UDEREF
14939+ ASM_PAX_CLOSE_USERLAND
14940+#endif
14941+
14942+ GET_THREAD_INFO(%r11)
14943+ orl $TS_COMPAT,TI_status(%r11)
14944+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14945 CFI_REMEMBER_STATE
14946 jnz cstar_tracesys
14947 cmpq $IA32_NR_syscalls-1,%rax
14948@@ -335,13 +410,16 @@ cstar_do_call:
14949 cstar_dispatch:
14950 call *ia32_sys_call_table(,%rax,8)
14951 movq %rax,RAX-ARGOFFSET(%rsp)
14952+ GET_THREAD_INFO(%r11)
14953 DISABLE_INTERRUPTS(CLBR_NONE)
14954 TRACE_IRQS_OFF
14955- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14956+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14957 jnz sysretl_audit
14958 sysretl_from_sys_call:
14959- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14960- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14961+ pax_exit_kernel_user
14962+ pax_erase_kstack
14963+ andl $~TS_COMPAT,TI_status(%r11)
14964+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14965 movl RIP-ARGOFFSET(%rsp),%ecx
14966 CFI_REGISTER rip,rcx
14967 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14968@@ -368,7 +446,7 @@ sysretl_audit:
14969
14970 cstar_tracesys:
14971 #ifdef CONFIG_AUDITSYSCALL
14972- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14973+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14974 jz cstar_auditsys
14975 #endif
14976 xchgl %r9d,%ebp
14977@@ -382,11 +460,19 @@ cstar_tracesys:
14978 xchgl %ebp,%r9d
14979 cmpq $(IA32_NR_syscalls-1),%rax
14980 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14981+
14982+ pax_erase_kstack
14983+
14984 jmp cstar_do_call
14985 END(ia32_cstar_target)
14986
14987 ia32_badarg:
14988 ASM_CLAC
14989+
14990+#ifdef CONFIG_PAX_MEMORY_UDEREF
14991+ ASM_PAX_CLOSE_USERLAND
14992+#endif
14993+
14994 movq $-EFAULT,%rax
14995 jmp ia32_sysret
14996 CFI_ENDPROC
14997@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14998 CFI_REL_OFFSET rip,RIP-RIP
14999 PARAVIRT_ADJUST_EXCEPTION_FRAME
15000 SWAPGS
15001- /*
15002- * No need to follow this irqs on/off section: the syscall
15003- * disabled irqs and here we enable it straight after entry:
15004- */
15005- ENABLE_INTERRUPTS(CLBR_NONE)
15006 movl %eax,%eax
15007 pushq_cfi %rax
15008 cld
15009 /* note the registers are not zero extended to the sf.
15010 this could be a problem. */
15011 SAVE_ARGS 0,1,0
15012- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15013- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15014+ pax_enter_kernel_user
15015+
15016+#ifdef CONFIG_PAX_RANDKSTACK
15017+ pax_erase_kstack
15018+#endif
15019+
15020+ /*
15021+ * No need to follow this irqs on/off section: the syscall
15022+ * disabled irqs and here we enable it straight after entry:
15023+ */
15024+ ENABLE_INTERRUPTS(CLBR_NONE)
15025+ GET_THREAD_INFO(%r11)
15026+ orl $TS_COMPAT,TI_status(%r11)
15027+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15028 jnz ia32_tracesys
15029 cmpq $(IA32_NR_syscalls-1),%rax
15030 ja ia32_badsys
15031@@ -458,6 +551,9 @@ ia32_tracesys:
15032 RESTORE_REST
15033 cmpq $(IA32_NR_syscalls-1),%rax
15034 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15035+
15036+ pax_erase_kstack
15037+
15038 jmp ia32_do_call
15039 END(ia32_syscall)
15040
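
The hunks above weave three PaX mechanisms into the 32-bit syscall entry paths: under CONFIG_PAX_MEMORY_UDEREF the kernel must not touch user pages directly, so the sixth-argument load from the user stack goes through a shadow alias based at pax_user_shadow_base and is bracketed by ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND; pax_erase_kstack scrubs the kernel stack on exit to limit information leaks; and thread_info is now reached via GET_THREAD_INFO(%r11) instead of an offset from %rsp. A minimal C sketch of the shadow-read idea follows (pax_user_shadow_base is the symbol from the patch; open_userland()/close_userland() are hypothetical stand-ins for the ASM_PAX_* brackets):

/* Sketch only: under UDEREF a kernel-side alias of user memory lives
 * at pax_user_shadow_base, and access to it must be explicitly opened. */
extern unsigned long pax_user_shadow_base;      /* from the patch */

static unsigned int read_user_stack_word(unsigned long user_rsp)
{
        unsigned int val;

        open_userland();                        /* hypothetical helper */
        val = *(unsigned int *)(pax_user_shadow_base + user_rsp);
        close_userland();                       /* hypothetical helper */
        return val;
}
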
15041diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15042index 8e0ceec..af13504 100644
15043--- a/arch/x86/ia32/sys_ia32.c
15044+++ b/arch/x86/ia32/sys_ia32.c
15045@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15046 */
15047 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15048 {
15049- typeof(ubuf->st_uid) uid = 0;
15050- typeof(ubuf->st_gid) gid = 0;
15051+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15052+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15053 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15054 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15055 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
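
The cp_stat64() change swaps typeof(ubuf->st_uid) for typeof(((struct stat64 *)0)->st_uid). Because typeof is an unevaluated context, applying a member access to a null struct pointer is safe and yields the member's type without going through the __user-qualified ubuf expression, which checkers would otherwise treat as a dereference of a user pointer. The idiom in isolation, with a made-up struct:

#include <stdio.h>

struct example { unsigned short id; };

int main(void)
{
        /* no object is needed and nothing is dereferenced:
           typeof() only inspects the type of the expression */
        typeof(((struct example *)0)->id) v = 42;
        printf("%zu %u\n", sizeof(v), (unsigned)v);
        return 0;
}
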
15056diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15057index 372231c..51b537d 100644
15058--- a/arch/x86/include/asm/alternative-asm.h
15059+++ b/arch/x86/include/asm/alternative-asm.h
15060@@ -18,6 +18,45 @@
15061 .endm
15062 #endif
15063
15064+#ifdef KERNEXEC_PLUGIN
15065+ .macro pax_force_retaddr_bts rip=0
15066+ btsq $63,\rip(%rsp)
15067+ .endm
15068+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15069+ .macro pax_force_retaddr rip=0, reload=0
15070+ btsq $63,\rip(%rsp)
15071+ .endm
15072+ .macro pax_force_fptr ptr
15073+ btsq $63,\ptr
15074+ .endm
15075+ .macro pax_set_fptr_mask
15076+ .endm
15077+#endif
15078+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15079+ .macro pax_force_retaddr rip=0, reload=0
15080+ .if \reload
15081+ pax_set_fptr_mask
15082+ .endif
15083+ orq %r12,\rip(%rsp)
15084+ .endm
15085+ .macro pax_force_fptr ptr
15086+ orq %r12,\ptr
15087+ .endm
15088+ .macro pax_set_fptr_mask
15089+ movabs $0x8000000000000000,%r12
15090+ .endm
15091+#endif
15092+#else
15093+ .macro pax_force_retaddr rip=0, reload=0
15094+ .endm
15095+ .macro pax_force_fptr ptr
15096+ .endm
15097+ .macro pax_force_retaddr_bts rip=0
15098+ .endm
15099+ .macro pax_set_fptr_mask
15100+ .endm
15101+#endif
15102+
15103 .macro altinstruction_entry orig alt feature orig_len alt_len
15104 .long \orig - .
15105 .long \alt - .
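
The pax_force_retaddr/pax_force_fptr macros implement the KERNEXEC plugin's pointer hardening on amd64: saved return addresses and function pointers get bit 63 set, either with btsq or, in the _OR method, with orq against %r12 preloaded to 0x8000000000000000. That forces the pointer into the upper, kernel half of the canonical address space, so a corrupted value can never redirect execution into userland. The invariant in plain C (a sketch of the idea, not the plugin's output):

#include <stdint.h>

#define KERNEL_POINTER_MASK 0x8000000000000000ULL  /* the value kept in %r12 */

/* mirrors "orq %r12,\rip(%rsp)": whatever the low bits say, the
   result lands in the kernel half of the address space */
static inline uint64_t pax_force_pointer(uint64_t p)
{
        return p | KERNEL_POINTER_MASK;
}
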
15106diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15107index 473bdbe..b1e3377 100644
15108--- a/arch/x86/include/asm/alternative.h
15109+++ b/arch/x86/include/asm/alternative.h
15110@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15111 ".pushsection .discard,\"aw\",@progbits\n" \
15112 DISCARD_ENTRY(1) \
15113 ".popsection\n" \
15114- ".pushsection .altinstr_replacement, \"ax\"\n" \
15115+ ".pushsection .altinstr_replacement, \"a\"\n" \
15116 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15117 ".popsection"
15118
15119@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15120 DISCARD_ENTRY(1) \
15121 DISCARD_ENTRY(2) \
15122 ".popsection\n" \
15123- ".pushsection .altinstr_replacement, \"ax\"\n" \
15124+ ".pushsection .altinstr_replacement, \"a\"\n" \
15125 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15126 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15127 ".popsection"
15128diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15129index 465b309..ab7e51f 100644
15130--- a/arch/x86/include/asm/apic.h
15131+++ b/arch/x86/include/asm/apic.h
15132@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15133
15134 #ifdef CONFIG_X86_LOCAL_APIC
15135
15136-extern unsigned int apic_verbosity;
15137+extern int apic_verbosity;
15138 extern int local_apic_timer_c2_ok;
15139
15140 extern int disable_apic;
15141diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15142index 20370c6..a2eb9b0 100644
15143--- a/arch/x86/include/asm/apm.h
15144+++ b/arch/x86/include/asm/apm.h
15145@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15146 __asm__ __volatile__(APM_DO_ZERO_SEGS
15147 "pushl %%edi\n\t"
15148 "pushl %%ebp\n\t"
15149- "lcall *%%cs:apm_bios_entry\n\t"
15150+ "lcall *%%ss:apm_bios_entry\n\t"
15151 "setc %%al\n\t"
15152 "popl %%ebp\n\t"
15153 "popl %%edi\n\t"
15154@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15155 __asm__ __volatile__(APM_DO_ZERO_SEGS
15156 "pushl %%edi\n\t"
15157 "pushl %%ebp\n\t"
15158- "lcall *%%cs:apm_bios_entry\n\t"
15159+ "lcall *%%ss:apm_bios_entry\n\t"
15160 "setc %%bl\n\t"
15161 "popl %%ebp\n\t"
15162 "popl %%edi\n\t"
15163diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15164index 5e5cd12..51cdc93 100644
15165--- a/arch/x86/include/asm/atomic.h
15166+++ b/arch/x86/include/asm/atomic.h
15167@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15168 }
15169
15170 /**
15171+ * atomic_read_unchecked - read atomic variable
15172+ * @v: pointer of type atomic_unchecked_t
15173+ *
15174+ * Atomically reads the value of @v.
15175+ */
15176+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15177+{
15178+ return ACCESS_ONCE((v)->counter);
15179+}
15180+
15181+/**
15182 * atomic_set - set atomic variable
15183 * @v: pointer of type atomic_t
15184 * @i: required value
15185@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15186 }
15187
15188 /**
15189+ * atomic_set_unchecked - set atomic variable
15190+ * @v: pointer of type atomic_unchecked_t
15191+ * @i: required value
15192+ *
15193+ * Atomically sets the value of @v to @i.
15194+ */
15195+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15196+{
15197+ v->counter = i;
15198+}
15199+
15200+/**
15201 * atomic_add - add integer to atomic variable
15202 * @i: integer value to add
15203 * @v: pointer of type atomic_t
15204@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15205 */
15206 static inline void atomic_add(int i, atomic_t *v)
15207 {
15208- asm volatile(LOCK_PREFIX "addl %1,%0"
15209+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15210+
15211+#ifdef CONFIG_PAX_REFCOUNT
15212+ "jno 0f\n"
15213+ LOCK_PREFIX "subl %1,%0\n"
15214+ "int $4\n0:\n"
15215+ _ASM_EXTABLE(0b, 0b)
15216+#endif
15217+
15218+ : "+m" (v->counter)
15219+ : "ir" (i));
15220+}
15221+
15222+/**
15223+ * atomic_add_unchecked - add integer to atomic variable
15224+ * @i: integer value to add
15225+ * @v: pointer of type atomic_unchecked_t
15226+ *
15227+ * Atomically adds @i to @v.
15228+ */
15229+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15230+{
15231+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15232 : "+m" (v->counter)
15233 : "ir" (i));
15234 }
15235@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15236 */
15237 static inline void atomic_sub(int i, atomic_t *v)
15238 {
15239- asm volatile(LOCK_PREFIX "subl %1,%0"
15240+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15241+
15242+#ifdef CONFIG_PAX_REFCOUNT
15243+ "jno 0f\n"
15244+ LOCK_PREFIX "addl %1,%0\n"
15245+ "int $4\n0:\n"
15246+ _ASM_EXTABLE(0b, 0b)
15247+#endif
15248+
15249+ : "+m" (v->counter)
15250+ : "ir" (i));
15251+}
15252+
15253+/**
15254+ * atomic_sub_unchecked - subtract integer from atomic variable
15255+ * @i: integer value to subtract
15256+ * @v: pointer of type atomic_unchecked_t
15257+ *
15258+ * Atomically subtracts @i from @v.
15259+ */
15260+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15261+{
15262+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15263 : "+m" (v->counter)
15264 : "ir" (i));
15265 }
15266@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15267 */
15268 static inline int atomic_sub_and_test(int i, atomic_t *v)
15269 {
15270- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15271+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15272 }
15273
15274 /**
15275@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15276 */
15277 static inline void atomic_inc(atomic_t *v)
15278 {
15279- asm volatile(LOCK_PREFIX "incl %0"
15280+ asm volatile(LOCK_PREFIX "incl %0\n"
15281+
15282+#ifdef CONFIG_PAX_REFCOUNT
15283+ "jno 0f\n"
15284+ LOCK_PREFIX "decl %0\n"
15285+ "int $4\n0:\n"
15286+ _ASM_EXTABLE(0b, 0b)
15287+#endif
15288+
15289+ : "+m" (v->counter));
15290+}
15291+
15292+/**
15293+ * atomic_inc_unchecked - increment atomic variable
15294+ * @v: pointer of type atomic_unchecked_t
15295+ *
15296+ * Atomically increments @v by 1.
15297+ */
15298+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15299+{
15300+ asm volatile(LOCK_PREFIX "incl %0\n"
15301 : "+m" (v->counter));
15302 }
15303
15304@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15305 */
15306 static inline void atomic_dec(atomic_t *v)
15307 {
15308- asm volatile(LOCK_PREFIX "decl %0"
15309+ asm volatile(LOCK_PREFIX "decl %0\n"
15310+
15311+#ifdef CONFIG_PAX_REFCOUNT
15312+ "jno 0f\n"
15313+ LOCK_PREFIX "incl %0\n"
15314+ "int $4\n0:\n"
15315+ _ASM_EXTABLE(0b, 0b)
15316+#endif
15317+
15318+ : "+m" (v->counter));
15319+}
15320+
15321+/**
15322+ * atomic_dec_unchecked - decrement atomic variable
15323+ * @v: pointer of type atomic_unchecked_t
15324+ *
15325+ * Atomically decrements @v by 1.
15326+ */
15327+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15328+{
15329+ asm volatile(LOCK_PREFIX "decl %0\n"
15330 : "+m" (v->counter));
15331 }
15332
15333@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15334 */
15335 static inline int atomic_dec_and_test(atomic_t *v)
15336 {
15337- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15338+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15339 }
15340
15341 /**
15342@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15343 */
15344 static inline int atomic_inc_and_test(atomic_t *v)
15345 {
15346- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15347+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15348+}
15349+
15350+/**
15351+ * atomic_inc_and_test_unchecked - increment and test
15352+ * @v: pointer of type atomic_unchecked_t
15353+ *
15354+ * Atomically increments @v by 1
15355+ * and returns true if the result is zero, or false for all
15356+ * other cases.
15357+ */
15358+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15359+{
15360+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15361 }
15362
15363 /**
15364@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15365 */
15366 static inline int atomic_add_negative(int i, atomic_t *v)
15367 {
15368- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15369+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15370 }
15371
15372 /**
15373@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15374 *
15375 * Atomically adds @i to @v and returns @i + @v
15376 */
15377-static inline int atomic_add_return(int i, atomic_t *v)
15378+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15379+{
15380+ return i + xadd_check_overflow(&v->counter, i);
15381+}
15382+
15383+/**
15384+ * atomic_add_return_unchecked - add integer and return
15385+ * @i: integer value to add
15386+ * @v: pointer of type atomic_unchecked_t
15387+ *
15388+ * Atomically adds @i to @v and returns @i + @v
15389+ */
15390+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15391 {
15392 return i + xadd(&v->counter, i);
15393 }
15394@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15395 *
15396 * Atomically subtracts @i from @v and returns @v - @i
15397 */
15398-static inline int atomic_sub_return(int i, atomic_t *v)
15399+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15400 {
15401 return atomic_add_return(-i, v);
15402 }
15403
15404 #define atomic_inc_return(v) (atomic_add_return(1, v))
15405+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15406+{
15407+ return atomic_add_return_unchecked(1, v);
15408+}
15409 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15410
15411-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15412+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15413+{
15414+ return cmpxchg(&v->counter, old, new);
15415+}
15416+
15417+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15418 {
15419 return cmpxchg(&v->counter, old, new);
15420 }
15421@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15422 return xchg(&v->counter, new);
15423 }
15424
15425+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15426+{
15427+ return xchg(&v->counter, new);
15428+}
15429+
15430 /**
15431 * __atomic_add_unless - add unless the number is already a given value
15432 * @v: pointer of type atomic_t
15433@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15434 */
15435 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15436 {
15437- int c, old;
15438+ int c, old, new;
15439 c = atomic_read(v);
15440 for (;;) {
15441- if (unlikely(c == (u)))
15442+ if (unlikely(c == u))
15443 break;
15444- old = atomic_cmpxchg((v), c, c + (a));
15445+
15446+ asm volatile("addl %2,%0\n"
15447+
15448+#ifdef CONFIG_PAX_REFCOUNT
15449+ "jno 0f\n"
15450+ "subl %2,%0\n"
15451+ "int $4\n0:\n"
15452+ _ASM_EXTABLE(0b, 0b)
15453+#endif
15454+
15455+ : "=r" (new)
15456+ : "0" (c), "ir" (a));
15457+
15458+ old = atomic_cmpxchg(v, c, new);
15459 if (likely(old == c))
15460 break;
15461 c = old;
15462@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15463 }
15464
15465 /**
15466+ * atomic_inc_not_zero_hint - increment if not null
15467+ * @v: pointer of type atomic_t
15468+ * @hint: probable value of the atomic before the increment
15469+ *
15470+ * This version of atomic_inc_not_zero() gives a hint of probable
15471+ * value of the atomic. This helps processor to not read the memory
15472+ * before doing the atomic read/modify/write cycle, lowering
15473+ * number of bus transactions on some arches.
15474+ *
15475+ * Returns: 0 if increment was not done, 1 otherwise.
15476+ */
15477+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15478+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15479+{
15480+ int val, c = hint, new;
15481+
15482+ /* sanity test, should be removed by compiler if hint is a constant */
15483+ if (!hint)
15484+ return __atomic_add_unless(v, 1, 0);
15485+
15486+ do {
15487+ asm volatile("incl %0\n"
15488+
15489+#ifdef CONFIG_PAX_REFCOUNT
15490+ "jno 0f\n"
15491+ "decl %0\n"
15492+ "int $4\n0:\n"
15493+ _ASM_EXTABLE(0b, 0b)
15494+#endif
15495+
15496+ : "=r" (new)
15497+ : "0" (c));
15498+
15499+ val = atomic_cmpxchg(v, c, new);
15500+ if (val == c)
15501+ return 1;
15502+ c = val;
15503+ } while (c);
15504+
15505+ return 0;
15506+}
15507+
15508+/**
15509 * atomic_inc_short - increment of a short integer
15510 * @v: pointer to type int
15511 *
15512@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15513 }
15514
15515 /* These are x86-specific, used by some header files */
15516-#define atomic_clear_mask(mask, addr) \
15517- asm volatile(LOCK_PREFIX "andl %0,%1" \
15518- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15519+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15520+{
15521+ asm volatile(LOCK_PREFIX "andl %1,%0"
15522+ : "+m" (v->counter)
15523+ : "r" (~(mask))
15524+ : "memory");
15525+}
15526
15527-#define atomic_set_mask(mask, addr) \
15528- asm volatile(LOCK_PREFIX "orl %0,%1" \
15529- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15530- : "memory")
15531+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15532+{
15533+ asm volatile(LOCK_PREFIX "andl %1,%0"
15534+ : "+m" (v->counter)
15535+ : "r" (~(mask))
15536+ : "memory");
15537+}
15538+
15539+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15540+{
15541+ asm volatile(LOCK_PREFIX "orl %1,%0"
15542+ : "+m" (v->counter)
15543+ : "r" (mask)
15544+ : "memory");
15545+}
15546+
15547+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15548+{
15549+ asm volatile(LOCK_PREFIX "orl %1,%0"
15550+ : "+m" (v->counter)
15551+ : "r" (mask)
15552+ : "memory");
15553+}
15554
15555 #ifdef CONFIG_X86_32
15556 # include <asm/atomic64_32.h>
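
Every *_checked atomic op above follows one pattern: perform the locked operation, test the overflow flag with jno, and on overflow undo the operation and raise int $4 (the overflow exception), which PaX's trap handler turns into a killed task instead of a silently wrapped reference count; the *_unchecked twins keep the original semantics for counters that may legitimately wrap. A userspace C analog of atomic_inc under PAX_REFCOUNT (a sketch; abort() stands in for the int $4 trap):

#include <limits.h>
#include <stdatomic.h>
#include <stdlib.h>

static void atomic_inc_checked(atomic_int *v)
{
        int old = atomic_fetch_add(v, 1);       /* LOCK_PREFIX "incl %0" */
        if (old == INT_MAX) {                   /* 0x7fffffff -> 0x80000000 sets OF */
                atomic_fetch_sub(v, 1);         /* the "jno 0f; decl" fixup */
                abort();                        /* stands in for "int $4" */
        }
}
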
15557diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15558index b154de7..bf18a5a 100644
15559--- a/arch/x86/include/asm/atomic64_32.h
15560+++ b/arch/x86/include/asm/atomic64_32.h
15561@@ -12,6 +12,14 @@ typedef struct {
15562 u64 __aligned(8) counter;
15563 } atomic64_t;
15564
15565+#ifdef CONFIG_PAX_REFCOUNT
15566+typedef struct {
15567+ u64 __aligned(8) counter;
15568+} atomic64_unchecked_t;
15569+#else
15570+typedef atomic64_t atomic64_unchecked_t;
15571+#endif
15572+
15573 #define ATOMIC64_INIT(val) { (val) }
15574
15575 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15576@@ -37,21 +45,31 @@ typedef struct {
15577 ATOMIC64_DECL_ONE(sym##_386)
15578
15579 ATOMIC64_DECL_ONE(add_386);
15580+ATOMIC64_DECL_ONE(add_unchecked_386);
15581 ATOMIC64_DECL_ONE(sub_386);
15582+ATOMIC64_DECL_ONE(sub_unchecked_386);
15583 ATOMIC64_DECL_ONE(inc_386);
15584+ATOMIC64_DECL_ONE(inc_unchecked_386);
15585 ATOMIC64_DECL_ONE(dec_386);
15586+ATOMIC64_DECL_ONE(dec_unchecked_386);
15587 #endif
15588
15589 #define alternative_atomic64(f, out, in...) \
15590 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15591
15592 ATOMIC64_DECL(read);
15593+ATOMIC64_DECL(read_unchecked);
15594 ATOMIC64_DECL(set);
15595+ATOMIC64_DECL(set_unchecked);
15596 ATOMIC64_DECL(xchg);
15597 ATOMIC64_DECL(add_return);
15598+ATOMIC64_DECL(add_return_unchecked);
15599 ATOMIC64_DECL(sub_return);
15600+ATOMIC64_DECL(sub_return_unchecked);
15601 ATOMIC64_DECL(inc_return);
15602+ATOMIC64_DECL(inc_return_unchecked);
15603 ATOMIC64_DECL(dec_return);
15604+ATOMIC64_DECL(dec_return_unchecked);
15605 ATOMIC64_DECL(dec_if_positive);
15606 ATOMIC64_DECL(inc_not_zero);
15607 ATOMIC64_DECL(add_unless);
15608@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15609 }
15610
15611 /**
15612+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15613+ * @v: pointer to type atomic64_unchecked_t
15614+ * @o: expected value
15615+ * @n: new value
15616+ *
15617+ * Atomically sets @v to @n if it was equal to @o and returns
15618+ * the old value.
15619+ */
15620+
15621+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15622+{
15623+ return cmpxchg64(&v->counter, o, n);
15624+}
15625+
15626+/**
15627 * atomic64_xchg - xchg atomic64 variable
15628 * @v: pointer to type atomic64_t
15629 * @n: value to assign
15630@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15631 }
15632
15633 /**
15634+ * atomic64_set_unchecked - set atomic64 variable
15635+ * @v: pointer to type atomic64_unchecked_t
15636+ * @n: value to assign
15637+ *
15638+ * Atomically sets the value of @v to @n.
15639+ */
15640+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15641+{
15642+ unsigned high = (unsigned)(i >> 32);
15643+ unsigned low = (unsigned)i;
15644+ alternative_atomic64(set, /* no output */,
15645+ "S" (v), "b" (low), "c" (high)
15646+ : "eax", "edx", "memory");
15647+}
15648+
15649+/**
15650 * atomic64_read - read atomic64 variable
15651 * @v: pointer to type atomic64_t
15652 *
15653@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15654 }
15655
15656 /**
15657+ * atomic64_read_unchecked - read atomic64 variable
15658+ * @v: pointer to type atomic64_unchecked_t
15659+ *
15660+ * Atomically reads the value of @v and returns it.
15661+ */
15662+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15663+{
15664+ long long r;
15665+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15666+ return r;
15667+}
15668+
15669+/**
15670 * atomic64_add_return - add and return
15671 * @i: integer value to add
15672 * @v: pointer to type atomic64_t
15673@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15674 return i;
15675 }
15676
15677+/**
15678+ * atomic64_add_return_unchecked - add and return
15679+ * @i: integer value to add
15680+ * @v: pointer to type atomic64_unchecked_t
15681+ *
15682+ * Atomically adds @i to @v and returns @i + *@v
15683+ */
15684+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15685+{
15686+ alternative_atomic64(add_return_unchecked,
15687+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15688+ ASM_NO_INPUT_CLOBBER("memory"));
15689+ return i;
15690+}
15691+
15692 /*
15693 * Other variants with different arithmetic operators:
15694 */
15695@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15696 return a;
15697 }
15698
15699+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15700+{
15701+ long long a;
15702+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15703+ "S" (v) : "memory", "ecx");
15704+ return a;
15705+}
15706+
15707 static inline long long atomic64_dec_return(atomic64_t *v)
15708 {
15709 long long a;
15710@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15711 }
15712
15713 /**
15714+ * atomic64_add_unchecked - add integer to atomic64 variable
15715+ * @i: integer value to add
15716+ * @v: pointer to type atomic64_unchecked_t
15717+ *
15718+ * Atomically adds @i to @v.
15719+ */
15720+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15721+{
15722+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15723+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15724+ ASM_NO_INPUT_CLOBBER("memory"));
15725+ return i;
15726+}
15727+
15728+/**
15729 * atomic64_sub - subtract the atomic64 variable
15730 * @i: integer value to subtract
15731 * @v: pointer to type atomic64_t
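
On 32-bit x86 most 64-bit atomics have no single-instruction form, so the alternative_atomic64() calls dispatch to out-of-line helpers (cmpxchg8b-based, with plain 386 fallbacks); the patch merely declares _unchecked twins of each helper. The usual shape of such a helper, written portably (a sketch, not the actual assembly):

/* what an out-of-line atomic64_add_return helper boils down to:
   a compare-and-swap loop over the full 64-bit value */
static long long atomic64_add_return_sketch(long long i, long long *counter)
{
        long long old = __atomic_load_n(counter, __ATOMIC_RELAXED);
        long long new;

        do {
                new = old + i;  /* the _unchecked flavor: no overflow trap */
        } while (!__atomic_compare_exchange_n(counter, &old, new, 0,
                                              __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
        return new;
}
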
15732diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15733index f8d273e..02f39f3 100644
15734--- a/arch/x86/include/asm/atomic64_64.h
15735+++ b/arch/x86/include/asm/atomic64_64.h
15736@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15737 }
15738
15739 /**
15740+ * atomic64_read_unchecked - read atomic64 variable
15741+ * @v: pointer of type atomic64_unchecked_t
15742+ *
15743+ * Atomically reads the value of @v.
15744+ * Doesn't imply a read memory barrier.
15745+ */
15746+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15747+{
15748+ return ACCESS_ONCE((v)->counter);
15749+}
15750+
15751+/**
15752 * atomic64_set - set atomic64 variable
15753 * @v: pointer to type atomic64_t
15754 * @i: required value
15755@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15756 }
15757
15758 /**
15759+ * atomic64_set_unchecked - set atomic64 variable
15760+ * @v: pointer to type atomic64_unchecked_t
15761+ * @i: required value
15762+ *
15763+ * Atomically sets the value of @v to @i.
15764+ */
15765+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15766+{
15767+ v->counter = i;
15768+}
15769+
15770+/**
15771 * atomic64_add - add integer to atomic64 variable
15772 * @i: integer value to add
15773 * @v: pointer to type atomic64_t
15774@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15775 */
15776 static inline void atomic64_add(long i, atomic64_t *v)
15777 {
15778+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15779+
15780+#ifdef CONFIG_PAX_REFCOUNT
15781+ "jno 0f\n"
15782+ LOCK_PREFIX "subq %1,%0\n"
15783+ "int $4\n0:\n"
15784+ _ASM_EXTABLE(0b, 0b)
15785+#endif
15786+
15787+ : "=m" (v->counter)
15788+ : "er" (i), "m" (v->counter));
15789+}
15790+
15791+/**
15792+ * atomic64_add_unchecked - add integer to atomic64 variable
15793+ * @i: integer value to add
15794+ * @v: pointer to type atomic64_unchecked_t
15795+ *
15796+ * Atomically adds @i to @v.
15797+ */
15798+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15799+{
15800 asm volatile(LOCK_PREFIX "addq %1,%0"
15801 : "=m" (v->counter)
15802 : "er" (i), "m" (v->counter));
15803@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15804 */
15805 static inline void atomic64_sub(long i, atomic64_t *v)
15806 {
15807- asm volatile(LOCK_PREFIX "subq %1,%0"
15808+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15809+
15810+#ifdef CONFIG_PAX_REFCOUNT
15811+ "jno 0f\n"
15812+ LOCK_PREFIX "addq %1,%0\n"
15813+ "int $4\n0:\n"
15814+ _ASM_EXTABLE(0b, 0b)
15815+#endif
15816+
15817+ : "=m" (v->counter)
15818+ : "er" (i), "m" (v->counter));
15819+}
15820+
15821+/**
15822+ * atomic64_sub_unchecked - subtract the atomic64 variable
15823+ * @i: integer value to subtract
15824+ * @v: pointer to type atomic64_unchecked_t
15825+ *
15826+ * Atomically subtracts @i from @v.
15827+ */
15828+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15829+{
15830+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15831 : "=m" (v->counter)
15832 : "er" (i), "m" (v->counter));
15833 }
15834@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15835 */
15836 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15837 {
15838- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15839+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15840 }
15841
15842 /**
15843@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15844 */
15845 static inline void atomic64_inc(atomic64_t *v)
15846 {
15847+ asm volatile(LOCK_PREFIX "incq %0\n"
15848+
15849+#ifdef CONFIG_PAX_REFCOUNT
15850+ "jno 0f\n"
15851+ LOCK_PREFIX "decq %0\n"
15852+ "int $4\n0:\n"
15853+ _ASM_EXTABLE(0b, 0b)
15854+#endif
15855+
15856+ : "=m" (v->counter)
15857+ : "m" (v->counter));
15858+}
15859+
15860+/**
15861+ * atomic64_inc_unchecked - increment atomic64 variable
15862+ * @v: pointer to type atomic64_unchecked_t
15863+ *
15864+ * Atomically increments @v by 1.
15865+ */
15866+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15867+{
15868 asm volatile(LOCK_PREFIX "incq %0"
15869 : "=m" (v->counter)
15870 : "m" (v->counter));
15871@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15872 */
15873 static inline void atomic64_dec(atomic64_t *v)
15874 {
15875- asm volatile(LOCK_PREFIX "decq %0"
15876+ asm volatile(LOCK_PREFIX "decq %0\n"
15877+
15878+#ifdef CONFIG_PAX_REFCOUNT
15879+ "jno 0f\n"
15880+ LOCK_PREFIX "incq %0\n"
15881+ "int $4\n0:\n"
15882+ _ASM_EXTABLE(0b, 0b)
15883+#endif
15884+
15885+ : "=m" (v->counter)
15886+ : "m" (v->counter));
15887+}
15888+
15889+/**
15890+ * atomic64_dec_unchecked - decrement atomic64 variable
15891+ * @v: pointer to type atomic64_unchecked_t
15892+ *
15893+ * Atomically decrements @v by 1.
15894+ */
15895+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15896+{
15897+ asm volatile(LOCK_PREFIX "decq %0\n"
15898 : "=m" (v->counter)
15899 : "m" (v->counter));
15900 }
15901@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15902 */
15903 static inline int atomic64_dec_and_test(atomic64_t *v)
15904 {
15905- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15906+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15907 }
15908
15909 /**
15910@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15911 */
15912 static inline int atomic64_inc_and_test(atomic64_t *v)
15913 {
15914- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15915+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15916 }
15917
15918 /**
15919@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15920 */
15921 static inline int atomic64_add_negative(long i, atomic64_t *v)
15922 {
15923- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15924+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15925 }
15926
15927 /**
15928@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15929 */
15930 static inline long atomic64_add_return(long i, atomic64_t *v)
15931 {
15932+ return i + xadd_check_overflow(&v->counter, i);
15933+}
15934+
15935+/**
15936+ * atomic64_add_return_unchecked - add and return
15937+ * @i: integer value to add
15938+ * @v: pointer to type atomic64_unchecked_t
15939+ *
15940+ * Atomically adds @i to @v and returns @i + @v
15941+ */
15942+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15943+{
15944 return i + xadd(&v->counter, i);
15945 }
15946
15947@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15948 }
15949
15950 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15951+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15952+{
15953+ return atomic64_add_return_unchecked(1, v);
15954+}
15955 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15956
15957 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15958@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15959 return cmpxchg(&v->counter, old, new);
15960 }
15961
15962+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15963+{
15964+ return cmpxchg(&v->counter, old, new);
15965+}
15966+
15967 static inline long atomic64_xchg(atomic64_t *v, long new)
15968 {
15969 return xchg(&v->counter, new);
15970@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15971 */
15972 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15973 {
15974- long c, old;
15975+ long c, old, new;
15976 c = atomic64_read(v);
15977 for (;;) {
15978- if (unlikely(c == (u)))
15979+ if (unlikely(c == u))
15980 break;
15981- old = atomic64_cmpxchg((v), c, c + (a));
15982+
15983+ asm volatile("add %2,%0\n"
15984+
15985+#ifdef CONFIG_PAX_REFCOUNT
15986+ "jno 0f\n"
15987+ "sub %2,%0\n"
15988+ "int $4\n0:\n"
15989+ _ASM_EXTABLE(0b, 0b)
15990+#endif
15991+
15992+ : "=r" (new)
15993+ : "0" (c), "ir" (a));
15994+
15995+ old = atomic64_cmpxchg(v, c, new);
15996 if (likely(old == c))
15997 break;
15998 c = old;
15999 }
16000- return c != (u);
16001+ return c != u;
16002 }
16003
16004 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
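
The reworked atomic64_add_unless() moves the addition out of the cmpxchg() call so the PAX_REFCOUNT overflow check can sit between the load and the store. The same control flow using a compiler builtin in place of the inline assembly (sketch):

#include <stdlib.h>

static int atomic64_add_unless_sketch(long *v, long a, long u)
{
        long c = __atomic_load_n(v, __ATOMIC_RELAXED);

        for (;;) {
                long n;

                if (c == u)
                        break;
                if (__builtin_saddl_overflow(c, a, &n))
                        abort();        /* analog of the "jno 0f ... int $4" path */
                /* on failure, c is refreshed with the current value */
                if (__atomic_compare_exchange_n(v, &c, n, 0,
                                                __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                        break;
        }
        return c != u;
}
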
16005diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16006index 2ab1eb3..1e8cc5d 100644
16007--- a/arch/x86/include/asm/barrier.h
16008+++ b/arch/x86/include/asm/barrier.h
16009@@ -57,7 +57,7 @@
16010 do { \
16011 compiletime_assert_atomic_type(*p); \
16012 smp_mb(); \
16013- ACCESS_ONCE(*p) = (v); \
16014+ ACCESS_ONCE_RW(*p) = (v); \
16015 } while (0)
16016
16017 #define smp_load_acquire(p) \
16018@@ -74,7 +74,7 @@ do { \
16019 do { \
16020 compiletime_assert_atomic_type(*p); \
16021 barrier(); \
16022- ACCESS_ONCE(*p) = (v); \
16023+ ACCESS_ONCE_RW(*p) = (v); \
16024 } while (0)
16025
16026 #define smp_load_acquire(p) \
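
smp_store_release()/smp_store_mb() now write through ACCESS_ONCE_RW() rather than ACCESS_ONCE(). The reason lives elsewhere in the PaX tree: ACCESS_ONCE() is presumably given a const-qualified cast so the constify plugin can catch stray writes, and ACCESS_ONCE_RW() is the explicitly writable escape hatch. Under that assumption the pair looks like:

/* assumed definitions, following the convention this hunk relies on */
#define ACCESS_ONCE(x)          (*(volatile const typeof(x) *)&(x))  /* reads only */
#define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))        /* writable */
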
16027diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16028index cfe3b95..d01b118 100644
16029--- a/arch/x86/include/asm/bitops.h
16030+++ b/arch/x86/include/asm/bitops.h
16031@@ -50,7 +50,7 @@
16032 * a mask operation on a byte.
16033 */
16034 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16035-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16036+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16037 #define CONST_MASK(nr) (1 << ((nr) & 7))
16038
16039 /**
16040@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16041 */
16042 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16043 {
16044- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16045+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16046 }
16047
16048 /**
16049@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16050 */
16051 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16052 {
16053- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16054+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16055 }
16056
16057 /**
16058@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16059 */
16060 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16061 {
16062- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16063+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16064 }
16065
16066 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16067@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16068 *
16069 * Undefined if no bit exists, so code should check against 0 first.
16070 */
16071-static inline unsigned long __ffs(unsigned long word)
16072+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16073 {
16074 asm("rep; bsf %1,%0"
16075 : "=r" (word)
16076@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16077 *
16078 * Undefined if no zero exists, so code should check against ~0UL first.
16079 */
16080-static inline unsigned long ffz(unsigned long word)
16081+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16082 {
16083 asm("rep; bsf %1,%0"
16084 : "=r" (word)
16085@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16086 *
16087 * Undefined if no set bit exists, so code should check against 0 first.
16088 */
16089-static inline unsigned long __fls(unsigned long word)
16090+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16091 {
16092 asm("bsr %1,%0"
16093 : "=r" (word)
16094@@ -434,7 +434,7 @@ static inline int ffs(int x)
16095 * set bit if value is nonzero. The last (most significant) bit is
16096 * at position 32.
16097 */
16098-static inline int fls(int x)
16099+static inline int __intentional_overflow(-1) fls(int x)
16100 {
16101 int r;
16102
16103@@ -476,7 +476,7 @@ static inline int fls(int x)
16104 * at position 64.
16105 */
16106 #ifdef CONFIG_X86_64
16107-static __always_inline int fls64(__u64 x)
16108+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16109 {
16110 int bitpos = -1;
16111 /*
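
__ffs(), ffz(), fls() and friends are annotated with __intentional_overflow(-1) so the size_overflow gcc plugin skips them: bit scanning deliberately traffics in values the plugin would otherwise flag. Outside a plugin-enabled build the annotation must compile away, so its definition is presumably of this shape (an assumed sketch, not quoted from the patch):

/* assumed definition: a plugin-visible attribute, a no-op otherwise */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif
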
16112diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16113index 4fa687a..60f2d39 100644
16114--- a/arch/x86/include/asm/boot.h
16115+++ b/arch/x86/include/asm/boot.h
16116@@ -6,10 +6,15 @@
16117 #include <uapi/asm/boot.h>
16118
16119 /* Physical address where kernel should be loaded. */
16120-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16121+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16122 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16123 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16124
16125+#ifndef __ASSEMBLY__
16126+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16127+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16128+#endif
16129+
16130 /* Minimum kernel alignment, as a power of two */
16131 #ifdef CONFIG_X86_64
16132 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
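
Turning LOAD_PHYSICAL_ADDR from an arithmetic constant into the address of an extern char array lets the linker script, and therefore a relocated kernel, decide the value. Taking the address of such a symbol is the standard idiom; its contents are never read. The same idiom with a familiar kernel symbol:

/* classic linker-symbol idiom: the *address* is the datum */
extern unsigned char _etext[];   /* provided by the linker script */

static inline unsigned long kernel_text_end(void)
{
        return (unsigned long)_etext;   /* never dereference _etext itself */
}
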
16133diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16134index 48f99f1..d78ebf9 100644
16135--- a/arch/x86/include/asm/cache.h
16136+++ b/arch/x86/include/asm/cache.h
16137@@ -5,12 +5,13 @@
16138
16139 /* L1 cache line size */
16140 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16141-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16143
16144 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16145+#define __read_only __attribute__((__section__(".data..read_only")))
16146
16147 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16148-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16149+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16150
16151 #ifdef CONFIG_X86_VSMP
16152 #ifdef CONFIG_SMP
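
__read_only places a variable in .data..read_only, which KERNEXEC maps without write permission after boot, giving set-once variables the same protection as const data. A hypothetical usage sketch; the write side needs the pax_open_kernel() bracket that the desc.h hunks further below demonstrate:

/* hypothetical example: written once during init, read-only afterwards */
static int hardening_enabled __read_only = 1;

static void __init disable_hardening(void)
{
        pax_open_kernel();
        hardening_enabled = 0;
        pax_close_kernel();
}
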
16153diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16154index 76659b6..72b8439 100644
16155--- a/arch/x86/include/asm/calling.h
16156+++ b/arch/x86/include/asm/calling.h
16157@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16158 #define RSP 152
16159 #define SS 160
16160
16161-#define ARGOFFSET R11
16162-#define SWFRAME ORIG_RAX
16163+#define ARGOFFSET R15
16164
16165 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16166- subq $9*8+\addskip, %rsp
16167- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16168- movq_cfi rdi, 8*8
16169- movq_cfi rsi, 7*8
16170- movq_cfi rdx, 6*8
16171+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16172+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16173+ movq_cfi rdi, RDI
16174+ movq_cfi rsi, RSI
16175+ movq_cfi rdx, RDX
16176
16177 .if \save_rcx
16178- movq_cfi rcx, 5*8
16179+ movq_cfi rcx, RCX
16180 .endif
16181
16182 .if \rax_enosys
16183- movq $-ENOSYS, 4*8(%rsp)
16184+ movq $-ENOSYS, RAX(%rsp)
16185 .else
16186- movq_cfi rax, 4*8
16187+ movq_cfi rax, RAX
16188 .endif
16189
16190 .if \save_r891011
16191- movq_cfi r8, 3*8
16192- movq_cfi r9, 2*8
16193- movq_cfi r10, 1*8
16194- movq_cfi r11, 0*8
16195+ movq_cfi r8, R8
16196+ movq_cfi r9, R9
16197+ movq_cfi r10, R10
16198+ movq_cfi r11, R11
16199 .endif
16200
16201+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16202+ movq_cfi r12, R12
16203+#endif
16204+
16205 .endm
16206
16207-#define ARG_SKIP (9*8)
16208+#define ARG_SKIP ORIG_RAX
16209
16210 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16211 rstor_r8910=1, rstor_rdx=1
16212+
16213+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16214+ movq_cfi_restore R12, r12
16215+#endif
16216+
16217 .if \rstor_r11
16218- movq_cfi_restore 0*8, r11
16219+ movq_cfi_restore R11, r11
16220 .endif
16221
16222 .if \rstor_r8910
16223- movq_cfi_restore 1*8, r10
16224- movq_cfi_restore 2*8, r9
16225- movq_cfi_restore 3*8, r8
16226+ movq_cfi_restore R10, r10
16227+ movq_cfi_restore R9, r9
16228+ movq_cfi_restore R8, r8
16229 .endif
16230
16231 .if \rstor_rax
16232- movq_cfi_restore 4*8, rax
16233+ movq_cfi_restore RAX, rax
16234 .endif
16235
16236 .if \rstor_rcx
16237- movq_cfi_restore 5*8, rcx
16238+ movq_cfi_restore RCX, rcx
16239 .endif
16240
16241 .if \rstor_rdx
16242- movq_cfi_restore 6*8, rdx
16243+ movq_cfi_restore RDX, rdx
16244 .endif
16245
16246- movq_cfi_restore 7*8, rsi
16247- movq_cfi_restore 8*8, rdi
16248+ movq_cfi_restore RSI, rsi
16249+ movq_cfi_restore RDI, rdi
16250
16251- .if ARG_SKIP+\addskip > 0
16252- addq $ARG_SKIP+\addskip, %rsp
16253- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16254+ .if ORIG_RAX+\addskip > 0
16255+ addq $ORIG_RAX+\addskip, %rsp
16256+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16257 .endif
16258 .endm
16259
16260- .macro LOAD_ARGS offset, skiprax=0
16261- movq \offset(%rsp), %r11
16262- movq \offset+8(%rsp), %r10
16263- movq \offset+16(%rsp), %r9
16264- movq \offset+24(%rsp), %r8
16265- movq \offset+40(%rsp), %rcx
16266- movq \offset+48(%rsp), %rdx
16267- movq \offset+56(%rsp), %rsi
16268- movq \offset+64(%rsp), %rdi
16269+ .macro LOAD_ARGS skiprax=0
16270+ movq R11(%rsp), %r11
16271+ movq R10(%rsp), %r10
16272+ movq R9(%rsp), %r9
16273+ movq R8(%rsp), %r8
16274+ movq RCX(%rsp), %rcx
16275+ movq RDX(%rsp), %rdx
16276+ movq RSI(%rsp), %rsi
16277+ movq RDI(%rsp), %rdi
16278 .if \skiprax
16279 .else
16280- movq \offset+72(%rsp), %rax
16281+ movq ORIG_RAX(%rsp), %rax
16282 .endif
16283 .endm
16284
16285-#define REST_SKIP (6*8)
16286-
16287 .macro SAVE_REST
16288- subq $REST_SKIP, %rsp
16289- CFI_ADJUST_CFA_OFFSET REST_SKIP
16290- movq_cfi rbx, 5*8
16291- movq_cfi rbp, 4*8
16292- movq_cfi r12, 3*8
16293- movq_cfi r13, 2*8
16294- movq_cfi r14, 1*8
16295- movq_cfi r15, 0*8
16296+ movq_cfi rbx, RBX
16297+ movq_cfi rbp, RBP
16298+
16299+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16300+ movq_cfi r12, R12
16301+#endif
16302+
16303+ movq_cfi r13, R13
16304+ movq_cfi r14, R14
16305+ movq_cfi r15, R15
16306 .endm
16307
16308 .macro RESTORE_REST
16309- movq_cfi_restore 0*8, r15
16310- movq_cfi_restore 1*8, r14
16311- movq_cfi_restore 2*8, r13
16312- movq_cfi_restore 3*8, r12
16313- movq_cfi_restore 4*8, rbp
16314- movq_cfi_restore 5*8, rbx
16315- addq $REST_SKIP, %rsp
16316- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16317+ movq_cfi_restore R15, r15
16318+ movq_cfi_restore R14, r14
16319+ movq_cfi_restore R13, r13
16320+
16321+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16322+ movq_cfi_restore R12, r12
16323+#endif
16324+
16325+ movq_cfi_restore RBP, rbp
16326+ movq_cfi_restore RBX, rbx
16327 .endm
16328
16329 .macro SAVE_ALL
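
SAVE_ARGS/RESTORE_ARGS now address the save area through the named pt_regs offsets (RDI, RSI, ..., ORIG_RAX) instead of magic N*8 values, which is what lets the KERNEXEC_PLUGIN_METHOD_OR variant slot an extra %r12 save into the frame without renumbering everything. Constants like these are conventionally generated from the C struct via offsetof(), in the style of asm-offsets.c (a sketch with a trimmed struct):

#include <stddef.h>

/* trimmed stand-in for struct pt_regs */
struct pt_regs_sketch {
        unsigned long r15, r14, r13, r12, rbp, rbx;
        unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
        unsigned long orig_rax, rip, cs, eflags, rsp, ss;
};

/* the asm-offsets trick: emit "->NAME value" markers into the assembly
   output (compiled with -S, post-processed, never assembled) */
#define DEFINE(sym, val) asm volatile("\n->" #sym " %0" : : "i" (val))

void make_offsets(void)
{
        DEFINE(RDI, offsetof(struct pt_regs_sketch, rdi));
        DEFINE(ORIG_RAX, offsetof(struct pt_regs_sketch, orig_rax));
}
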
16330diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16331index f50de69..2b0a458 100644
16332--- a/arch/x86/include/asm/checksum_32.h
16333+++ b/arch/x86/include/asm/checksum_32.h
16334@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16335 int len, __wsum sum,
16336 int *src_err_ptr, int *dst_err_ptr);
16337
16338+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16339+ int len, __wsum sum,
16340+ int *src_err_ptr, int *dst_err_ptr);
16341+
16342+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16343+ int len, __wsum sum,
16344+ int *src_err_ptr, int *dst_err_ptr);
16345+
16346 /*
16347 * Note: when you get a NULL pointer exception here this means someone
16348 * passed in an incorrect kernel address to one of these functions.
16349@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16350
16351 might_sleep();
16352 stac();
16353- ret = csum_partial_copy_generic((__force void *)src, dst,
16354+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16355 len, sum, err_ptr, NULL);
16356 clac();
16357
16358@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16359 might_sleep();
16360 if (access_ok(VERIFY_WRITE, dst, len)) {
16361 stac();
16362- ret = csum_partial_copy_generic(src, (__force void *)dst,
16363+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16364 len, sum, NULL, err_ptr);
16365 clac();
16366 return ret;
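
csum_partial_copy_generic() is split into _from_user/_to_user entry points because under UDEREF the two copy directions need different userland-access handling; the C callers keep the usual SMAP bracket, opening user access with stac() only around the copy and closing it with clac() immediately after. The bracket pattern in isolation (a sketch; do_copy() is a hypothetical stand-in):

/* sketch of the stac()/clac() bracket around a user-memory access */
static int copy_with_smap_bracket(void *dst, const void __user *src, int len)
{
        int ret;

        stac();                 /* set EFLAGS.AC: permit supervisor access */
        ret = do_copy(dst, (__force const void *)src, len);  /* hypothetical */
        clac();                 /* drop the permission again immediately */
        return ret;
}
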
16367diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16368index 99c105d7..2f667ac 100644
16369--- a/arch/x86/include/asm/cmpxchg.h
16370+++ b/arch/x86/include/asm/cmpxchg.h
16371@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16372 __compiletime_error("Bad argument size for cmpxchg");
16373 extern void __xadd_wrong_size(void)
16374 __compiletime_error("Bad argument size for xadd");
16375+extern void __xadd_check_overflow_wrong_size(void)
16376+ __compiletime_error("Bad argument size for xadd_check_overflow");
16377 extern void __add_wrong_size(void)
16378 __compiletime_error("Bad argument size for add");
16379+extern void __add_check_overflow_wrong_size(void)
16380+ __compiletime_error("Bad argument size for add_check_overflow");
16381
16382 /*
16383 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16384@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16385 __ret; \
16386 })
16387
16388+#ifdef CONFIG_PAX_REFCOUNT
16389+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16390+ ({ \
16391+ __typeof__ (*(ptr)) __ret = (arg); \
16392+ switch (sizeof(*(ptr))) { \
16393+ case __X86_CASE_L: \
16394+ asm volatile (lock #op "l %0, %1\n" \
16395+ "jno 0f\n" \
16396+ "mov %0,%1\n" \
16397+ "int $4\n0:\n" \
16398+ _ASM_EXTABLE(0b, 0b) \
16399+ : "+r" (__ret), "+m" (*(ptr)) \
16400+ : : "memory", "cc"); \
16401+ break; \
16402+ case __X86_CASE_Q: \
16403+ asm volatile (lock #op "q %q0, %1\n" \
16404+ "jno 0f\n" \
16405+ "mov %0,%1\n" \
16406+ "int $4\n0:\n" \
16407+ _ASM_EXTABLE(0b, 0b) \
16408+ : "+r" (__ret), "+m" (*(ptr)) \
16409+ : : "memory", "cc"); \
16410+ break; \
16411+ default: \
16412+ __ ## op ## _check_overflow_wrong_size(); \
16413+ } \
16414+ __ret; \
16415+ })
16416+#else
16417+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16418+#endif
16419+
16420 /*
16421 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16422 * Since this is generally used to protect other memory information, we
16423@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16424 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16425 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16426
16427+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16428+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16429+
16430 #define __add(ptr, inc, lock) \
16431 ({ \
16432 __typeof__ (*(ptr)) __ret = (inc); \
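
__xchg_op_check_overflow() keys everything off the OF flag ("jno 0f") that xadd leaves behind. For reference, the equivalent after-the-fact test in C: signed addition overflowed exactly when both operands have the sign the result lacks. Sketch:

/* C equivalent of the OF-flag test that the "jno" relies on */
static inline int signed_add_overflows(int a, int b)
{
        int r = (int)((unsigned int)a + (unsigned int)b); /* wrap, well-defined */
        return ((a ^ r) & (b ^ r)) < 0;  /* same-signed inputs, differently signed result */
}
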
16433diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16434index 59c6c40..5e0b22c 100644
16435--- a/arch/x86/include/asm/compat.h
16436+++ b/arch/x86/include/asm/compat.h
16437@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16438 typedef u32 compat_uint_t;
16439 typedef u32 compat_ulong_t;
16440 typedef u64 __attribute__((aligned(4))) compat_u64;
16441-typedef u32 compat_uptr_t;
16442+typedef u32 __user compat_uptr_t;
16443
16444 struct compat_timespec {
16445 compat_time_t tv_sec;
16446diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16447index aede2c3..40d7a8f 100644
16448--- a/arch/x86/include/asm/cpufeature.h
16449+++ b/arch/x86/include/asm/cpufeature.h
16450@@ -212,7 +212,7 @@
16451 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16452 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16453 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16454-
16455+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16456
16457 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16458 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16459@@ -220,7 +220,7 @@
16460 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16461 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16462 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16463-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16464+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16465 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16466 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16467 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16468@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16469 #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
16470 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16471 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16472+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16473
16474 #if __GNUC__ >= 4
16475 extern void warn_pre_alternatives(void);
16476@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16477
16478 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16479 t_warn:
16480- warn_pre_alternatives();
16481+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16482+ warn_pre_alternatives();
16483 return false;
16484 #endif
16485
16486@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16487 ".section .discard,\"aw\",@progbits\n"
16488 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16489 ".previous\n"
16490- ".section .altinstr_replacement,\"ax\"\n"
16491+ ".section .altinstr_replacement,\"a\"\n"
16492 "3: movb $1,%0\n"
16493 "4:\n"
16494 ".previous\n"
16495@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16496 " .byte 2b - 1b\n" /* src len */
16497 " .byte 4f - 3f\n" /* repl len */
16498 ".previous\n"
16499- ".section .altinstr_replacement,\"ax\"\n"
16500+ ".section .altinstr_replacement,\"a\"\n"
16501 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16502 "4:\n"
16503 ".previous\n"
16504@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16505 ".section .discard,\"aw\",@progbits\n"
16506 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16507 ".previous\n"
16508- ".section .altinstr_replacement,\"ax\"\n"
16509+ ".section .altinstr_replacement,\"a\"\n"
16510 "3: movb $0,%0\n"
16511 "4:\n"
16512 ".previous\n"
16513@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16514 ".section .discard,\"aw\",@progbits\n"
16515 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16516 ".previous\n"
16517- ".section .altinstr_replacement,\"ax\"\n"
16518+ ".section .altinstr_replacement,\"a\"\n"
16519 "5: movb $1,%0\n"
16520 "6:\n"
16521 ".previous\n"
16522diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16523index a94b82e..59ecefa 100644
16524--- a/arch/x86/include/asm/desc.h
16525+++ b/arch/x86/include/asm/desc.h
16526@@ -4,6 +4,7 @@
16527 #include <asm/desc_defs.h>
16528 #include <asm/ldt.h>
16529 #include <asm/mmu.h>
16530+#include <asm/pgtable.h>
16531
16532 #include <linux/smp.h>
16533 #include <linux/percpu.h>
16534@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16535
16536 desc->type = (info->read_exec_only ^ 1) << 1;
16537 desc->type |= info->contents << 2;
16538+ desc->type |= info->seg_not_present ^ 1;
16539
16540 desc->s = 1;
16541 desc->dpl = 0x3;
16542@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16543 }
16544
16545 extern struct desc_ptr idt_descr;
16546-extern gate_desc idt_table[];
16547-extern struct desc_ptr debug_idt_descr;
16548-extern gate_desc debug_idt_table[];
16549-
16550-struct gdt_page {
16551- struct desc_struct gdt[GDT_ENTRIES];
16552-} __attribute__((aligned(PAGE_SIZE)));
16553-
16554-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16555+extern gate_desc idt_table[IDT_ENTRIES];
16556+extern const struct desc_ptr debug_idt_descr;
16557+extern gate_desc debug_idt_table[IDT_ENTRIES];
16558
16559+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16560 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16561 {
16562- return per_cpu(gdt_page, cpu).gdt;
16563+ return cpu_gdt_table[cpu];
16564 }
16565
16566 #ifdef CONFIG_X86_64
16567@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16568 unsigned long base, unsigned dpl, unsigned flags,
16569 unsigned short seg)
16570 {
16571- gate->a = (seg << 16) | (base & 0xffff);
16572- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16573+ gate->gate.offset_low = base;
16574+ gate->gate.seg = seg;
16575+ gate->gate.reserved = 0;
16576+ gate->gate.type = type;
16577+ gate->gate.s = 0;
16578+ gate->gate.dpl = dpl;
16579+ gate->gate.p = 1;
16580+ gate->gate.offset_high = base >> 16;
16581 }
16582
16583 #endif
16584@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16585
16586 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16587 {
16588+ pax_open_kernel();
16589 memcpy(&idt[entry], gate, sizeof(*gate));
16590+ pax_close_kernel();
16591 }
16592
16593 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16594 {
16595+ pax_open_kernel();
16596 memcpy(&ldt[entry], desc, 8);
16597+ pax_close_kernel();
16598 }
16599
16600 static inline void
16601@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16602 default: size = sizeof(*gdt); break;
16603 }
16604
16605+ pax_open_kernel();
16606 memcpy(&gdt[entry], desc, size);
16607+ pax_close_kernel();
16608 }
16609
16610 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16611@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16612
16613 static inline void native_load_tr_desc(void)
16614 {
16615+ pax_open_kernel();
16616 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16617+ pax_close_kernel();
16618 }
16619
16620 static inline void native_load_gdt(const struct desc_ptr *dtr)
16621@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16622 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16623 unsigned int i;
16624
16625+ pax_open_kernel();
16626 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16627 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16628+ pax_close_kernel();
16629 }
16630
16631 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16632@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16633 preempt_enable();
16634 }
16635
16636-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16637+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16638 {
16639 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16640 }
16641@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16642 }
16643
16644 #ifdef CONFIG_X86_64
16645-static inline void set_nmi_gate(int gate, void *addr)
16646+static inline void set_nmi_gate(int gate, const void *addr)
16647 {
16648 gate_desc s;
16649
16650@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16651 #endif
16652
16653 #ifdef CONFIG_TRACING
16654-extern struct desc_ptr trace_idt_descr;
16655-extern gate_desc trace_idt_table[];
16656+extern const struct desc_ptr trace_idt_descr;
16657+extern gate_desc trace_idt_table[IDT_ENTRIES];
16658 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16659 {
16660 write_idt_entry(trace_idt_table, entry, gate);
16661 }
16662
16663-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16664+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16665 unsigned dpl, unsigned ist, unsigned seg)
16666 {
16667 gate_desc s;
16668@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16669 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16670 #endif
16671
16672-static inline void _set_gate(int gate, unsigned type, void *addr,
16673+static inline void _set_gate(int gate, unsigned type, const void *addr,
16674 unsigned dpl, unsigned ist, unsigned seg)
16675 {
16676 gate_desc s;
16677@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16678 #define set_intr_gate(n, addr) \
16679 do { \
16680 BUG_ON((unsigned)n > 0xFF); \
16681- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16682+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16683 __KERNEL_CS); \
16684- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16685+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16686 0, 0, __KERNEL_CS); \
16687 } while (0)
16688
16689@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16690 /*
16691 * This routine sets up an interrupt gate at directory privilege level 3.
16692 */
16693-static inline void set_system_intr_gate(unsigned int n, void *addr)
16694+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16695 {
16696 BUG_ON((unsigned)n > 0xFF);
16697 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16698 }
16699
16700-static inline void set_system_trap_gate(unsigned int n, void *addr)
16701+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16702 {
16703 BUG_ON((unsigned)n > 0xFF);
16704 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16705 }
16706
16707-static inline void set_trap_gate(unsigned int n, void *addr)
16708+static inline void set_trap_gate(unsigned int n, const void *addr)
16709 {
16710 BUG_ON((unsigned)n > 0xFF);
16711 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16712@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16713 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16714 {
16715 BUG_ON((unsigned)n > 0xFF);
16716- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16717+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16718 }
16719
16720-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16721+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16722 {
16723 BUG_ON((unsigned)n > 0xFF);
16724 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16725 }
16726
16727-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16728+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16729 {
16730 BUG_ON((unsigned)n > 0xFF);
16731 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16732@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16733 else
16734 load_idt((const struct desc_ptr *)&idt_descr);
16735 }
16736+
16737+#ifdef CONFIG_X86_32
16738+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16739+{
16740+ struct desc_struct d;
16741+
16742+ if (likely(limit))
16743+ limit = (limit - 1UL) >> PAGE_SHIFT;
16744+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16745+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16746+}
16747+#endif
16748+
16749 #endif /* _ASM_X86_DESC_H */
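
The set_user_cs() helper added at the end of the desc.h hunk rewrites the ring-3 code segment descriptor so PAGEEXEC/SEGMEXEC can shrink the executable window. A minimal user-space sketch of the limit arithmetic it feeds pack_descriptor() (assuming PAGE_SHIFT == 12; the 0xFB/0xC arguments above encode a present, DPL-3, readable code segment with 4K granularity — interpretation, not quoted from the patch):

/* Sketch: how a byte-granular limit becomes the page-granular limit
 * field stored in the descriptor when the G (granularity) bit is set. */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long seg_limit_pages(unsigned long limit_bytes)
{
    /* Descriptor limits are inclusive: with G=1 the CPU expands a
     * stored value L to (L << 12) | 0xfff, so the last valid byte is
     * at (L + 1) * 4096 - 1.  Hence (limit - 1) >> PAGE_SHIFT. */
    if (limit_bytes)
        limit_bytes = (limit_bytes - 1UL) >> PAGE_SHIFT;
    return limit_bytes;
}

int main(void)
{
    printf("%lx\n", seg_limit_pages(0x1000));     /* one page -> 0 */
    printf("%lx\n", seg_limit_pages(0xc0000000)); /* 3 GiB -> 0xbffff */
    return 0;
}
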
16750diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16751index 278441f..b95a174 100644
16752--- a/arch/x86/include/asm/desc_defs.h
16753+++ b/arch/x86/include/asm/desc_defs.h
16754@@ -31,6 +31,12 @@ struct desc_struct {
16755 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16756 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16757 };
16758+ struct {
16759+ u16 offset_low;
16760+ u16 seg;
16761+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16762+ unsigned offset_high: 16;
16763+ } gate;
16764 };
16765 } __attribute__((packed));
16766
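
The new gate view of desc_struct names the fields of an interrupt/trap gate that occupy the same eight bytes as a segment descriptor. A standalone layout check (illustrative only; struct gate_view here is a copy for demonstration, not the kernel type):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct gate_view {
    uint16_t offset_low;                   /* handler bits 0..15   */
    uint16_t seg;                          /* code segment selector */
    unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
    unsigned offset_high : 16;             /* handler bits 16..31  */
} __attribute__((packed));

int main(void)
{
    /* Expected on x86 with GCC: size=8 off_low=0 seg=2, matching the
     * 8-byte legacy descriptor the union overlays. */
    printf("size=%zu off_low=%zu seg=%zu\n",
           sizeof(struct gate_view),
           offsetof(struct gate_view, offset_low),
           offsetof(struct gate_view, seg));
    return 0;
}
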
16767diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16768index ced283a..ffe04cc 100644
16769--- a/arch/x86/include/asm/div64.h
16770+++ b/arch/x86/include/asm/div64.h
16771@@ -39,7 +39,7 @@
16772 __mod; \
16773 })
16774
16775-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16776+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16777 {
16778 union {
16779 u64 v64;
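
__intentional_overflow(-1), which recurs throughout this patch (get_desc_base, readw/readl, virt_to_phys, ...), is an annotation consumed by the size_overflow GCC plugin: it exempts the function's arithmetic from overflow instrumentation because any wraparound there is expected. A portable-C sketch of what div_u64_rem() computes (the kernel's x86 version uses the hardware divl where it cannot overflow):

#include <stdio.h>
#include <stdint.h>

/* 64/32 division returning both quotient and remainder. */
static uint64_t my_div_u64_rem(uint64_t dividend, uint32_t divisor,
                               uint32_t *remainder)
{
    *remainder = (uint32_t)(dividend % divisor);
    return dividend / divisor;
}

int main(void)
{
    uint32_t rem;
    uint64_t q = my_div_u64_rem(10000000000ULL, 3000000000U, &rem);
    printf("q=%llu rem=%u\n", (unsigned long long)q, rem); /* q=3 rem=1000000000 */
    return 0;
}
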
16780diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16781index ca3347a..1a5082a 100644
16782--- a/arch/x86/include/asm/elf.h
16783+++ b/arch/x86/include/asm/elf.h
16784@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16785
16786 #include <asm/vdso.h>
16787
16788-#ifdef CONFIG_X86_64
16789-extern unsigned int vdso64_enabled;
16790-#endif
16791 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16792 extern unsigned int vdso32_enabled;
16793 #endif
16794@@ -249,7 +246,25 @@ extern int force_personality32;
16795 the loader. We need to make sure that it is out of the way of the program
16796 that it will "exec", and that there is sufficient room for the brk. */
16797
16798+#ifdef CONFIG_PAX_SEGMEXEC
16799+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16800+#else
16801 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16802+#endif
16803+
16804+#ifdef CONFIG_PAX_ASLR
16805+#ifdef CONFIG_X86_32
16806+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16807+
16808+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16809+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16810+#else
16811+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16812+
16813+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16814+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16815+#endif
16816+#endif
16817
16818 /* This yields a mask that user programs can use to figure out what
16819 instruction set this CPU supports. This could be done in user space,
16820@@ -298,17 +313,13 @@ do { \
16821
16822 #define ARCH_DLINFO \
16823 do { \
16824- if (vdso64_enabled) \
16825- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16826- (unsigned long __force)current->mm->context.vdso); \
16827+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16828 } while (0)
16829
16830 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16831 #define ARCH_DLINFO_X32 \
16832 do { \
16833- if (vdso64_enabled) \
16834- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16835- (unsigned long __force)current->mm->context.vdso); \
16836+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16837 } while (0)
16838
16839 #define AT_SYSINFO 32
16840@@ -323,10 +334,10 @@ else \
16841
16842 #endif /* !CONFIG_X86_32 */
16843
16844-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16845+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16846
16847 #define VDSO_ENTRY \
16848- ((unsigned long)current->mm->context.vdso + \
16849+ (current->mm->context.vdso + \
16850 selected_vdso32->sym___kernel_vsyscall)
16851
16852 struct linux_binprm;
16853@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16854 int uses_interp);
16855 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16856
16857-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16858-#define arch_randomize_brk arch_randomize_brk
16859-
16860 /*
16861 * True on X86_32 or when emulating IA32 on X86_64
16862 */
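
The PAX_DELTA_*_LEN values above are bit counts of page-granular randomization: 16 bits on i386 (15 under SEGMEXEC, whose address space is halved), and on amd64 either 16 bits for 32-bit tasks or a count scaled to the address-space size. A sketch of the arithmetic only — rand() stands in for the kernel's entropy source and is not claimed to match it (nor to supply enough bits for large deltas):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

static unsigned long randomized_base(unsigned long base, unsigned delta_bits)
{
    /* N bits of delta give 2^N equally likely page-aligned slides. */
    unsigned long slide = (unsigned long)rand() & ((1UL << delta_bits) - 1);
    return base + (slide << PAGE_SHIFT);
}

int main(void)
{
    /* 32-bit case from the hunk: base 0x10000000, 16 bits of delta */
    printf("%#lx\n", randomized_base(0x10000000UL, 16));
    return 0;
}
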
16863diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16864index 77a99ac..39ff7f5 100644
16865--- a/arch/x86/include/asm/emergency-restart.h
16866+++ b/arch/x86/include/asm/emergency-restart.h
16867@@ -1,6 +1,6 @@
16868 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16869 #define _ASM_X86_EMERGENCY_RESTART_H
16870
16871-extern void machine_emergency_restart(void);
16872+extern void machine_emergency_restart(void) __noreturn;
16873
16874 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16875diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16876index 1c7eefe..d0e4702 100644
16877--- a/arch/x86/include/asm/floppy.h
16878+++ b/arch/x86/include/asm/floppy.h
16879@@ -229,18 +229,18 @@ static struct fd_routine_l {
16880 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16881 } fd_routine[] = {
16882 {
16883- request_dma,
16884- free_dma,
16885- get_dma_residue,
16886- dma_mem_alloc,
16887- hard_dma_setup
16888+ ._request_dma = request_dma,
16889+ ._free_dma = free_dma,
16890+ ._get_dma_residue = get_dma_residue,
16891+ ._dma_mem_alloc = dma_mem_alloc,
16892+ ._dma_setup = hard_dma_setup
16893 },
16894 {
16895- vdma_request_dma,
16896- vdma_nop,
16897- vdma_get_dma_residue,
16898- vdma_mem_alloc,
16899- vdma_dma_setup
16900+ ._request_dma = vdma_request_dma,
16901+ ._free_dma = vdma_nop,
16902+ ._get_dma_residue = vdma_get_dma_residue,
16903+ ._dma_mem_alloc = vdma_mem_alloc,
16904+ ._dma_setup = vdma_dma_setup
16905 }
16906 };
16907
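
The floppy.h change is purely syntactic hardening: naming each field makes the initializer immune to field reordering, which matters once structures can be const-ified or layout-randomized by other parts of this patch. In miniature:

#include <stdio.h>

struct ops {
    int (*open)(void);
    int (*close)(void);
};

static int my_open(void)  { return 1; }
static int my_close(void) { return 0; }

static const struct ops fd_ops = {
    .open  = my_open,    /* binds by name: correct wherever 'open' lands */
    .close = my_close,   /* positional init would silently misbind here  */
};

int main(void)
{
    printf("%d %d\n", fd_ops.open(), fd_ops.close());
    return 0;
}
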
16908diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16909index f895358..800c60d 100644
16910--- a/arch/x86/include/asm/fpu-internal.h
16911+++ b/arch/x86/include/asm/fpu-internal.h
16912@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16913 #define user_insn(insn, output, input...) \
16914 ({ \
16915 int err; \
16916+ pax_open_userland(); \
16917 asm volatile(ASM_STAC "\n" \
16918- "1:" #insn "\n\t" \
16919+ "1:" \
16920+ __copyuser_seg \
16921+ #insn "\n\t" \
16922 "2: " ASM_CLAC "\n" \
16923 ".section .fixup,\"ax\"\n" \
16924 "3: movl $-1,%[err]\n" \
16925@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16926 _ASM_EXTABLE(1b, 3b) \
16927 : [err] "=r" (err), output \
16928 : "0"(0), input); \
16929+ pax_close_userland(); \
16930 err; \
16931 })
16932
16933@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16934 "fnclex\n\t"
16935 "emms\n\t"
16936 "fildl %P[addr]" /* set F?P to defined value */
16937- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16938+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16939 }
16940
16941 return fpu_restore_checking(&tsk->thread.fpu);
16942diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16943index b4c1f54..e290c08 100644
16944--- a/arch/x86/include/asm/futex.h
16945+++ b/arch/x86/include/asm/futex.h
16946@@ -12,6 +12,7 @@
16947 #include <asm/smap.h>
16948
16949 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16950+ typecheck(u32 __user *, uaddr); \
16951 asm volatile("\t" ASM_STAC "\n" \
16952 "1:\t" insn "\n" \
16953 "2:\t" ASM_CLAC "\n" \
16954@@ -20,15 +21,16 @@
16955 "\tjmp\t2b\n" \
16956 "\t.previous\n" \
16957 _ASM_EXTABLE(1b, 3b) \
16958- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16959+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16960 : "i" (-EFAULT), "0" (oparg), "1" (0))
16961
16962 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16963+ typecheck(u32 __user *, uaddr); \
16964 asm volatile("\t" ASM_STAC "\n" \
16965 "1:\tmovl %2, %0\n" \
16966 "\tmovl\t%0, %3\n" \
16967 "\t" insn "\n" \
16968- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16969+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16970 "\tjnz\t1b\n" \
16971 "3:\t" ASM_CLAC "\n" \
16972 "\t.section .fixup,\"ax\"\n" \
16973@@ -38,7 +40,7 @@
16974 _ASM_EXTABLE(1b, 4b) \
16975 _ASM_EXTABLE(2b, 4b) \
16976 : "=&a" (oldval), "=&r" (ret), \
16977- "+m" (*uaddr), "=&r" (tem) \
16978+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16979 : "r" (oparg), "i" (-EFAULT), "1" (0))
16980
16981 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16982@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16983
16984 pagefault_disable();
16985
16986+ pax_open_userland();
16987 switch (op) {
16988 case FUTEX_OP_SET:
16989- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16990+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16991 break;
16992 case FUTEX_OP_ADD:
16993- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16994+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16995 uaddr, oparg);
16996 break;
16997 case FUTEX_OP_OR:
16998@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16999 default:
17000 ret = -ENOSYS;
17001 }
17002+ pax_close_userland();
17003
17004 pagefault_enable();
17005
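
The futex.h changes re-point the memory operands through ____m()/__copyuser_seg and bracket the ops with pax_open_userland()/pax_close_userland() for UDEREF; the underlying read-modify-write loop is untouched. That loop, restated as portable C11 (a sketch, not kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>

/* Load the old value, compute the new one, and compare-exchange
 * until no other thread raced us -- the shape of __futex_atomic_op2. */
static uint32_t futex_op_add(_Atomic uint32_t *uaddr, uint32_t oparg)
{
    uint32_t oldval = atomic_load(uaddr);
    while (!atomic_compare_exchange_weak(uaddr, &oldval, oldval + oparg))
        ;   /* oldval was refreshed by the failed CAS; retry */
    return oldval;  /* the caller tests this against cmparg */
}

int main(void)
{
    _Atomic uint32_t futex_word = 5;
    printf("old=%u new=%u\n", futex_op_add(&futex_word, 3),
           (unsigned)atomic_load(&futex_word));   /* old=5 new=8 */
    return 0;
}
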
17006diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17007index 9662290..49ca5e5 100644
17008--- a/arch/x86/include/asm/hw_irq.h
17009+++ b/arch/x86/include/asm/hw_irq.h
17010@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17011 #endif /* CONFIG_X86_LOCAL_APIC */
17012
17013 /* Statistics */
17014-extern atomic_t irq_err_count;
17015-extern atomic_t irq_mis_count;
17016+extern atomic_unchecked_t irq_err_count;
17017+extern atomic_unchecked_t irq_mis_count;
17018
17019 /* EISA */
17020 extern void eisa_set_level_irq(unsigned int irq);
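
irq_err_count and irq_mis_count become atomic_unchecked_t because they are pure statistics: under PAX_REFCOUNT the plain atomic_t operations trap on signed overflow, and a counter that may legitimately wrap has to opt out. The distinction, sketched in user space:

#include <stdio.h>
#include <limits.h>

int main(void)
{
    unsigned int err_count = UINT_MAX;     /* think irq_err_count */
    err_count++;                           /* benign, well-defined wrap */
    printf("stats counter wrapped to %u\n", err_count);

    int refs = INT_MAX;                    /* think a reference count */
    /* refs++ here is the bug class PAX_REFCOUNT turns into a trap:
     * a wrapped refcount later frees an object still in use. */
    printf("refcount pinned at %d\n", refs);
    return 0;
}
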
17021diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17022index ccffa53..3c90c87 100644
17023--- a/arch/x86/include/asm/i8259.h
17024+++ b/arch/x86/include/asm/i8259.h
17025@@ -62,7 +62,7 @@ struct legacy_pic {
17026 void (*init)(int auto_eoi);
17027 int (*irq_pending)(unsigned int irq);
17028 void (*make_irq)(unsigned int irq);
17029-};
17030+} __do_const;
17031
17032 extern struct legacy_pic *legacy_pic;
17033 extern struct legacy_pic null_legacy_pic;
17034diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17035index 34a5b93..27e40a6 100644
17036--- a/arch/x86/include/asm/io.h
17037+++ b/arch/x86/include/asm/io.h
17038@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17039 "m" (*(volatile type __force *)addr) barrier); }
17040
17041 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17042-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17043-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17044+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17045+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17046
17047 build_mmio_read(__readb, "b", unsigned char, "=q", )
17048-build_mmio_read(__readw, "w", unsigned short, "=r", )
17049-build_mmio_read(__readl, "l", unsigned int, "=r", )
17050+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17051+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17052
17053 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17054 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17055@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17056 * this function
17057 */
17058
17059-static inline phys_addr_t virt_to_phys(volatile void *address)
17060+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17061 {
17062 return __pa(address);
17063 }
17064@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17065 return ioremap_nocache(offset, size);
17066 }
17067
17068-extern void iounmap(volatile void __iomem *addr);
17069+extern void iounmap(const volatile void __iomem *addr);
17070
17071 extern void set_iounmap_nonlazy(void);
17072
17073@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17074
17075 #include <linux/vmalloc.h>
17076
17077+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17078+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17079+{
17080+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17081+}
17082+
17083+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17084+{
17085+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17086+}
17087+
17088 /*
17089 * Convert a virtual cached pointer to an uncached pointer
17090 */
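
The new valid_phys_addr_range()/valid_mmap_phys_addr_range() reject /dev/mem windows that would reach past the CPU's reported physical address width. The same check restated with explicit types (illustrative; 36 physical address bits, i.e. PAE-era x86, assumed for the sample values):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long long addr, size_t count,
                                 unsigned phys_bits)
{
    /* Round the end of the window up to a page, then compare page
     * frame numbers against the architectural maximum. */
    unsigned long long last_pfn = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
    return last_pfn < (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
    printf("%d\n", valid_phys_addr_range(0xfee00000ULL, 4096, 36)); /* 1 */
    printf("%d\n", valid_phys_addr_range(~0ULL - 8192, 4096, 36));  /* 0 */
    return 0;
}
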
17091diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17092index 0a8b519..80e7d5b 100644
17093--- a/arch/x86/include/asm/irqflags.h
17094+++ b/arch/x86/include/asm/irqflags.h
17095@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17096 sti; \
17097 sysexit
17098
17099+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17100+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17101+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17102+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17103+
17104 #else
17105 #define INTERRUPT_RETURN iret
17106 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17107diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17108index 4421b5d..8543006 100644
17109--- a/arch/x86/include/asm/kprobes.h
17110+++ b/arch/x86/include/asm/kprobes.h
17111@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17112 #define RELATIVEJUMP_SIZE 5
17113 #define RELATIVECALL_OPCODE 0xe8
17114 #define RELATIVE_ADDR_SIZE 4
17115-#define MAX_STACK_SIZE 64
17116-#define MIN_STACK_SIZE(ADDR) \
17117- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17118- THREAD_SIZE - (unsigned long)(ADDR))) \
17119- ? (MAX_STACK_SIZE) \
17120- : (((unsigned long)current_thread_info()) + \
17121- THREAD_SIZE - (unsigned long)(ADDR)))
17122+#define MAX_STACK_SIZE 64UL
17123+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17124
17125 #define flush_insn_slot(p) do { } while (0)
17126
17127diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17128index d89c6b8..e711c69 100644
17129--- a/arch/x86/include/asm/kvm_host.h
17130+++ b/arch/x86/include/asm/kvm_host.h
17131@@ -51,7 +51,7 @@
17132 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
17133
17134 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
17135-#define CR3_PCID_INVD (1UL << 63)
17136+#define CR3_PCID_INVD (1ULL << 63)
17137 #define CR4_RESERVED_BITS \
17138 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
17139 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
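
The CR3_PCID_INVD fix is a plain integer-width bug: on i386, unsigned long is 32 bits, so 1UL << 63 is an out-of-range shift (undefined behaviour) rather than bit 63. A short demonstration of the widths involved:

#include <stdio.h>

int main(void)
{
    printf("sizeof(unsigned long)      = %zu\n", sizeof(unsigned long));
    printf("sizeof(unsigned long long) = %zu\n", sizeof(unsigned long long));
    /* 1ULL is at least 64 bits on every platform, so this is always
     * well-defined; 1UL << 63 would not be on a 32-bit build. */
    printf("1ULL << 63 = %#llx\n", 1ULL << 63);
    return 0;
}
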
17140diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17141index 4ad6560..75c7bdd 100644
17142--- a/arch/x86/include/asm/local.h
17143+++ b/arch/x86/include/asm/local.h
17144@@ -10,33 +10,97 @@ typedef struct {
17145 atomic_long_t a;
17146 } local_t;
17147
17148+typedef struct {
17149+ atomic_long_unchecked_t a;
17150+} local_unchecked_t;
17151+
17152 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17153
17154 #define local_read(l) atomic_long_read(&(l)->a)
17155+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17156 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17157+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17158
17159 static inline void local_inc(local_t *l)
17160 {
17161- asm volatile(_ASM_INC "%0"
17162+ asm volatile(_ASM_INC "%0\n"
17163+
17164+#ifdef CONFIG_PAX_REFCOUNT
17165+ "jno 0f\n"
17166+ _ASM_DEC "%0\n"
17167+ "int $4\n0:\n"
17168+ _ASM_EXTABLE(0b, 0b)
17169+#endif
17170+
17171+ : "+m" (l->a.counter));
17172+}
17173+
17174+static inline void local_inc_unchecked(local_unchecked_t *l)
17175+{
17176+ asm volatile(_ASM_INC "%0\n"
17177 : "+m" (l->a.counter));
17178 }
17179
17180 static inline void local_dec(local_t *l)
17181 {
17182- asm volatile(_ASM_DEC "%0"
17183+ asm volatile(_ASM_DEC "%0\n"
17184+
17185+#ifdef CONFIG_PAX_REFCOUNT
17186+ "jno 0f\n"
17187+ _ASM_INC "%0\n"
17188+ "int $4\n0:\n"
17189+ _ASM_EXTABLE(0b, 0b)
17190+#endif
17191+
17192+ : "+m" (l->a.counter));
17193+}
17194+
17195+static inline void local_dec_unchecked(local_unchecked_t *l)
17196+{
17197+ asm volatile(_ASM_DEC "%0\n"
17198 : "+m" (l->a.counter));
17199 }
17200
17201 static inline void local_add(long i, local_t *l)
17202 {
17203- asm volatile(_ASM_ADD "%1,%0"
17204+ asm volatile(_ASM_ADD "%1,%0\n"
17205+
17206+#ifdef CONFIG_PAX_REFCOUNT
17207+ "jno 0f\n"
17208+ _ASM_SUB "%1,%0\n"
17209+ "int $4\n0:\n"
17210+ _ASM_EXTABLE(0b, 0b)
17211+#endif
17212+
17213+ : "+m" (l->a.counter)
17214+ : "ir" (i));
17215+}
17216+
17217+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17218+{
17219+ asm volatile(_ASM_ADD "%1,%0\n"
17220 : "+m" (l->a.counter)
17221 : "ir" (i));
17222 }
17223
17224 static inline void local_sub(long i, local_t *l)
17225 {
17226- asm volatile(_ASM_SUB "%1,%0"
17227+ asm volatile(_ASM_SUB "%1,%0\n"
17228+
17229+#ifdef CONFIG_PAX_REFCOUNT
17230+ "jno 0f\n"
17231+ _ASM_ADD "%1,%0\n"
17232+ "int $4\n0:\n"
17233+ _ASM_EXTABLE(0b, 0b)
17234+#endif
17235+
17236+ : "+m" (l->a.counter)
17237+ : "ir" (i));
17238+}
17239+
17240+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17241+{
17242+ asm volatile(_ASM_SUB "%1,%0\n"
17243 : "+m" (l->a.counter)
17244 : "ir" (i));
17245 }
17246@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17247 */
17248 static inline int local_sub_and_test(long i, local_t *l)
17249 {
17250- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17251+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17252 }
17253
17254 /**
17255@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17256 */
17257 static inline int local_dec_and_test(local_t *l)
17258 {
17259- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17260+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17261 }
17262
17263 /**
17264@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17265 */
17266 static inline int local_inc_and_test(local_t *l)
17267 {
17268- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17269+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17270 }
17271
17272 /**
17273@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17274 */
17275 static inline int local_add_negative(long i, local_t *l)
17276 {
17277- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17278+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17279 }
17280
17281 /**
17282@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17283 static inline long local_add_return(long i, local_t *l)
17284 {
17285 long __i = i;
17286+ asm volatile(_ASM_XADD "%0, %1\n"
17287+
17288+#ifdef CONFIG_PAX_REFCOUNT
17289+ "jno 0f\n"
17290+ _ASM_MOV "%0,%1\n"
17291+ "int $4\n0:\n"
17292+ _ASM_EXTABLE(0b, 0b)
17293+#endif
17294+
17295+ : "+r" (i), "+m" (l->a.counter)
17296+ : : "memory");
17297+ return i + __i;
17298+}
17299+
17300+/**
17301+ * local_add_return_unchecked - add and return
17302+ * @i: integer value to add
17303+ * @l: pointer to type local_unchecked_t
17304+ *
17305+ * Atomically adds @i to @l and returns @i + @l
17306+ */
17307+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17308+{
17309+ long __i = i;
17310 asm volatile(_ASM_XADD "%0, %1;"
17311 : "+r" (i), "+m" (l->a.counter)
17312 : : "memory");
17313@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17314
17315 #define local_cmpxchg(l, o, n) \
17316 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17317+#define local_cmpxchg_unchecked(l, o, n) \
17318+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17319 /* Always has a lock prefix */
17320 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17321
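
Every checked op in the local.h hunk follows the same shape: perform the operation, jump past the fixup with "jno" if the overflow flag is clear, otherwise undo the operation and raise int $4 so the overflow handler runs, the extable entry resuming execution after the trap. A portable sketch of the same policy, with a compiler builtin standing in for the OF-flag test:

#include <stdio.h>
#include <stdlib.h>

static void local_add_checked(long *counter, long i)
{
    long res;
    if (__builtin_add_overflow(*counter, i, &res)) {
        /* kernel: _ASM_SUB undoes the add, then "int $4" traps */
        abort();
    }
    *counter = res;
}

int main(void)
{
    long c = 0;
    local_add_checked(&c, 41);
    local_add_checked(&c, 1);
    printf("%ld\n", c);        /* 42 */
    return 0;
}
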
17322diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17323new file mode 100644
17324index 0000000..2bfd3ba
17325--- /dev/null
17326+++ b/arch/x86/include/asm/mman.h
17327@@ -0,0 +1,15 @@
17328+#ifndef _X86_MMAN_H
17329+#define _X86_MMAN_H
17330+
17331+#include <uapi/asm/mman.h>
17332+
17333+#ifdef __KERNEL__
17334+#ifndef __ASSEMBLY__
17335+#ifdef CONFIG_X86_32
17336+#define arch_mmap_check i386_mmap_check
17337+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17338+#endif
17339+#endif
17340+#endif
17341+
17342+#endif /* X86_MMAN_H */
17343diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17344index 876e74e..e20bfb1 100644
17345--- a/arch/x86/include/asm/mmu.h
17346+++ b/arch/x86/include/asm/mmu.h
17347@@ -9,7 +9,7 @@
17348 * we put the segment information here.
17349 */
17350 typedef struct {
17351- void *ldt;
17352+ struct desc_struct *ldt;
17353 int size;
17354
17355 #ifdef CONFIG_X86_64
17356@@ -18,7 +18,19 @@ typedef struct {
17357 #endif
17358
17359 struct mutex lock;
17360- void __user *vdso;
17361+ unsigned long vdso;
17362+
17363+#ifdef CONFIG_X86_32
17364+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17365+ unsigned long user_cs_base;
17366+ unsigned long user_cs_limit;
17367+
17368+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17369+ cpumask_t cpu_user_cs_mask;
17370+#endif
17371+
17372+#endif
17373+#endif
17374 } mm_context_t;
17375
17376 #ifdef CONFIG_SMP
17377diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17378index 4b75d59..8ffacb6 100644
17379--- a/arch/x86/include/asm/mmu_context.h
17380+++ b/arch/x86/include/asm/mmu_context.h
17381@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
17382
17383 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17384 {
17385+
17386+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17387+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17388+ unsigned int i;
17389+ pgd_t *pgd;
17390+
17391+ pax_open_kernel();
17392+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17393+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17394+ set_pgd_batched(pgd+i, native_make_pgd(0));
17395+ pax_close_kernel();
17396+ }
17397+#endif
17398+
17399 #ifdef CONFIG_SMP
17400 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17401 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17402@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17403 struct task_struct *tsk)
17404 {
17405 unsigned cpu = smp_processor_id();
17406+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17407+ int tlbstate = TLBSTATE_OK;
17408+#endif
17409
17410 if (likely(prev != next)) {
17411 #ifdef CONFIG_SMP
17412+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17413+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17414+#endif
17415 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17416 this_cpu_write(cpu_tlbstate.active_mm, next);
17417 #endif
17418 cpumask_set_cpu(cpu, mm_cpumask(next));
17419
17420 /* Re-load page tables */
17421+#ifdef CONFIG_PAX_PER_CPU_PGD
17422+ pax_open_kernel();
17423+
17424+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17425+ if (static_cpu_has(X86_FEATURE_PCID))
17426+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17427+ else
17428+#endif
17429+
17430+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17431+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17432+ pax_close_kernel();
17433+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17434+
17435+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17436+ if (static_cpu_has(X86_FEATURE_PCID)) {
17437+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17438+ u64 descriptor[2];
17439+ descriptor[0] = PCID_USER;
17440+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17441+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17442+ descriptor[0] = PCID_KERNEL;
17443+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17444+ }
17445+ } else {
17446+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17447+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17448+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17449+ else
17450+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17451+ }
17452+ } else
17453+#endif
17454+
17455+ load_cr3(get_cpu_pgd(cpu, kernel));
17456+#else
17457 load_cr3(next->pgd);
17458+#endif
17459 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17460
17461 /* Stop flush ipis for the previous mm */
17462@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17463 */
17464 if (unlikely(prev->context.ldt != next->context.ldt))
17465 load_LDT_nolock(&next->context);
17466+
17467+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17468+ if (!(__supported_pte_mask & _PAGE_NX)) {
17469+ smp_mb__before_atomic();
17470+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17471+ smp_mb__after_atomic();
17472+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17473+ }
17474+#endif
17475+
17476+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17477+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17478+ prev->context.user_cs_limit != next->context.user_cs_limit))
17479+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17480+#ifdef CONFIG_SMP
17481+ else if (unlikely(tlbstate != TLBSTATE_OK))
17482+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17483+#endif
17484+#endif
17485+
17486 }
17487+ else {
17488+
17489+#ifdef CONFIG_PAX_PER_CPU_PGD
17490+ pax_open_kernel();
17491+
17492+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17493+ if (static_cpu_has(X86_FEATURE_PCID))
17494+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17495+ else
17496+#endif
17497+
17498+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17499+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17500+ pax_close_kernel();
17501+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17502+
17503+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17504+ if (static_cpu_has(X86_FEATURE_PCID)) {
17505+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17506+ u64 descriptor[2];
17507+ descriptor[0] = PCID_USER;
17508+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17509+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17510+ descriptor[0] = PCID_KERNEL;
17511+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17512+ }
17513+ } else {
17514+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17515+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17516+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17517+ else
17518+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17519+ }
17520+ } else
17521+#endif
17522+
17523+ load_cr3(get_cpu_pgd(cpu, kernel));
17524+#endif
17525+
17526 #ifdef CONFIG_SMP
17527- else {
17528 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17529 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17530
17531@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17532 * tlb flush IPI delivery. We must reload CR3
17533 * to make sure to use no freed page tables.
17534 */
17535+
17536+#ifndef CONFIG_PAX_PER_CPU_PGD
17537 load_cr3(next->pgd);
17538 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17539+#endif
17540+
17541 load_LDT_nolock(&next->context);
17542+
17543+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17544+ if (!(__supported_pte_mask & _PAGE_NX))
17545+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17546+#endif
17547+
17548+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17549+#ifdef CONFIG_PAX_PAGEEXEC
17550+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17551+#endif
17552+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17553+#endif
17554+
17555 }
17556+#endif
17557 }
17558-#endif
17559 }
17560
17561 #define activate_mm(prev, next) \
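
In the switch_mm() hunks, the INVPCID path builds a 16-byte descriptor and invalidates the non-global TLB entries tagged with one PCID (INVPCID type 1, single-context), falling back to CR3 writes with PCID_NOFLUSH when the instruction is absent. The descriptor layout only, as a sketch — INVPCID is privileged and cannot execute here, and the PCID_* values are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define PCID_KERNEL 0   /* assumed values, for illustration only */
#define PCID_USER   1

int main(void)
{
    /* word 0 bits 11:0 = PCID to invalidate; word 1 = linear address
     * (used only by type 0, individual-address invalidations). */
    uint64_t descriptor[2] = { PCID_USER, 0 };

    /* kernel: asm volatile(__ASM_INVPCID : : "d"(&descriptor),
     *                      "a"(INVPCID_SINGLE_CONTEXT) : "memory"); */
    printf("pcid=%llu\n", (unsigned long long)(descriptor[0] & 0xfff));
    return 0;
}
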
17562diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17563index e3b7819..b257c64 100644
17564--- a/arch/x86/include/asm/module.h
17565+++ b/arch/x86/include/asm/module.h
17566@@ -5,6 +5,7 @@
17567
17568 #ifdef CONFIG_X86_64
17569 /* X86_64 does not define MODULE_PROC_FAMILY */
17570+#define MODULE_PROC_FAMILY ""
17571 #elif defined CONFIG_M486
17572 #define MODULE_PROC_FAMILY "486 "
17573 #elif defined CONFIG_M586
17574@@ -57,8 +58,20 @@
17575 #error unknown processor family
17576 #endif
17577
17578-#ifdef CONFIG_X86_32
17579-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17580+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17581+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17582+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17583+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17584+#else
17585+#define MODULE_PAX_KERNEXEC ""
17586 #endif
17587
17588+#ifdef CONFIG_PAX_MEMORY_UDEREF
17589+#define MODULE_PAX_UDEREF "UDEREF "
17590+#else
17591+#define MODULE_PAX_UDEREF ""
17592+#endif
17593+
17594+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17595+
17596 #endif /* _ASM_X86_MODULE_H */
17597diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17598index 5f2fc44..106caa6 100644
17599--- a/arch/x86/include/asm/nmi.h
17600+++ b/arch/x86/include/asm/nmi.h
17601@@ -36,26 +36,35 @@ enum {
17602
17603 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17604
17605+struct nmiaction;
17606+
17607+struct nmiwork {
17608+ const struct nmiaction *action;
17609+ u64 max_duration;
17610+ struct irq_work irq_work;
17611+};
17612+
17613 struct nmiaction {
17614 struct list_head list;
17615 nmi_handler_t handler;
17616- u64 max_duration;
17617- struct irq_work irq_work;
17618 unsigned long flags;
17619 const char *name;
17620-};
17621+ struct nmiwork *work;
17622+} __do_const;
17623
17624 #define register_nmi_handler(t, fn, fg, n, init...) \
17625 ({ \
17626- static struct nmiaction init fn##_na = { \
17627+ static struct nmiwork fn##_nw; \
17628+ static const struct nmiaction init fn##_na = { \
17629 .handler = (fn), \
17630 .name = (n), \
17631 .flags = (fg), \
17632+ .work = &fn##_nw, \
17633 }; \
17634 __register_nmi_handler((t), &fn##_na); \
17635 })
17636
17637-int __register_nmi_handler(unsigned int, struct nmiaction *);
17638+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17639
17640 void unregister_nmi_handler(unsigned int, const char *);
17641
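
The nmi.h change is a recurring constification pattern in this patch: everything mutable (max_duration, the irq_work) moves out of nmiaction into a companion nmiwork reached by pointer, so the registration record itself can be const and __do_const can put it in a read-only section. The pattern in miniature:

#include <stdio.h>

struct work {                 /* mutable runtime state */
    unsigned long long max_duration;
};

struct action {               /* immutable after build time */
    int (*handler)(void);
    const char *name;
    struct work *work;        /* the only path to writable data */
};

static int nop_handler(void) { return 0; }

static struct work demo_work;
static const struct action demo_action = {
    .handler = nop_handler,
    .name    = "demo",
    .work    = &demo_work,
};

int main(void)
{
    demo_action.work->max_duration = 123;  /* legal: pointee is non-const */
    printf("%s %llu\n", demo_action.name, demo_action.work->max_duration);
    return 0;
}
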
17642diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17643index 802dde3..9183e68 100644
17644--- a/arch/x86/include/asm/page.h
17645+++ b/arch/x86/include/asm/page.h
17646@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17647 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17648
17649 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17650+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17651
17652 #define __boot_va(x) __va(x)
17653 #define __boot_pa(x) __pa(x)
17654@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17655 * virt_to_page(kaddr) returns a valid pointer if and only if
17656 * virt_addr_valid(kaddr) returns true.
17657 */
17658-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17659 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17660 extern bool __virt_addr_valid(unsigned long kaddr);
17661 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17662
17663+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17664+#define virt_to_page(kaddr) \
17665+ ({ \
17666+ const void *__kaddr = (const void *)(kaddr); \
17667+ BUG_ON(!virt_addr_valid(__kaddr)); \
17668+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17669+ })
17670+#else
17671+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17672+#endif
17673+
17674 #endif /* __ASSEMBLY__ */
17675
17676 #include <asm-generic/memory_model.h>
17677diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17678index b3bebf9..e1f5d95 100644
17679--- a/arch/x86/include/asm/page_64.h
17680+++ b/arch/x86/include/asm/page_64.h
17681@@ -7,9 +7,9 @@
17682
17683 /* duplicated to the one in bootmem.h */
17684 extern unsigned long max_pfn;
17685-extern unsigned long phys_base;
17686+extern const unsigned long phys_base;
17687
17688-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17689+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17690 {
17691 unsigned long y = x - __START_KERNEL_map;
17692
17693diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17694index 32444ae..1a1624b 100644
17695--- a/arch/x86/include/asm/paravirt.h
17696+++ b/arch/x86/include/asm/paravirt.h
17697@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17698 return (pmd_t) { ret };
17699 }
17700
17701-static inline pmdval_t pmd_val(pmd_t pmd)
17702+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17703 {
17704 pmdval_t ret;
17705
17706@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17707 val);
17708 }
17709
17710+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17711+{
17712+ pgdval_t val = native_pgd_val(pgd);
17713+
17714+ if (sizeof(pgdval_t) > sizeof(long))
17715+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17716+ val, (u64)val >> 32);
17717+ else
17718+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17719+ val);
17720+}
17721+
17722 static inline void pgd_clear(pgd_t *pgdp)
17723 {
17724 set_pgd(pgdp, __pgd(0));
17725@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17726 pv_mmu_ops.set_fixmap(idx, phys, flags);
17727 }
17728
17729+#ifdef CONFIG_PAX_KERNEXEC
17730+static inline unsigned long pax_open_kernel(void)
17731+{
17732+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17733+}
17734+
17735+static inline unsigned long pax_close_kernel(void)
17736+{
17737+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17738+}
17739+#else
17740+static inline unsigned long pax_open_kernel(void) { return 0; }
17741+static inline unsigned long pax_close_kernel(void) { return 0; }
17742+#endif
17743+
17744 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17745
17746 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17747@@ -906,7 +933,7 @@ extern void default_banner(void);
17748
17749 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17750 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17751-#define PARA_INDIRECT(addr) *%cs:addr
17752+#define PARA_INDIRECT(addr) *%ss:addr
17753 #endif
17754
17755 #define INTERRUPT_RETURN \
17756@@ -981,6 +1008,21 @@ extern void default_banner(void);
17757 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17758 CLBR_NONE, \
17759 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17760+
17761+#define GET_CR0_INTO_RDI \
17762+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17763+ mov %rax,%rdi
17764+
17765+#define SET_RDI_INTO_CR0 \
17766+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17767+
17768+#define GET_CR3_INTO_RDI \
17769+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17770+ mov %rax,%rdi
17771+
17772+#define SET_RDI_INTO_CR3 \
17773+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17774+
17775 #endif /* CONFIG_X86_32 */
17776
17777 #endif /* __ASSEMBLY__ */
17778diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17779index 7549b8b..f0edfda 100644
17780--- a/arch/x86/include/asm/paravirt_types.h
17781+++ b/arch/x86/include/asm/paravirt_types.h
17782@@ -84,7 +84,7 @@ struct pv_init_ops {
17783 */
17784 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17785 unsigned long addr, unsigned len);
17786-};
17787+} __no_const __no_randomize_layout;
17788
17789
17790 struct pv_lazy_ops {
17791@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17792 void (*enter)(void);
17793 void (*leave)(void);
17794 void (*flush)(void);
17795-};
17796+} __no_randomize_layout;
17797
17798 struct pv_time_ops {
17799 unsigned long long (*sched_clock)(void);
17800 unsigned long long (*steal_clock)(int cpu);
17801 unsigned long (*get_tsc_khz)(void);
17802-};
17803+} __no_const __no_randomize_layout;
17804
17805 struct pv_cpu_ops {
17806 /* hooks for various privileged instructions */
17807@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17808
17809 void (*start_context_switch)(struct task_struct *prev);
17810 void (*end_context_switch)(struct task_struct *next);
17811-};
17812+} __no_const __no_randomize_layout;
17813
17814 struct pv_irq_ops {
17815 /*
17816@@ -215,7 +215,7 @@ struct pv_irq_ops {
17817 #ifdef CONFIG_X86_64
17818 void (*adjust_exception_frame)(void);
17819 #endif
17820-};
17821+} __no_randomize_layout;
17822
17823 struct pv_apic_ops {
17824 #ifdef CONFIG_X86_LOCAL_APIC
17825@@ -223,7 +223,7 @@ struct pv_apic_ops {
17826 unsigned long start_eip,
17827 unsigned long start_esp);
17828 #endif
17829-};
17830+} __no_const __no_randomize_layout;
17831
17832 struct pv_mmu_ops {
17833 unsigned long (*read_cr2)(void);
17834@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17835 struct paravirt_callee_save make_pud;
17836
17837 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17838+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17839 #endif /* PAGETABLE_LEVELS == 4 */
17840 #endif /* PAGETABLE_LEVELS >= 3 */
17841
17842@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17843 an mfn. We can tell which is which from the index. */
17844 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17845 phys_addr_t phys, pgprot_t flags);
17846-};
17847+
17848+#ifdef CONFIG_PAX_KERNEXEC
17849+ unsigned long (*pax_open_kernel)(void);
17850+ unsigned long (*pax_close_kernel)(void);
17851+#endif
17852+
17853+} __no_randomize_layout;
17854
17855 struct arch_spinlock;
17856 #ifdef CONFIG_SMP
17857@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17858 struct pv_lock_ops {
17859 struct paravirt_callee_save lock_spinning;
17860 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17861-};
17862+} __no_randomize_layout;
17863
17864 /* This contains all the paravirt structures: we get a convenient
17865 * number for each function using the offset which we use to indicate
17866- * what to patch. */
17867+ * what to patch.
17868+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17869+ */
17870+
17871 struct paravirt_patch_template {
17872 struct pv_init_ops pv_init_ops;
17873 struct pv_time_ops pv_time_ops;
17874@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17875 struct pv_apic_ops pv_apic_ops;
17876 struct pv_mmu_ops pv_mmu_ops;
17877 struct pv_lock_ops pv_lock_ops;
17878-};
17879+} __no_randomize_layout;
17880
17881 extern struct pv_info pv_info;
17882 extern struct pv_init_ops pv_init_ops;
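
The pv_*_ops structures gain __no_randomize_layout because paravirt patch sites are identified by byte offset into paravirt_patch_template (the "NEAT TRICK" referenced in the comment above); randomizing the layout would patch the wrong slots. A sketch of pinning such an ABI offset at compile time (demo types, not the kernel's):

#include <stddef.h>
#include <stdio.h>

struct pv_demo_ops {
    void (*op_a)(void);
    void (*op_b)(void);
};

/* If randomization (or a careless edit) moved op_b, the build fails
 * instead of silently patching the wrong slot at runtime. */
_Static_assert(offsetof(struct pv_demo_ops, op_b) == sizeof(void *),
               "pv_demo_ops layout is part of the patching ABI");

int main(void)
{
    printf("op_b at offset %zu\n", offsetof(struct pv_demo_ops, op_b));
    return 0;
}
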
17883diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17884index c4412e9..90e88c5 100644
17885--- a/arch/x86/include/asm/pgalloc.h
17886+++ b/arch/x86/include/asm/pgalloc.h
17887@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17888 pmd_t *pmd, pte_t *pte)
17889 {
17890 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17891+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17892+}
17893+
17894+static inline void pmd_populate_user(struct mm_struct *mm,
17895+ pmd_t *pmd, pte_t *pte)
17896+{
17897+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17898 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17899 }
17900
17901@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17902
17903 #ifdef CONFIG_X86_PAE
17904 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17905+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17906+{
17907+ pud_populate(mm, pudp, pmd);
17908+}
17909 #else /* !CONFIG_X86_PAE */
17910 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17911 {
17912 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17913 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17914 }
17915+
17916+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17917+{
17918+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17919+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17920+}
17921 #endif /* CONFIG_X86_PAE */
17922
17923 #if PAGETABLE_LEVELS > 3
17924@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17925 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17926 }
17927
17928+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17929+{
17930+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17931+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17932+}
17933+
17934 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17935 {
17936 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
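
The *_populate_kernel() variants added to pgalloc.h differ from their user counterparts by one bit: _KERNPG_TABLE omits _PAGE_USER, so user mode cannot traverse the entry. With the x86 flag values spelled out (constants restated here for illustration):

#include <stdio.h>

#define _PAGE_PRESENT  0x001
#define _PAGE_RW       0x002
#define _PAGE_USER     0x004
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY    0x040

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
    /* 0x63 vs 0x67 -- user-mode walks succeed only through the latter */
    printf("_KERNPG_TABLE=%#x _PAGE_TABLE=%#x\n", _KERNPG_TABLE, _PAGE_TABLE);
    return 0;
}
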
17937diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17938index 206a87f..1623b06 100644
17939--- a/arch/x86/include/asm/pgtable-2level.h
17940+++ b/arch/x86/include/asm/pgtable-2level.h
17941@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17942
17943 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17944 {
17945+ pax_open_kernel();
17946 *pmdp = pmd;
17947+ pax_close_kernel();
17948 }
17949
17950 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17951diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17952index 81bb91b..9392125 100644
17953--- a/arch/x86/include/asm/pgtable-3level.h
17954+++ b/arch/x86/include/asm/pgtable-3level.h
17955@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17956
17957 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17958 {
17959+ pax_open_kernel();
17960 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17961+ pax_close_kernel();
17962 }
17963
17964 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17965 {
17966+ pax_open_kernel();
17967 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17968+ pax_close_kernel();
17969 }
17970
17971 /*
17972diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17973index e8a5454..1539359 100644
17974--- a/arch/x86/include/asm/pgtable.h
17975+++ b/arch/x86/include/asm/pgtable.h
17976@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17977
17978 #ifndef __PAGETABLE_PUD_FOLDED
17979 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17980+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17981 #define pgd_clear(pgd) native_pgd_clear(pgd)
17982 #endif
17983
17984@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17985
17986 #define arch_end_context_switch(prev) do {} while(0)
17987
17988+#define pax_open_kernel() native_pax_open_kernel()
17989+#define pax_close_kernel() native_pax_close_kernel()
17990 #endif /* CONFIG_PARAVIRT */
17991
17992+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17993+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17994+
17995+#ifdef CONFIG_PAX_KERNEXEC
17996+static inline unsigned long native_pax_open_kernel(void)
17997+{
17998+ unsigned long cr0;
17999+
18000+ preempt_disable();
18001+ barrier();
18002+ cr0 = read_cr0() ^ X86_CR0_WP;
18003+ BUG_ON(cr0 & X86_CR0_WP);
18004+ write_cr0(cr0);
18005+ barrier();
18006+ return cr0 ^ X86_CR0_WP;
18007+}
18008+
18009+static inline unsigned long native_pax_close_kernel(void)
18010+{
18011+ unsigned long cr0;
18012+
18013+ barrier();
18014+ cr0 = read_cr0() ^ X86_CR0_WP;
18015+ BUG_ON(!(cr0 & X86_CR0_WP));
18016+ write_cr0(cr0);
18017+ barrier();
18018+ preempt_enable_no_resched();
18019+ return cr0 ^ X86_CR0_WP;
18020+}
18021+#else
18022+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18023+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18024+#endif
18025+
18026 /*
18027 * The following only work if pte_present() is true.
18028 * Undefined behaviour if not..
18029 */
18030+static inline int pte_user(pte_t pte)
18031+{
18032+ return pte_val(pte) & _PAGE_USER;
18033+}
18034+
18035 static inline int pte_dirty(pte_t pte)
18036 {
18037 return pte_flags(pte) & _PAGE_DIRTY;
18038@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18039 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18040 }
18041
18042+static inline unsigned long pgd_pfn(pgd_t pgd)
18043+{
18044+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18045+}
18046+
18047 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18048
18049 static inline int pmd_large(pmd_t pte)
18050@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18051 return pte_clear_flags(pte, _PAGE_RW);
18052 }
18053
18054+static inline pte_t pte_mkread(pte_t pte)
18055+{
18056+ return __pte(pte_val(pte) | _PAGE_USER);
18057+}
18058+
18059 static inline pte_t pte_mkexec(pte_t pte)
18060 {
18061- return pte_clear_flags(pte, _PAGE_NX);
18062+#ifdef CONFIG_X86_PAE
18063+ if (__supported_pte_mask & _PAGE_NX)
18064+ return pte_clear_flags(pte, _PAGE_NX);
18065+ else
18066+#endif
18067+ return pte_set_flags(pte, _PAGE_USER);
18068+}
18069+
18070+static inline pte_t pte_exprotect(pte_t pte)
18071+{
18072+#ifdef CONFIG_X86_PAE
18073+ if (__supported_pte_mask & _PAGE_NX)
18074+ return pte_set_flags(pte, _PAGE_NX);
18075+ else
18076+#endif
18077+ return pte_clear_flags(pte, _PAGE_USER);
18078 }
18079
18080 static inline pte_t pte_mkdirty(pte_t pte)
18081@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18082 #endif
18083
18084 #ifndef __ASSEMBLY__
18085+
18086+#ifdef CONFIG_PAX_PER_CPU_PGD
18087+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18088+enum cpu_pgd_type {kernel = 0, user = 1};
18089+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18090+{
18091+ return cpu_pgd[cpu][type];
18092+}
18093+#endif
18094+
18095 #include <linux/mm_types.h>
18096 #include <linux/mmdebug.h>
18097 #include <linux/log2.h>
18098@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18099 * Currently stuck as a macro due to indirect forward reference to
18100 * linux/mmzone.h's __section_mem_map_addr() definition:
18101 */
18102-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18103+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18104
18105 /* Find an entry in the second-level page table.. */
18106 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18107@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18108 * Currently stuck as a macro due to indirect forward reference to
18109 * linux/mmzone.h's __section_mem_map_addr() definition:
18110 */
18111-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18112+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18113
18114 /* to find an entry in a page-table-directory. */
18115 static inline unsigned long pud_index(unsigned long address)
18116@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18117
18118 static inline int pgd_bad(pgd_t pgd)
18119 {
18120- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18121+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18122 }
18123
18124 static inline int pgd_none(pgd_t pgd)
18125@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
18126 * pgd_offset() returns a (pgd_t *)
18127 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18128 */
18129-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18130+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18131+
18132+#ifdef CONFIG_PAX_PER_CPU_PGD
18133+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18134+#endif
18135+
18136 /*
18137 * a shortcut which implies the use of the kernel's pgd, instead
18138 * of a process's
18139@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
18140 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18141 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18142
18143+#ifdef CONFIG_X86_32
18144+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18145+#else
18146+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18147+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18148+
18149+#ifdef CONFIG_PAX_MEMORY_UDEREF
18150+#ifdef __ASSEMBLY__
18151+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18152+#else
18153+extern unsigned long pax_user_shadow_base;
18154+extern pgdval_t clone_pgd_mask;
18155+#endif
18156+#endif
18157+
18158+#endif
18159+
18160 #ifndef __ASSEMBLY__
18161
18162 extern int direct_gbpages;
18163@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18164 * dst and src can be on the same page, but the range must not overlap,
18165 * and must not cross a page boundary.
18166 */
18167-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18168+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18169 {
18170- memcpy(dst, src, count * sizeof(pgd_t));
18171+ pax_open_kernel();
18172+ while (count--)
18173+ *dst++ = *src++;
18174+ pax_close_kernel();
18175 }
18176
18177+#ifdef CONFIG_PAX_PER_CPU_PGD
18178+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18179+#endif
18180+
18181+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18182+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18183+#else
18184+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18185+#endif
18186+
18187 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18188 static inline int page_level_shift(enum pg_level level)
18189 {
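
native_pax_open_kernel()/native_pax_close_kernel() implement KERNEXEC's write window: flip CR0.WP off (with preemption disabled so the per-CPU state cannot migrate), perform the privileged write, flip it back, the BUG_ON()s catching unbalanced brackets. CR0 is untouchable from user space, so the following analogue uses mprotect() on a single page to show the bracket discipline — the pattern only, not the mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *page;

static void open_kernel(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(page, 4096, PROT_READ); }

int main(void)
{
    page = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED)
        return 1;
    open_kernel();
    strcpy(page, "updated under the bracket");   /* ok */
    close_kernel();
    puts(page);
    /* strcpy(page, "x"); here would fault -- like a stray kernel
     * write hitting a read-only page table while WP is set. */
    return 0;
}
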
18190diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18191index b6c0b40..3535d47 100644
18192--- a/arch/x86/include/asm/pgtable_32.h
18193+++ b/arch/x86/include/asm/pgtable_32.h
18194@@ -25,9 +25,6 @@
18195 struct mm_struct;
18196 struct vm_area_struct;
18197
18198-extern pgd_t swapper_pg_dir[1024];
18199-extern pgd_t initial_page_table[1024];
18200-
18201 static inline void pgtable_cache_init(void) { }
18202 static inline void check_pgt_cache(void) { }
18203 void paging_init(void);
18204@@ -45,6 +42,12 @@ void paging_init(void);
18205 # include <asm/pgtable-2level.h>
18206 #endif
18207
18208+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18209+extern pgd_t initial_page_table[PTRS_PER_PGD];
18210+#ifdef CONFIG_X86_PAE
18211+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18212+#endif
18213+
18214 #if defined(CONFIG_HIGHPTE)
18215 #define pte_offset_map(dir, address) \
18216 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18217@@ -59,12 +62,17 @@ void paging_init(void);
18218 /* Clear a kernel PTE and flush it from the TLB */
18219 #define kpte_clear_flush(ptep, vaddr) \
18220 do { \
18221+ pax_open_kernel(); \
18222 pte_clear(&init_mm, (vaddr), (ptep)); \
18223+ pax_close_kernel(); \
18224 __flush_tlb_one((vaddr)); \
18225 } while (0)
18226
18227 #endif /* !__ASSEMBLY__ */
18228
18229+#define HAVE_ARCH_UNMAPPED_AREA
18230+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18231+
18232 /*
18233 * kern_addr_valid() is (1) for FLATMEM and (0) for
18234 * SPARSEMEM and DISCONTIGMEM
18235diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18236index 9fb2f2b..b04b4bf 100644
18237--- a/arch/x86/include/asm/pgtable_32_types.h
18238+++ b/arch/x86/include/asm/pgtable_32_types.h
18239@@ -8,7 +8,7 @@
18240 */
18241 #ifdef CONFIG_X86_PAE
18242 # include <asm/pgtable-3level_types.h>
18243-# define PMD_SIZE (1UL << PMD_SHIFT)
18244+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18245 # define PMD_MASK (~(PMD_SIZE - 1))
18246 #else
18247 # include <asm/pgtable-2level_types.h>
18248@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18249 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18250 #endif
18251
18252+#ifdef CONFIG_PAX_KERNEXEC
18253+#ifndef __ASSEMBLY__
18254+extern unsigned char MODULES_EXEC_VADDR[];
18255+extern unsigned char MODULES_EXEC_END[];
18256+#endif
18257+#include <asm/boot.h>
18258+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18259+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18260+#else
18261+#define ktla_ktva(addr) (addr)
18262+#define ktva_ktla(addr) (addr)
18263+#endif
18264+
18265 #define MODULES_VADDR VMALLOC_START
18266 #define MODULES_END VMALLOC_END
18267 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18268diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18269index 4572b2f..4430113 100644
18270--- a/arch/x86/include/asm/pgtable_64.h
18271+++ b/arch/x86/include/asm/pgtable_64.h
18272@@ -16,11 +16,16 @@
18273
18274 extern pud_t level3_kernel_pgt[512];
18275 extern pud_t level3_ident_pgt[512];
18276+extern pud_t level3_vmalloc_start_pgt[512];
18277+extern pud_t level3_vmalloc_end_pgt[512];
18278+extern pud_t level3_vmemmap_pgt[512];
18279+extern pud_t level2_vmemmap_pgt[512];
18280 extern pmd_t level2_kernel_pgt[512];
18281 extern pmd_t level2_fixmap_pgt[512];
18282-extern pmd_t level2_ident_pgt[512];
18283+extern pmd_t level2_ident_pgt[512*2];
18284 extern pte_t level1_fixmap_pgt[512];
18285-extern pgd_t init_level4_pgt[];
18286+extern pte_t level1_vsyscall_pgt[512];
18287+extern pgd_t init_level4_pgt[512];
18288
18289 #define swapper_pg_dir init_level4_pgt
18290
18291@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18292
18293 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18294 {
18295+ pax_open_kernel();
18296 *pmdp = pmd;
18297+ pax_close_kernel();
18298 }
18299
18300 static inline void native_pmd_clear(pmd_t *pmd)
18301@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18302
18303 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18304 {
18305+ pax_open_kernel();
18306 *pudp = pud;
18307+ pax_close_kernel();
18308 }
18309
18310 static inline void native_pud_clear(pud_t *pud)
18311@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18312
18313 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18314 {
18315+ pax_open_kernel();
18316+ *pgdp = pgd;
18317+ pax_close_kernel();
18318+}
18319+
18320+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18321+{
18322 *pgdp = pgd;
18323 }
18324
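
Note the asymmetry introduced above: native_set_pmd()/native_set_pud()/native_set_pgd() each open and close the kernel write window around a single store, while native_set_pgd_batched() deliberately leaves that to the caller, presumably so that bulk page-table updates pay for one CR0.WP round trip instead of one per entry. A hypothetical caller (function name and shape assumed, not from the patch):

/* Hypothetical bulk update: open the window once, not per entry. */
static void clone_pgd_range_batched(pgd_t *dst, const pgd_t *src, int count)
{
	int i;

	pax_open_kernel();
	for (i = 0; i < count; i++)
		native_set_pgd_batched(dst + i, src[i]);
	pax_close_kernel();
}
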
18325diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18326index 602b602..acb53ed 100644
18327--- a/arch/x86/include/asm/pgtable_64_types.h
18328+++ b/arch/x86/include/asm/pgtable_64_types.h
18329@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18330 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18331 #define MODULES_END _AC(0xffffffffff000000, UL)
18332 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18333+#define MODULES_EXEC_VADDR MODULES_VADDR
18334+#define MODULES_EXEC_END MODULES_END
18335 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18336 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18337 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18338 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18339
18340+#define ktla_ktva(addr) (addr)
18341+#define ktva_ktla(addr) (addr)
18342+
18343 #define EARLY_DYNAMIC_PAGE_TABLES 64
18344
18345 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18346diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18347index 25bcd4a..bf3f815 100644
18348--- a/arch/x86/include/asm/pgtable_types.h
18349+++ b/arch/x86/include/asm/pgtable_types.h
18350@@ -110,8 +110,10 @@
18351
18352 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18353 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18354-#else
18355+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18356 #define _PAGE_NX (_AT(pteval_t, 0))
18357+#else
18358+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18359 #endif
18360
18361 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18362@@ -167,6 +169,9 @@ enum page_cache_mode {
18363 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18364 _PAGE_ACCESSED)
18365
18366+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18367+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18368+
18369 #define __PAGE_KERNEL_EXEC \
18370 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18371 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18372@@ -174,7 +179,7 @@ enum page_cache_mode {
18373 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18374 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18375 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18376-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18377+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18378 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18379 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18380 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18381@@ -220,7 +225,7 @@ enum page_cache_mode {
18382 #ifdef CONFIG_X86_64
18383 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18384 #else
18385-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18386+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18387 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18388 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18389 #endif
18390@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18391 {
18392 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18393 }
18394+#endif
18395
18396+#if PAGETABLE_LEVELS == 3
18397+#include <asm-generic/pgtable-nopud.h>
18398+#endif
18399+
18400+#if PAGETABLE_LEVELS == 2
18401+#include <asm-generic/pgtable-nopmd.h>
18402+#endif
18403+
18404+#ifndef __ASSEMBLY__
18405 #if PAGETABLE_LEVELS > 3
18406 typedef struct { pudval_t pud; } pud_t;
18407
18408@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18409 return pud.pud;
18410 }
18411 #else
18412-#include <asm-generic/pgtable-nopud.h>
18413-
18414 static inline pudval_t native_pud_val(pud_t pud)
18415 {
18416 return native_pgd_val(pud.pgd);
18417@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18418 return pmd.pmd;
18419 }
18420 #else
18421-#include <asm-generic/pgtable-nopmd.h>
18422-
18423 static inline pmdval_t native_pmd_val(pmd_t pmd)
18424 {
18425 return native_pgd_val(pmd.pud.pgd);
18426@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
18427
18428 extern pteval_t __supported_pte_mask;
18429 extern void set_nx(void);
18430-extern int nx_enabled;
18431
18432 #define pgprot_writecombine pgprot_writecombine
18433 extern pgprot_t pgprot_writecombine(pgprot_t prot);
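
The three-way _PAGE_NX split above handles hardware without an NX bit: on 64-bit/PAE the real bit is used, under KMEMCHECK/MEM_SOFT_DIRTY (which already claim the spare software bit) NX silently degrades to 0, and otherwise a software-defined PTE bit (_PAGE_BIT_HIDDEN, bit 11 in kernels of this vintage) records the non-executable intent so PAGEEXEC can enforce it by other means. A sketch of testing such a software bit (helper and names assumed, not from the patch):

#include <linux/types.h>

#define SKETCH_PAGE_BIT_HIDDEN	11	/* spare software PTE bit */
#define SKETCH_PAGE_NX_SOFT	(1ULL << SKETCH_PAGE_BIT_HIDDEN)

/* The MMU ignores software bits, so this is free at the TLB level;
 * the actual enforcement has to happen in the fault path. */
static inline bool pteval_soft_nx(u64 pteval)
{
	return pteval & SKETCH_PAGE_NX_SOFT;
}
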
18434diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18435index 8f327184..368fb29 100644
18436--- a/arch/x86/include/asm/preempt.h
18437+++ b/arch/x86/include/asm/preempt.h
18438@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18439 */
18440 static __always_inline bool __preempt_count_dec_and_test(void)
18441 {
18442- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18443+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18444 }
18445
18446 /*
18447diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18448index a092a0c..8e9640b 100644
18449--- a/arch/x86/include/asm/processor.h
18450+++ b/arch/x86/include/asm/processor.h
18451@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18452 /* Index into per_cpu list: */
18453 u16 cpu_index;
18454 u32 microcode;
18455-};
18456+} __randomize_layout;
18457
18458 #define X86_VENDOR_INTEL 0
18459 #define X86_VENDOR_CYRIX 1
18460@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18461 : "memory");
18462 }
18463
18464+/* invpcid (%rdx),%rax */
18465+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18466+
18467+#define INVPCID_SINGLE_ADDRESS 0UL
18468+#define INVPCID_SINGLE_CONTEXT 1UL
18469+#define INVPCID_ALL_GLOBAL 2UL
18470+#define INVPCID_ALL_NONGLOBAL 3UL
18471+
18472+#define PCID_KERNEL 0UL
18473+#define PCID_USER 1UL
18474+#define PCID_NOFLUSH (1UL << 63)
18475+
18476 static inline void load_cr3(pgd_t *pgdir)
18477 {
18478- write_cr3(__pa(pgdir));
18479+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18480 }
18481
18482 #ifdef CONFIG_X86_32
18483@@ -282,7 +294,7 @@ struct tss_struct {
18484
18485 } ____cacheline_aligned;
18486
18487-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18488+extern struct tss_struct init_tss[NR_CPUS];
18489
18490 /*
18491 * Save the original ist values for checking stack pointers during debugging
18492@@ -479,6 +491,7 @@ struct thread_struct {
18493 unsigned short ds;
18494 unsigned short fsindex;
18495 unsigned short gsindex;
18496+ unsigned short ss;
18497 #endif
18498 #ifdef CONFIG_X86_32
18499 unsigned long ip;
18500@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18501 extern unsigned long mmu_cr4_features;
18502 extern u32 *trampoline_cr4_features;
18503
18504-static inline void set_in_cr4(unsigned long mask)
18505-{
18506- unsigned long cr4;
18507-
18508- mmu_cr4_features |= mask;
18509- if (trampoline_cr4_features)
18510- *trampoline_cr4_features = mmu_cr4_features;
18511- cr4 = read_cr4();
18512- cr4 |= mask;
18513- write_cr4(cr4);
18514-}
18515-
18516-static inline void clear_in_cr4(unsigned long mask)
18517-{
18518- unsigned long cr4;
18519-
18520- mmu_cr4_features &= ~mask;
18521- if (trampoline_cr4_features)
18522- *trampoline_cr4_features = mmu_cr4_features;
18523- cr4 = read_cr4();
18524- cr4 &= ~mask;
18525- write_cr4(cr4);
18526-}
18527+extern void set_in_cr4(unsigned long mask);
18528+extern void clear_in_cr4(unsigned long mask);
18529
18530 typedef struct {
18531 unsigned long seg;
18532@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
18533 */
18534 #define TASK_SIZE PAGE_OFFSET
18535 #define TASK_SIZE_MAX TASK_SIZE
18536+
18537+#ifdef CONFIG_PAX_SEGMEXEC
18538+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18539+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18540+#else
18541 #define STACK_TOP TASK_SIZE
18542-#define STACK_TOP_MAX STACK_TOP
18543+#endif
18544+
18545+#define STACK_TOP_MAX TASK_SIZE
18546
18547 #define INIT_THREAD { \
18548- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18549+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18550 .vm86_info = NULL, \
18551 .sysenter_cs = __KERNEL_CS, \
18552 .io_bitmap_ptr = NULL, \
18553@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18554 */
18555 #define INIT_TSS { \
18556 .x86_tss = { \
18557- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18558+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18559 .ss0 = __KERNEL_DS, \
18560 .ss1 = __KERNEL_CS, \
18561 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18562@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
18563 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18564
18565 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18566-#define KSTK_TOP(info) \
18567-({ \
18568- unsigned long *__ptr = (unsigned long *)(info); \
18569- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18570-})
18571+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18572
18573 /*
18574 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18575@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18576 #define task_pt_regs(task) \
18577 ({ \
18578 struct pt_regs *__regs__; \
18579- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18580+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18581 __regs__ - 1; \
18582 })
18583
18584@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18585 * particular problem by preventing anything from being mapped
18586 * at the maximum canonical address.
18587 */
18588-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18589+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18590
18591 /* This decides where the kernel will search for a free chunk of vm
18592 * space during mmap's.
18593 */
18594 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18595- 0xc0000000 : 0xFFFFe000)
18596+ 0xc0000000 : 0xFFFFf000)
18597
18598 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18599 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18600@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18601 #define STACK_TOP_MAX TASK_SIZE_MAX
18602
18603 #define INIT_THREAD { \
18604- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18605+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18606 }
18607
18608 #define INIT_TSS { \
18609- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18610+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18611 }
18612
18613 /*
18614@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18615 */
18616 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18617
18618+#ifdef CONFIG_PAX_SEGMEXEC
18619+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18620+#endif
18621+
18622 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18623
18624 /* Get/set a process' ability to use the timestamp counter instruction */
18625@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18626 return 0;
18627 }
18628
18629-extern unsigned long arch_align_stack(unsigned long sp);
18630+#define arch_align_stack(x) ((x) & ~0xfUL)
18631 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18632
18633 void default_idle(void);
18634@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
18635 #define xen_set_default_idle 0
18636 #endif
18637
18638-void stop_this_cpu(void *dummy);
18639+void stop_this_cpu(void *dummy) __noreturn;
18640 void df_debug(struct pt_regs *regs, long error_code);
18641 #endif /* _ASM_X86_PROCESSOR_H */
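
The PCID_* constants above define the shape of every CR3 load in the PCID-aware paths: bits 0-11 of CR3 select the process-context ID (0 for the kernel view, 1 for the shadowed userland view) and bit 63, when set on a mov to CR3, tells the CPU not to flush the TLB entries tagged with that PCID. A sketch of composing such a value (helper assumed, constants from the hunk):

#include <linux/types.h>

#define PCID_KERNEL	0UL
#define PCID_USER	1UL
#define PCID_NOFLUSH	(1UL << 63)	/* suppress the implicit TLB flush */

static inline unsigned long sketch_make_cr3(unsigned long pgd_pa,
					    unsigned long pcid, bool noflush)
{
	return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
}

load_cr3() above is exactly this with pcid = PCID_KERNEL and noflush = false.
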
18642diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18643index 86fc2bb..bd5049a 100644
18644--- a/arch/x86/include/asm/ptrace.h
18645+++ b/arch/x86/include/asm/ptrace.h
18646@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18647 }
18648
18649 /*
18650- * user_mode_vm(regs) determines whether a register set came from user mode.
18651+ * user_mode(regs) determines whether a register set came from user mode.
18652 * This is true if V8086 mode was enabled OR if the register set was from
18653 * protected mode with RPL-3 CS value. This tricky test checks that with
18654 * one comparison. Many places in the kernel can bypass this full check
18655- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18656+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18657+ * be used.
18658 */
18659-static inline int user_mode(struct pt_regs *regs)
18660+static inline int user_mode_novm(struct pt_regs *regs)
18661 {
18662 #ifdef CONFIG_X86_32
18663 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18664 #else
18665- return !!(regs->cs & 3);
18666+ return !!(regs->cs & SEGMENT_RPL_MASK);
18667 #endif
18668 }
18669
18670-static inline int user_mode_vm(struct pt_regs *regs)
18671+static inline int user_mode(struct pt_regs *regs)
18672 {
18673 #ifdef CONFIG_X86_32
18674 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18675 USER_RPL;
18676 #else
18677- return user_mode(regs);
18678+ return user_mode_novm(regs);
18679 #endif
18680 }
18681
18682@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18683 #ifdef CONFIG_X86_64
18684 static inline bool user_64bit_mode(struct pt_regs *regs)
18685 {
18686+ unsigned long cs = regs->cs & 0xffff;
18687 #ifndef CONFIG_PARAVIRT
18688 /*
18689 * On non-paravirt systems, this is the only long mode CPL 3
18690 * selector. We do not allow long mode selectors in the LDT.
18691 */
18692- return regs->cs == __USER_CS;
18693+ return cs == __USER_CS;
18694 #else
18695 /* Headers are too twisted for this to go in paravirt.h. */
18696- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18697+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18698 #endif
18699 }
18700
18701@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18702 * Traps from the kernel do not save sp and ss.
18703 * Use the helper function to retrieve sp.
18704 */
18705- if (offset == offsetof(struct pt_regs, sp) &&
18706- regs->cs == __KERNEL_CS)
18707- return kernel_stack_pointer(regs);
18708+ if (offset == offsetof(struct pt_regs, sp)) {
18709+ unsigned long cs = regs->cs & 0xffff;
18710+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18711+ return kernel_stack_pointer(regs);
18712+ }
18713 #endif
18714 return *(unsigned long *)((unsigned long)regs + offset);
18715 }
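
The ptrace.h hunk swaps mainline's naming: the full, V8086-aware test becomes user_mode() and the cheaper RPL-only test is demoted to user_mode_novm(), so the safe check is the default and call sites must opt into the shortcut. Masking cs with 0xffff (and testing SEGMENT_RPL_MASK rather than a literal 3) guards against stray data in the upper bits of the saved selector. The two checks reduced to their essentials on i386 (sketch, assuming SEGMENT_RPL_MASK == 3, USER_RPL == 3 and X86_VM_MASK == EFLAGS.VM):

/* Sketch of the two predicates; the real code reads struct pt_regs. */
static inline int sketch_user_novm(unsigned short cs)
{
	return (cs & 3) == 3;			/* ring-3 selector only */
}

static inline int sketch_user(unsigned short cs, unsigned long eflags)
{
	return ((cs & 3) | (eflags & (1UL << 17))) >= 3;  /* RPL 3 or VM86 */
}
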
18716diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18717index ae0e241..e80b10b 100644
18718--- a/arch/x86/include/asm/qrwlock.h
18719+++ b/arch/x86/include/asm/qrwlock.h
18720@@ -7,8 +7,8 @@
18721 #define queue_write_unlock queue_write_unlock
18722 static inline void queue_write_unlock(struct qrwlock *lock)
18723 {
18724- barrier();
18725- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18726+ barrier();
18727+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18728 }
18729 #endif
18730
18731diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18732index 9c6b890..5305f53 100644
18733--- a/arch/x86/include/asm/realmode.h
18734+++ b/arch/x86/include/asm/realmode.h
18735@@ -22,16 +22,14 @@ struct real_mode_header {
18736 #endif
18737 /* APM/BIOS reboot */
18738 u32 machine_real_restart_asm;
18739-#ifdef CONFIG_X86_64
18740 u32 machine_real_restart_seg;
18741-#endif
18742 };
18743
18744 /* This must match data at trampoline_32/64.S */
18745 struct trampoline_header {
18746 #ifdef CONFIG_X86_32
18747 u32 start;
18748- u16 gdt_pad;
18749+ u16 boot_cs;
18750 u16 gdt_limit;
18751 u32 gdt_base;
18752 #else
18753diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18754index a82c4f1..ac45053 100644
18755--- a/arch/x86/include/asm/reboot.h
18756+++ b/arch/x86/include/asm/reboot.h
18757@@ -6,13 +6,13 @@
18758 struct pt_regs;
18759
18760 struct machine_ops {
18761- void (*restart)(char *cmd);
18762- void (*halt)(void);
18763- void (*power_off)(void);
18764+ void (* __noreturn restart)(char *cmd);
18765+ void (* __noreturn halt)(void);
18766+ void (* __noreturn power_off)(void);
18767 void (*shutdown)(void);
18768 void (*crash_shutdown)(struct pt_regs *);
18769- void (*emergency_restart)(void);
18770-};
18771+ void (* __noreturn emergency_restart)(void);
18772+} __no_const;
18773
18774 extern struct machine_ops machine_ops;
18775
18776diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18777index 8f7866a..e442f20 100644
18778--- a/arch/x86/include/asm/rmwcc.h
18779+++ b/arch/x86/include/asm/rmwcc.h
18780@@ -3,7 +3,34 @@
18781
18782 #ifdef CC_HAVE_ASM_GOTO
18783
18784-#define __GEN_RMWcc(fullop, var, cc, ...) \
18785+#ifdef CONFIG_PAX_REFCOUNT
18786+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18787+do { \
18788+ asm_volatile_goto (fullop \
18789+ ";jno 0f\n" \
18790+ fullantiop \
18791+ ";int $4\n0:\n" \
18792+ _ASM_EXTABLE(0b, 0b) \
18793+ ";j" cc " %l[cc_label]" \
18794+ : : "m" (var), ## __VA_ARGS__ \
18795+ : "memory" : cc_label); \
18796+ return 0; \
18797+cc_label: \
18798+ return 1; \
18799+} while (0)
18800+#else
18801+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18802+do { \
18803+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18804+ : : "m" (var), ## __VA_ARGS__ \
18805+ : "memory" : cc_label); \
18806+ return 0; \
18807+cc_label: \
18808+ return 1; \
18809+} while (0)
18810+#endif
18811+
18812+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18813 do { \
18814 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18815 : : "m" (var), ## __VA_ARGS__ \
18816@@ -13,15 +40,46 @@ cc_label: \
18817 return 1; \
18818 } while (0)
18819
18820-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18821- __GEN_RMWcc(op " " arg0, var, cc)
18822+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18823+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18824
18825-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18826- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18827+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18828+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18829+
18830+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18831+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18832+
18833+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18834+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18835
18836 #else /* !CC_HAVE_ASM_GOTO */
18837
18838-#define __GEN_RMWcc(fullop, var, cc, ...) \
18839+#ifdef CONFIG_PAX_REFCOUNT
18840+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18841+do { \
18842+ char c; \
18843+ asm volatile (fullop \
18844+ ";jno 0f\n" \
18845+ fullantiop \
18846+ ";int $4\n0:\n" \
18847+ _ASM_EXTABLE(0b, 0b) \
18848+ "; set" cc " %1" \
18849+ : "+m" (var), "=qm" (c) \
18850+ : __VA_ARGS__ : "memory"); \
18851+ return c != 0; \
18852+} while (0)
18853+#else
18854+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18855+do { \
18856+ char c; \
18857+ asm volatile (fullop "; set" cc " %1" \
18858+ : "+m" (var), "=qm" (c) \
18859+ : __VA_ARGS__ : "memory"); \
18860+ return c != 0; \
18861+} while (0)
18862+#endif
18863+
18864+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18865 do { \
18866 char c; \
18867 asm volatile (fullop "; set" cc " %1" \
18868@@ -30,11 +88,17 @@ do { \
18869 return c != 0; \
18870 } while (0)
18871
18872-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18873- __GEN_RMWcc(op " " arg0, var, cc)
18874+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18875+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18876+
18877+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18878+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18879+
18880+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18881+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18882
18883-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18884- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18885+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18886+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18887
18888 #endif /* CC_HAVE_ASM_GOTO */
18889
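
The checked __GEN_RMWcc variant above pairs every operation with its inverse: the op runs, jno falls through when no signed overflow occurred, otherwise the "antiop" undoes the update and int $4 raises the x86 overflow exception (#OF), which the PaX refcount handler turns into a report. Roughly what GEN_UNARY_RMWcc("decl", "incl", ...) expands to for a plain int, hand-expanded as a sketch (the real macro also emits an _ASM_EXTABLE entry so execution resumes after the trap):

#include <linux/compiler.h>	/* asm_volatile_goto */
#include <linux/types.h>

static inline bool sketch_dec_and_test(int *v)
{
	asm_volatile_goto("decl %0\n\t"
			  "jno 0f\n\t"
			  "incl %0\n\t"		/* undo the overflowing op */
			  "int $4\n"		/* raise #OF */
			  "0:\n\t"
			  "je %l[is_zero]"
			  : : "m" (*v) : "memory" : is_zero);
	return false;
is_zero:
	return true;
}

The _unchecked variants keep the old single-op expansion for counters where wraparound is intentional.
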
18890diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18891index cad82c9..2e5c5c1 100644
18892--- a/arch/x86/include/asm/rwsem.h
18893+++ b/arch/x86/include/asm/rwsem.h
18894@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18895 {
18896 asm volatile("# beginning down_read\n\t"
18897 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18898+
18899+#ifdef CONFIG_PAX_REFCOUNT
18900+ "jno 0f\n"
18901+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18902+ "int $4\n0:\n"
18903+ _ASM_EXTABLE(0b, 0b)
18904+#endif
18905+
18906 /* adds 0x00000001 */
18907 " jns 1f\n"
18908 " call call_rwsem_down_read_failed\n"
18909@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18910 "1:\n\t"
18911 " mov %1,%2\n\t"
18912 " add %3,%2\n\t"
18913+
18914+#ifdef CONFIG_PAX_REFCOUNT
18915+ "jno 0f\n"
18916+ "sub %3,%2\n"
18917+ "int $4\n0:\n"
18918+ _ASM_EXTABLE(0b, 0b)
18919+#endif
18920+
18921 " jle 2f\n\t"
18922 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18923 " jnz 1b\n\t"
18924@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18925 long tmp;
18926 asm volatile("# beginning down_write\n\t"
18927 LOCK_PREFIX " xadd %1,(%2)\n\t"
18928+
18929+#ifdef CONFIG_PAX_REFCOUNT
18930+ "jno 0f\n"
18931+ "mov %1,(%2)\n"
18932+ "int $4\n0:\n"
18933+ _ASM_EXTABLE(0b, 0b)
18934+#endif
18935+
18936 /* adds 0xffff0001, returns the old value */
18937 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18938 /* was the active mask 0 before? */
18939@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18940 long tmp;
18941 asm volatile("# beginning __up_read\n\t"
18942 LOCK_PREFIX " xadd %1,(%2)\n\t"
18943+
18944+#ifdef CONFIG_PAX_REFCOUNT
18945+ "jno 0f\n"
18946+ "mov %1,(%2)\n"
18947+ "int $4\n0:\n"
18948+ _ASM_EXTABLE(0b, 0b)
18949+#endif
18950+
18951 /* subtracts 1, returns the old value */
18952 " jns 1f\n\t"
18953 " call call_rwsem_wake\n" /* expects old value in %edx */
18954@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18955 long tmp;
18956 asm volatile("# beginning __up_write\n\t"
18957 LOCK_PREFIX " xadd %1,(%2)\n\t"
18958+
18959+#ifdef CONFIG_PAX_REFCOUNT
18960+ "jno 0f\n"
18961+ "mov %1,(%2)\n"
18962+ "int $4\n0:\n"
18963+ _ASM_EXTABLE(0b, 0b)
18964+#endif
18965+
18966 /* subtracts 0xffff0001, returns the old value */
18967 " jns 1f\n\t"
18968 " call call_rwsem_wake\n" /* expects old value in %edx */
18969@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18970 {
18971 asm volatile("# beginning __downgrade_write\n\t"
18972 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18973+
18974+#ifdef CONFIG_PAX_REFCOUNT
18975+ "jno 0f\n"
18976+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18977+ "int $4\n0:\n"
18978+ _ASM_EXTABLE(0b, 0b)
18979+#endif
18980+
18981 /*
18982 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18983 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18984@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18985 */
18986 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18987 {
18988- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18989+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18990+
18991+#ifdef CONFIG_PAX_REFCOUNT
18992+ "jno 0f\n"
18993+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18994+ "int $4\n0:\n"
18995+ _ASM_EXTABLE(0b, 0b)
18996+#endif
18997+
18998 : "+m" (sem->count)
18999 : "er" (delta));
19000 }
19001@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19002 */
19003 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19004 {
19005- return delta + xadd(&sem->count, delta);
19006+ return delta + xadd_check_overflow(&sem->count, delta);
19007 }
19008
19009 #endif /* __KERNEL__ */
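
All four rwsem fast paths get the same overflow guard: the xadd/add runs, and on signed overflow the old value is written back and int $4 fires. The magic constants in the comments come from the classic x86 rwsem counter layout; for reference, the i386 values of this era (64-bit widens the active mask):

/* Low word = active readers/writer, high word = waiter bias. */
#define RWSEM_UNLOCKED_VALUE	0x00000000L
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-0x00010000L)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* down_write's xadd therefore adds -0xffff, i.e. 0xffff0001 in
 * 32-bit two's complement, matching the "adds 0xffff0001" comment. */
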
19010diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19011index db257a5..b91bc77 100644
19012--- a/arch/x86/include/asm/segment.h
19013+++ b/arch/x86/include/asm/segment.h
19014@@ -73,10 +73,15 @@
19015 * 26 - ESPFIX small SS
19016 * 27 - per-cpu [ offset to per-cpu data area ]
19017 * 28 - stack_canary-20 [ for stack protector ]
19018- * 29 - unused
19019- * 30 - unused
19020+ * 29 - PCI BIOS CS
19021+ * 30 - PCI BIOS DS
19022 * 31 - TSS for double fault handler
19023 */
19024+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19025+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19026+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19027+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19028+
19029 #define GDT_ENTRY_TLS_MIN 6
19030 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19031
19032@@ -88,6 +93,8 @@
19033
19034 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19035
19036+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19037+
19038 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19039
19040 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19041@@ -113,6 +120,12 @@
19042 #define __KERNEL_STACK_CANARY 0
19043 #endif
19044
19045+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19046+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19047+
19048+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19049+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19050+
19051 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19052
19053 /*
19054@@ -140,7 +153,7 @@
19055 */
19056
19057 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19058-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19059+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19060
19061
19062 #else
19063@@ -164,6 +177,8 @@
19064 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19065 #define __USER32_DS __USER_DS
19066
19067+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19068+
19069 #define GDT_ENTRY_TSS 8 /* needs two entries */
19070 #define GDT_ENTRY_LDT 10 /* needs two entries */
19071 #define GDT_ENTRY_TLS_MIN 12
19072@@ -172,6 +187,8 @@
19073 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19074 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19075
19076+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19077+
19078 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19079 #define FS_TLS 0
19080 #define GS_TLS 1
19081@@ -179,12 +196,14 @@
19082 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19083 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19084
19085-#define GDT_ENTRIES 16
19086+#define GDT_ENTRIES 17
19087
19088 #endif
19089
19090 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19091+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19092 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19093+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19094 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19095 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19096 #ifndef CONFIG_PARAVIRT
19097@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19098 {
19099 unsigned long __limit;
19100 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19101- return __limit + 1;
19102+ return __limit;
19103 }
19104
19105 #endif /* !__ASSEMBLY__ */
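
A segment selector is just the descriptor's GDT index shifted left by 3, OR'd with the requested privilege level (the TI bit, bit 2, stays 0 for GDT entries), which is why all the new __KERNEXEC_*/__PCIBIOS_*/__UDEREF_* selectors above are plain multiplications by 8. Worked out for the PCI BIOS pair (GDT_ENTRY_KERNEL_BASE is 12 on i386):

/* selector = (index << 3) | RPL, TI = 0 for the GDT
 *   __PCIBIOS_CS = (12 + 17) * 8 = 0xe8  -> GDT entry 29
 *   __PCIBIOS_DS = (12 + 18) * 8 = 0xf0  -> GDT entry 30 */

which matches the "29 - PCI BIOS CS" / "30 - PCI BIOS DS" lines added to the layout comment at the top of the hunk.
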
19106diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19107index 8d3120f..352b440 100644
19108--- a/arch/x86/include/asm/smap.h
19109+++ b/arch/x86/include/asm/smap.h
19110@@ -25,11 +25,40 @@
19111
19112 #include <asm/alternative-asm.h>
19113
19114+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19115+#define ASM_PAX_OPEN_USERLAND \
19116+ 661: jmp 663f; \
19117+ .pushsection .altinstr_replacement, "a" ; \
19118+ 662: pushq %rax; nop; \
19119+ .popsection ; \
19120+ .pushsection .altinstructions, "a" ; \
19121+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19122+ .popsection ; \
19123+ call __pax_open_userland; \
19124+ popq %rax; \
19125+ 663:
19126+
19127+#define ASM_PAX_CLOSE_USERLAND \
19128+ 661: jmp 663f; \
19129+ .pushsection .altinstr_replacement, "a" ; \
19130+ 662: pushq %rax; nop; \
19131+ .popsection; \
19132+ .pushsection .altinstructions, "a" ; \
19133+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19134+ .popsection; \
19135+ call __pax_close_userland; \
19136+ popq %rax; \
19137+ 663:
19138+#else
19139+#define ASM_PAX_OPEN_USERLAND
19140+#define ASM_PAX_CLOSE_USERLAND
19141+#endif
19142+
19143 #ifdef CONFIG_X86_SMAP
19144
19145 #define ASM_CLAC \
19146 661: ASM_NOP3 ; \
19147- .pushsection .altinstr_replacement, "ax" ; \
19148+ .pushsection .altinstr_replacement, "a" ; \
19149 662: __ASM_CLAC ; \
19150 .popsection ; \
19151 .pushsection .altinstructions, "a" ; \
19152@@ -38,7 +67,7 @@
19153
19154 #define ASM_STAC \
19155 661: ASM_NOP3 ; \
19156- .pushsection .altinstr_replacement, "ax" ; \
19157+ .pushsection .altinstr_replacement, "a" ; \
19158 662: __ASM_STAC ; \
19159 .popsection ; \
19160 .pushsection .altinstructions, "a" ; \
19161@@ -56,6 +85,37 @@
19162
19163 #include <asm/alternative.h>
19164
19165+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19166+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19167+
19168+extern void __pax_open_userland(void);
19169+static __always_inline unsigned long pax_open_userland(void)
19170+{
19171+
19172+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19173+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19174+ :
19175+ : [open] "i" (__pax_open_userland)
19176+ : "memory", "rax");
19177+#endif
19178+
19179+ return 0;
19180+}
19181+
19182+extern void __pax_close_userland(void);
19183+static __always_inline unsigned long pax_close_userland(void)
19184+{
19185+
19186+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19187+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19188+ :
19189+ : [close] "i" (__pax_close_userland)
19190+ : "memory", "rax");
19191+#endif
19192+
19193+ return 0;
19194+}
19195+
19196 #ifdef CONFIG_X86_SMAP
19197
19198 static __always_inline void clac(void)
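
pax_open_userland()/pax_close_userland() lean on the kernel's alternatives machinery: the 5-byte NOP is rewritten into a direct call at boot only on CPUs flagged with the PaX-defined X86_FEATURE_STRONGUDEREF, so the common case costs a single patched NOP. The same pattern in miniature, with an assumed callee and a stand-in feature bit (any real cpufeature bit works for the sketch):

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/nops.h>

extern void my_slow_path(void);		/* hypothetical callee */

static __always_inline void sketch_maybe_call(void)
{
	/* NOP5 by default; patched to "call my_slow_path" at boot
	 * when the chosen feature bit is set. */
	asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[fn]", X86_FEATURE_PCID)
		     : : [fn] "i" (my_slow_path) : "memory", "rax");
}

The "rax" clobber follows the patch's own inline-asm form, whose assembly counterpart saves and restores %rax around the call.
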
19199diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19200index 8cd1cc3..827e09e 100644
19201--- a/arch/x86/include/asm/smp.h
19202+++ b/arch/x86/include/asm/smp.h
19203@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19204 /* cpus sharing the last level cache: */
19205 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19206 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19207-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19208+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19209
19210 static inline struct cpumask *cpu_sibling_mask(int cpu)
19211 {
19212@@ -78,7 +78,7 @@ struct smp_ops {
19213
19214 void (*send_call_func_ipi)(const struct cpumask *mask);
19215 void (*send_call_func_single_ipi)(int cpu);
19216-};
19217+} __no_const;
19218
19219 /* Globals due to paravirt */
19220 extern void set_cpu_sibling_map(int cpu);
19221@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19222 extern int safe_smp_processor_id(void);
19223
19224 #elif defined(CONFIG_X86_64_SMP)
19225-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19226-
19227-#define stack_smp_processor_id() \
19228-({ \
19229- struct thread_info *ti; \
19230- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19231- ti->cpu; \
19232-})
19233+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19234+#define stack_smp_processor_id() raw_smp_processor_id()
19235 #define safe_smp_processor_id() smp_processor_id()
19236
19237 #endif
19238diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19239index 6a99859..03cb807 100644
19240--- a/arch/x86/include/asm/stackprotector.h
19241+++ b/arch/x86/include/asm/stackprotector.h
19242@@ -47,7 +47,7 @@
19243 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19244 */
19245 #define GDT_STACK_CANARY_INIT \
19246- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19247+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19248
19249 /*
19250 * Initialize the stackprotector canary value.
19251@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19252
19253 static inline void load_stack_canary_segment(void)
19254 {
19255-#ifdef CONFIG_X86_32
19256+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19257 asm volatile ("mov %0, %%gs" : : "r" (0));
19258 #endif
19259 }
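
The 0x18 -> 0x17 change pairs with the get_limit() change in segment.h above: lsl yields the architectural limit, i.e. the highest valid byte offset (size - 1), and once get_limit() stops adding 1 the canary descriptor is presumably tightened to describe the 24-byte stack_canary area by its true limit:

/* GDT_ENTRY_INIT(flags, base, limit) takes the inclusive limit:
 * a segment covering bytes 0..23 has limit 24 - 1 = 0x17. */
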
19260diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19261index 70bbe39..4ae2bd4 100644
19262--- a/arch/x86/include/asm/stacktrace.h
19263+++ b/arch/x86/include/asm/stacktrace.h
19264@@ -11,28 +11,20 @@
19265
19266 extern int kstack_depth_to_print;
19267
19268-struct thread_info;
19269+struct task_struct;
19270 struct stacktrace_ops;
19271
19272-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19273- unsigned long *stack,
19274- unsigned long bp,
19275- const struct stacktrace_ops *ops,
19276- void *data,
19277- unsigned long *end,
19278- int *graph);
19279+typedef unsigned long walk_stack_t(struct task_struct *task,
19280+ void *stack_start,
19281+ unsigned long *stack,
19282+ unsigned long bp,
19283+ const struct stacktrace_ops *ops,
19284+ void *data,
19285+ unsigned long *end,
19286+ int *graph);
19287
19288-extern unsigned long
19289-print_context_stack(struct thread_info *tinfo,
19290- unsigned long *stack, unsigned long bp,
19291- const struct stacktrace_ops *ops, void *data,
19292- unsigned long *end, int *graph);
19293-
19294-extern unsigned long
19295-print_context_stack_bp(struct thread_info *tinfo,
19296- unsigned long *stack, unsigned long bp,
19297- const struct stacktrace_ops *ops, void *data,
19298- unsigned long *end, int *graph);
19299+extern walk_stack_t print_context_stack;
19300+extern walk_stack_t print_context_stack_bp;
19301
19302 /* Generic stack tracer with callbacks */
19303
19304@@ -40,7 +32,7 @@ struct stacktrace_ops {
19305 void (*address)(void *data, unsigned long address, int reliable);
19306 /* On negative return stop dumping */
19307 int (*stack)(void *data, char *name);
19308- walk_stack_t walk_stack;
19309+ walk_stack_t *walk_stack;
19310 };
19311
19312 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
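
walk_stack_t becomes a function typedef rather than a pointer typedef, which is what lets the header shrink the two prototypes to "extern walk_stack_t print_context_stack;" while struct stacktrace_ops must now spell the pointer out as walk_stack_t *. The C mechanics in miniature (sketch with a reduced signature):

typedef unsigned long walker_fn(unsigned long *stack);	/* function type */

extern walker_fn my_walker;		/* declares a function, not a pointer */

unsigned long my_walker(unsigned long *stack)
{
	return *stack;
}

struct sketch_ops {
	walker_fn *walk;		/* the pointer is now explicit */
};

static struct sketch_ops ops = { .walk = my_walker };

One upside of this style is that the compiler checks every walker definition against the single typedef.
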
19313diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19314index 751bf4b..a1278b5 100644
19315--- a/arch/x86/include/asm/switch_to.h
19316+++ b/arch/x86/include/asm/switch_to.h
19317@@ -112,7 +112,7 @@ do { \
19318 "call __switch_to\n\t" \
19319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19320 __switch_canary \
19321- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19322+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19323 "movq %%rax,%%rdi\n\t" \
19324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19325 "jnz ret_from_fork\n\t" \
19326@@ -123,7 +123,7 @@ do { \
19327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19329 [_tif_fork] "i" (_TIF_FORK), \
19330- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19331+ [thread_info] "m" (current_tinfo), \
19332 [current_task] "m" (current_task) \
19333 __switch_canary_iparam \
19334 : "memory", "cc" __EXTRA_CLOBBER)
19335diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19336index 547e344..6be1175 100644
19337--- a/arch/x86/include/asm/thread_info.h
19338+++ b/arch/x86/include/asm/thread_info.h
19339@@ -24,7 +24,6 @@ struct exec_domain;
19340 #include <linux/atomic.h>
19341
19342 struct thread_info {
19343- struct task_struct *task; /* main task structure */
19344 struct exec_domain *exec_domain; /* execution domain */
19345 __u32 flags; /* low level flags */
19346 __u32 status; /* thread synchronous flags */
19347@@ -33,13 +32,13 @@ struct thread_info {
19348 mm_segment_t addr_limit;
19349 struct restart_block restart_block;
19350 void __user *sysenter_return;
19351+ unsigned long lowest_stack;
19352 unsigned int sig_on_uaccess_error:1;
19353 unsigned int uaccess_err:1; /* uaccess failed */
19354 };
19355
19356-#define INIT_THREAD_INFO(tsk) \
19357+#define INIT_THREAD_INFO \
19358 { \
19359- .task = &tsk, \
19360 .exec_domain = &default_exec_domain, \
19361 .flags = 0, \
19362 .cpu = 0, \
19363@@ -50,7 +49,7 @@ struct thread_info {
19364 }, \
19365 }
19366
19367-#define init_thread_info (init_thread_union.thread_info)
19368+#define init_thread_info (init_thread_union.stack)
19369 #define init_stack (init_thread_union.stack)
19370
19371 #else /* !__ASSEMBLY__ */
19372@@ -91,6 +90,7 @@ struct thread_info {
19373 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19374 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19375 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19376+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19377
19378 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19379 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19380@@ -115,17 +115,18 @@ struct thread_info {
19381 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19382 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19383 #define _TIF_X32 (1 << TIF_X32)
19384+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19385
19386 /* work to do in syscall_trace_enter() */
19387 #define _TIF_WORK_SYSCALL_ENTRY \
19388 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19389 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19390- _TIF_NOHZ)
19391+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19392
19393 /* work to do in syscall_trace_leave() */
19394 #define _TIF_WORK_SYSCALL_EXIT \
19395 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19396- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19397+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19398
19399 /* work to do on interrupt/exception return */
19400 #define _TIF_WORK_MASK \
19401@@ -136,7 +137,7 @@ struct thread_info {
19402 /* work to do on any return to user space */
19403 #define _TIF_ALLWORK_MASK \
19404 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19405- _TIF_NOHZ)
19406+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19407
19408 /* Only used for 64 bit */
19409 #define _TIF_DO_NOTIFY_MASK \
19410@@ -151,7 +152,6 @@ struct thread_info {
19411 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19412
19413 #define STACK_WARN (THREAD_SIZE/8)
19414-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19415
19416 /*
19417 * macros/functions for gaining access to the thread information structure
19418@@ -162,26 +162,18 @@ struct thread_info {
19419
19420 DECLARE_PER_CPU(unsigned long, kernel_stack);
19421
19422+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19423+
19424 static inline struct thread_info *current_thread_info(void)
19425 {
19426- struct thread_info *ti;
19427- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19428- KERNEL_STACK_OFFSET - THREAD_SIZE);
19429- return ti;
19430+ return this_cpu_read_stable(current_tinfo);
19431 }
19432
19433 #else /* !__ASSEMBLY__ */
19434
19435 /* how to get the thread information struct from ASM */
19436 #define GET_THREAD_INFO(reg) \
19437- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19438- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19439-
19440-/*
19441- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19442- * a certain register (to be used in assembler memory operands).
19443- */
19444-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19445+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19446
19447 #endif
19448
19449@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19450 extern void arch_task_cache_init(void);
19451 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19452 extern void arch_release_task_struct(struct task_struct *tsk);
19453+
19454+#define __HAVE_THREAD_FUNCTIONS
19455+#define task_thread_info(task) (&(task)->tinfo)
19456+#define task_stack_page(task) ((task)->stack)
19457+#define setup_thread_stack(p, org) do {} while (0)
19458+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19459+
19460 #endif
19461 #endif /* _ASM_X86_THREAD_INFO_H */
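
With the task pointer gone from thread_info and the KERNEL_STACK_OFFSET masking trick removed, current_thread_info() collapses to one per-CPU load of current_tinfo, and __HAVE_THREAD_FUNCTIONS reroutes task_thread_info() to a tinfo member embedded in task_struct (added elsewhere in this patch; the layout below is an assumed sketch):

/* Assumed sketch of the resulting layout, not the patch's struct. */
struct task_struct_sketch {
	void *stack;			/* still the kernel stack */
	struct thread_info tinfo;	/* thread_info lives here now */
};

/* current_thread_info() is then
 *	this_cpu_read_stable(current_tinfo)
 * instead of the old
 *	(kernel_stack + KERNEL_STACK_OFFSET - THREAD_SIZE) arithmetic. */

One apparent motivation is that thread_info (addr_limit in particular) no longer sits at the bottom of the overflowable kernel stack.
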
19462diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19463index 04905bf..1178cdf 100644
19464--- a/arch/x86/include/asm/tlbflush.h
19465+++ b/arch/x86/include/asm/tlbflush.h
19466@@ -17,18 +17,44 @@
19467
19468 static inline void __native_flush_tlb(void)
19469 {
19470+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19471+ u64 descriptor[2];
19472+
19473+ descriptor[0] = PCID_KERNEL;
19474+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19475+ return;
19476+ }
19477+
19478+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19479+ if (static_cpu_has(X86_FEATURE_PCID)) {
19480+ unsigned int cpu = raw_get_cpu();
19481+
19482+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19483+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19484+ raw_put_cpu_no_resched();
19485+ return;
19486+ }
19487+#endif
19488+
19489 native_write_cr3(native_read_cr3());
19490 }
19491
19492 static inline void __native_flush_tlb_global_irq_disabled(void)
19493 {
19494- unsigned long cr4;
19495+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19496+ u64 descriptor[2];
19497
19498- cr4 = native_read_cr4();
19499- /* clear PGE */
19500- native_write_cr4(cr4 & ~X86_CR4_PGE);
19501- /* write old PGE again and flush TLBs */
19502- native_write_cr4(cr4);
19503+ descriptor[0] = PCID_KERNEL;
19504+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19505+ } else {
19506+ unsigned long cr4;
19507+
19508+ cr4 = native_read_cr4();
19509+ /* clear PGE */
19510+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19511+ /* write old PGE again and flush TLBs */
19512+ native_write_cr4(cr4);
19513+ }
19514 }
19515
19516 static inline void __native_flush_tlb_global(void)
19517@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19518
19519 static inline void __native_flush_tlb_single(unsigned long addr)
19520 {
19521+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19522+ u64 descriptor[2];
19523+
19524+ descriptor[0] = PCID_KERNEL;
19525+ descriptor[1] = addr;
19526+
19527+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19528+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19529+ if (addr < TASK_SIZE_MAX)
19530+ descriptor[1] += pax_user_shadow_base;
19531+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19532+ }
19533+
19534+ descriptor[0] = PCID_USER;
19535+ descriptor[1] = addr;
19536+#endif
19537+
19538+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19539+ return;
19540+ }
19541+
19542+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19543+ if (static_cpu_has(X86_FEATURE_PCID)) {
19544+ unsigned int cpu = raw_get_cpu();
19545+
19546+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19547+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19548+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19549+ raw_put_cpu_no_resched();
19550+
19551+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19552+ addr += pax_user_shadow_base;
19553+ }
19554+#endif
19555+
19556 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19557 }
19558
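
The INVPCID paths above build the instruction's 128-bit memory descriptor by hand: the first quadword carries the target PCID in its low 12 bits, the second the linear address (consulted only for the single-address type 0), and the register operand selects one of the four invalidation types from processor.h. The hand-coded bytes decode as invpcid (%rdx),%rax, hence the "d"/"a" constraints. As a self-contained sketch:

#include <linux/types.h>

static inline void sketch_invpcid(unsigned long type,
				  unsigned long pcid, unsigned long addr)
{
	u64 desc[2] = { pcid, addr };	/* desc[0] bits 0-11 = PCID */

	/* .byte form of: invpcid (%rdx),%rax */
	asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"
		     : : "d" (&desc), "a" (type) : "memory");
}
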
19559diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19560index 0d592e0..7430aad 100644
19561--- a/arch/x86/include/asm/uaccess.h
19562+++ b/arch/x86/include/asm/uaccess.h
19563@@ -7,6 +7,7 @@
19564 #include <linux/compiler.h>
19565 #include <linux/thread_info.h>
19566 #include <linux/string.h>
19567+#include <linux/spinlock.h>
19568 #include <asm/asm.h>
19569 #include <asm/page.h>
19570 #include <asm/smap.h>
19571@@ -29,7 +30,12 @@
19572
19573 #define get_ds() (KERNEL_DS)
19574 #define get_fs() (current_thread_info()->addr_limit)
19575+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19576+void __set_fs(mm_segment_t x);
19577+void set_fs(mm_segment_t x);
19578+#else
19579 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19580+#endif
19581
19582 #define segment_eq(a, b) ((a).seg == (b).seg)
19583
19584@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19585 * checks that the pointer is in the user space range - after calling
19586 * this function, memory access functions may still return -EFAULT.
19587 */
19588-#define access_ok(type, addr, size) \
19589- likely(!__range_not_ok(addr, size, user_addr_max()))
19590+extern int _cond_resched(void);
19591+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19592+#define access_ok(type, addr, size) \
19593+({ \
19594+ unsigned long __size = size; \
19595+ unsigned long __addr = (unsigned long)addr; \
19596+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19597+ if (__ret_ao && __size) { \
19598+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19599+ unsigned long __end_ao = __addr + __size - 1; \
19600+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19601+ while (__addr_ao <= __end_ao) { \
19602+ char __c_ao; \
19603+ __addr_ao += PAGE_SIZE; \
19604+ if (__size > PAGE_SIZE) \
19605+ _cond_resched(); \
19606+ if (__get_user(__c_ao, (char __user *)__addr)) \
19607+ break; \
19608+ if (type != VERIFY_WRITE) { \
19609+ __addr = __addr_ao; \
19610+ continue; \
19611+ } \
19612+ if (__put_user(__c_ao, (char __user *)__addr)) \
19613+ break; \
19614+ __addr = __addr_ao; \
19615+ } \
19616+ } \
19617+ } \
19618+ __ret_ao; \
19619+})
19620
19621 /*
19622 * The exception table consists of pairs of addresses relative to the
19623@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19624 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19625 __chk_user_ptr(ptr); \
19626 might_fault(); \
19627+ pax_open_userland(); \
19628 asm volatile("call __get_user_%P3" \
19629 : "=a" (__ret_gu), "=r" (__val_gu) \
19630 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19631 (x) = (__typeof__(*(ptr))) __val_gu; \
19632+ pax_close_userland(); \
19633 __ret_gu; \
19634 })
19635
19636@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19637 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19638 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19639
19640-
19641+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19642+#define __copyuser_seg "gs;"
19643+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19644+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19645+#else
19646+#define __copyuser_seg
19647+#define __COPYUSER_SET_ES
19648+#define __COPYUSER_RESTORE_ES
19649+#endif
19650
19651 #ifdef CONFIG_X86_32
19652 #define __put_user_asm_u64(x, addr, err, errret) \
19653 asm volatile(ASM_STAC "\n" \
19654- "1: movl %%eax,0(%2)\n" \
19655- "2: movl %%edx,4(%2)\n" \
19656+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19657+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19658 "3: " ASM_CLAC "\n" \
19659 ".section .fixup,\"ax\"\n" \
19660 "4: movl %3,%0\n" \
19661@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19662
19663 #define __put_user_asm_ex_u64(x, addr) \
19664 asm volatile(ASM_STAC "\n" \
19665- "1: movl %%eax,0(%1)\n" \
19666- "2: movl %%edx,4(%1)\n" \
19667+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19668+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19669 "3: " ASM_CLAC "\n" \
19670 _ASM_EXTABLE_EX(1b, 2b) \
19671 _ASM_EXTABLE_EX(2b, 3b) \
19672@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19673 __typeof__(*(ptr)) __pu_val; \
19674 __chk_user_ptr(ptr); \
19675 might_fault(); \
19676- __pu_val = x; \
19677+ __pu_val = (x); \
19678+ pax_open_userland(); \
19679 switch (sizeof(*(ptr))) { \
19680 case 1: \
19681 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19682@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19683 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19684 break; \
19685 } \
19686+ pax_close_userland(); \
19687 __ret_pu; \
19688 })
19689
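
After this file's changes, every user accessor is bracketed by pax_open_userland()/pax_close_userland(), and on UDEREF i386 the memory operand itself gains a %gs override via __copyuser_seg, so userland is reachable only inside these windows; the rewritten access_ok() earlier in the file additionally pre-faults each page of a boundary-crossing range. From the caller's side nothing changes, which is the point. A hypothetical caller for illustration:

/* Hypothetical example; the open/close calls happen inside
 * access_ok()/get_user() after this patch, not here. */
static long read_user_flag(int __user *uflag)
{
	int val;

	if (!access_ok(VERIFY_READ, uflag, sizeof(*uflag)))
		return -EFAULT;
	if (get_user(val, uflag))
		return -EFAULT;
	return val;
}
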
19690@@ -355,8 +401,10 @@ do { \
19691 } while (0)
19692
19693 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19694+do { \
19695+ pax_open_userland(); \
19696 asm volatile(ASM_STAC "\n" \
19697- "1: mov"itype" %2,%"rtype"1\n" \
19698+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19699 "2: " ASM_CLAC "\n" \
19700 ".section .fixup,\"ax\"\n" \
19701 "3: mov %3,%0\n" \
19702@@ -364,8 +412,10 @@ do { \
19703 " jmp 2b\n" \
19704 ".previous\n" \
19705 _ASM_EXTABLE(1b, 3b) \
19706- : "=r" (err), ltype(x) \
19707- : "m" (__m(addr)), "i" (errret), "0" (err))
19708+ : "=r" (err), ltype (x) \
19709+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19710+ pax_close_userland(); \
19711+} while (0)
19712
19713 #define __get_user_size_ex(x, ptr, size) \
19714 do { \
19715@@ -389,7 +439,7 @@ do { \
19716 } while (0)
19717
19718 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19719- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19720+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19721 "2:\n" \
19722 _ASM_EXTABLE_EX(1b, 2b) \
19723 : ltype(x) : "m" (__m(addr)))
19724@@ -406,13 +456,24 @@ do { \
19725 int __gu_err; \
19726 unsigned long __gu_val; \
19727 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19728- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19729+ (x) = (__typeof__(*(ptr)))__gu_val; \
19730 __gu_err; \
19731 })
19732
19733 /* FIXME: this hack is definitely wrong -AK */
19734 struct __large_struct { unsigned long buf[100]; };
19735-#define __m(x) (*(struct __large_struct __user *)(x))
19736+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19737+#define ____m(x) \
19738+({ \
19739+ unsigned long ____x = (unsigned long)(x); \
19740+ if (____x < pax_user_shadow_base) \
19741+ ____x += pax_user_shadow_base; \
19742+ (typeof(x))____x; \
19743+})
19744+#else
19745+#define ____m(x) (x)
19746+#endif
19747+#define __m(x) (*(struct __large_struct __user *)____m(x))
19748
19749 /*
19750 * Tell gcc we read from memory instead of writing: this is because
19751@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
19752 * aliasing issues.
19753 */
19754 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19755+do { \
19756+ pax_open_userland(); \
19757 asm volatile(ASM_STAC "\n" \
19758- "1: mov"itype" %"rtype"1,%2\n" \
19759+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19760 "2: " ASM_CLAC "\n" \
19761 ".section .fixup,\"ax\"\n" \
19762 "3: mov %3,%0\n" \
19763@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
19764 ".previous\n" \
19765 _ASM_EXTABLE(1b, 3b) \
19766 : "=r"(err) \
19767- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19768+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19769+ pax_close_userland(); \
19770+} while (0)
19771
19772 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19773- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19774+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19775 "2:\n" \
19776 _ASM_EXTABLE_EX(1b, 2b) \
19777 : : ltype(x), "m" (__m(addr)))
19778@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
19779 */
19780 #define uaccess_try do { \
19781 current_thread_info()->uaccess_err = 0; \
19782+ pax_open_userland(); \
19783 stac(); \
19784 barrier();
19785
19786 #define uaccess_catch(err) \
19787 clac(); \
19788+ pax_close_userland(); \
19789 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19790 } while (0)
19791
19792@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
19793 * On error, the variable @x is set to zero.
19794 */
19795
19796+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19797+#define __get_user(x, ptr) get_user((x), (ptr))
19798+#else
19799 #define __get_user(x, ptr) \
19800 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19801+#endif
19802
19803 /**
19804 * __put_user: - Write a simple value into user space, with less checking.
19805@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
19806 * Returns zero on success, or -EFAULT on error.
19807 */
19808
19809+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19810+#define __put_user(x, ptr) put_user((x), (ptr))
19811+#else
19812 #define __put_user(x, ptr) \
19813 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19814+#endif
19815
19816 #define __get_user_unaligned __get_user
19817 #define __put_user_unaligned __put_user
19818@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
19819 #define get_user_ex(x, ptr) do { \
19820 unsigned long __gue_val; \
19821 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19822- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19823+ (x) = (__typeof__(*(ptr)))__gue_val; \
19824 } while (0)
19825
19826 #define put_user_try uaccess_try
19827@@ -531,7 +606,7 @@ extern __must_check long strlen_user(const char __user *str);
19828 extern __must_check long strnlen_user(const char __user *str, long n);
19829
19830 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19831-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19832+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19833
19834 extern void __cmpxchg_wrong_size(void)
19835 __compiletime_error("Bad argument size for cmpxchg");
19836@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
19837 __typeof__(ptr) __uval = (uval); \
19838 __typeof__(*(ptr)) __old = (old); \
19839 __typeof__(*(ptr)) __new = (new); \
19840+ pax_open_userland(); \
19841 switch (size) { \
19842 case 1: \
19843 { \
19844 asm volatile("\t" ASM_STAC "\n" \
19845- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19846+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19847 "2:\t" ASM_CLAC "\n" \
19848 "\t.section .fixup, \"ax\"\n" \
19849 "3:\tmov %3, %0\n" \
19850 "\tjmp 2b\n" \
19851 "\t.previous\n" \
19852 _ASM_EXTABLE(1b, 3b) \
19853- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19854+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19855 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19856 : "memory" \
19857 ); \
19858@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
19859 case 2: \
19860 { \
19861 asm volatile("\t" ASM_STAC "\n" \
19862- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19863+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19864 "2:\t" ASM_CLAC "\n" \
19865 "\t.section .fixup, \"ax\"\n" \
19866 "3:\tmov %3, %0\n" \
19867 "\tjmp 2b\n" \
19868 "\t.previous\n" \
19869 _ASM_EXTABLE(1b, 3b) \
19870- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19871+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19872 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19873 : "memory" \
19874 ); \
19875@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
19876 case 4: \
19877 { \
19878 asm volatile("\t" ASM_STAC "\n" \
19879- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19880+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19881 "2:\t" ASM_CLAC "\n" \
19882 "\t.section .fixup, \"ax\"\n" \
19883 "3:\tmov %3, %0\n" \
19884 "\tjmp 2b\n" \
19885 "\t.previous\n" \
19886 _ASM_EXTABLE(1b, 3b) \
19887- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19888+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19889 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19890 : "memory" \
19891 ); \
19892@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
19893 __cmpxchg_wrong_size(); \
19894 \
19895 asm volatile("\t" ASM_STAC "\n" \
19896- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19897+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19898 "2:\t" ASM_CLAC "\n" \
19899 "\t.section .fixup, \"ax\"\n" \
19900 "3:\tmov %3, %0\n" \
19901 "\tjmp 2b\n" \
19902 "\t.previous\n" \
19903 _ASM_EXTABLE(1b, 3b) \
19904- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19905+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19906 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19907 : "memory" \
19908 ); \
19909@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
19910 default: \
19911 __cmpxchg_wrong_size(); \
19912 } \
19913+ pax_close_userland(); \
19914 *__uval = __old; \
19915 __ret; \
19916 })
19917@@ -636,17 +713,6 @@ extern struct movsl_mask {
19918
19919 #define ARCH_HAS_NOCACHE_UACCESS 1
19920
19921-#ifdef CONFIG_X86_32
19922-# include <asm/uaccess_32.h>
19923-#else
19924-# include <asm/uaccess_64.h>
19925-#endif
19926-
19927-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19928- unsigned n);
19929-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19930- unsigned n);
19931-
19932 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19933 # define copy_user_diag __compiletime_error
19934 #else
19935@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19936 extern void copy_user_diag("copy_from_user() buffer size is too small")
19937 copy_from_user_overflow(void);
19938 extern void copy_user_diag("copy_to_user() buffer size is too small")
19939-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19940+copy_to_user_overflow(void);
19941
19942 #undef copy_user_diag
19943
19944@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19945
19946 extern void
19947 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19948-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19949+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19950 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19951
19952 #else
19953@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19954
19955 #endif
19956
19957+#ifdef CONFIG_X86_32
19958+# include <asm/uaccess_32.h>
19959+#else
19960+# include <asm/uaccess_64.h>
19961+#endif
19962+
19963 static inline unsigned long __must_check
19964 copy_from_user(void *to, const void __user *from, unsigned long n)
19965 {
19966- int sz = __compiletime_object_size(to);
19967+ size_t sz = __compiletime_object_size(to);
19968
19969 might_fault();
19970
19971@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19972 * case, and do only runtime checking for non-constant sizes.
19973 */
19974
19975- if (likely(sz < 0 || sz >= n))
19976- n = _copy_from_user(to, from, n);
19977- else if(__builtin_constant_p(n))
19978- copy_from_user_overflow();
19979- else
19980- __copy_from_user_overflow(sz, n);
19981+ if (likely(sz != (size_t)-1 && sz < n)) {
19982+ if(__builtin_constant_p(n))
19983+ copy_from_user_overflow();
19984+ else
19985+ __copy_from_user_overflow(sz, n);
19986+ } else if (access_ok(VERIFY_READ, from, n))
19987+ n = __copy_from_user(to, from, n);
19988+ else if ((long)n > 0)
19989+ memset(to, 0, n);
19990
19991 return n;
19992 }
19993@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19994 static inline unsigned long __must_check
19995 copy_to_user(void __user *to, const void *from, unsigned long n)
19996 {
19997- int sz = __compiletime_object_size(from);
19998+ size_t sz = __compiletime_object_size(from);
19999
20000 might_fault();
20001
20002 /* See the comment in copy_from_user() above. */
20003- if (likely(sz < 0 || sz >= n))
20004- n = _copy_to_user(to, from, n);
20005- else if(__builtin_constant_p(n))
20006- copy_to_user_overflow();
20007- else
20008- __copy_to_user_overflow(sz, n);
20009+ if (likely(sz != (size_t)-1 && sz < n)) {
20010+ if(__builtin_constant_p(n))
20011+ copy_to_user_overflow();
20012+ else
20013+ __copy_to_user_overflow(sz, n);
20014+ } else if (access_ok(VERIFY_WRITE, to, n))
20015+ n = __copy_to_user(to, from, n);
20016
20017 return n;
20018 }
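
The rework above inverts the old size check: __compiletime_object_size() yields (size_t)-1 when the object size is unknown, so the overflow diagnostics now fire only when the compiler can prove sz < n, and the fallback path performs its own access_ok() check and zeroes the destination on a failed read instead of deferring to _copy_from_user(). A minimal userspace sketch of the same size check, built on GCC's __builtin_object_size (the builtin behind __compiletime_object_size); safe_copy() and SAFE_COPY() are illustrative names, not kernel APIs, and the demo should be built with -O2 so the builtin can see the sizes:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* safe_copy() mirrors the new check: sz == (size_t)-1 means "size not
     * known at compile time", so only a provable overflow is refused. */
    static size_t safe_copy(void *to, const void *from, size_t n, size_t sz)
    {
        if (sz != (size_t)-1 && sz < n) {
            fprintf(stderr, "copy overflow: object %zu, request %zu\n", sz, n);
            return n;               /* everything reported as uncopied */
        }
        memcpy(to, from, n);
        return 0;                   /* 0 bytes left uncopied */
    }

    /* mode 1 = closest enclosing subobject (the kernel uses mode 0) */
    #define SAFE_COPY(to, from, n) \
        safe_copy((to), (from), (n), __builtin_object_size((to), 1))

    int main(void)
    {
        struct { char small[8]; char spill[8]; } d;  /* spill keeps the demo safe */
        const char src[16] = "0123456789abcde";

        printf("fits:     %zu left\n", SAFE_COPY(d.small, src, sizeof(d.small)));
        printf("overflow: %zu left\n", SAFE_COPY(d.small, src, sizeof(src)));
        return 0;
    }
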
20019diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20020index 3c03a5d..edb68ae 100644
20021--- a/arch/x86/include/asm/uaccess_32.h
20022+++ b/arch/x86/include/asm/uaccess_32.h
20023@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20024 * anything, so this is accurate.
20025 */
20026
20027-static __always_inline unsigned long __must_check
20028+static __always_inline __size_overflow(3) unsigned long __must_check
20029 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20030 {
20031+ if ((long)n < 0)
20032+ return n;
20033+
20034+ check_object_size(from, n, true);
20035+
20036 if (__builtin_constant_p(n)) {
20037 unsigned long ret;
20038
20039@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20040 __copy_to_user(void __user *to, const void *from, unsigned long n)
20041 {
20042 might_fault();
20043+
20044 return __copy_to_user_inatomic(to, from, n);
20045 }
20046
20047-static __always_inline unsigned long
20048+static __always_inline __size_overflow(3) unsigned long
20049 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20050 {
20051+ if ((long)n < 0)
20052+ return n;
20053+
20054 /* Avoid zeroing the tail if the copy fails..
20055 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20056 * but as the zeroing behaviour is only significant when n is not
20057@@ -137,6 +146,12 @@ static __always_inline unsigned long
20058 __copy_from_user(void *to, const void __user *from, unsigned long n)
20059 {
20060 might_fault();
20061+
20062+ if ((long)n < 0)
20063+ return n;
20064+
20065+ check_object_size(to, n, false);
20066+
20067 if (__builtin_constant_p(n)) {
20068 unsigned long ret;
20069
20070@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20071 const void __user *from, unsigned long n)
20072 {
20073 might_fault();
20074+
20075+ if ((long)n < 0)
20076+ return n;
20077+
20078 if (__builtin_constant_p(n)) {
20079 unsigned long ret;
20080
20081@@ -181,7 +200,10 @@ static __always_inline unsigned long
20082 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20083 unsigned long n)
20084 {
20085- return __copy_from_user_ll_nocache_nozero(to, from, n);
20086+ if ((long)n < 0)
20087+ return n;
20088+
20089+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20090 }
20091
20092 #endif /* _ASM_X86_UACCESS_32_H */
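
Every 32-bit copy helper above now rejects a length with the sign bit set before doing any work: a length produced by signed arithmetic that went negative turns into a huge unsigned value, and refusing it outright (by reporting the whole copy as failed) is far safer than letting the copy run away. A standalone sketch of the guard; guarded_copy() is a made-up name for illustration:

    #include <stdio.h>

    static unsigned long guarded_copy(unsigned long n)
    {
        if ((long)n < 0)    /* top bit set: certainly a bogus length */
            return n;       /* copy nothing, caller sees a full failure */
        /* ...the real copy would run here... */
        return 0;
    }

    int main(void)
    {
        long len = 4 - 20;  /* signed length arithmetic gone negative */
        printf("left uncopied: %lu\n", guarded_copy((unsigned long)len));
        return 0;
    }
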
20093diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20094index 12a26b9..c36fff5 100644
20095--- a/arch/x86/include/asm/uaccess_64.h
20096+++ b/arch/x86/include/asm/uaccess_64.h
20097@@ -10,6 +10,9 @@
20098 #include <asm/alternative.h>
20099 #include <asm/cpufeature.h>
20100 #include <asm/page.h>
20101+#include <asm/pgtable.h>
20102+
20103+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20104
20105 /*
20106 * Copy To/From Userspace
20107@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20108 __must_check unsigned long
20109 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20110
20111-static __always_inline __must_check unsigned long
20112-copy_user_generic(void *to, const void *from, unsigned len)
20113+static __always_inline __must_check __size_overflow(3) unsigned long
20114+copy_user_generic(void *to, const void *from, unsigned long len)
20115 {
20116 unsigned ret;
20117
20118@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20119 }
20120
20121 __must_check unsigned long
20122-copy_in_user(void __user *to, const void __user *from, unsigned len);
20123+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20124
20125 static __always_inline __must_check
20126-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20127+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20128 {
20129- int ret = 0;
20130+ size_t sz = __compiletime_object_size(dst);
20131+ unsigned ret = 0;
20132+
20133+ if (size > INT_MAX)
20134+ return size;
20135+
20136+ check_object_size(dst, size, false);
20137+
20138+#ifdef CONFIG_PAX_MEMORY_UDEREF
20139+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20140+ return size;
20141+#endif
20142+
20143+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20144+ if(__builtin_constant_p(size))
20145+ copy_from_user_overflow();
20146+ else
20147+ __copy_from_user_overflow(sz, size);
20148+ return size;
20149+ }
20150
20151 if (!__builtin_constant_p(size))
20152- return copy_user_generic(dst, (__force void *)src, size);
20153+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20154 switch (size) {
20155- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20156+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20157 ret, "b", "b", "=q", 1);
20158 return ret;
20159- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20160+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20161 ret, "w", "w", "=r", 2);
20162 return ret;
20163- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20164+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20165 ret, "l", "k", "=r", 4);
20166 return ret;
20167- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20168+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20169 ret, "q", "", "=r", 8);
20170 return ret;
20171 case 10:
20172- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20173+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20174 ret, "q", "", "=r", 10);
20175 if (unlikely(ret))
20176 return ret;
20177 __get_user_asm(*(u16 *)(8 + (char *)dst),
20178- (u16 __user *)(8 + (char __user *)src),
20179+ (const u16 __user *)(8 + (const char __user *)src),
20180 ret, "w", "w", "=r", 2);
20181 return ret;
20182 case 16:
20183- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20184+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20185 ret, "q", "", "=r", 16);
20186 if (unlikely(ret))
20187 return ret;
20188 __get_user_asm(*(u64 *)(8 + (char *)dst),
20189- (u64 __user *)(8 + (char __user *)src),
20190+ (const u64 __user *)(8 + (const char __user *)src),
20191 ret, "q", "", "=r", 8);
20192 return ret;
20193 default:
20194- return copy_user_generic(dst, (__force void *)src, size);
20195+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20196 }
20197 }
20198
20199 static __always_inline __must_check
20200-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20201+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20202 {
20203 might_fault();
20204 return __copy_from_user_nocheck(dst, src, size);
20205 }
20206
20207 static __always_inline __must_check
20208-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20209+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20210 {
20211- int ret = 0;
20212+ size_t sz = __compiletime_object_size(src);
20213+ unsigned ret = 0;
20214+
20215+ if (size > INT_MAX)
20216+ return size;
20217+
20218+ check_object_size(src, size, true);
20219+
20220+#ifdef CONFIG_PAX_MEMORY_UDEREF
20221+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20222+ return size;
20223+#endif
20224+
20225+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20226+ if(__builtin_constant_p(size))
20227+ copy_to_user_overflow();
20228+ else
20229+ __copy_to_user_overflow(sz, size);
20230+ return size;
20231+ }
20232
20233 if (!__builtin_constant_p(size))
20234- return copy_user_generic((__force void *)dst, src, size);
20235+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20236 switch (size) {
20237- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20238+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20239 ret, "b", "b", "iq", 1);
20240 return ret;
20241- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20242+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20243 ret, "w", "w", "ir", 2);
20244 return ret;
20245- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20246+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20247 ret, "l", "k", "ir", 4);
20248 return ret;
20249- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20250+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20251 ret, "q", "", "er", 8);
20252 return ret;
20253 case 10:
20254- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20255+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20256 ret, "q", "", "er", 10);
20257 if (unlikely(ret))
20258 return ret;
20259 asm("":::"memory");
20260- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20261+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20262 ret, "w", "w", "ir", 2);
20263 return ret;
20264 case 16:
20265- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20266+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20267 ret, "q", "", "er", 16);
20268 if (unlikely(ret))
20269 return ret;
20270 asm("":::"memory");
20271- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20272+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20273 ret, "q", "", "er", 8);
20274 return ret;
20275 default:
20276- return copy_user_generic((__force void *)dst, src, size);
20277+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20278 }
20279 }
20280
20281 static __always_inline __must_check
20282-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20283+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20284 {
20285 might_fault();
20286 return __copy_to_user_nocheck(dst, src, size);
20287 }
20288
20289 static __always_inline __must_check
20290-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20291+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20292 {
20293- int ret = 0;
20294+ unsigned ret = 0;
20295
20296 might_fault();
20297+
20298+ if (size > INT_MAX)
20299+ return size;
20300+
20301+#ifdef CONFIG_PAX_MEMORY_UDEREF
20302+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20303+ return size;
20304+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20305+ return size;
20306+#endif
20307+
20308 if (!__builtin_constant_p(size))
20309- return copy_user_generic((__force void *)dst,
20310- (__force void *)src, size);
20311+ return copy_user_generic((__force_kernel void *)____m(dst),
20312+ (__force_kernel const void *)____m(src), size);
20313 switch (size) {
20314 case 1: {
20315 u8 tmp;
20316- __get_user_asm(tmp, (u8 __user *)src,
20317+ __get_user_asm(tmp, (const u8 __user *)src,
20318 ret, "b", "b", "=q", 1);
20319 if (likely(!ret))
20320 __put_user_asm(tmp, (u8 __user *)dst,
20321@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20322 }
20323 case 2: {
20324 u16 tmp;
20325- __get_user_asm(tmp, (u16 __user *)src,
20326+ __get_user_asm(tmp, (const u16 __user *)src,
20327 ret, "w", "w", "=r", 2);
20328 if (likely(!ret))
20329 __put_user_asm(tmp, (u16 __user *)dst,
20330@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20331
20332 case 4: {
20333 u32 tmp;
20334- __get_user_asm(tmp, (u32 __user *)src,
20335+ __get_user_asm(tmp, (const u32 __user *)src,
20336 ret, "l", "k", "=r", 4);
20337 if (likely(!ret))
20338 __put_user_asm(tmp, (u32 __user *)dst,
20339@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20340 }
20341 case 8: {
20342 u64 tmp;
20343- __get_user_asm(tmp, (u64 __user *)src,
20344+ __get_user_asm(tmp, (const u64 __user *)src,
20345 ret, "q", "", "=r", 8);
20346 if (likely(!ret))
20347 __put_user_asm(tmp, (u64 __user *)dst,
20348@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20349 return ret;
20350 }
20351 default:
20352- return copy_user_generic((__force void *)dst,
20353- (__force void *)src, size);
20354+ return copy_user_generic((__force_kernel void *)____m(dst),
20355+ (__force_kernel const void *)____m(src), size);
20356 }
20357 }
20358
20359-static __must_check __always_inline int
20360-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20361+static __must_check __always_inline unsigned long
20362+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20363 {
20364 return __copy_from_user_nocheck(dst, src, size);
20365 }
20366
20367-static __must_check __always_inline int
20368-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20369+static __must_check __always_inline unsigned long
20370+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20371 {
20372 return __copy_to_user_nocheck(dst, src, size);
20373 }
20374
20375-extern long __copy_user_nocache(void *dst, const void __user *src,
20376- unsigned size, int zerorest);
20377+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20378+ unsigned long size, int zerorest);
20379
20380-static inline int
20381-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20382+static inline unsigned long
20383+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20384 {
20385 might_fault();
20386+
20387+ if (size > INT_MAX)
20388+ return size;
20389+
20390+#ifdef CONFIG_PAX_MEMORY_UDEREF
20391+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20392+ return size;
20393+#endif
20394+
20395 return __copy_user_nocache(dst, src, size, 1);
20396 }
20397
20398-static inline int
20399+static inline unsigned long
20400 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20401- unsigned size)
20402+ unsigned long size)
20403 {
20404+ if (size > INT_MAX)
20405+ return size;
20406+
20407+#ifdef CONFIG_PAX_MEMORY_UDEREF
20408+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20409+ return size;
20410+#endif
20411+
20412 return __copy_user_nocache(dst, src, size, 0);
20413 }
20414
20415 unsigned long
20416-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20417+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20418
20419 #endif /* _ASM_X86_UACCESS_64_H */
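
The 64-bit helpers above previously took "unsigned size", so a 64-bit length was silently truncated to 32 bits at the call boundary; the patch widens the parameters to unsigned long and additionally refuses anything above INT_MAX before it can reach copy_user_generic(). A hedged sketch of the difference; copy_checked() is an illustrative stand-in, and the demo assumes a 64-bit long as on x86-64:

    #include <limits.h>
    #include <stdio.h>

    /* copy_checked() stands in for the widened helpers: the length stays
     * unsigned long all the way down and anything above INT_MAX is refused. */
    static unsigned long copy_checked(unsigned long size)
    {
        if (size > INT_MAX)
            return size;        /* refused: whole request reported uncopied */
        /* ...copy_user_generic()-style work would run here... */
        return 0;
    }

    int main(void)
    {
        unsigned long huge = (1UL << 32) + 16;
        printf("truncated to unsigned int: %u\n", (unsigned int)huge);  /* 16 */
        printf("left uncopied:             %lu\n", copy_checked(huge));
        return 0;
    }
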
20420diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20421index 5b238981..77fdd78 100644
20422--- a/arch/x86/include/asm/word-at-a-time.h
20423+++ b/arch/x86/include/asm/word-at-a-time.h
20424@@ -11,7 +11,7 @@
20425 * and shift, for example.
20426 */
20427 struct word_at_a_time {
20428- const unsigned long one_bits, high_bits;
20429+ unsigned long one_bits, high_bits;
20430 };
20431
20432 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20433diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20434index f58a9c7..dc378042a 100644
20435--- a/arch/x86/include/asm/x86_init.h
20436+++ b/arch/x86/include/asm/x86_init.h
20437@@ -129,7 +129,7 @@ struct x86_init_ops {
20438 struct x86_init_timers timers;
20439 struct x86_init_iommu iommu;
20440 struct x86_init_pci pci;
20441-};
20442+} __no_const;
20443
20444 /**
20445 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20446@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20447 void (*setup_percpu_clockev)(void);
20448 void (*early_percpu_clock_init)(void);
20449 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20450-};
20451+} __no_const;
20452
20453 struct timespec;
20454
20455@@ -168,7 +168,7 @@ struct x86_platform_ops {
20456 void (*save_sched_clock_state)(void);
20457 void (*restore_sched_clock_state)(void);
20458 void (*apic_post_init)(void);
20459-};
20460+} __no_const;
20461
20462 struct pci_dev;
20463 struct msi_msg;
20464@@ -182,7 +182,7 @@ struct x86_msi_ops {
20465 void (*teardown_msi_irqs)(struct pci_dev *dev);
20466 void (*restore_msi_irqs)(struct pci_dev *dev);
20467 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20468-};
20469+} __no_const;
20470
20471 struct IO_APIC_route_entry;
20472 struct io_apic_irq_attr;
20473@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20474 unsigned int destination, int vector,
20475 struct io_apic_irq_attr *attr);
20476 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20477-};
20478+} __no_const;
20479
20480 extern struct x86_init_ops x86_init;
20481 extern struct x86_cpuinit_ops x86_cpuinit;
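
The __no_const annotations above interact with PaX's constify gcc plugin, which makes ops-structure types const (and therefore read-only) by default; __no_const opts these x86 tables out because the kernel legitimately assigns their members during boot. The underlying hardening idea is to get function-pointer tables out of writable memory, and plain C shows the same effect with const; the names below are illustrative:

    #include <stdio.h>

    struct ops {
        void (*handler)(void);
    };

    static void real_handler(void) { puts("handler called"); }

    /* const puts the pointer table in .rodata; overwriting it at runtime
     * faults instead of redirecting execution somewhere new. */
    static const struct ops hardened_ops = { .handler = real_handler };

    int main(void)
    {
        hardened_ops.handler();
        /* hardened_ops.handler = NULL;   <- refused at compile time */
        return 0;
    }
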
20482diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20483index 5eea099..ff7ef8d 100644
20484--- a/arch/x86/include/asm/xen/page.h
20485+++ b/arch/x86/include/asm/xen/page.h
20486@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20487 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20488 * cases needing an extended handling.
20489 */
20490-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20491+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20492 {
20493 unsigned long mfn;
20494
20495diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20496index c9a6d68..cb57f42 100644
20497--- a/arch/x86/include/asm/xsave.h
20498+++ b/arch/x86/include/asm/xsave.h
20499@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20500 if (unlikely(err))
20501 return -EFAULT;
20502
20503+ pax_open_userland();
20504 __asm__ __volatile__(ASM_STAC "\n"
20505- "1:"XSAVE"\n"
20506+ "1:"
20507+ __copyuser_seg
20508+ XSAVE"\n"
20509 "2: " ASM_CLAC "\n"
20510 xstate_fault
20511 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20512 : "memory");
20513+ pax_close_userland();
20514 return err;
20515 }
20516
20517@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20518 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20519 {
20520 int err = 0;
20521- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20522+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20523 u32 lmask = mask;
20524 u32 hmask = mask >> 32;
20525
20526+ pax_open_userland();
20527 __asm__ __volatile__(ASM_STAC "\n"
20528- "1:"XRSTOR"\n"
20529+ "1:"
20530+ __copyuser_seg
20531+ XRSTOR"\n"
20532 "2: " ASM_CLAC "\n"
20533 xstate_fault
20534 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20535 : "memory"); /* memory required? */
20536+ pax_close_userland();
20537 return err;
20538 }
20539
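
The XSAVE/XRSTOR wrappers above gain the same bracketing as the cmpxchg macros earlier: under UDEREF every access to a userland buffer is opened with pax_open_userland(), performed through the __copyuser_seg segment override, and closed again with pax_close_userland(), much like SMAP's stac/clac pair. A generic sketch of the open/access/close idiom, with hypothetical window_open()/window_close() helpers standing in for the real segment and CR3 juggling:

    #include <stddef.h>
    #include <stdio.h>

    static int window_depth;    /* stands in for the CPU/segment state */

    static void window_open(void)  { window_depth++; }   /* pax_open_userland() */
    static void window_close(void) { window_depth--; }   /* pax_close_userland() */

    static void copy_with_window(char *dst, const char *src, size_t n)
    {
        window_open();                    /* permit the "userland" access */
        for (size_t i = 0; i < n; i++)
            dst[i] = src[i];              /* the bracketed access itself */
        window_close();                   /* and forbid it again */
    }

    int main(void)
    {
        char buf[6] = "";
        copy_with_window(buf, "hello", 6);
        printf("%s (window depth back to %d)\n", buf, window_depth);
        return 0;
    }
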
20540diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20541index d993e33..8db1b18 100644
20542--- a/arch/x86/include/uapi/asm/e820.h
20543+++ b/arch/x86/include/uapi/asm/e820.h
20544@@ -58,7 +58,7 @@ struct e820map {
20545 #define ISA_START_ADDRESS 0xa0000
20546 #define ISA_END_ADDRESS 0x100000
20547
20548-#define BIOS_BEGIN 0x000a0000
20549+#define BIOS_BEGIN 0x000c0000
20550 #define BIOS_END 0x00100000
20551
20552 #define BIOS_ROM_BASE 0xffe00000
20553diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20554index 7b0a55a..ad115bf 100644
20555--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20556+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20557@@ -49,7 +49,6 @@
20558 #define EFLAGS 144
20559 #define RSP 152
20560 #define SS 160
20561-#define ARGOFFSET R11
20562 #endif /* __ASSEMBLY__ */
20563
20564 /* top of stack page */
20565diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20566index 5d4502c..a567e09 100644
20567--- a/arch/x86/kernel/Makefile
20568+++ b/arch/x86/kernel/Makefile
20569@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20570 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20571 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20572 obj-y += probe_roms.o
20573-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20574+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20575 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20576 obj-$(CONFIG_X86_64) += mcount_64.o
20577 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20578diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20579index b5ddc96..490b4e4 100644
20580--- a/arch/x86/kernel/acpi/boot.c
20581+++ b/arch/x86/kernel/acpi/boot.c
20582@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20583 * If your system is blacklisted here, but you find that acpi=force
20584 * works for you, please contact linux-acpi@vger.kernel.org
20585 */
20586-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20587+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20588 /*
20589 * Boxes that need ACPI disabled
20590 */
20591@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20592 };
20593
20594 /* second table for DMI checks that should run after early-quirks */
20595-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20596+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20597 /*
20598 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20599 * which includes some code which overrides all temperature
20600diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20601index 3136820..e2c6577 100644
20602--- a/arch/x86/kernel/acpi/sleep.c
20603+++ b/arch/x86/kernel/acpi/sleep.c
20604@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20605 #else /* CONFIG_64BIT */
20606 #ifdef CONFIG_SMP
20607 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20608+
20609+ pax_open_kernel();
20610 early_gdt_descr.address =
20611 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20612+ pax_close_kernel();
20613+
20614 initial_gs = per_cpu_offset(smp_processor_id());
20615 #endif
20616 initial_code = (unsigned long)wakeup_long64;
20617diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20618index 665c6b7..eae4d56 100644
20619--- a/arch/x86/kernel/acpi/wakeup_32.S
20620+++ b/arch/x86/kernel/acpi/wakeup_32.S
20621@@ -29,13 +29,11 @@ wakeup_pmode_return:
20622 # and restore the stack ... but you need gdt for this to work
20623 movl saved_context_esp, %esp
20624
20625- movl %cs:saved_magic, %eax
20626- cmpl $0x12345678, %eax
20627+ cmpl $0x12345678, saved_magic
20628 jne bogus_magic
20629
20630 # jump to place where we left off
20631- movl saved_eip, %eax
20632- jmp *%eax
20633+ jmp *(saved_eip)
20634
20635 bogus_magic:
20636 jmp bogus_magic
20637diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20638index 703130f..27a155d 100644
20639--- a/arch/x86/kernel/alternative.c
20640+++ b/arch/x86/kernel/alternative.c
20641@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20642 */
20643 for (a = start; a < end; a++) {
20644 instr = (u8 *)&a->instr_offset + a->instr_offset;
20645+
20646+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20647+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20648+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20649+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20650+#endif
20651+
20652 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20653 BUG_ON(a->replacementlen > a->instrlen);
20654 BUG_ON(a->instrlen > sizeof(insnbuf));
20655@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20656 add_nops(insnbuf + a->replacementlen,
20657 a->instrlen - a->replacementlen);
20658
20659+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20660+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20661+ instr = ktva_ktla(instr);
20662+#endif
20663+
20664 text_poke_early(instr, insnbuf, a->instrlen);
20665 }
20666 }
20667@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20668 for (poff = start; poff < end; poff++) {
20669 u8 *ptr = (u8 *)poff + *poff;
20670
20671+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20672+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20673+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20674+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20675+#endif
20676+
20677 if (!*poff || ptr < text || ptr >= text_end)
20678 continue;
20679 /* turn DS segment override prefix into lock prefix */
20680- if (*ptr == 0x3e)
20681+ if (*ktla_ktva(ptr) == 0x3e)
20682 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20683 }
20684 mutex_unlock(&text_mutex);
20685@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20686 for (poff = start; poff < end; poff++) {
20687 u8 *ptr = (u8 *)poff + *poff;
20688
20689+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20690+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20691+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20692+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20693+#endif
20694+
20695 if (!*poff || ptr < text || ptr >= text_end)
20696 continue;
20697 /* turn lock prefix into DS segment override prefix */
20698- if (*ptr == 0xf0)
20699+ if (*ktla_ktva(ptr) == 0xf0)
20700 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20701 }
20702 mutex_unlock(&text_mutex);
20703@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20704
20705 BUG_ON(p->len > MAX_PATCH_LEN);
20706 /* prep the buffer with the original instructions */
20707- memcpy(insnbuf, p->instr, p->len);
20708+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20709 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20710 (unsigned long)p->instr, p->len);
20711
20712@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20713 if (!uniproc_patched || num_possible_cpus() == 1)
20714 free_init_pages("SMP alternatives",
20715 (unsigned long)__smp_locks,
20716- (unsigned long)__smp_locks_end);
20717+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20718 #endif
20719
20720 apply_paravirt(__parainstructions, __parainstructions_end);
20721@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20722 * instructions. And on the local CPU you need to be protected again NMI or MCE
20723 * handlers seeing an inconsistent instruction while you patch.
20724 */
20725-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20726+void *__kprobes text_poke_early(void *addr, const void *opcode,
20727 size_t len)
20728 {
20729 unsigned long flags;
20730 local_irq_save(flags);
20731- memcpy(addr, opcode, len);
20732+
20733+ pax_open_kernel();
20734+ memcpy(ktla_ktva(addr), opcode, len);
20735 sync_core();
20736+ pax_close_kernel();
20737+
20738 local_irq_restore(flags);
20739 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20740 that causes hangs on some VIA CPUs. */
20741@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20742 */
20743 void *text_poke(void *addr, const void *opcode, size_t len)
20744 {
20745- unsigned long flags;
20746- char *vaddr;
20747+ unsigned char *vaddr = ktla_ktva(addr);
20748 struct page *pages[2];
20749- int i;
20750+ size_t i;
20751
20752 if (!core_kernel_text((unsigned long)addr)) {
20753- pages[0] = vmalloc_to_page(addr);
20754- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20755+ pages[0] = vmalloc_to_page(vaddr);
20756+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20757 } else {
20758- pages[0] = virt_to_page(addr);
20759+ pages[0] = virt_to_page(vaddr);
20760 WARN_ON(!PageReserved(pages[0]));
20761- pages[1] = virt_to_page(addr + PAGE_SIZE);
20762+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20763 }
20764 BUG_ON(!pages[0]);
20765- local_irq_save(flags);
20766- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20767- if (pages[1])
20768- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20769- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20770- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20771- clear_fixmap(FIX_TEXT_POKE0);
20772- if (pages[1])
20773- clear_fixmap(FIX_TEXT_POKE1);
20774- local_flush_tlb();
20775- sync_core();
20776- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20777- that causes hangs on some VIA CPUs. */
20778+ text_poke_early(addr, opcode, len);
20779 for (i = 0; i < len; i++)
20780- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20781- local_irq_restore(flags);
20782+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20783 return addr;
20784 }
20785
20786@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20787 if (likely(!bp_patching_in_progress))
20788 return 0;
20789
20790- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20791+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20792 return 0;
20793
20794 /* set up the specified breakpoint handler */
20795@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20796 */
20797 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20798 {
20799- unsigned char int3 = 0xcc;
20800+ const unsigned char int3 = 0xcc;
20801
20802 bp_int3_handler = handler;
20803 bp_int3_addr = (u8 *)addr + sizeof(int3);
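
The text_poke() rewrite above drops the fixmap remapping dance entirely: under KERNEXEC the write goes through the ktla_ktva() alias via text_poke_early(), and afterwards every patched byte is read back and compared (the BUG_ON loop) so a silent patch failure cannot slip through. A userspace sketch of the patch-then-verify flow; poke_and_verify() is an illustrative name:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    /* poke_and_verify() mirrors the new flow: write through the (aliased)
     * mapping, then re-read every byte and refuse to continue on mismatch. */
    static void poke_and_verify(unsigned char *addr,
                                const unsigned char *op, size_t len)
    {
        memcpy(addr, op, len);
        for (size_t i = 0; i < len; i++)
            assert(addr[i] == op[i]);     /* the BUG_ON() in the patch */
    }

    int main(void)
    {
        unsigned char text[4] = { 0x90, 0x90, 0x90, 0x90 };  /* NOPs */
        const unsigned char lock = 0xf0;                     /* lock prefix */

        poke_and_verify(text, &lock, 1);
        printf("patched byte: %#x\n", text[0]);
        return 0;
    }
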
20804diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20805index 29b5b18..3bdfc29 100644
20806--- a/arch/x86/kernel/apic/apic.c
20807+++ b/arch/x86/kernel/apic/apic.c
20808@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20809 /*
20810 * Debug level, exported for io_apic.c
20811 */
20812-unsigned int apic_verbosity;
20813+int apic_verbosity;
20814
20815 int pic_mode;
20816
20817@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20818 apic_write(APIC_ESR, 0);
20819 v = apic_read(APIC_ESR);
20820 ack_APIC_irq();
20821- atomic_inc(&irq_err_count);
20822+ atomic_inc_unchecked(&irq_err_count);
20823
20824 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20825 smp_processor_id(), v);
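
The atomic_inc_unchecked() switch above is the PaX REFCOUNT pattern: ordinary atomics trap on signed overflow to stop reference-count wraps, so purely statistical counters like irq_err_count are moved to the _unchecked variant where a wrap is harmless. A standalone sketch of the checked side using GCC/Clang's __builtin_add_overflow; checked_inc() is an illustrative name:

    #include <limits.h>
    #include <stdio.h>

    /* checked_inc() refuses to wrap, the way a PaX REFCOUNT atomic would
     * trap; the _unchecked variants skip exactly this kind of test. */
    static int checked_inc(int *v)
    {
        int next;
        if (__builtin_add_overflow(*v, 1, &next))
            return -1;          /* would wrap: refuse */
        *v = next;
        return 0;
    }

    int main(void)
    {
        int refs = INT_MAX - 1;
        printf("%d\n", checked_inc(&refs));   /* 0: fine  */
        printf("%d\n", checked_inc(&refs));   /* -1: wrap */
        return 0;
    }
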
20826diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20827index de918c4..32eed23 100644
20828--- a/arch/x86/kernel/apic/apic_flat_64.c
20829+++ b/arch/x86/kernel/apic/apic_flat_64.c
20830@@ -154,7 +154,7 @@ static int flat_probe(void)
20831 return 1;
20832 }
20833
20834-static struct apic apic_flat = {
20835+static struct apic apic_flat __read_only = {
20836 .name = "flat",
20837 .probe = flat_probe,
20838 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20839@@ -260,7 +260,7 @@ static int physflat_probe(void)
20840 return 0;
20841 }
20842
20843-static struct apic apic_physflat = {
20844+static struct apic apic_physflat __read_only = {
20845
20846 .name = "physical flat",
20847 .probe = physflat_probe,
20848diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20849index b205cdb..d8503ff 100644
20850--- a/arch/x86/kernel/apic/apic_noop.c
20851+++ b/arch/x86/kernel/apic/apic_noop.c
20852@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20853 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20854 }
20855
20856-struct apic apic_noop = {
20857+struct apic apic_noop __read_only = {
20858 .name = "noop",
20859 .probe = noop_probe,
20860 .acpi_madt_oem_check = NULL,
20861diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20862index c4a8d63..fe893ac 100644
20863--- a/arch/x86/kernel/apic/bigsmp_32.c
20864+++ b/arch/x86/kernel/apic/bigsmp_32.c
20865@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20866 return dmi_bigsmp;
20867 }
20868
20869-static struct apic apic_bigsmp = {
20870+static struct apic apic_bigsmp __read_only = {
20871
20872 .name = "bigsmp",
20873 .probe = probe_bigsmp,
20874diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20875index 3f5f604..309c0e6 100644
20876--- a/arch/x86/kernel/apic/io_apic.c
20877+++ b/arch/x86/kernel/apic/io_apic.c
20878@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20879 return ret;
20880 }
20881
20882-atomic_t irq_mis_count;
20883+atomic_unchecked_t irq_mis_count;
20884
20885 #ifdef CONFIG_GENERIC_PENDING_IRQ
20886 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20887@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
20888 * at the cpu.
20889 */
20890 if (!(v & (1 << (i & 0x1f)))) {
20891- atomic_inc(&irq_mis_count);
20892+ atomic_inc_unchecked(&irq_mis_count);
20893
20894 eoi_ioapic_irq(irq, cfg);
20895 }
20896diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20897index bda4886..f9c7195 100644
20898--- a/arch/x86/kernel/apic/probe_32.c
20899+++ b/arch/x86/kernel/apic/probe_32.c
20900@@ -72,7 +72,7 @@ static int probe_default(void)
20901 return 1;
20902 }
20903
20904-static struct apic apic_default = {
20905+static struct apic apic_default __read_only = {
20906
20907 .name = "default",
20908 .probe = probe_default,
20909diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20910index 6cedd79..023ff8e 100644
20911--- a/arch/x86/kernel/apic/vector.c
20912+++ b/arch/x86/kernel/apic/vector.c
20913@@ -21,7 +21,7 @@
20914
20915 static DEFINE_RAW_SPINLOCK(vector_lock);
20916
20917-void lock_vector_lock(void)
20918+void lock_vector_lock(void) __acquires(vector_lock)
20919 {
20920 /* Used to the online set of cpus does not change
20921 * during assign_irq_vector.
20922@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20923 raw_spin_lock(&vector_lock);
20924 }
20925
20926-void unlock_vector_lock(void)
20927+void unlock_vector_lock(void) __releases(vector_lock)
20928 {
20929 raw_spin_unlock(&vector_lock);
20930 }
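
The __acquires()/__releases() annotations added above expand to nothing for gcc; they exist for sparse's context checker, which uses them to verify that lock_vector_lock() and unlock_vector_lock() stay balanced. A self-contained version of the idiom, with a userspace pthread mutex standing in for the raw spinlock (build with -pthread; run sparse to get the actual checking):

    /* Empty for the compiler, meaningful to sparse's context checker --
     * the same trick the kernel's <linux/compiler.h> uses. */
    #ifdef __CHECKER__
    # define __acquires(x) __attribute__((context(x, 0, 1)))
    # define __releases(x) __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;

    static void lock_vector_lock(void) __acquires(vector_lock)
    {
        pthread_mutex_lock(&vector_lock);
    }

    static void unlock_vector_lock(void) __releases(vector_lock)
    {
        pthread_mutex_unlock(&vector_lock);
    }

    int main(void)
    {
        lock_vector_lock();
        puts("vectors locked; assign away");
        unlock_vector_lock();
        return 0;
    }
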
20931diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20932index e658f21..b695a1a 100644
20933--- a/arch/x86/kernel/apic/x2apic_cluster.c
20934+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20935@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20936 return notifier_from_errno(err);
20937 }
20938
20939-static struct notifier_block __refdata x2apic_cpu_notifier = {
20940+static struct notifier_block x2apic_cpu_notifier = {
20941 .notifier_call = update_clusterinfo,
20942 };
20943
20944@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20945 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20946 }
20947
20948-static struct apic apic_x2apic_cluster = {
20949+static struct apic apic_x2apic_cluster __read_only = {
20950
20951 .name = "cluster x2apic",
20952 .probe = x2apic_cluster_probe,
20953diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20954index 6fae733..5ca17af 100644
20955--- a/arch/x86/kernel/apic/x2apic_phys.c
20956+++ b/arch/x86/kernel/apic/x2apic_phys.c
20957@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20958 return apic == &apic_x2apic_phys;
20959 }
20960
20961-static struct apic apic_x2apic_phys = {
20962+static struct apic apic_x2apic_phys __read_only = {
20963
20964 .name = "physical x2apic",
20965 .probe = x2apic_phys_probe,
20966diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20967index 8e9dcfd..c61b3e4 100644
20968--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20969+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20970@@ -348,7 +348,7 @@ static int uv_probe(void)
20971 return apic == &apic_x2apic_uv_x;
20972 }
20973
20974-static struct apic __refdata apic_x2apic_uv_x = {
20975+static struct apic apic_x2apic_uv_x __read_only = {
20976
20977 .name = "UV large system",
20978 .probe = uv_probe,
20979diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20980index 927ec92..0dc3bd4 100644
20981--- a/arch/x86/kernel/apm_32.c
20982+++ b/arch/x86/kernel/apm_32.c
20983@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20984 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20985 * even though they are called in protected mode.
20986 */
20987-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20988+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20989 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20990
20991 static const char driver_version[] = "1.16ac"; /* no spaces */
20992@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20993 BUG_ON(cpu != 0);
20994 gdt = get_cpu_gdt_table(cpu);
20995 save_desc_40 = gdt[0x40 / 8];
20996+
20997+ pax_open_kernel();
20998 gdt[0x40 / 8] = bad_bios_desc;
20999+ pax_close_kernel();
21000
21001 apm_irq_save(flags);
21002 APM_DO_SAVE_SEGS;
21003@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21004 &call->esi);
21005 APM_DO_RESTORE_SEGS;
21006 apm_irq_restore(flags);
21007+
21008+ pax_open_kernel();
21009 gdt[0x40 / 8] = save_desc_40;
21010+ pax_close_kernel();
21011+
21012 put_cpu();
21013
21014 return call->eax & 0xff;
21015@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21016 BUG_ON(cpu != 0);
21017 gdt = get_cpu_gdt_table(cpu);
21018 save_desc_40 = gdt[0x40 / 8];
21019+
21020+ pax_open_kernel();
21021 gdt[0x40 / 8] = bad_bios_desc;
21022+ pax_close_kernel();
21023
21024 apm_irq_save(flags);
21025 APM_DO_SAVE_SEGS;
21026@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21027 &call->eax);
21028 APM_DO_RESTORE_SEGS;
21029 apm_irq_restore(flags);
21030+
21031+ pax_open_kernel();
21032 gdt[0x40 / 8] = save_desc_40;
21033+ pax_close_kernel();
21034+
21035 put_cpu();
21036 return error;
21037 }
21038@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21039 * code to that CPU.
21040 */
21041 gdt = get_cpu_gdt_table(0);
21042+
21043+ pax_open_kernel();
21044 set_desc_base(&gdt[APM_CS >> 3],
21045 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21046 set_desc_base(&gdt[APM_CS_16 >> 3],
21047 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21048 set_desc_base(&gdt[APM_DS >> 3],
21049 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21050+ pax_close_kernel();
21051
21052 proc_create("apm", 0, NULL, &apm_file_ops);
21053
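
Under KERNEXEC the GDT lives in read-only memory, so apm_32.c's temporary swap of descriptor 0x40 has to be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection around the one sanctioned write. A userspace analogue of the open/write/close pattern using mprotect() on a read-only page (Linux-specific, illustrative only):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        strcpy(p, "apm descriptor");
        mprotect(p, pg, PROT_READ);              /* steady state: read-only */

        mprotect(p, pg, PROT_READ | PROT_WRITE); /* "pax_open_kernel()"  */
        p[0] = 'A';                              /* the sanctioned write */
        mprotect(p, pg, PROT_READ);              /* "pax_close_kernel()" */

        printf("%s\n", p);
        munmap(p, pg);
        return 0;
    }
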
21054diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21055index 9f6b934..cf5ffb3 100644
21056--- a/arch/x86/kernel/asm-offsets.c
21057+++ b/arch/x86/kernel/asm-offsets.c
21058@@ -32,6 +32,8 @@ void common(void) {
21059 OFFSET(TI_flags, thread_info, flags);
21060 OFFSET(TI_status, thread_info, status);
21061 OFFSET(TI_addr_limit, thread_info, addr_limit);
21062+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21063+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21064
21065 BLANK();
21066 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21067@@ -52,8 +54,26 @@ void common(void) {
21068 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21069 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21070 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21071+
21072+#ifdef CONFIG_PAX_KERNEXEC
21073+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21074 #endif
21075
21076+#ifdef CONFIG_PAX_MEMORY_UDEREF
21077+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21078+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21079+#ifdef CONFIG_X86_64
21080+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21081+#endif
21082+#endif
21083+
21084+#endif
21085+
21086+ BLANK();
21087+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21088+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21089+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21090+
21091 #ifdef CONFIG_XEN
21092 BLANK();
21093 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
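
The new TI_lowest_stack and TI_task_thread_sp0 entries above ride the standard asm-offsets machinery: the file is only ever compiled to assembly, and each DEFINE()/OFFSET() plants a "->SYM value" marker in the .s output that a sed script turns into a #define for assembly code to include. A reduced sketch of the mechanism; the struct layout here is invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    struct thread_info {                 /* layout invented for illustration */
        unsigned long flags;
        unsigned long lowest_stack;
    };

    /* Reduced form of the kernel's DEFINE(): the asm plants a "->SYM value"
     * marker in the generated .s file, which a sed script then turns into
     * "#define SYM value" for assembly code to include. */
    #define DEFINE(sym, val) \
        asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

    void common(void)                    /* compiled with -S, never executed */
    {
        DEFINE(TI_lowest_stack, offsetof(struct thread_info, lowest_stack));
    }

    int main(void)
    {
        printf("TI_lowest_stack = %zu\n",
               offsetof(struct thread_info, lowest_stack));
        return 0;
    }
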
21094diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21095index fdcbb4d..036dd93 100644
21096--- a/arch/x86/kernel/asm-offsets_64.c
21097+++ b/arch/x86/kernel/asm-offsets_64.c
21098@@ -80,6 +80,7 @@ int main(void)
21099 BLANK();
21100 #undef ENTRY
21101
21102+ DEFINE(TSS_size, sizeof(struct tss_struct));
21103 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21104 BLANK();
21105
21106diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21107index 80091ae..0c5184f 100644
21108--- a/arch/x86/kernel/cpu/Makefile
21109+++ b/arch/x86/kernel/cpu/Makefile
21110@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21111 CFLAGS_REMOVE_perf_event.o = -pg
21112 endif
21113
21114-# Make sure load_percpu_segment has no stackprotector
21115-nostackp := $(call cc-option, -fno-stack-protector)
21116-CFLAGS_common.o := $(nostackp)
21117-
21118 obj-y := intel_cacheinfo.o scattered.o topology.o
21119 obj-y += common.o
21120 obj-y += rdrand.o
21121diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21122index 15c5df9..d9a604a 100644
21123--- a/arch/x86/kernel/cpu/amd.c
21124+++ b/arch/x86/kernel/cpu/amd.c
21125@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21126 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21127 {
21128 /* AMD errata T13 (order #21922) */
21129- if ((c->x86 == 6)) {
21130+ if (c->x86 == 6) {
21131 /* Duron Rev A0 */
21132 if (c->x86_model == 3 && c->x86_mask == 0)
21133 size = 64;
21134diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21135index c604965..0b0e28a 100644
21136--- a/arch/x86/kernel/cpu/common.c
21137+++ b/arch/x86/kernel/cpu/common.c
21138@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21139
21140 static const struct cpu_dev *this_cpu = &default_cpu;
21141
21142-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21143-#ifdef CONFIG_X86_64
21144- /*
21145- * We need valid kernel segments for data and code in long mode too
21146- * IRET will check the segment types kkeil 2000/10/28
21147- * Also sysret mandates a special GDT layout
21148- *
21149- * TLS descriptors are currently at a different place compared to i386.
21150- * Hopefully nobody expects them at a fixed place (Wine?)
21151- */
21152- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21153- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21154- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21155- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21156- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21157- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21158-#else
21159- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21160- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21161- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21162- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21163- /*
21164- * Segments used for calling PnP BIOS have byte granularity.
21165- * They code segments and data segments have fixed 64k limits,
21166- * the transfer segment sizes are set at run time.
21167- */
21168- /* 32-bit code */
21169- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21170- /* 16-bit code */
21171- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21172- /* 16-bit data */
21173- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21174- /* 16-bit data */
21175- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21176- /* 16-bit data */
21177- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21178- /*
21179- * The APM segments have byte granularity and their bases
21180- * are set at run time. All have 64k limits.
21181- */
21182- /* 32-bit code */
21183- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21184- /* 16-bit code */
21185- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21186- /* data */
21187- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21188-
21189- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21190- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21191- GDT_STACK_CANARY_INIT
21192-#endif
21193-} };
21194-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21195-
21196 static int __init x86_xsave_setup(char *s)
21197 {
21198 if (strlen(s))
21199@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21200 }
21201 }
21202
21203+#ifdef CONFIG_X86_64
21204+static __init int setup_disable_pcid(char *arg)
21205+{
21206+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21207+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21208+
21209+#ifdef CONFIG_PAX_MEMORY_UDEREF
21210+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21211+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21212+#endif
21213+
21214+ return 1;
21215+}
21216+__setup("nopcid", setup_disable_pcid);
21217+
21218+static void setup_pcid(struct cpuinfo_x86 *c)
21219+{
21220+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21221+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21222+
21223+#ifdef CONFIG_PAX_MEMORY_UDEREF
21224+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21225+ pax_open_kernel();
21226+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21227+ pax_close_kernel();
21228+ printk("PAX: slow and weak UDEREF enabled\n");
21229+ } else
21230+ printk("PAX: UDEREF disabled\n");
21231+#endif
21232+
21233+ return;
21234+ }
21235+
21236+ printk("PAX: PCID detected\n");
21237+ set_in_cr4(X86_CR4_PCIDE);
21238+
21239+#ifdef CONFIG_PAX_MEMORY_UDEREF
21240+ pax_open_kernel();
21241+ clone_pgd_mask = ~(pgdval_t)0UL;
21242+ pax_close_kernel();
21243+ if (pax_user_shadow_base)
21244+ printk("PAX: weak UDEREF enabled\n");
21245+ else {
21246+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21247+ printk("PAX: strong UDEREF enabled\n");
21248+ }
21249+#endif
21250+
21251+ if (cpu_has(c, X86_FEATURE_INVPCID))
21252+ printk("PAX: INVPCID detected\n");
21253+}
21254+#endif
21255+
21256 /*
21257 * Some CPU features depend on higher CPUID levels, which may not always
21258 * be available due to CPUID level capping or broken virtualization
21259@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
21260 {
21261 struct desc_ptr gdt_descr;
21262
21263- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21264+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21265 gdt_descr.size = GDT_SIZE - 1;
21266 load_gdt(&gdt_descr);
21267 /* Reload the per-cpu base */
21268@@ -895,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21269 setup_smep(c);
21270 setup_smap(c);
21271
21272+#ifdef CONFIG_X86_64
21273+ setup_pcid(c);
21274+#endif
21275+
21276 /*
21277 * The vendor-specific functions might have changed features.
21278 * Now we do "generic changes."
21279@@ -903,6 +906,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21280 /* Filter out anything that depends on CPUID levels we don't have */
21281 filter_cpuid_features(c, true);
21282
21283+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21284+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21285+#endif
21286+
21287 /* If the model name is still unset, do table lookup. */
21288 if (!c->x86_model_id[0]) {
21289 const char *p;
21290@@ -977,7 +984,7 @@ static void syscall32_cpu_init(void)
21291 void enable_sep_cpu(void)
21292 {
21293 int cpu = get_cpu();
21294- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21295+ struct tss_struct *tss = init_tss + cpu;
21296
21297 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21298 put_cpu();
21299@@ -1115,14 +1122,16 @@ static __init int setup_disablecpuid(char *arg)
21300 }
21301 __setup("clearcpuid=", setup_disablecpuid);
21302
21303+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21304+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21305+
21306 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21307- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21308+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21309 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21310
21311 #ifdef CONFIG_X86_64
21312-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21313-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21314- (unsigned long) debug_idt_table };
21315+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21316+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21317
21318 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21319 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21320@@ -1299,7 +1308,7 @@ void cpu_init(void)
21321 */
21322 load_ucode_ap();
21323
21324- t = &per_cpu(init_tss, cpu);
21325+ t = init_tss + cpu;
21326 oist = &per_cpu(orig_ist, cpu);
21327
21328 #ifdef CONFIG_NUMA
21329@@ -1331,7 +1340,6 @@ void cpu_init(void)
21330 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21331 barrier();
21332
21333- x86_configure_nx();
21334 enable_x2apic();
21335
21336 /*
21337@@ -1383,7 +1391,7 @@ void cpu_init(void)
21338 {
21339 int cpu = smp_processor_id();
21340 struct task_struct *curr = current;
21341- struct tss_struct *t = &per_cpu(init_tss, cpu);
21342+ struct tss_struct *t = init_tss + cpu;
21343 struct thread_struct *thread = &curr->thread;
21344
21345 wait_for_master_cpu(cpu);
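
The setup_pcid() hunk above picks the UDEREF flavour from CPU feature bits: without PCID the kernel falls back to the slow shadow-base variant, with PCID it enables strong UDEREF via per-context TLB tags, and INVPCID support is reported on top. A collapsed sketch of that decision tree, with the nopcid boot parameter and the shadow-base special cases deliberately omitted:

    #include <stdbool.h>
    #include <stdio.h>

    static void setup_pcid(bool has_pcid, bool has_invpcid)
    {
        if (!has_pcid) {                 /* fallback: shadow-base UDEREF */
            puts("PAX: slow and weak UDEREF enabled");
            return;
        }
        puts("PAX: PCID detected");      /* per-context TLB tagging available */
        puts("PAX: strong UDEREF enabled");
        if (has_invpcid)
            puts("PAX: INVPCID detected");
    }

    int main(void)
    {
        setup_pcid(false, false);
        setup_pcid(true, true);
        return 0;
    }
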
21346diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21347index c703507..28535e3 100644
21348--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21349+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21350@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21351 };
21352
21353 #ifdef CONFIG_AMD_NB
21354+static struct attribute *default_attrs_amd_nb[] = {
21355+ &type.attr,
21356+ &level.attr,
21357+ &coherency_line_size.attr,
21358+ &physical_line_partition.attr,
21359+ &ways_of_associativity.attr,
21360+ &number_of_sets.attr,
21361+ &size.attr,
21362+ &shared_cpu_map.attr,
21363+ &shared_cpu_list.attr,
21364+ NULL,
21365+ NULL,
21366+ NULL,
21367+ NULL
21368+};
21369+
21370 static struct attribute **amd_l3_attrs(void)
21371 {
21372 static struct attribute **attrs;
21373@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21374
21375 n = ARRAY_SIZE(default_attrs);
21376
21377- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21378- n += 2;
21379-
21380- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21381- n += 1;
21382-
21383- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21384- if (attrs == NULL)
21385- return attrs = default_attrs;
21386-
21387- for (n = 0; default_attrs[n]; n++)
21388- attrs[n] = default_attrs[n];
21389+ attrs = default_attrs_amd_nb;
21390
21391 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21392 attrs[n++] = &cache_disable_0.attr;
21393@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21394 .default_attrs = default_attrs,
21395 };
21396
21397+#ifdef CONFIG_AMD_NB
21398+static struct kobj_type ktype_cache_amd_nb = {
21399+ .sysfs_ops = &sysfs_ops,
21400+ .default_attrs = default_attrs_amd_nb,
21401+};
21402+#endif
21403+
21404 static struct kobj_type ktype_percpu_entry = {
21405 .sysfs_ops = &sysfs_ops,
21406 };
21407@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21408 return retval;
21409 }
21410
21411+#ifdef CONFIG_AMD_NB
21412+ amd_l3_attrs();
21413+#endif
21414+
21415 for (i = 0; i < num_cache_leaves; i++) {
21416+ struct kobj_type *ktype;
21417+
21418 this_object = INDEX_KOBJECT_PTR(cpu, i);
21419 this_object->cpu = cpu;
21420 this_object->index = i;
21421
21422 this_leaf = CPUID4_INFO_IDX(cpu, i);
21423
21424- ktype_cache.default_attrs = default_attrs;
21425+ ktype = &ktype_cache;
21426 #ifdef CONFIG_AMD_NB
21427 if (this_leaf->base.nb)
21428- ktype_cache.default_attrs = amd_l3_attrs();
21429+ ktype = &ktype_cache_amd_nb;
21430 #endif
21431 retval = kobject_init_and_add(&(this_object->kobj),
21432- &ktype_cache,
21433+ ktype,
21434 per_cpu(ici_cache_kobject, cpu),
21435 "index%1lu", i);
21436 if (unlikely(retval)) {
21437diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21438index d2c6116..62fd7aa 100644
21439--- a/arch/x86/kernel/cpu/mcheck/mce.c
21440+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21441@@ -45,6 +45,7 @@
21442 #include <asm/processor.h>
21443 #include <asm/mce.h>
21444 #include <asm/msr.h>
21445+#include <asm/local.h>
21446
21447 #include "mce-internal.h"
21448
21449@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21450 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21451 m->cs, m->ip);
21452
21453- if (m->cs == __KERNEL_CS)
21454+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21455 print_symbol("{%s}", m->ip);
21456 pr_cont("\n");
21457 }
21458@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21459
21460 #define PANIC_TIMEOUT 5 /* 5 seconds */
21461
21462-static atomic_t mce_panicked;
21463+static atomic_unchecked_t mce_panicked;
21464
21465 static int fake_panic;
21466-static atomic_t mce_fake_panicked;
21467+static atomic_unchecked_t mce_fake_panicked;
21468
21469 /* Panic in progress. Enable interrupts and wait for final IPI */
21470 static void wait_for_panic(void)
21471@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21472 /*
21473 * Make sure only one CPU runs in machine check panic
21474 */
21475- if (atomic_inc_return(&mce_panicked) > 1)
21476+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21477 wait_for_panic();
21478 barrier();
21479
21480@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21481 console_verbose();
21482 } else {
21483 /* Don't log too much for fake panic */
21484- if (atomic_inc_return(&mce_fake_panicked) > 1)
21485+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21486 return;
21487 }
21488 /* First print corrected ones that are still unlogged */
21489@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21490 if (!fake_panic) {
21491 if (panic_timeout == 0)
21492 panic_timeout = mca_cfg.panic_timeout;
21493- panic(msg);
21494+ panic("%s", msg);
21495 } else
21496 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21497 }
21498@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
21499 * might have been modified by someone else.
21500 */
21501 rmb();
21502- if (atomic_read(&mce_panicked))
21503+ if (atomic_read_unchecked(&mce_panicked))
21504 wait_for_panic();
21505 if (!mca_cfg.monarch_timeout)
21506 goto out;
21507@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21508 }
21509
21510 /* Call the installed machine check handler for this CPU setup. */
21511-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21512+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21513 unexpected_machine_check;
21514
21515 /*
21516@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21517 return;
21518 }
21519
21520+ pax_open_kernel();
21521 machine_check_vector = do_machine_check;
21522+ pax_close_kernel();
21523
21524 __mcheck_cpu_init_generic();
21525 __mcheck_cpu_init_vendor(c);
21526@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21527 */
21528
21529 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21530-static int mce_chrdev_open_count; /* #times opened */
21531+static local_t mce_chrdev_open_count; /* #times opened */
21532 static int mce_chrdev_open_exclu; /* already open exclusive? */
21533
21534 static int mce_chrdev_open(struct inode *inode, struct file *file)
21535@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21536 spin_lock(&mce_chrdev_state_lock);
21537
21538 if (mce_chrdev_open_exclu ||
21539- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21540+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21541 spin_unlock(&mce_chrdev_state_lock);
21542
21543 return -EBUSY;
21544@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21545
21546 if (file->f_flags & O_EXCL)
21547 mce_chrdev_open_exclu = 1;
21548- mce_chrdev_open_count++;
21549+ local_inc(&mce_chrdev_open_count);
21550
21551 spin_unlock(&mce_chrdev_state_lock);
21552
21553@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21554 {
21555 spin_lock(&mce_chrdev_state_lock);
21556
21557- mce_chrdev_open_count--;
21558+ local_dec(&mce_chrdev_open_count);
21559 mce_chrdev_open_exclu = 0;
21560
21561 spin_unlock(&mce_chrdev_state_lock);
21562@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
21563
21564 for (i = 0; i < mca_cfg.banks; i++) {
21565 struct mce_bank *b = &mce_banks[i];
21566- struct device_attribute *a = &b->attr;
21567+ device_attribute_no_const *a = &b->attr;
21568
21569 sysfs_attr_init(&a->attr);
21570 a->attr.name = b->attrname;
21571@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
21572 static void mce_reset(void)
21573 {
21574 cpu_missing = 0;
21575- atomic_set(&mce_fake_panicked, 0);
21576+ atomic_set_unchecked(&mce_fake_panicked, 0);
21577 atomic_set(&mce_executing, 0);
21578 atomic_set(&mce_callin, 0);
21579 atomic_set(&global_nwo, 0);
21580diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21581index a304298..49b6d06 100644
21582--- a/arch/x86/kernel/cpu/mcheck/p5.c
21583+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21584@@ -10,6 +10,7 @@
21585 #include <asm/processor.h>
21586 #include <asm/mce.h>
21587 #include <asm/msr.h>
21588+#include <asm/pgtable.h>
21589
21590 /* By default disabled */
21591 int mce_p5_enabled __read_mostly;
21592@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21593 if (!cpu_has(c, X86_FEATURE_MCE))
21594 return;
21595
21596+ pax_open_kernel();
21597 machine_check_vector = pentium_machine_check;
21598+ pax_close_kernel();
21599 /* Make sure the vector pointer is visible before we enable MCEs: */
21600 wmb();
21601
21602diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21603index 7dc5564..1273569 100644
21604--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21605+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21606@@ -9,6 +9,7 @@
21607 #include <asm/processor.h>
21608 #include <asm/mce.h>
21609 #include <asm/msr.h>
21610+#include <asm/pgtable.h>
21611
21612 /* Machine check handler for WinChip C6: */
21613 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21614@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21615 {
21616 u32 lo, hi;
21617
21618+ pax_open_kernel();
21619 machine_check_vector = winchip_machine_check;
21620+ pax_close_kernel();
21621 /* Make sure the vector pointer is visible before we enable MCEs: */
21622 wmb();
21623
21624diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21625index 36a8361..e7058c2 100644
21626--- a/arch/x86/kernel/cpu/microcode/core.c
21627+++ b/arch/x86/kernel/cpu/microcode/core.c
21628@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21629 return NOTIFY_OK;
21630 }
21631
21632-static struct notifier_block __refdata mc_cpu_notifier = {
21633+static struct notifier_block mc_cpu_notifier = {
21634 .notifier_call = mc_cpu_callback,
21635 };
21636
21637diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21638index c6826d1..8dc677e 100644
21639--- a/arch/x86/kernel/cpu/microcode/intel.c
21640+++ b/arch/x86/kernel/cpu/microcode/intel.c
21641@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
21642 struct microcode_header_intel mc_header;
21643 unsigned int mc_size;
21644
21645+ if (leftover < sizeof(mc_header)) {
21646+ pr_err("error! Truncated header in microcode data file\n");
21647+ break;
21648+ }
21649+
21650 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
21651 break;
21652
21653@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21654
21655 static int get_ucode_user(void *to, const void *from, size_t n)
21656 {
21657- return copy_from_user(to, from, n);
21658+ return copy_from_user(to, (const void __force_user *)from, n);
21659 }
21660
21661 static enum ucode_state
21662 request_microcode_user(int cpu, const void __user *buf, size_t size)
21663 {
21664- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21665+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21666 }
21667
21668 static void microcode_fini_cpu(int cpu)
21669diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
21670index ec9df6f..420eb93 100644
21671--- a/arch/x86/kernel/cpu/microcode/intel_early.c
21672+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
21673@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
21674 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
21675 int i;
21676
21677- while (leftover) {
21678+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
21679+
21680+ if (leftover < sizeof(mc_header))
21681+ break;
21682+
21683 mc_header = (struct microcode_header_intel *)ucode_ptr;
21684
21685 mc_size = get_totalsize(mc_header);
21686diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21687index ea5f363..cb0e905 100644
21688--- a/arch/x86/kernel/cpu/mtrr/main.c
21689+++ b/arch/x86/kernel/cpu/mtrr/main.c
21690@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21691 u64 size_or_mask, size_and_mask;
21692 static bool mtrr_aps_delayed_init;
21693
21694-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21695+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21696
21697 const struct mtrr_ops *mtrr_if;
21698
21699diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21700index df5e41f..816c719 100644
21701--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21702+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21703@@ -25,7 +25,7 @@ struct mtrr_ops {
21704 int (*validate_add_page)(unsigned long base, unsigned long size,
21705 unsigned int type);
21706 int (*have_wrcomb)(void);
21707-};
21708+} __do_const;
21709
21710 extern int generic_get_free_region(unsigned long base, unsigned long size,
21711 int replace_reg);
21712diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21713index 143e5f5..5825081 100644
21714--- a/arch/x86/kernel/cpu/perf_event.c
21715+++ b/arch/x86/kernel/cpu/perf_event.c
21716@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
21717
21718 }
21719
21720-static struct attribute_group x86_pmu_format_group = {
21721+static attribute_group_no_const x86_pmu_format_group = {
21722 .name = "format",
21723 .attrs = NULL,
21724 };
21725@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
21726 NULL,
21727 };
21728
21729-static struct attribute_group x86_pmu_events_group = {
21730+static attribute_group_no_const x86_pmu_events_group = {
21731 .name = "events",
21732 .attrs = events_attr,
21733 };
21734@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
21735 if (idx > GDT_ENTRIES)
21736 return 0;
21737
21738- desc = raw_cpu_ptr(gdt_page.gdt);
21739+ desc = get_cpu_gdt_table(smp_processor_id());
21740 }
21741
21742 return get_desc_base(desc + idx);
21743@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21744 break;
21745
21746 perf_callchain_store(entry, frame.return_address);
21747- fp = frame.next_frame;
21748+ fp = (const void __force_user *)frame.next_frame;
21749 }
21750 }
21751
21752diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21753index 97242a9..cf9c30e 100644
21754--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21755+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21756@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21757 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21758 {
21759 struct attribute **attrs;
21760- struct attribute_group *attr_group;
21761+ attribute_group_no_const *attr_group;
21762 int i = 0, j;
21763
21764 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21765diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21766index 498b6d9..4126515 100644
21767--- a/arch/x86/kernel/cpu/perf_event_intel.c
21768+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21769@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21770 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21771
21772 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21773- u64 capabilities;
21774+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21775
21776- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21777- x86_pmu.intel_cap.capabilities = capabilities;
21778+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21779+ x86_pmu.intel_cap.capabilities = capabilities;
21780 }
21781
21782 intel_ds_init();
21783diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21784index c4bb8b8..9f7384d 100644
21785--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21786+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21787@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21788 NULL,
21789 };
21790
21791-static struct attribute_group rapl_pmu_events_group = {
21792+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21793 .name = "events",
21794 .attrs = NULL, /* patched at runtime */
21795 };
21796diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21797index c635b8b..b78835e 100644
21798--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21799+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21800@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21801 static int __init uncore_type_init(struct intel_uncore_type *type)
21802 {
21803 struct intel_uncore_pmu *pmus;
21804- struct attribute_group *attr_group;
21805+ attribute_group_no_const *attr_group;
21806 struct attribute **attrs;
21807 int i, j;
21808
21809diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21810index 6c8c1e7..515b98a 100644
21811--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21812+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21813@@ -114,7 +114,7 @@ struct intel_uncore_box {
21814 struct uncore_event_desc {
21815 struct kobj_attribute attr;
21816 const char *config;
21817-};
21818+} __do_const;
21819
21820 ssize_t uncore_event_show(struct kobject *kobj,
21821 struct kobj_attribute *attr, char *buf);
21822diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21823index 83741a7..bd3507d 100644
21824--- a/arch/x86/kernel/cpuid.c
21825+++ b/arch/x86/kernel/cpuid.c
21826@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21827 return notifier_from_errno(err);
21828 }
21829
21830-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21831+static struct notifier_block cpuid_class_cpu_notifier =
21832 {
21833 .notifier_call = cpuid_class_cpu_callback,
21834 };
21835diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21836index aceb2f9..c76d3e3 100644
21837--- a/arch/x86/kernel/crash.c
21838+++ b/arch/x86/kernel/crash.c
21839@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21840 #ifdef CONFIG_X86_32
21841 struct pt_regs fixed_regs;
21842
21843- if (!user_mode_vm(regs)) {
21844+ if (!user_mode(regs)) {
21845 crash_fixup_ss_esp(&fixed_regs, regs);
21846 regs = &fixed_regs;
21847 }
21848diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21849index afa64ad..dce67dd 100644
21850--- a/arch/x86/kernel/crash_dump_64.c
21851+++ b/arch/x86/kernel/crash_dump_64.c
21852@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21853 return -ENOMEM;
21854
21855 if (userbuf) {
21856- if (copy_to_user(buf, vaddr + offset, csize)) {
21857+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21858 iounmap(vaddr);
21859 return -EFAULT;
21860 }
21861diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21862index f6dfd93..892ade4 100644
21863--- a/arch/x86/kernel/doublefault.c
21864+++ b/arch/x86/kernel/doublefault.c
21865@@ -12,7 +12,7 @@
21866
21867 #define DOUBLEFAULT_STACKSIZE (1024)
21868 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21869-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21870+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21871
21872 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21873
21874@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21875 unsigned long gdt, tss;
21876
21877 native_store_gdt(&gdt_desc);
21878- gdt = gdt_desc.address;
21879+ gdt = (unsigned long)gdt_desc.address;
21880
21881 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21882
21883@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21884 /* 0x2 bit is always set */
21885 .flags = X86_EFLAGS_SF | 0x2,
21886 .sp = STACK_START,
21887- .es = __USER_DS,
21888+ .es = __KERNEL_DS,
21889 .cs = __KERNEL_CS,
21890 .ss = __KERNEL_DS,
21891- .ds = __USER_DS,
21892+ .ds = __KERNEL_DS,
21893 .fs = __KERNEL_PERCPU,
21894
21895 .__cr3 = __pa_nodebug(swapper_pg_dir),
21896diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21897index b74ebc7..2c95874 100644
21898--- a/arch/x86/kernel/dumpstack.c
21899+++ b/arch/x86/kernel/dumpstack.c
21900@@ -2,6 +2,9 @@
21901 * Copyright (C) 1991, 1992 Linus Torvalds
21902 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21903 */
21904+#ifdef CONFIG_GRKERNSEC_HIDESYM
21905+#define __INCLUDED_BY_HIDESYM 1
21906+#endif
21907 #include <linux/kallsyms.h>
21908 #include <linux/kprobes.h>
21909 #include <linux/uaccess.h>
21910@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21911
21912 void printk_address(unsigned long address)
21913 {
21914- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21915+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21916 }
21917
21918 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21919 static void
21920 print_ftrace_graph_addr(unsigned long addr, void *data,
21921 const struct stacktrace_ops *ops,
21922- struct thread_info *tinfo, int *graph)
21923+ struct task_struct *task, int *graph)
21924 {
21925- struct task_struct *task;
21926 unsigned long ret_addr;
21927 int index;
21928
21929 if (addr != (unsigned long)return_to_handler)
21930 return;
21931
21932- task = tinfo->task;
21933 index = task->curr_ret_stack;
21934
21935 if (!task->ret_stack || index < *graph)
21936@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21937 static inline void
21938 print_ftrace_graph_addr(unsigned long addr, void *data,
21939 const struct stacktrace_ops *ops,
21940- struct thread_info *tinfo, int *graph)
21941+ struct task_struct *task, int *graph)
21942 { }
21943 #endif
21944
21945@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21946 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21947 */
21948
21949-static inline int valid_stack_ptr(struct thread_info *tinfo,
21950- void *p, unsigned int size, void *end)
21951+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21952 {
21953- void *t = tinfo;
21954 if (end) {
21955 if (p < end && p >= (end-THREAD_SIZE))
21956 return 1;
21957@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21958 }
21959
21960 unsigned long
21961-print_context_stack(struct thread_info *tinfo,
21962+print_context_stack(struct task_struct *task, void *stack_start,
21963 unsigned long *stack, unsigned long bp,
21964 const struct stacktrace_ops *ops, void *data,
21965 unsigned long *end, int *graph)
21966 {
21967 struct stack_frame *frame = (struct stack_frame *)bp;
21968
21969- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21970+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21971 unsigned long addr;
21972
21973 addr = *stack;
21974@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21975 } else {
21976 ops->address(data, addr, 0);
21977 }
21978- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21979+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21980 }
21981 stack++;
21982 }
21983@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21984 EXPORT_SYMBOL_GPL(print_context_stack);
21985
21986 unsigned long
21987-print_context_stack_bp(struct thread_info *tinfo,
21988+print_context_stack_bp(struct task_struct *task, void *stack_start,
21989 unsigned long *stack, unsigned long bp,
21990 const struct stacktrace_ops *ops, void *data,
21991 unsigned long *end, int *graph)
21992@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21993 struct stack_frame *frame = (struct stack_frame *)bp;
21994 unsigned long *ret_addr = &frame->return_address;
21995
21996- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21997+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21998 unsigned long addr = *ret_addr;
21999
22000 if (!__kernel_text_address(addr))
22001@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22002 ops->address(data, addr, 1);
22003 frame = frame->next_frame;
22004 ret_addr = &frame->return_address;
22005- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22006+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22007 }
22008
22009 return (unsigned long)frame;
22010@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22011 static void print_trace_address(void *data, unsigned long addr, int reliable)
22012 {
22013 touch_nmi_watchdog();
22014- printk(data);
22015+ printk("%s", (char *)data);
22016 printk_stack_address(addr, reliable);
22017 }
22018
22019@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22020 EXPORT_SYMBOL_GPL(oops_begin);
22021 NOKPROBE_SYMBOL(oops_begin);
22022
22023+extern void gr_handle_kernel_exploit(void);
22024+
22025 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22026 {
22027 if (regs && kexec_should_crash(current))
22028@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22029 panic("Fatal exception in interrupt");
22030 if (panic_on_oops)
22031 panic("Fatal exception");
22032- do_exit(signr);
22033+
22034+ gr_handle_kernel_exploit();
22035+
22036+ do_group_exit(signr);
22037 }
22038 NOKPROBE_SYMBOL(oops_end);
22039
22040@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22041 print_modules();
22042 show_regs(regs);
22043 #ifdef CONFIG_X86_32
22044- if (user_mode_vm(regs)) {
22045+ if (user_mode(regs)) {
22046 sp = regs->sp;
22047 ss = regs->ss & 0xffff;
22048 } else {
22049@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22050 unsigned long flags = oops_begin();
22051 int sig = SIGSEGV;
22052
22053- if (!user_mode_vm(regs))
22054+ if (!user_mode(regs))
22055 report_bug(regs->ip, regs);
22056
22057 if (__die(str, regs, err))
22058diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22059index 5abd4cd..c65733b 100644
22060--- a/arch/x86/kernel/dumpstack_32.c
22061+++ b/arch/x86/kernel/dumpstack_32.c
22062@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22063 bp = stack_frame(task, regs);
22064
22065 for (;;) {
22066- struct thread_info *context;
22067+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22068 void *end_stack;
22069
22070 end_stack = is_hardirq_stack(stack, cpu);
22071 if (!end_stack)
22072 end_stack = is_softirq_stack(stack, cpu);
22073
22074- context = task_thread_info(task);
22075- bp = ops->walk_stack(context, stack, bp, ops, data,
22076+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22077 end_stack, &graph);
22078
22079 /* Stop if not on irq stack */
22080@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22081 int i;
22082
22083 show_regs_print_info(KERN_EMERG);
22084- __show_regs(regs, !user_mode_vm(regs));
22085+ __show_regs(regs, !user_mode(regs));
22086
22087 /*
22088 * When in-kernel, we also print out the stack and code at the
22089 * time of the fault..
22090 */
22091- if (!user_mode_vm(regs)) {
22092+ if (!user_mode(regs)) {
22093 unsigned int code_prologue = code_bytes * 43 / 64;
22094 unsigned int code_len = code_bytes;
22095 unsigned char c;
22096 u8 *ip;
22097+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22098
22099 pr_emerg("Stack:\n");
22100 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22101
22102 pr_emerg("Code:");
22103
22104- ip = (u8 *)regs->ip - code_prologue;
22105+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22106 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22107 /* try starting at IP */
22108- ip = (u8 *)regs->ip;
22109+ ip = (u8 *)regs->ip + cs_base;
22110 code_len = code_len - code_prologue + 1;
22111 }
22112 for (i = 0; i < code_len; i++, ip++) {
22113@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22114 pr_cont(" Bad EIP value.");
22115 break;
22116 }
22117- if (ip == (u8 *)regs->ip)
22118+ if (ip == (u8 *)regs->ip + cs_base)
22119 pr_cont(" <%02x>", c);
22120 else
22121 pr_cont(" %02x", c);
22122@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22123 {
22124 unsigned short ud2;
22125
22126+ ip = ktla_ktva(ip);
22127 if (ip < PAGE_OFFSET)
22128 return 0;
22129 if (probe_kernel_address((unsigned short *)ip, ud2))
22130@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22131
22132 return ud2 == 0x0b0f;
22133 }
22134+
22135+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22136+void pax_check_alloca(unsigned long size)
22137+{
22138+ unsigned long sp = (unsigned long)&sp, stack_left;
22139+
22140+ /* all kernel stacks are of the same size */
22141+ stack_left = sp & (THREAD_SIZE - 1);
22142+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22143+}
22144+EXPORT_SYMBOL(pax_check_alloca);
22145+#endif
22146diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22147index ff86f19..73eabf4 100644
22148--- a/arch/x86/kernel/dumpstack_64.c
22149+++ b/arch/x86/kernel/dumpstack_64.c
22150@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22151 const struct stacktrace_ops *ops, void *data)
22152 {
22153 const unsigned cpu = get_cpu();
22154- struct thread_info *tinfo;
22155 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22156 unsigned long dummy;
22157 unsigned used = 0;
22158 int graph = 0;
22159 int done = 0;
22160+ void *stack_start;
22161
22162 if (!task)
22163 task = current;
22164@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22165 * current stack address. If the stacks consist of nested
22166 * exceptions
22167 */
22168- tinfo = task_thread_info(task);
22169 while (!done) {
22170 unsigned long *stack_end;
22171 enum stack_type stype;
22172@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22173 if (ops->stack(data, id) < 0)
22174 break;
22175
22176- bp = ops->walk_stack(tinfo, stack, bp, ops,
22177+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22178 data, stack_end, &graph);
22179 ops->stack(data, "<EOE>");
22180 /*
22181@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22182 * second-to-last pointer (index -2 to end) in the
22183 * exception stack:
22184 */
22185+ if ((u16)stack_end[-1] != __KERNEL_DS)
22186+ goto out;
22187 stack = (unsigned long *) stack_end[-2];
22188 done = 0;
22189 break;
22190@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22191
22192 if (ops->stack(data, "IRQ") < 0)
22193 break;
22194- bp = ops->walk_stack(tinfo, stack, bp,
22195+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22196 ops, data, stack_end, &graph);
22197 /*
22198 * We link to the next stack (which would be
22199@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22200 /*
22201 * This handles the process stack:
22202 */
22203- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22204+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22205+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22206+out:
22207 put_cpu();
22208 }
22209 EXPORT_SYMBOL(dump_trace);
22210@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22211 {
22212 unsigned short ud2;
22213
22214- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22215+ if (probe_kernel_address((unsigned short *)ip, ud2))
22216 return 0;
22217
22218 return ud2 == 0x0b0f;
22219 }
22220+
22221+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22222+void pax_check_alloca(unsigned long size)
22223+{
22224+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22225+ unsigned cpu, used;
22226+ char *id;
22227+
22228+ /* check the process stack first */
22229+ stack_start = (unsigned long)task_stack_page(current);
22230+ stack_end = stack_start + THREAD_SIZE;
22231+ if (likely(stack_start <= sp && sp < stack_end)) {
22232+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22233+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22234+ return;
22235+ }
22236+
22237+ cpu = get_cpu();
22238+
22239+ /* check the irq stacks */
22240+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22241+ stack_start = stack_end - IRQ_STACK_SIZE;
22242+ if (stack_start <= sp && sp < stack_end) {
22243+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22244+ put_cpu();
22245+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22246+ return;
22247+ }
22248+
22249+ /* check the exception stacks */
22250+ used = 0;
22251+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22252+ stack_start = stack_end - EXCEPTION_STKSZ;
22253+ if (stack_end && stack_start <= sp && sp < stack_end) {
22254+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22255+ put_cpu();
22256+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22257+ return;
22258+ }
22259+
22260+ put_cpu();
22261+
22262+ /* unknown stack */
22263+ BUG();
22264+}
22265+EXPORT_SYMBOL(pax_check_alloca);
22266+#endif
22267diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22268index dd2f07a..845dc05 100644
22269--- a/arch/x86/kernel/e820.c
22270+++ b/arch/x86/kernel/e820.c
22271@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22272
22273 static void early_panic(char *msg)
22274 {
22275- early_printk(msg);
22276- panic(msg);
22277+ early_printk("%s", msg);
22278+ panic("%s", msg);
22279 }
22280
22281 static int userdef __initdata;
22282diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22283index 01d1c18..8073693 100644
22284--- a/arch/x86/kernel/early_printk.c
22285+++ b/arch/x86/kernel/early_printk.c
22286@@ -7,6 +7,7 @@
22287 #include <linux/pci_regs.h>
22288 #include <linux/pci_ids.h>
22289 #include <linux/errno.h>
22290+#include <linux/sched.h>
22291 #include <asm/io.h>
22292 #include <asm/processor.h>
22293 #include <asm/fcntl.h>
22294diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22295index 000d419..8f66802 100644
22296--- a/arch/x86/kernel/entry_32.S
22297+++ b/arch/x86/kernel/entry_32.S
22298@@ -177,13 +177,154 @@
22299 /*CFI_REL_OFFSET gs, PT_GS*/
22300 .endm
22301 .macro SET_KERNEL_GS reg
22302+
22303+#ifdef CONFIG_CC_STACKPROTECTOR
22304 movl $(__KERNEL_STACK_CANARY), \reg
22305+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22306+ movl $(__USER_DS), \reg
22307+#else
22308+ xorl \reg, \reg
22309+#endif
22310+
22311 movl \reg, %gs
22312 .endm
22313
22314 #endif /* CONFIG_X86_32_LAZY_GS */
22315
22316-.macro SAVE_ALL
22317+.macro pax_enter_kernel
22318+#ifdef CONFIG_PAX_KERNEXEC
22319+ call pax_enter_kernel
22320+#endif
22321+.endm
22322+
22323+.macro pax_exit_kernel
22324+#ifdef CONFIG_PAX_KERNEXEC
22325+ call pax_exit_kernel
22326+#endif
22327+.endm
22328+
22329+#ifdef CONFIG_PAX_KERNEXEC
22330+ENTRY(pax_enter_kernel)
22331+#ifdef CONFIG_PARAVIRT
22332+ pushl %eax
22333+ pushl %ecx
22334+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22335+ mov %eax, %esi
22336+#else
22337+ mov %cr0, %esi
22338+#endif
22339+ bts $16, %esi
22340+ jnc 1f
22341+ mov %cs, %esi
22342+ cmp $__KERNEL_CS, %esi
22343+ jz 3f
22344+ ljmp $__KERNEL_CS, $3f
22345+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22346+2:
22347+#ifdef CONFIG_PARAVIRT
22348+ mov %esi, %eax
22349+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22350+#else
22351+ mov %esi, %cr0
22352+#endif
22353+3:
22354+#ifdef CONFIG_PARAVIRT
22355+ popl %ecx
22356+ popl %eax
22357+#endif
22358+ ret
22359+ENDPROC(pax_enter_kernel)
22360+
22361+ENTRY(pax_exit_kernel)
22362+#ifdef CONFIG_PARAVIRT
22363+ pushl %eax
22364+ pushl %ecx
22365+#endif
22366+ mov %cs, %esi
22367+ cmp $__KERNEXEC_KERNEL_CS, %esi
22368+ jnz 2f
22369+#ifdef CONFIG_PARAVIRT
22370+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22371+ mov %eax, %esi
22372+#else
22373+ mov %cr0, %esi
22374+#endif
22375+ btr $16, %esi
22376+ ljmp $__KERNEL_CS, $1f
22377+1:
22378+#ifdef CONFIG_PARAVIRT
22379+ mov %esi, %eax
22380+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22381+#else
22382+ mov %esi, %cr0
22383+#endif
22384+2:
22385+#ifdef CONFIG_PARAVIRT
22386+ popl %ecx
22387+ popl %eax
22388+#endif
22389+ ret
22390+ENDPROC(pax_exit_kernel)
22391+#endif
22392+
22393+ .macro pax_erase_kstack
22394+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22395+ call pax_erase_kstack
22396+#endif
22397+ .endm
22398+
22399+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22400+/*
22401+ * ebp: thread_info
22402+ */
22403+ENTRY(pax_erase_kstack)
22404+ pushl %edi
22405+ pushl %ecx
22406+ pushl %eax
22407+
22408+ mov TI_lowest_stack(%ebp), %edi
22409+ mov $-0xBEEF, %eax
22410+ std
22411+
22412+1: mov %edi, %ecx
22413+ and $THREAD_SIZE_asm - 1, %ecx
22414+ shr $2, %ecx
22415+ repne scasl
22416+ jecxz 2f
22417+
22418+ cmp $2*16, %ecx
22419+ jc 2f
22420+
22421+ mov $2*16, %ecx
22422+ repe scasl
22423+ jecxz 2f
22424+ jne 1b
22425+
22426+2: cld
22427+ or $2*4, %edi
22428+ mov %esp, %ecx
22429+ sub %edi, %ecx
22430+
22431+ cmp $THREAD_SIZE_asm, %ecx
22432+ jb 3f
22433+ ud2
22434+3:
22435+
22436+ shr $2, %ecx
22437+ rep stosl
22438+
22439+ mov TI_task_thread_sp0(%ebp), %edi
22440+ sub $128, %edi
22441+ mov %edi, TI_lowest_stack(%ebp)
22442+
22443+ popl %eax
22444+ popl %ecx
22445+ popl %edi
22446+ ret
22447+ENDPROC(pax_erase_kstack)
22448+#endif
22449+
22450+.macro __SAVE_ALL _DS
22451 cld
22452 PUSH_GS
22453 pushl_cfi %fs
22454@@ -206,7 +347,7 @@
22455 CFI_REL_OFFSET ecx, 0
22456 pushl_cfi %ebx
22457 CFI_REL_OFFSET ebx, 0
22458- movl $(__USER_DS), %edx
22459+ movl $\_DS, %edx
22460 movl %edx, %ds
22461 movl %edx, %es
22462 movl $(__KERNEL_PERCPU), %edx
22463@@ -214,6 +355,15 @@
22464 SET_KERNEL_GS %edx
22465 .endm
22466
22467+.macro SAVE_ALL
22468+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22469+ __SAVE_ALL __KERNEL_DS
22470+ pax_enter_kernel
22471+#else
22472+ __SAVE_ALL __USER_DS
22473+#endif
22474+.endm
22475+
22476 .macro RESTORE_INT_REGS
22477 popl_cfi %ebx
22478 CFI_RESTORE ebx
22479@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22480 popfl_cfi
22481 jmp syscall_exit
22482 CFI_ENDPROC
22483-END(ret_from_fork)
22484+ENDPROC(ret_from_fork)
22485
22486 ENTRY(ret_from_kernel_thread)
22487 CFI_STARTPROC
22488@@ -340,7 +490,15 @@ ret_from_intr:
22489 andl $SEGMENT_RPL_MASK, %eax
22490 #endif
22491 cmpl $USER_RPL, %eax
22492+
22493+#ifdef CONFIG_PAX_KERNEXEC
22494+ jae resume_userspace
22495+
22496+ pax_exit_kernel
22497+ jmp resume_kernel
22498+#else
22499 jb resume_kernel # not returning to v8086 or userspace
22500+#endif
22501
22502 ENTRY(resume_userspace)
22503 LOCKDEP_SYS_EXIT
22504@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22505 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22506 # int/exception return?
22507 jne work_pending
22508- jmp restore_all
22509-END(ret_from_exception)
22510+ jmp restore_all_pax
22511+ENDPROC(ret_from_exception)
22512
22513 #ifdef CONFIG_PREEMPT
22514 ENTRY(resume_kernel)
22515@@ -365,7 +523,7 @@ need_resched:
22516 jz restore_all
22517 call preempt_schedule_irq
22518 jmp need_resched
22519-END(resume_kernel)
22520+ENDPROC(resume_kernel)
22521 #endif
22522 CFI_ENDPROC
22523
22524@@ -395,30 +553,45 @@ sysenter_past_esp:
22525 /*CFI_REL_OFFSET cs, 0*/
22526 /*
22527 * Push current_thread_info()->sysenter_return to the stack.
22528- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22529- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22530 */
22531- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22532+ pushl_cfi $0
22533 CFI_REL_OFFSET eip, 0
22534
22535 pushl_cfi %eax
22536 SAVE_ALL
22537+ GET_THREAD_INFO(%ebp)
22538+ movl TI_sysenter_return(%ebp),%ebp
22539+ movl %ebp,PT_EIP(%esp)
22540 ENABLE_INTERRUPTS(CLBR_NONE)
22541
22542 /*
22543 * Load the potential sixth argument from user stack.
22544 * Careful about security.
22545 */
22546+ movl PT_OLDESP(%esp),%ebp
22547+
22548+#ifdef CONFIG_PAX_MEMORY_UDEREF
22549+ mov PT_OLDSS(%esp),%ds
22550+1: movl %ds:(%ebp),%ebp
22551+ push %ss
22552+ pop %ds
22553+#else
22554 cmpl $__PAGE_OFFSET-3,%ebp
22555 jae syscall_fault
22556 ASM_STAC
22557 1: movl (%ebp),%ebp
22558 ASM_CLAC
22559+#endif
22560+
22561 movl %ebp,PT_EBP(%esp)
22562 _ASM_EXTABLE(1b,syscall_fault)
22563
22564 GET_THREAD_INFO(%ebp)
22565
22566+#ifdef CONFIG_PAX_RANDKSTACK
22567+ pax_erase_kstack
22568+#endif
22569+
22570 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22571 jnz sysenter_audit
22572 sysenter_do_call:
22573@@ -434,12 +607,24 @@ sysenter_after_call:
22574 testl $_TIF_ALLWORK_MASK, %ecx
22575 jne sysexit_audit
22576 sysenter_exit:
22577+
22578+#ifdef CONFIG_PAX_RANDKSTACK
22579+ pushl_cfi %eax
22580+ movl %esp, %eax
22581+ call pax_randomize_kstack
22582+ popl_cfi %eax
22583+#endif
22584+
22585+ pax_erase_kstack
22586+
22587 /* if something modifies registers it must also disable sysexit */
22588 movl PT_EIP(%esp), %edx
22589 movl PT_OLDESP(%esp), %ecx
22590 xorl %ebp,%ebp
22591 TRACE_IRQS_ON
22592 1: mov PT_FS(%esp), %fs
22593+2: mov PT_DS(%esp), %ds
22594+3: mov PT_ES(%esp), %es
22595 PTGS_TO_GS
22596 ENABLE_INTERRUPTS_SYSEXIT
22597
22598@@ -453,6 +638,9 @@ sysenter_audit:
22599 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22600 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22601 call __audit_syscall_entry
22602+
22603+ pax_erase_kstack
22604+
22605 popl_cfi %ecx /* get that remapped edx off the stack */
22606 popl_cfi %ecx /* get that remapped esi off the stack */
22607 movl PT_EAX(%esp),%eax /* reload syscall number */
22608@@ -479,10 +667,16 @@ sysexit_audit:
22609
22610 CFI_ENDPROC
22611 .pushsection .fixup,"ax"
22612-2: movl $0,PT_FS(%esp)
22613+4: movl $0,PT_FS(%esp)
22614+ jmp 1b
22615+5: movl $0,PT_DS(%esp)
22616+ jmp 1b
22617+6: movl $0,PT_ES(%esp)
22618 jmp 1b
22619 .popsection
22620- _ASM_EXTABLE(1b,2b)
22621+ _ASM_EXTABLE(1b,4b)
22622+ _ASM_EXTABLE(2b,5b)
22623+ _ASM_EXTABLE(3b,6b)
22624 PTGS_TO_GS_EX
22625 ENDPROC(ia32_sysenter_target)
22626
22627@@ -493,6 +687,11 @@ ENTRY(system_call)
22628 pushl_cfi %eax # save orig_eax
22629 SAVE_ALL
22630 GET_THREAD_INFO(%ebp)
22631+
22632+#ifdef CONFIG_PAX_RANDKSTACK
22633+ pax_erase_kstack
22634+#endif
22635+
22636 # system call tracing in operation / emulation
22637 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22638 jnz syscall_trace_entry
22639@@ -512,6 +711,15 @@ syscall_exit:
22640 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22641 jne syscall_exit_work
22642
22643+restore_all_pax:
22644+
22645+#ifdef CONFIG_PAX_RANDKSTACK
22646+ movl %esp, %eax
22647+ call pax_randomize_kstack
22648+#endif
22649+
22650+ pax_erase_kstack
22651+
22652 restore_all:
22653 TRACE_IRQS_IRET
22654 restore_all_notrace:
22655@@ -566,14 +774,34 @@ ldt_ss:
22656 * compensating for the offset by changing to the ESPFIX segment with
22657 * a base address that matches for the difference.
22658 */
22659-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22660+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22661 mov %esp, %edx /* load kernel esp */
22662 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22663 mov %dx, %ax /* eax: new kernel esp */
22664 sub %eax, %edx /* offset (low word is 0) */
22665+#ifdef CONFIG_SMP
22666+ movl PER_CPU_VAR(cpu_number), %ebx
22667+ shll $PAGE_SHIFT_asm, %ebx
22668+ addl $cpu_gdt_table, %ebx
22669+#else
22670+ movl $cpu_gdt_table, %ebx
22671+#endif
22672 shr $16, %edx
22673- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22674- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22675+
22676+#ifdef CONFIG_PAX_KERNEXEC
22677+ mov %cr0, %esi
22678+ btr $16, %esi
22679+ mov %esi, %cr0
22680+#endif
22681+
22682+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22683+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22684+
22685+#ifdef CONFIG_PAX_KERNEXEC
22686+ bts $16, %esi
22687+ mov %esi, %cr0
22688+#endif
22689+
22690 pushl_cfi $__ESPFIX_SS
22691 pushl_cfi %eax /* new kernel esp */
22692 /* Disable interrupts, but do not irqtrace this section: we
22693@@ -603,20 +831,18 @@ work_resched:
22694 movl TI_flags(%ebp), %ecx
22695 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22696 # than syscall tracing?
22697- jz restore_all
22698+ jz restore_all_pax
22699 testb $_TIF_NEED_RESCHED, %cl
22700 jnz work_resched
22701
22702 work_notifysig: # deal with pending signals and
22703 # notify-resume requests
22704+ movl %esp, %eax
22705 #ifdef CONFIG_VM86
22706 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22707- movl %esp, %eax
22708 jne work_notifysig_v86 # returning to kernel-space or
22709 # vm86-space
22710 1:
22711-#else
22712- movl %esp, %eax
22713 #endif
22714 TRACE_IRQS_ON
22715 ENABLE_INTERRUPTS(CLBR_NONE)
22716@@ -637,7 +863,7 @@ work_notifysig_v86:
22717 movl %eax, %esp
22718 jmp 1b
22719 #endif
22720-END(work_pending)
22721+ENDPROC(work_pending)
22722
22723 # perform syscall exit tracing
22724 ALIGN
22725@@ -645,11 +871,14 @@ syscall_trace_entry:
22726 movl $-ENOSYS,PT_EAX(%esp)
22727 movl %esp, %eax
22728 call syscall_trace_enter
22729+
22730+ pax_erase_kstack
22731+
22732 /* What it returned is what we'll actually use. */
22733 cmpl $(NR_syscalls), %eax
22734 jnae syscall_call
22735 jmp syscall_exit
22736-END(syscall_trace_entry)
22737+ENDPROC(syscall_trace_entry)
22738
22739 # perform syscall exit tracing
22740 ALIGN
22741@@ -662,26 +891,30 @@ syscall_exit_work:
22742 movl %esp, %eax
22743 call syscall_trace_leave
22744 jmp resume_userspace
22745-END(syscall_exit_work)
22746+ENDPROC(syscall_exit_work)
22747 CFI_ENDPROC
22748
22749 RING0_INT_FRAME # can't unwind into user space anyway
22750 syscall_fault:
22751+#ifdef CONFIG_PAX_MEMORY_UDEREF
22752+ push %ss
22753+ pop %ds
22754+#endif
22755 ASM_CLAC
22756 GET_THREAD_INFO(%ebp)
22757 movl $-EFAULT,PT_EAX(%esp)
22758 jmp resume_userspace
22759-END(syscall_fault)
22760+ENDPROC(syscall_fault)
22761
22762 syscall_badsys:
22763 movl $-ENOSYS,%eax
22764 jmp syscall_after_call
22765-END(syscall_badsys)
22766+ENDPROC(syscall_badsys)
22767
22768 sysenter_badsys:
22769 movl $-ENOSYS,%eax
22770 jmp sysenter_after_call
22771-END(sysenter_badsys)
22772+ENDPROC(sysenter_badsys)
22773 CFI_ENDPROC
22774
22775 .macro FIXUP_ESPFIX_STACK
22776@@ -694,8 +927,15 @@ END(sysenter_badsys)
22777 */
22778 #ifdef CONFIG_X86_ESPFIX32
22779 /* fixup the stack */
22780- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22781- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22782+#ifdef CONFIG_SMP
22783+ movl PER_CPU_VAR(cpu_number), %ebx
22784+ shll $PAGE_SHIFT_asm, %ebx
22785+ addl $cpu_gdt_table, %ebx
22786+#else
22787+ movl $cpu_gdt_table, %ebx
22788+#endif
22789+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22790+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22791 shl $16, %eax
22792 addl %esp, %eax /* the adjusted stack pointer */
22793 pushl_cfi $__KERNEL_DS
22794@@ -751,7 +991,7 @@ vector=vector+1
22795 .endr
22796 2: jmp common_interrupt
22797 .endr
22798-END(irq_entries_start)
22799+ENDPROC(irq_entries_start)
22800
22801 .previous
22802 END(interrupt)
22803@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22804 pushl_cfi $do_coprocessor_error
22805 jmp error_code
22806 CFI_ENDPROC
22807-END(coprocessor_error)
22808+ENDPROC(coprocessor_error)
22809
22810 ENTRY(simd_coprocessor_error)
22811 RING0_INT_FRAME
22812@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22813 .section .altinstructions,"a"
22814 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22815 .previous
22816-.section .altinstr_replacement,"ax"
22817+.section .altinstr_replacement,"a"
22818 663: pushl $do_simd_coprocessor_error
22819 664:
22820 .previous
22821@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22822 #endif
22823 jmp error_code
22824 CFI_ENDPROC
22825-END(simd_coprocessor_error)
22826+ENDPROC(simd_coprocessor_error)
22827
22828 ENTRY(device_not_available)
22829 RING0_INT_FRAME
22830@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22831 pushl_cfi $do_device_not_available
22832 jmp error_code
22833 CFI_ENDPROC
22834-END(device_not_available)
22835+ENDPROC(device_not_available)
22836
22837 #ifdef CONFIG_PARAVIRT
22838 ENTRY(native_iret)
22839 iret
22840 _ASM_EXTABLE(native_iret, iret_exc)
22841-END(native_iret)
22842+ENDPROC(native_iret)
22843
22844 ENTRY(native_irq_enable_sysexit)
22845 sti
22846 sysexit
22847-END(native_irq_enable_sysexit)
22848+ENDPROC(native_irq_enable_sysexit)
22849 #endif
22850
22851 ENTRY(overflow)
22852@@ -860,7 +1100,7 @@ ENTRY(overflow)
22853 pushl_cfi $do_overflow
22854 jmp error_code
22855 CFI_ENDPROC
22856-END(overflow)
22857+ENDPROC(overflow)
22858
22859 ENTRY(bounds)
22860 RING0_INT_FRAME
22861@@ -869,7 +1109,7 @@ ENTRY(bounds)
22862 pushl_cfi $do_bounds
22863 jmp error_code
22864 CFI_ENDPROC
22865-END(bounds)
22866+ENDPROC(bounds)
22867
22868 ENTRY(invalid_op)
22869 RING0_INT_FRAME
22870@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22871 pushl_cfi $do_invalid_op
22872 jmp error_code
22873 CFI_ENDPROC
22874-END(invalid_op)
22875+ENDPROC(invalid_op)
22876
22877 ENTRY(coprocessor_segment_overrun)
22878 RING0_INT_FRAME
22879@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22880 pushl_cfi $do_coprocessor_segment_overrun
22881 jmp error_code
22882 CFI_ENDPROC
22883-END(coprocessor_segment_overrun)
22884+ENDPROC(coprocessor_segment_overrun)
22885
22886 ENTRY(invalid_TSS)
22887 RING0_EC_FRAME
22888@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22889 pushl_cfi $do_invalid_TSS
22890 jmp error_code
22891 CFI_ENDPROC
22892-END(invalid_TSS)
22893+ENDPROC(invalid_TSS)
22894
22895 ENTRY(segment_not_present)
22896 RING0_EC_FRAME
22897@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22898 pushl_cfi $do_segment_not_present
22899 jmp error_code
22900 CFI_ENDPROC
22901-END(segment_not_present)
22902+ENDPROC(segment_not_present)
22903
22904 ENTRY(stack_segment)
22905 RING0_EC_FRAME
22906@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22907 pushl_cfi $do_stack_segment
22908 jmp error_code
22909 CFI_ENDPROC
22910-END(stack_segment)
22911+ENDPROC(stack_segment)
22912
22913 ENTRY(alignment_check)
22914 RING0_EC_FRAME
22915@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22916 pushl_cfi $do_alignment_check
22917 jmp error_code
22918 CFI_ENDPROC
22919-END(alignment_check)
22920+ENDPROC(alignment_check)
22921
22922 ENTRY(divide_error)
22923 RING0_INT_FRAME
22924@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22925 pushl_cfi $do_divide_error
22926 jmp error_code
22927 CFI_ENDPROC
22928-END(divide_error)
22929+ENDPROC(divide_error)
22930
22931 #ifdef CONFIG_X86_MCE
22932 ENTRY(machine_check)
22933@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22934 pushl_cfi machine_check_vector
22935 jmp error_code
22936 CFI_ENDPROC
22937-END(machine_check)
22938+ENDPROC(machine_check)
22939 #endif
22940
22941 ENTRY(spurious_interrupt_bug)
22942@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22943 pushl_cfi $do_spurious_interrupt_bug
22944 jmp error_code
22945 CFI_ENDPROC
22946-END(spurious_interrupt_bug)
22947+ENDPROC(spurious_interrupt_bug)
22948
22949 #ifdef CONFIG_XEN
22950 /* Xen doesn't set %esp to be precisely what the normal sysenter
22951@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22952
22953 ENTRY(mcount)
22954 ret
22955-END(mcount)
22956+ENDPROC(mcount)
22957
22958 ENTRY(ftrace_caller)
22959 pushl %eax
22960@@ -1084,7 +1324,7 @@ ftrace_graph_call:
22961 .globl ftrace_stub
22962 ftrace_stub:
22963 ret
22964-END(ftrace_caller)
22965+ENDPROC(ftrace_caller)
22966
22967 ENTRY(ftrace_regs_caller)
22968 pushf /* push flags before compare (in cs location) */
22969@@ -1182,7 +1422,7 @@ trace:
22970 popl %ecx
22971 popl %eax
22972 jmp ftrace_stub
22973-END(mcount)
22974+ENDPROC(mcount)
22975 #endif /* CONFIG_DYNAMIC_FTRACE */
22976 #endif /* CONFIG_FUNCTION_TRACER */
22977
22978@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
22979 popl %ecx
22980 popl %eax
22981 ret
22982-END(ftrace_graph_caller)
22983+ENDPROC(ftrace_graph_caller)
22984
22985 .globl return_to_handler
22986 return_to_handler:
22987@@ -1261,15 +1501,18 @@ error_code:
22988 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22989 REG_TO_PTGS %ecx
22990 SET_KERNEL_GS %ecx
22991- movl $(__USER_DS), %ecx
22992+ movl $(__KERNEL_DS), %ecx
22993 movl %ecx, %ds
22994 movl %ecx, %es
22995+
22996+ pax_enter_kernel
22997+
22998 TRACE_IRQS_OFF
22999 movl %esp,%eax # pt_regs pointer
23000 call *%edi
23001 jmp ret_from_exception
23002 CFI_ENDPROC
23003-END(page_fault)
23004+ENDPROC(page_fault)
23005
23006 /*
23007 * Debug traps and NMI can happen at the one SYSENTER instruction
23008@@ -1312,7 +1555,7 @@ debug_stack_correct:
23009 call do_debug
23010 jmp ret_from_exception
23011 CFI_ENDPROC
23012-END(debug)
23013+ENDPROC(debug)
23014
23015 /*
23016 * NMI is doubly nasty. It can happen _while_ we're handling
23017@@ -1352,6 +1595,9 @@ nmi_stack_correct:
23018 xorl %edx,%edx # zero error code
23019 movl %esp,%eax # pt_regs pointer
23020 call do_nmi
23021+
23022+ pax_exit_kernel
23023+
23024 jmp restore_all_notrace
23025 CFI_ENDPROC
23026
23027@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
23028 FIXUP_ESPFIX_STACK # %eax == %esp
23029 xorl %edx,%edx # zero error code
23030 call do_nmi
23031+
23032+ pax_exit_kernel
23033+
23034 RESTORE_REGS
23035 lss 12+4(%esp), %esp # back to espfix stack
23036 CFI_ADJUST_CFA_OFFSET -24
23037 jmp irq_return
23038 #endif
23039 CFI_ENDPROC
23040-END(nmi)
23041+ENDPROC(nmi)
23042
23043 ENTRY(int3)
23044 RING0_INT_FRAME
23045@@ -1408,14 +1657,14 @@ ENTRY(int3)
23046 call do_int3
23047 jmp ret_from_exception
23048 CFI_ENDPROC
23049-END(int3)
23050+ENDPROC(int3)
23051
23052 ENTRY(general_protection)
23053 RING0_EC_FRAME
23054 pushl_cfi $do_general_protection
23055 jmp error_code
23056 CFI_ENDPROC
23057-END(general_protection)
23058+ENDPROC(general_protection)
23059
23060 #ifdef CONFIG_KVM_GUEST
23061 ENTRY(async_page_fault)
23062@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
23063 pushl_cfi $do_async_page_fault
23064 jmp error_code
23065 CFI_ENDPROC
23066-END(async_page_fault)
23067+ENDPROC(async_page_fault)
23068 #endif
23069
23070diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23071index 4ee9a23..c786610 100644
23072--- a/arch/x86/kernel/entry_64.S
23073+++ b/arch/x86/kernel/entry_64.S
23074@@ -59,6 +59,8 @@
23075 #include <asm/smap.h>
23076 #include <asm/pgtable_types.h>
23077 #include <linux/err.h>
23078+#include <asm/pgtable.h>
23079+#include <asm/alternative-asm.h>
23080
23081 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23082 #include <linux/elf-em.h>
23083@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23084 ENDPROC(native_usergs_sysret64)
23085 #endif /* CONFIG_PARAVIRT */
23086
23087+ .macro ljmpq sel, off
23088+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23089+ .byte 0x48; ljmp *1234f(%rip)
23090+ .pushsection .rodata
23091+ .align 16
23092+ 1234: .quad \off; .word \sel
23093+ .popsection
23094+#else
23095+ pushq $\sel
23096+ pushq $\off
23097+ lretq
23098+#endif
23099+ .endm
23100+
23101+ .macro pax_enter_kernel
23102+ pax_set_fptr_mask
23103+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23104+ call pax_enter_kernel
23105+#endif
23106+ .endm
23107+
23108+ .macro pax_exit_kernel
23109+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23110+ call pax_exit_kernel
23111+#endif
23112+
23113+ .endm
23114+
23115+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23116+ENTRY(pax_enter_kernel)
23117+ pushq %rdi
23118+
23119+#ifdef CONFIG_PARAVIRT
23120+ PV_SAVE_REGS(CLBR_RDI)
23121+#endif
23122+
23123+#ifdef CONFIG_PAX_KERNEXEC
23124+ GET_CR0_INTO_RDI
23125+ bts $16,%rdi
23126+ jnc 3f
23127+ mov %cs,%edi
23128+ cmp $__KERNEL_CS,%edi
23129+ jnz 2f
23130+1:
23131+#endif
23132+
23133+#ifdef CONFIG_PAX_MEMORY_UDEREF
23134+ 661: jmp 111f
23135+ .pushsection .altinstr_replacement, "a"
23136+ 662: ASM_NOP2
23137+ .popsection
23138+ .pushsection .altinstructions, "a"
23139+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23140+ .popsection
23141+ GET_CR3_INTO_RDI
23142+ cmp $0,%dil
23143+ jnz 112f
23144+ mov $__KERNEL_DS,%edi
23145+ mov %edi,%ss
23146+ jmp 111f
23147+112: cmp $1,%dil
23148+ jz 113f
23149+ ud2
23150+113: sub $4097,%rdi
23151+ bts $63,%rdi
23152+ SET_RDI_INTO_CR3
23153+ mov $__UDEREF_KERNEL_DS,%edi
23154+ mov %edi,%ss
23155+111:
23156+#endif
23157+
23158+#ifdef CONFIG_PARAVIRT
23159+ PV_RESTORE_REGS(CLBR_RDI)
23160+#endif
23161+
23162+ popq %rdi
23163+ pax_force_retaddr
23164+ retq
23165+
23166+#ifdef CONFIG_PAX_KERNEXEC
23167+2: ljmpq __KERNEL_CS,1b
23168+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23169+4: SET_RDI_INTO_CR0
23170+ jmp 1b
23171+#endif
23172+ENDPROC(pax_enter_kernel)
23173+
23174+ENTRY(pax_exit_kernel)
23175+ pushq %rdi
23176+
23177+#ifdef CONFIG_PARAVIRT
23178+ PV_SAVE_REGS(CLBR_RDI)
23179+#endif
23180+
23181+#ifdef CONFIG_PAX_KERNEXEC
23182+ mov %cs,%rdi
23183+ cmp $__KERNEXEC_KERNEL_CS,%edi
23184+ jz 2f
23185+ GET_CR0_INTO_RDI
23186+ bts $16,%rdi
23187+ jnc 4f
23188+1:
23189+#endif
23190+
23191+#ifdef CONFIG_PAX_MEMORY_UDEREF
23192+ 661: jmp 111f
23193+ .pushsection .altinstr_replacement, "a"
23194+ 662: ASM_NOP2
23195+ .popsection
23196+ .pushsection .altinstructions, "a"
23197+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23198+ .popsection
23199+ mov %ss,%edi
23200+ cmp $__UDEREF_KERNEL_DS,%edi
23201+ jnz 111f
23202+ GET_CR3_INTO_RDI
23203+ cmp $0,%dil
23204+ jz 112f
23205+ ud2
23206+112: add $4097,%rdi
23207+ bts $63,%rdi
23208+ SET_RDI_INTO_CR3
23209+ mov $__KERNEL_DS,%edi
23210+ mov %edi,%ss
23211+111:
23212+#endif
23213+
23214+#ifdef CONFIG_PARAVIRT
23215+ PV_RESTORE_REGS(CLBR_RDI);
23216+#endif
23217+
23218+ popq %rdi
23219+ pax_force_retaddr
23220+ retq
23221+
23222+#ifdef CONFIG_PAX_KERNEXEC
23223+2: GET_CR0_INTO_RDI
23224+ btr $16,%rdi
23225+ jnc 4f
23226+ ljmpq __KERNEL_CS,3f
23227+3: SET_RDI_INTO_CR0
23228+ jmp 1b
23229+4: ud2
23230+ jmp 4b
23231+#endif
23232+ENDPROC(pax_exit_kernel)
23233+#endif
23234+
23235+ .macro pax_enter_kernel_user
23236+ pax_set_fptr_mask
23237+#ifdef CONFIG_PAX_MEMORY_UDEREF
23238+ call pax_enter_kernel_user
23239+#endif
23240+ .endm
23241+
23242+ .macro pax_exit_kernel_user
23243+#ifdef CONFIG_PAX_MEMORY_UDEREF
23244+ call pax_exit_kernel_user
23245+#endif
23246+#ifdef CONFIG_PAX_RANDKSTACK
23247+ pushq %rax
23248+ pushq %r11
23249+ call pax_randomize_kstack
23250+ popq %r11
23251+ popq %rax
23252+#endif
23253+ .endm
23254+
23255+#ifdef CONFIG_PAX_MEMORY_UDEREF
23256+ENTRY(pax_enter_kernel_user)
23257+ pushq %rdi
23258+ pushq %rbx
23259+
23260+#ifdef CONFIG_PARAVIRT
23261+ PV_SAVE_REGS(CLBR_RDI)
23262+#endif
23263+
23264+ 661: jmp 111f
23265+ .pushsection .altinstr_replacement, "a"
23266+ 662: ASM_NOP2
23267+ .popsection
23268+ .pushsection .altinstructions, "a"
23269+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23270+ .popsection
23271+ GET_CR3_INTO_RDI
23272+ cmp $1,%dil
23273+ jnz 4f
23274+ sub $4097,%rdi
23275+ bts $63,%rdi
23276+ SET_RDI_INTO_CR3
23277+ jmp 3f
23278+111:
23279+
23280+ GET_CR3_INTO_RDI
23281+ mov %rdi,%rbx
23282+ add $__START_KERNEL_map,%rbx
23283+ sub phys_base(%rip),%rbx
23284+
23285+#ifdef CONFIG_PARAVIRT
23286+ cmpl $0, pv_info+PARAVIRT_enabled
23287+ jz 1f
23288+ pushq %rdi
23289+ i = 0
23290+ .rept USER_PGD_PTRS
23291+ mov i*8(%rbx),%rsi
23292+ mov $0,%sil
23293+ lea i*8(%rbx),%rdi
23294+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23295+ i = i + 1
23296+ .endr
23297+ popq %rdi
23298+ jmp 2f
23299+1:
23300+#endif
23301+
23302+ i = 0
23303+ .rept USER_PGD_PTRS
23304+ movb $0,i*8(%rbx)
23305+ i = i + 1
23306+ .endr
23307+
23308+2: SET_RDI_INTO_CR3
23309+
23310+#ifdef CONFIG_PAX_KERNEXEC
23311+ GET_CR0_INTO_RDI
23312+ bts $16,%rdi
23313+ SET_RDI_INTO_CR0
23314+#endif
23315+
23316+3:
23317+
23318+#ifdef CONFIG_PARAVIRT
23319+ PV_RESTORE_REGS(CLBR_RDI)
23320+#endif
23321+
23322+ popq %rbx
23323+ popq %rdi
23324+ pax_force_retaddr
23325+ retq
23326+4: ud2
23327+ENDPROC(pax_enter_kernel_user)
23328+
23329+ENTRY(pax_exit_kernel_user)
23330+ pushq %rdi
23331+ pushq %rbx
23332+
23333+#ifdef CONFIG_PARAVIRT
23334+ PV_SAVE_REGS(CLBR_RDI)
23335+#endif
23336+
23337+ GET_CR3_INTO_RDI
23338+ 661: jmp 1f
23339+ .pushsection .altinstr_replacement, "a"
23340+ 662: ASM_NOP2
23341+ .popsection
23342+ .pushsection .altinstructions, "a"
23343+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23344+ .popsection
23345+ cmp $0,%dil
23346+ jnz 3f
23347+ add $4097,%rdi
23348+ bts $63,%rdi
23349+ SET_RDI_INTO_CR3
23350+ jmp 2f
23351+1:
23352+
23353+ mov %rdi,%rbx
23354+
23355+#ifdef CONFIG_PAX_KERNEXEC
23356+ GET_CR0_INTO_RDI
23357+ btr $16,%rdi
23358+ jnc 3f
23359+ SET_RDI_INTO_CR0
23360+#endif
23361+
23362+ add $__START_KERNEL_map,%rbx
23363+ sub phys_base(%rip),%rbx
23364+
23365+#ifdef CONFIG_PARAVIRT
23366+ cmpl $0, pv_info+PARAVIRT_enabled
23367+ jz 1f
23368+ i = 0
23369+ .rept USER_PGD_PTRS
23370+ mov i*8(%rbx),%rsi
23371+ mov $0x67,%sil
23372+ lea i*8(%rbx),%rdi
23373+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23374+ i = i + 1
23375+ .endr
23376+ jmp 2f
23377+1:
23378+#endif
23379+
23380+ i = 0
23381+ .rept USER_PGD_PTRS
23382+ movb $0x67,i*8(%rbx)
23383+ i = i + 1
23384+ .endr
23385+2:
23386+
23387+#ifdef CONFIG_PARAVIRT
23388+ PV_RESTORE_REGS(CLBR_RDI)
23389+#endif
23390+
23391+ popq %rbx
23392+ popq %rdi
23393+ pax_force_retaddr
23394+ retq
23395+3: ud2
23396+ENDPROC(pax_exit_kernel_user)
23397+#endif
23398+
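
pax_enter_kernel_user/pax_exit_kernel_user implement UDEREF's core trick. Without PCID, entry clears the low flag byte of each of the USER_PGD_PTRS lower PGD slots (movb $0), so userland is unmapped while the kernel runs; exit writes the byte back as 0x67 (PRESENT|RW|USER|ACCESSED|DIRTY). With PCID, the patched-in fast path instead moves CR3 between a kernel and a user PGD that sit 4097 bytes apart (one page plus PCID 1) and sets bit 63 to suppress the TLB flush. A minimal C sketch of the non-PCID round trip, with hypothetical names:

#include <stdint.h>

#define USER_PGD_PTRS  256     /* userland half of the 512-entry PGD */
#define USER_PGD_FLAGS 0x67    /* PRESENT|RW|USER|ACCESSED|DIRTY     */

/* kernel entry: zap the flag byte, user mappings become not-present */
static void uderef_enter_sketch(uint64_t *pgd)
{
	for (int i = 0; i < USER_PGD_PTRS; i++)
		((uint8_t *)&pgd[i])[0] = 0;              /* movb $0,i*8(%rbx)    */
}

/* kernel exit: restore the flag byte, user mappings come back */
static void uderef_exit_sketch(uint64_t *pgd)
{
	for (int i = 0; i < USER_PGD_PTRS; i++)
		((uint8_t *)&pgd[i])[0] = USER_PGD_FLAGS; /* movb $0x67,i*8(%rbx) */
}
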
23399+ .macro pax_enter_kernel_nmi
23400+ pax_set_fptr_mask
23401+
23402+#ifdef CONFIG_PAX_KERNEXEC
23403+ GET_CR0_INTO_RDI
23404+ bts $16,%rdi
23405+ jc 110f
23406+ SET_RDI_INTO_CR0
23407+ or $2,%ebx
23408+110:
23409+#endif
23410+
23411+#ifdef CONFIG_PAX_MEMORY_UDEREF
23412+ 661: jmp 111f
23413+ .pushsection .altinstr_replacement, "a"
23414+ 662: ASM_NOP2
23415+ .popsection
23416+ .pushsection .altinstructions, "a"
23417+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23418+ .popsection
23419+ GET_CR3_INTO_RDI
23420+ cmp $0,%dil
23421+ jz 111f
23422+ sub $4097,%rdi
23423+ or $4,%ebx
23424+ bts $63,%rdi
23425+ SET_RDI_INTO_CR3
23426+ mov $__UDEREF_KERNEL_DS,%edi
23427+ mov %edi,%ss
23428+111:
23429+#endif
23430+ .endm
23431+
23432+ .macro pax_exit_kernel_nmi
23433+#ifdef CONFIG_PAX_KERNEXEC
23434+ btr $1,%ebx
23435+ jnc 110f
23436+ GET_CR0_INTO_RDI
23437+ btr $16,%rdi
23438+ SET_RDI_INTO_CR0
23439+110:
23440+#endif
23441+
23442+#ifdef CONFIG_PAX_MEMORY_UDEREF
23443+ btr $2,%ebx
23444+ jnc 111f
23445+ GET_CR3_INTO_RDI
23446+ add $4097,%rdi
23447+ bts $63,%rdi
23448+ SET_RDI_INTO_CR3
23449+ mov $__KERNEL_DS,%edi
23450+ mov %edi,%ss
23451+111:
23452+#endif
23453+ .endm
23454+
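
Because an NMI can fire in any state, including halfway through one of the transitions above, pax_enter_kernel_nmi records in %ebx exactly what it changed and pax_exit_kernel_nmi undoes only that: bit 1 is set when it had to set CR0.WP (KERNEXEC), bit 2 when it had to switch to the kernel CR3 (UDEREF); bit 0 is the existing save_paranoid "no swapgs needed" flag. A sketch of the exit-side logic, with hypothetical helper names:

/* flag bits carried in %ebx across the NMI body (sketch names) */
#define NMI_SET_WP        (1u << 1)   /* or $2,%ebx in the enter path */
#define NMI_SWITCHED_CR3  (1u << 2)   /* or $4,%ebx in the enter path */

extern void cr0_clear_wp_sketch(void);     /* btr $16 of CR0            */
extern void cr3_back_to_user_sketch(void); /* add $4097 + no-flush bit  */

static void pax_exit_kernel_nmi_sketch(unsigned ebx)
{
	if (ebx & NMI_SET_WP)        /* btr $1,%ebx; jnc 110f */
		cr0_clear_wp_sketch();
	if (ebx & NMI_SWITCHED_CR3)  /* btr $2,%ebx; jnc 111f */
		cr3_back_to_user_sketch();
}
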
23455+ .macro pax_erase_kstack
23456+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23457+ call pax_erase_kstack
23458+#endif
23459+ .endm
23460+
23461+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23462+ENTRY(pax_erase_kstack)
23463+ pushq %rdi
23464+ pushq %rcx
23465+ pushq %rax
23466+ pushq %r11
23467+
23468+ GET_THREAD_INFO(%r11)
23469+ mov TI_lowest_stack(%r11), %rdi
23470+ mov $-0xBEEF, %rax
23471+ std
23472+
23473+1: mov %edi, %ecx
23474+ and $THREAD_SIZE_asm - 1, %ecx
23475+ shr $3, %ecx
23476+ repne scasq
23477+ jecxz 2f
23478+
23479+ cmp $2*8, %ecx
23480+ jc 2f
23481+
23482+ mov $2*8, %ecx
23483+ repe scasq
23484+ jecxz 2f
23485+ jne 1b
23486+
23487+2: cld
23488+ or $2*8, %rdi
23489+ mov %esp, %ecx
23490+ sub %edi, %ecx
23491+
23492+ cmp $THREAD_SIZE_asm, %rcx
23493+ jb 3f
23494+ ud2
23495+3:
23496+
23497+ shr $3, %ecx
23498+ rep stosq
23499+
23500+ mov TI_task_thread_sp0(%r11), %rdi
23501+ sub $256, %rdi
23502+ mov %rdi, TI_lowest_stack(%r11)
23503+
23504+ popq %r11
23505+ popq %rax
23506+ popq %rcx
23507+ popq %rdi
23508+ pax_force_retaddr
23509+ ret
23510+ENDPROC(pax_erase_kstack)
23511+#endif
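
pax_erase_kstack is the STACKLEAK scrubber: starting at TI_lowest_stack it scans downward (std + repne scasq) for the -0xBEEF poison qword, insists on a run of 2*8 consecutive poison qwords so a stray matching value cannot end the scan early, then re-poisons everything from that boundary up to the live %rsp with rep stosq, and finally resets lowest_stack to just under the top of the stack. A compact C restatement of the same scan-and-fill, under the simplifying assumption of a flat uint64_t stack:

#include <stdint.h>

#define POISON ((uint64_t)-0xBEEFLL)  /* 0xffffffffffff4111        */
#define RUNLEN 16                     /* 2*8 qwords, as in the asm */

/* scan down from the deepest point tracked so far, find where a solid
 * run of poison begins, then re-poison up to the live stack pointer */
static void erase_kstack_sketch(uint64_t *lowest, uint64_t *sp,
				uint64_t *stack_bottom)
{
	uint64_t *p = lowest;

	while (p - RUNLEN >= stack_bottom) {
		int run = 0;
		while (run < RUNLEN && p[-run] == POISON)
			run++;
		if (run == RUNLEN)
			break;            /* solid poison: clean boundary */
		p--;
	}
	while (p < sp)                    /* rep stosq refill */
		*p++ = POISON;
}
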
23512
23513 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23514 #ifdef CONFIG_TRACE_IRQFLAGS
23515@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23516 .endm
23517
23518 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23519- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23520+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23521 jnc 1f
23522 TRACE_IRQS_ON_DEBUG
23523 1:
23524@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
23525 movq \tmp,R11+\offset(%rsp)
23526 .endm
23527
23528- .macro FAKE_STACK_FRAME child_rip
23529- /* push in order ss, rsp, eflags, cs, rip */
23530- xorl %eax, %eax
23531- pushq_cfi $__KERNEL_DS /* ss */
23532- /*CFI_REL_OFFSET ss,0*/
23533- pushq_cfi %rax /* rsp */
23534- CFI_REL_OFFSET rsp,0
23535- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23536- /*CFI_REL_OFFSET rflags,0*/
23537- pushq_cfi $__KERNEL_CS /* cs */
23538- /*CFI_REL_OFFSET cs,0*/
23539- pushq_cfi \child_rip /* rip */
23540- CFI_REL_OFFSET rip,0
23541- pushq_cfi %rax /* orig rax */
23542- .endm
23543-
23544- .macro UNFAKE_STACK_FRAME
23545- addq $8*6, %rsp
23546- CFI_ADJUST_CFA_OFFSET -(6*8)
23547- .endm
23548-
23549 /*
23550 * initial frame state for interrupts (and exceptions without error code)
23551 */
23552@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23553 /* save partial stack frame */
23554 .macro SAVE_ARGS_IRQ
23555 cld
23556- /* start from rbp in pt_regs and jump over */
23557- movq_cfi rdi, (RDI-RBP)
23558- movq_cfi rsi, (RSI-RBP)
23559- movq_cfi rdx, (RDX-RBP)
23560- movq_cfi rcx, (RCX-RBP)
23561- movq_cfi rax, (RAX-RBP)
23562- movq_cfi r8, (R8-RBP)
23563- movq_cfi r9, (R9-RBP)
23564- movq_cfi r10, (R10-RBP)
23565- movq_cfi r11, (R11-RBP)
23566+ /* start from r15 in pt_regs and jump over */
23567+ movq_cfi rdi, RDI
23568+ movq_cfi rsi, RSI
23569+ movq_cfi rdx, RDX
23570+ movq_cfi rcx, RCX
23571+ movq_cfi rax, RAX
23572+ movq_cfi r8, R8
23573+ movq_cfi r9, R9
23574+ movq_cfi r10, R10
23575+ movq_cfi r11, R11
23576+ movq_cfi r12, R12
23577
23578 /* Save rbp so that we can unwind from get_irq_regs() */
23579- movq_cfi rbp, 0
23580+ movq_cfi rbp, RBP
23581
23582 /* Save previous stack value */
23583 movq %rsp, %rsi
23584
23585- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23586- testl $3, CS-RBP(%rsi)
23587+ movq %rsp,%rdi /* arg1 for handler */
23588+ testb $3, CS(%rsi)
23589 je 1f
23590 SWAPGS
23591 /*
23592@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23593 0x06 /* DW_OP_deref */, \
23594 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23595 0x22 /* DW_OP_plus */
23596+
23597+#ifdef CONFIG_PAX_MEMORY_UDEREF
23598+ testb $3, CS(%rdi)
23599+ jnz 1f
23600+ pax_enter_kernel
23601+ jmp 2f
23602+1: pax_enter_kernel_user
23603+2:
23604+#else
23605+ pax_enter_kernel
23606+#endif
23607+
23608 /* We entered an interrupt context - irqs are off: */
23609 TRACE_IRQS_OFF
23610 .endm
23611@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
23612 js 1f /* negative -> in kernel */
23613 SWAPGS
23614 xorl %ebx,%ebx
23615-1: ret
23616+1:
23617+#ifdef CONFIG_PAX_MEMORY_UDEREF
23618+ testb $3, CS+8(%rsp)
23619+ jnz 1f
23620+ pax_enter_kernel
23621+ jmp 2f
23622+1: pax_enter_kernel_user
23623+2:
23624+#else
23625+ pax_enter_kernel
23626+#endif
23627+ pax_force_retaddr
23628+ ret
23629 CFI_ENDPROC
23630-END(save_paranoid)
23631+ENDPROC(save_paranoid)
23632+
23633+ENTRY(save_paranoid_nmi)
23634+ XCPT_FRAME 1 RDI+8
23635+ cld
23636+ movq_cfi rdi, RDI+8
23637+ movq_cfi rsi, RSI+8
23638+ movq_cfi rdx, RDX+8
23639+ movq_cfi rcx, RCX+8
23640+ movq_cfi rax, RAX+8
23641+ movq_cfi r8, R8+8
23642+ movq_cfi r9, R9+8
23643+ movq_cfi r10, R10+8
23644+ movq_cfi r11, R11+8
23645+ movq_cfi rbx, RBX+8
23646+ movq_cfi rbp, RBP+8
23647+ movq_cfi r12, R12+8
23648+ movq_cfi r13, R13+8
23649+ movq_cfi r14, R14+8
23650+ movq_cfi r15, R15+8
23651+ movl $1,%ebx
23652+ movl $MSR_GS_BASE,%ecx
23653+ rdmsr
23654+ testl %edx,%edx
23655+ js 1f /* negative -> in kernel */
23656+ SWAPGS
23657+ xorl %ebx,%ebx
23658+1: pax_enter_kernel_nmi
23659+ pax_force_retaddr
23660+ ret
23661+ CFI_ENDPROC
23662+ENDPROC(save_paranoid_nmi)
23663
23664 /*
23665 * A newly forked process directly context switches into this address.
23666@@ -331,7 +793,7 @@ ENTRY(ret_from_fork)
23667
23668 RESTORE_REST
23669
23670- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23671+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23672 jz 1f
23673
23674 /*
23675@@ -344,15 +806,13 @@ ENTRY(ret_from_fork)
23676 jmp int_ret_from_sys_call
23677
23678 1:
23679- subq $REST_SKIP, %rsp # leave space for volatiles
23680- CFI_ADJUST_CFA_OFFSET REST_SKIP
23681 movq %rbp, %rdi
23682 call *%rbx
23683 movl $0, RAX(%rsp)
23684 RESTORE_REST
23685 jmp int_ret_from_sys_call
23686 CFI_ENDPROC
23687-END(ret_from_fork)
23688+ENDPROC(ret_from_fork)
23689
23690 /*
23691 * System call entry. Up to 6 arguments in registers are supported.
23692@@ -389,7 +849,7 @@ END(ret_from_fork)
23693 ENTRY(system_call)
23694 CFI_STARTPROC simple
23695 CFI_SIGNAL_FRAME
23696- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23697+ CFI_DEF_CFA rsp,0
23698 CFI_REGISTER rip,rcx
23699 /*CFI_REGISTER rflags,r11*/
23700 SWAPGS_UNSAFE_STACK
23701@@ -402,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
23702
23703 movq %rsp,PER_CPU_VAR(old_rsp)
23704 movq PER_CPU_VAR(kernel_stack),%rsp
23705+ SAVE_ARGS 8*6, 0, rax_enosys=1
23706+ pax_enter_kernel_user
23707+
23708+#ifdef CONFIG_PAX_RANDKSTACK
23709+ pax_erase_kstack
23710+#endif
23711+
23712 /*
23713 * No need to follow this irqs off/on section - it's straight
23714 * and short:
23715 */
23716 ENABLE_INTERRUPTS(CLBR_NONE)
23717- SAVE_ARGS 8, 0, rax_enosys=1
23718 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23719 movq %rcx,RIP-ARGOFFSET(%rsp)
23720 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23721- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23722+ GET_THREAD_INFO(%rcx)
23723+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23724 jnz tracesys
23725 system_call_fastpath:
23726 #if __SYSCALL_MASK == ~0
23727@@ -435,10 +902,13 @@ sysret_check:
23728 LOCKDEP_SYS_EXIT
23729 DISABLE_INTERRUPTS(CLBR_NONE)
23730 TRACE_IRQS_OFF
23731- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23732+ GET_THREAD_INFO(%rcx)
23733+ movl TI_flags(%rcx),%edx
23734 andl %edi,%edx
23735 jnz sysret_careful
23736 CFI_REMEMBER_STATE
23737+ pax_exit_kernel_user
23738+ pax_erase_kstack
23739 /*
23740 * sysretq will re-enable interrupts:
23741 */
23742@@ -497,12 +967,15 @@ sysret_audit:
23743
23744 /* Do syscall tracing */
23745 tracesys:
23746- leaq -REST_SKIP(%rsp), %rdi
23747+ movq %rsp, %rdi
23748 movq $AUDIT_ARCH_X86_64, %rsi
23749 call syscall_trace_enter_phase1
23750 test %rax, %rax
23751 jnz tracesys_phase2 /* if needed, run the slow path */
23752- LOAD_ARGS 0 /* else restore clobbered regs */
23753+
23754+ pax_erase_kstack
23755+
23756+ LOAD_ARGS /* else restore clobbered regs */
23757 jmp system_call_fastpath /* and return to the fast path */
23758
23759 tracesys_phase2:
23760@@ -513,12 +986,14 @@ tracesys_phase2:
23761 movq %rax,%rdx
23762 call syscall_trace_enter_phase2
23763
23764+ pax_erase_kstack
23765+
23766 /*
23767 * Reload arg registers from stack in case ptrace changed them.
23768 * We don't reload %rax because syscall_trace_enter_phase2() returned
23769 * the value it wants us to use in the table lookup.
23770 */
23771- LOAD_ARGS ARGOFFSET, 1
23772+ LOAD_ARGS 1
23773 RESTORE_REST
23774 #if __SYSCALL_MASK == ~0
23775 cmpq $__NR_syscall_max,%rax
23776@@ -548,7 +1023,9 @@ GLOBAL(int_with_check)
23777 andl %edi,%edx
23778 jnz int_careful
23779 andl $~TS_COMPAT,TI_status(%rcx)
23780- jmp retint_swapgs
23781+ pax_exit_kernel_user
23782+ pax_erase_kstack
23783+ jmp retint_swapgs_pax
23784
23785 /* Either reschedule or signal or syscall exit tracking needed. */
23786 /* First do a reschedule test. */
23787@@ -594,7 +1071,7 @@ int_restore_rest:
23788 TRACE_IRQS_OFF
23789 jmp int_with_check
23790 CFI_ENDPROC
23791-END(system_call)
23792+ENDPROC(system_call)
23793
23794 .macro FORK_LIKE func
23795 ENTRY(stub_\func)
23796@@ -607,9 +1084,10 @@ ENTRY(stub_\func)
23797 DEFAULT_FRAME 0 8 /* offset 8: return address */
23798 call sys_\func
23799 RESTORE_TOP_OF_STACK %r11, 8
23800- ret $REST_SKIP /* pop extended registers */
23801+ pax_force_retaddr
23802+ ret
23803 CFI_ENDPROC
23804-END(stub_\func)
23805+ENDPROC(stub_\func)
23806 .endm
23807
23808 .macro FIXED_FRAME label,func
23809@@ -619,9 +1097,10 @@ ENTRY(\label)
23810 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23811 call \func
23812 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23813+ pax_force_retaddr
23814 ret
23815 CFI_ENDPROC
23816-END(\label)
23817+ENDPROC(\label)
23818 .endm
23819
23820 FORK_LIKE clone
23821@@ -629,19 +1108,6 @@ END(\label)
23822 FORK_LIKE vfork
23823 FIXED_FRAME stub_iopl, sys_iopl
23824
23825-ENTRY(ptregscall_common)
23826- DEFAULT_FRAME 1 8 /* offset 8: return address */
23827- RESTORE_TOP_OF_STACK %r11, 8
23828- movq_cfi_restore R15+8, r15
23829- movq_cfi_restore R14+8, r14
23830- movq_cfi_restore R13+8, r13
23831- movq_cfi_restore R12+8, r12
23832- movq_cfi_restore RBP+8, rbp
23833- movq_cfi_restore RBX+8, rbx
23834- ret $REST_SKIP /* pop extended registers */
23835- CFI_ENDPROC
23836-END(ptregscall_common)
23837-
23838 ENTRY(stub_execve)
23839 CFI_STARTPROC
23840 addq $8, %rsp
23841@@ -653,7 +1119,7 @@ ENTRY(stub_execve)
23842 RESTORE_REST
23843 jmp int_ret_from_sys_call
23844 CFI_ENDPROC
23845-END(stub_execve)
23846+ENDPROC(stub_execve)
23847
23848 ENTRY(stub_execveat)
23849 CFI_STARTPROC
23850@@ -667,7 +1133,7 @@ ENTRY(stub_execveat)
23851 RESTORE_REST
23852 jmp int_ret_from_sys_call
23853 CFI_ENDPROC
23854-END(stub_execveat)
23855+ENDPROC(stub_execveat)
23856
23857 /*
23858 * sigreturn is special because it needs to restore all registers on return.
23859@@ -684,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
23860 RESTORE_REST
23861 jmp int_ret_from_sys_call
23862 CFI_ENDPROC
23863-END(stub_rt_sigreturn)
23864+ENDPROC(stub_rt_sigreturn)
23865
23866 #ifdef CONFIG_X86_X32_ABI
23867 ENTRY(stub_x32_rt_sigreturn)
23868@@ -698,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
23869 RESTORE_REST
23870 jmp int_ret_from_sys_call
23871 CFI_ENDPROC
23872-END(stub_x32_rt_sigreturn)
23873+ENDPROC(stub_x32_rt_sigreturn)
23874
23875 ENTRY(stub_x32_execve)
23876 CFI_STARTPROC
23877@@ -763,7 +1229,7 @@ vector=vector+1
23878 2: jmp common_interrupt
23879 .endr
23880 CFI_ENDPROC
23881-END(irq_entries_start)
23882+ENDPROC(irq_entries_start)
23883
23884 .previous
23885 END(interrupt)
23886@@ -780,8 +1246,8 @@ END(interrupt)
23887 /* 0(%rsp): ~(interrupt number) */
23888 .macro interrupt func
23889 /* reserve pt_regs for scratch regs and rbp */
23890- subq $ORIG_RAX-RBP, %rsp
23891- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23892+ subq $ORIG_RAX, %rsp
23893+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23894 SAVE_ARGS_IRQ
23895 call \func
23896 .endm
23897@@ -804,14 +1270,14 @@ ret_from_intr:
23898
23899 /* Restore saved previous stack */
23900 popq %rsi
23901- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23902- leaq ARGOFFSET-RBP(%rsi), %rsp
23903+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23904+ movq %rsi, %rsp
23905 CFI_DEF_CFA_REGISTER rsp
23906- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23907+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23908
23909 exit_intr:
23910 GET_THREAD_INFO(%rcx)
23911- testl $3,CS-ARGOFFSET(%rsp)
23912+ testb $3,CS-ARGOFFSET(%rsp)
23913 je retint_kernel
23914
23915 /* Interrupt came from user space */
23916@@ -833,12 +1299,35 @@ retint_swapgs: /* return to user-space */
23917 * The iretq could re-enable interrupts:
23918 */
23919 DISABLE_INTERRUPTS(CLBR_ANY)
23920+ pax_exit_kernel_user
23921+retint_swapgs_pax:
23922 TRACE_IRQS_IRETQ
23923 SWAPGS
23924 jmp restore_args
23925
23926 retint_restore_args: /* return to kernel space */
23927 DISABLE_INTERRUPTS(CLBR_ANY)
23928+ pax_exit_kernel
23929+
23930+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23931+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23932+	 * namely calling EFI runtime services with a phys mapping. We start
23933+	 * off with NOPs and patch in the real instrumentation (BTS/OR)
23934+	 * before starting any userland process, even before bringing up
23935+	 * the APs.
23936+ */
23937+ .pushsection .altinstr_replacement, "a"
23938+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23939+ 602:
23940+ .popsection
23941+ 603: .fill 602b-601b, 1, 0x90
23942+ .pushsection .altinstructions, "a"
23943+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23944+ .popsection
23945+#else
23946+ pax_force_retaddr (RIP-ARGOFFSET)
23947+#endif
23948+
23949 /*
23950 * The iretq could re-enable interrupts:
23951 */
23952@@ -876,15 +1365,15 @@ native_irq_return_ldt:
23953 SWAPGS
23954 movq PER_CPU_VAR(espfix_waddr),%rdi
23955 movq %rax,(0*8)(%rdi) /* RAX */
23956- movq (2*8)(%rsp),%rax /* RIP */
23957+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23958 movq %rax,(1*8)(%rdi)
23959- movq (3*8)(%rsp),%rax /* CS */
23960+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23961 movq %rax,(2*8)(%rdi)
23962- movq (4*8)(%rsp),%rax /* RFLAGS */
23963+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23964 movq %rax,(3*8)(%rdi)
23965- movq (6*8)(%rsp),%rax /* SS */
23966+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23967 movq %rax,(5*8)(%rdi)
23968- movq (5*8)(%rsp),%rax /* RSP */
23969+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23970 movq %rax,(4*8)(%rdi)
23971 andl $0xffff0000,%eax
23972 popq_cfi %rdi
23973@@ -938,7 +1427,7 @@ ENTRY(retint_kernel)
23974 jmp exit_intr
23975 #endif
23976 CFI_ENDPROC
23977-END(common_interrupt)
23978+ENDPROC(common_interrupt)
23979
23980 /*
23981 * APIC interrupts.
23982@@ -952,7 +1441,7 @@ ENTRY(\sym)
23983 interrupt \do_sym
23984 jmp ret_from_intr
23985 CFI_ENDPROC
23986-END(\sym)
23987+ENDPROC(\sym)
23988 .endm
23989
23990 #ifdef CONFIG_TRACING
23991@@ -1025,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23992 /*
23993 * Exception entry points.
23994 */
23995-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23996+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23997
23998 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23999 ENTRY(\sym)
24000@@ -1076,6 +1565,12 @@ ENTRY(\sym)
24001 .endif
24002
24003 .if \shift_ist != -1
24004+#ifdef CONFIG_SMP
24005+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24006+ lea init_tss(%r13), %r13
24007+#else
24008+ lea init_tss(%rip), %r13
24009+#endif
24010 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24011 .endif
24012
24013@@ -1092,7 +1587,7 @@ ENTRY(\sym)
24014 .endif
24015
24016 CFI_ENDPROC
24017-END(\sym)
24018+ENDPROC(\sym)
24019 .endm
24020
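
Upstream resolves INIT_TSS_IST through the per-CPU init_tss; the hunk above instead materialises the address in %r13, because this patch turns init_tss into a plain NR_CPUS-sized array: on SMP the slot is cpu_number * TSS_size past the array base, on UP it is the array itself. The equivalent pointer arithmetic in C, with a stand-in size for struct tss_struct:

/* hypothetical stand-in; the real struct tss_struct comes from
 * processor.h and TSS_size from asm-offsets */
struct tss_struct_sketch { char pad[8192]; };

static struct tss_struct_sketch *
this_cpu_tss_sketch(struct tss_struct_sketch *init_tss, int cpu_number)
{
	return &init_tss[cpu_number]; /* imul $TSS_size,...; lea init_tss(%r13) */
}
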
24021 #ifdef CONFIG_TRACING
24022@@ -1133,9 +1628,10 @@ gs_change:
24023 2: mfence /* workaround */
24024 SWAPGS
24025 popfq_cfi
24026+ pax_force_retaddr
24027 ret
24028 CFI_ENDPROC
24029-END(native_load_gs_index)
24030+ENDPROC(native_load_gs_index)
24031
24032 _ASM_EXTABLE(gs_change,bad_gs)
24033 .section .fixup,"ax"
24034@@ -1163,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
24035 CFI_DEF_CFA_REGISTER rsp
24036 CFI_ADJUST_CFA_OFFSET -8
24037 decl PER_CPU_VAR(irq_count)
24038+ pax_force_retaddr
24039 ret
24040 CFI_ENDPROC
24041-END(do_softirq_own_stack)
24042+ENDPROC(do_softirq_own_stack)
24043
24044 #ifdef CONFIG_XEN
24045 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24046@@ -1203,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24047 decl PER_CPU_VAR(irq_count)
24048 jmp error_exit
24049 CFI_ENDPROC
24050-END(xen_do_hypervisor_callback)
24051+ENDPROC(xen_do_hypervisor_callback)
24052
24053 /*
24054 * Hypervisor uses this for application faults while it executes.
24055@@ -1262,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
24056 SAVE_ALL
24057 jmp error_exit
24058 CFI_ENDPROC
24059-END(xen_failsafe_callback)
24060+ENDPROC(xen_failsafe_callback)
24061
24062 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24063 xen_hvm_callback_vector xen_evtchn_do_upcall
24064@@ -1309,18 +1806,33 @@ ENTRY(paranoid_exit)
24065 DEFAULT_FRAME
24066 DISABLE_INTERRUPTS(CLBR_NONE)
24067 TRACE_IRQS_OFF_DEBUG
24068- testl %ebx,%ebx /* swapgs needed? */
24069+ testl $1,%ebx /* swapgs needed? */
24070 jnz paranoid_restore
24071- testl $3,CS(%rsp)
24072+ testb $3,CS(%rsp)
24073 jnz paranoid_userspace
24074+#ifdef CONFIG_PAX_MEMORY_UDEREF
24075+ pax_exit_kernel
24076+ TRACE_IRQS_IRETQ 0
24077+ SWAPGS_UNSAFE_STACK
24078+ RESTORE_ALL 8
24079+ pax_force_retaddr_bts
24080+ jmp irq_return
24081+#endif
24082 paranoid_swapgs:
24083+#ifdef CONFIG_PAX_MEMORY_UDEREF
24084+ pax_exit_kernel_user
24085+#else
24086+ pax_exit_kernel
24087+#endif
24088 TRACE_IRQS_IRETQ 0
24089 SWAPGS_UNSAFE_STACK
24090 RESTORE_ALL 8
24091 jmp irq_return
24092 paranoid_restore:
24093+ pax_exit_kernel
24094 TRACE_IRQS_IRETQ_DEBUG 0
24095 RESTORE_ALL 8
24096+ pax_force_retaddr_bts
24097 jmp irq_return
24098 paranoid_userspace:
24099 GET_THREAD_INFO(%rcx)
24100@@ -1349,7 +1861,7 @@ paranoid_schedule:
24101 TRACE_IRQS_OFF
24102 jmp paranoid_userspace
24103 CFI_ENDPROC
24104-END(paranoid_exit)
24105+ENDPROC(paranoid_exit)
24106
24107 /*
24108 * Exception entry point. This expects an error code/orig_rax on the stack.
24109@@ -1376,12 +1888,23 @@ ENTRY(error_entry)
24110 movq %r14, R14+8(%rsp)
24111 movq %r15, R15+8(%rsp)
24112 xorl %ebx,%ebx
24113- testl $3,CS+8(%rsp)
24114+ testb $3,CS+8(%rsp)
24115 je error_kernelspace
24116 error_swapgs:
24117 SWAPGS
24118 error_sti:
24119+#ifdef CONFIG_PAX_MEMORY_UDEREF
24120+ testb $3, CS+8(%rsp)
24121+ jnz 1f
24122+ pax_enter_kernel
24123+ jmp 2f
24124+1: pax_enter_kernel_user
24125+2:
24126+#else
24127+ pax_enter_kernel
24128+#endif
24129 TRACE_IRQS_OFF
24130+ pax_force_retaddr
24131 ret
24132
24133 /*
24134@@ -1416,7 +1939,7 @@ error_bad_iret:
24135 decl %ebx /* Return to usergs */
24136 jmp error_sti
24137 CFI_ENDPROC
24138-END(error_entry)
24139+ENDPROC(error_entry)
24140
24141
24142 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24143@@ -1427,7 +1950,7 @@ ENTRY(error_exit)
24144 DISABLE_INTERRUPTS(CLBR_NONE)
24145 TRACE_IRQS_OFF
24146 GET_THREAD_INFO(%rcx)
24147- testl %eax,%eax
24148+ testl $1,%eax
24149 jne retint_kernel
24150 LOCKDEP_SYS_EXIT_IRQ
24151 movl TI_flags(%rcx),%edx
24152@@ -1436,7 +1959,7 @@ ENTRY(error_exit)
24153 jnz retint_careful
24154 jmp retint_swapgs
24155 CFI_ENDPROC
24156-END(error_exit)
24157+ENDPROC(error_exit)
24158
24159 /*
24160 * Test if a given stack is an NMI stack or not.
24161@@ -1494,9 +2017,11 @@ ENTRY(nmi)
24162 * If %cs was not the kernel segment, then the NMI triggered in user
24163 * space, which means it is definitely not nested.
24164 */
24165+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24166+ je 1f
24167 cmpl $__KERNEL_CS, 16(%rsp)
24168 jne first_nmi
24169-
24170+1:
24171 /*
24172 * Check the special variable on the stack to see if NMIs are
24173 * executing.
24174@@ -1530,8 +2055,7 @@ nested_nmi:
24175
24176 1:
24177 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24178- leaq -1*8(%rsp), %rdx
24179- movq %rdx, %rsp
24180+ subq $8, %rsp
24181 CFI_ADJUST_CFA_OFFSET 1*8
24182 leaq -10*8(%rsp), %rdx
24183 pushq_cfi $__KERNEL_DS
24184@@ -1549,6 +2073,7 @@ nested_nmi_out:
24185 CFI_RESTORE rdx
24186
24187 /* No need to check faults here */
24188+# pax_force_retaddr_bts
24189 INTERRUPT_RETURN
24190
24191 CFI_RESTORE_STATE
24192@@ -1645,13 +2170,13 @@ end_repeat_nmi:
24193 subq $ORIG_RAX-R15, %rsp
24194 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24195 /*
24196- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24197+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24198 * as we should not be calling schedule in NMI context.
24199 * Even with normal interrupts enabled. An NMI should not be
24200 * setting NEED_RESCHED or anything that normal interrupts and
24201 * exceptions might do.
24202 */
24203- call save_paranoid
24204+ call save_paranoid_nmi
24205 DEFAULT_FRAME 0
24206
24207 /*
24208@@ -1661,9 +2186,9 @@ end_repeat_nmi:
24209 * NMI itself takes a page fault, the page fault that was preempted
24210 * will read the information from the NMI page fault and not the
24211 * origin fault. Save it off and restore it if it changes.
24212- * Use the r12 callee-saved register.
24213+ * Use the r13 callee-saved register.
24214 */
24215- movq %cr2, %r12
24216+ movq %cr2, %r13
24217
24218 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24219 movq %rsp,%rdi
24220@@ -1672,29 +2197,34 @@ end_repeat_nmi:
24221
24222 /* Did the NMI take a page fault? Restore cr2 if it did */
24223 movq %cr2, %rcx
24224- cmpq %rcx, %r12
24225+ cmpq %rcx, %r13
24226 je 1f
24227- movq %r12, %cr2
24228+ movq %r13, %cr2
24229 1:
24230
24231- testl %ebx,%ebx /* swapgs needed? */
24232+ testl $1,%ebx /* swapgs needed? */
24233 jnz nmi_restore
24234 nmi_swapgs:
24235 SWAPGS_UNSAFE_STACK
24236 nmi_restore:
24237+ pax_exit_kernel_nmi
24238 /* Pop the extra iret frame at once */
24239 RESTORE_ALL 6*8
24240+ testb $3, 8(%rsp)
24241+ jnz 1f
24242+ pax_force_retaddr_bts
24243+1:
24244
24245 /* Clear the NMI executing stack variable */
24246 movq $0, 5*8(%rsp)
24247 jmp irq_return
24248 CFI_ENDPROC
24249-END(nmi)
24250+ENDPROC(nmi)
24251
24252 ENTRY(ignore_sysret)
24253 CFI_STARTPROC
24254 mov $-ENOSYS,%eax
24255 sysret
24256 CFI_ENDPROC
24257-END(ignore_sysret)
24258+ENDPROC(ignore_sysret)
24259
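
One detail that recurs throughout the entry_64.S hunks: "testl $3, CS(...)" becomes "testb $3, CS(...)". Only the two low bits of the saved CS matter, since they hold the privilege level of the interrupted context, and the byte form has a shorter encoding. The check, restated:

#include <stdint.h>

/* RPL/CPL live in bits 0-1 of the saved CS selector */
static int came_from_user_sketch(uint64_t saved_cs)
{
	return (saved_cs & 3) != 0;   /* 0 = kernel, 3 = user */
}
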
24260diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24261index f5d0730..5bce89c 100644
24262--- a/arch/x86/kernel/espfix_64.c
24263+++ b/arch/x86/kernel/espfix_64.c
24264@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24265 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24266 static void *espfix_pages[ESPFIX_MAX_PAGES];
24267
24268-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24269- __aligned(PAGE_SIZE);
24270+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24271
24272 static unsigned int page_random, slot_random;
24273
24274@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24275 void __init init_espfix_bsp(void)
24276 {
24277 pgd_t *pgd_p;
24278+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24279
24280 /* Install the espfix pud into the kernel page directory */
24281- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24282+ pgd_p = &init_level4_pgt[index];
24283 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24284
24285+#ifdef CONFIG_PAX_PER_CPU_PGD
24286+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24287+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24288+#endif
24289+
24290 /* Randomize the locations */
24291 init_espfix_random();
24292
24293@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24294 set_pte(&pte_p[n*PTE_STRIDE], pte);
24295
24296 /* Job is done for this CPU and any CPU which shares this page */
24297- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24298+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24299
24300 unlock_done:
24301 mutex_unlock(&espfix_init_mutex);
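
With PAX_PER_CPU_PGD every CPU runs on its own kernel and user page-global directories rather than on swapper_pg_dir alone, so installing the espfix pud into init_level4_pgt is not enough; the two clone_pgd_range() calls above mirror that one PGD slot into both per-CPU flavours. clone_pgd_range() itself is just a typed memcpy of PGD entries:

#include <stdint.h>
#include <string.h>

/* same shape as the kernel's clone_pgd_range() helper */
static inline void clone_pgd_range_sketch(uint64_t *dst,
					  const uint64_t *src, int count)
{
	memcpy(dst, src, count * sizeof(*dst));
}
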
24302diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24303index 8b7b0a5..2395f29 100644
24304--- a/arch/x86/kernel/ftrace.c
24305+++ b/arch/x86/kernel/ftrace.c
24306@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24307 * kernel identity mapping to modify code.
24308 */
24309 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24310- ip = (unsigned long)__va(__pa_symbol(ip));
24311+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24312
24313 return ip;
24314 }
24315@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24316 {
24317 unsigned char replaced[MCOUNT_INSN_SIZE];
24318
24319+ ip = ktla_ktva(ip);
24320+
24321 /*
24322 * Note: Due to modules and __init, code can
24323 * disappear and change, we need to protect against faulting
24324@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24325 unsigned char old[MCOUNT_INSN_SIZE];
24326 int ret;
24327
24328- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24329+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24330
24331 ftrace_update_func = ip;
24332 /* Make sure the breakpoints see the ftrace_update_func update */
24333@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24334 unsigned char replaced[MCOUNT_INSN_SIZE];
24335 unsigned char brk = BREAKPOINT_INSTRUCTION;
24336
24337- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24338+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24339 return -EFAULT;
24340
24341 /* Make sure it is what we expect it to be */
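
All three ftrace hunks route text addresses through ktla_ktva() before they are read or written as data. Under KERNEXEC on i386 the kernel's executable mapping is shifted relative to the linear one, so an address taken from a code pointer must be rebased before probe_kernel_read()/memcpy() can touch the backing bytes; on configurations without the shift the macro degenerates to the identity. A schematic definition (the offset symbol is the patch's; the #ifdef name here is illustrative):

#ifdef KERNEXEC_TEXT_SHIFT_SKETCH
# define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
#else
# define ktla_ktva(addr) (addr)   /* identity when text is not shifted */
#endif
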
24342diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24343index eda1a86..8f6df48 100644
24344--- a/arch/x86/kernel/head64.c
24345+++ b/arch/x86/kernel/head64.c
24346@@ -67,12 +67,12 @@ again:
24347 pgd = *pgd_p;
24348
24349 /*
24350- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24351- * critical -- __PAGE_OFFSET would point us back into the dynamic
24352+ * The use of __early_va rather than __va here is critical:
24353+ * __va would point us back into the dynamic
24354 * range and we might end up looping forever...
24355 */
24356 if (pgd)
24357- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24358+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24359 else {
24360 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24361 reset_early_page_tables();
24362@@ -82,13 +82,13 @@ again:
24363 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24364 for (i = 0; i < PTRS_PER_PUD; i++)
24365 pud_p[i] = 0;
24366- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24367+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24368 }
24369 pud_p += pud_index(address);
24370 pud = *pud_p;
24371
24372 if (pud)
24373- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24374+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24375 else {
24376 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24377 reset_early_page_tables();
24378@@ -98,7 +98,7 @@ again:
24379 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24380 for (i = 0; i < PTRS_PER_PMD; i++)
24381 pmd_p[i] = 0;
24382- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24383+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24384 }
24385 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24386 pmd_p[pmd_index(address)] = pmd;
24387@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24388 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24389 early_printk("Kernel alive\n");
24390
24391- clear_page(init_level4_pgt);
24392 /* set init_level4_pgt kernel high mapping*/
24393 init_level4_pgt[511] = early_level4_pgt[511];
24394
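
The arithmetic being replaced, phys + __START_KERNEL_map - phys_base, translates a physical page-table address back to its kernel-image virtual alias, the only mapping guaranteed to exist this early; the hunk names it through the __early_va() helper instead of open-coding it. Restated over stand-in parameters (assuming __early_va resolves through the kernel-image mapping exactly as the removed expression did):

#include <stdint.h>

/* phys -> kernel-image virtual address, valid before the direct map
 * exists; start_kernel_map/phys_base stand in for the real symbols */
static inline void *early_kernel_va_sketch(uint64_t phys,
					   uint64_t start_kernel_map,
					   uint64_t phys_base)
{
	return (void *)(phys + start_kernel_map - phys_base);
}
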
24395diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24396index f36bd42..0ab4474 100644
24397--- a/arch/x86/kernel/head_32.S
24398+++ b/arch/x86/kernel/head_32.S
24399@@ -26,6 +26,12 @@
24400 /* Physical address */
24401 #define pa(X) ((X) - __PAGE_OFFSET)
24402
24403+#ifdef CONFIG_PAX_KERNEXEC
24404+#define ta(X) (X)
24405+#else
24406+#define ta(X) ((X) - __PAGE_OFFSET)
24407+#endif
24408+
24409 /*
24410 * References to members of the new_cpu_data structure.
24411 */
24412@@ -55,11 +61,7 @@
24413 * and smaller than max_low_pfn, otherwise it will waste some page table entries
24414 */
24415
24416-#if PTRS_PER_PMD > 1
24417-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24418-#else
24419-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24420-#endif
24421+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24422
24423 /* Number of possible pages in the lowmem region */
24424 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24425@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24426 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24427
24428 /*
24429+ * Real beginning of normal "text" segment
24430+ */
24431+ENTRY(stext)
24432+ENTRY(_stext)
24433+
24434+/*
24435 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24436 * %esi points to the real-mode code as a 32-bit pointer.
24437 * CS and DS must be 4 GB flat segments, but we don't depend on
24438@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24439 * can.
24440 */
24441 __HEAD
24442+
24443+#ifdef CONFIG_PAX_KERNEXEC
24444+ jmp startup_32
24445+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24446+.fill PAGE_SIZE-5,1,0xcc
24447+#endif
24448+
24449 ENTRY(startup_32)
24450 movl pa(stack_start),%ecx
24451
24452@@ -106,6 +121,59 @@ ENTRY(startup_32)
24453 2:
24454 leal -__PAGE_OFFSET(%ecx),%esp
24455
24456+#ifdef CONFIG_SMP
24457+ movl $pa(cpu_gdt_table),%edi
24458+ movl $__per_cpu_load,%eax
24459+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24460+ rorl $16,%eax
24461+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24462+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24463+ movl $__per_cpu_end - 1,%eax
24464+ subl $__per_cpu_start,%eax
24465+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24466+#endif
24467+
24468+#ifdef CONFIG_PAX_MEMORY_UDEREF
24469+ movl $NR_CPUS,%ecx
24470+ movl $pa(cpu_gdt_table),%edi
24471+1:
24472+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24473+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24474+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24475+ addl $PAGE_SIZE_asm,%edi
24476+ loop 1b
24477+#endif
24478+
24479+#ifdef CONFIG_PAX_KERNEXEC
24480+ movl $pa(boot_gdt),%edi
24481+ movl $__LOAD_PHYSICAL_ADDR,%eax
24482+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24483+ rorl $16,%eax
24484+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24485+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24486+ rorl $16,%eax
24487+
24488+ ljmp $(__BOOT_CS),$1f
24489+1:
24490+
24491+ movl $NR_CPUS,%ecx
24492+ movl $pa(cpu_gdt_table),%edi
24493+ addl $__PAGE_OFFSET,%eax
24494+1:
24495+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24496+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24497+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24498+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24499+ rorl $16,%eax
24500+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24501+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24502+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24503+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24504+ rorl $16,%eax
24505+ addl $PAGE_SIZE_asm,%edi
24506+ loop 1b
24507+#endif
24508+
24509 /*
24510 * Clear BSS first so that there are no surprises...
24511 */
24512@@ -201,8 +269,11 @@ ENTRY(startup_32)
24513 movl %eax, pa(max_pfn_mapped)
24514
24515 /* Do early initialization of the fixmap area */
24516- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24517- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24518+#ifdef CONFIG_COMPAT_VDSO
24519+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24520+#else
24521+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24522+#endif
24523 #else /* Not PAE */
24524
24525 page_pde_offset = (__PAGE_OFFSET >> 20);
24526@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24527 movl %eax, pa(max_pfn_mapped)
24528
24529 /* Do early initialization of the fixmap area */
24530- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24531- movl %eax,pa(initial_page_table+0xffc)
24532+#ifdef CONFIG_COMPAT_VDSO
24533+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24534+#else
24535+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24536+#endif
24537 #endif
24538
24539 #ifdef CONFIG_PARAVIRT
24540@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24541 cmpl $num_subarch_entries, %eax
24542 jae bad_subarch
24543
24544- movl pa(subarch_entries)(,%eax,4), %eax
24545- subl $__PAGE_OFFSET, %eax
24546- jmp *%eax
24547+ jmp *pa(subarch_entries)(,%eax,4)
24548
24549 bad_subarch:
24550 WEAK(lguest_entry)
24551@@ -261,10 +333,10 @@ WEAK(xen_entry)
24552 __INITDATA
24553
24554 subarch_entries:
24555- .long default_entry /* normal x86/PC */
24556- .long lguest_entry /* lguest hypervisor */
24557- .long xen_entry /* Xen hypervisor */
24558- .long default_entry /* Moorestown MID */
24559+ .long ta(default_entry) /* normal x86/PC */
24560+ .long ta(lguest_entry) /* lguest hypervisor */
24561+ .long ta(xen_entry) /* Xen hypervisor */
24562+ .long ta(default_entry) /* Moorestown MID */
24563 num_subarch_entries = (. - subarch_entries) / 4
24564 .previous
24565 #else
24566@@ -354,6 +426,7 @@ default_entry:
24567 movl pa(mmu_cr4_features),%eax
24568 movl %eax,%cr4
24569
24570+#ifdef CONFIG_X86_PAE
24571 testb $X86_CR4_PAE, %al # check if PAE is enabled
24572 jz enable_paging
24573
24574@@ -382,6 +455,9 @@ default_entry:
24575 /* Make changes effective */
24576 wrmsr
24577
24578+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24579+#endif
24580+
24581 enable_paging:
24582
24583 /*
24584@@ -449,14 +525,20 @@ is486:
24585 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24586 movl %eax,%ss # after changing gdt.
24587
24588- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24589+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24590 movl %eax,%ds
24591 movl %eax,%es
24592
24593 movl $(__KERNEL_PERCPU), %eax
24594 movl %eax,%fs # set this cpu's percpu
24595
24596+#ifdef CONFIG_CC_STACKPROTECTOR
24597 movl $(__KERNEL_STACK_CANARY),%eax
24598+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24599+ movl $(__USER_DS),%eax
24600+#else
24601+ xorl %eax,%eax
24602+#endif
24603 movl %eax,%gs
24604
24605 xorl %eax,%eax # Clear LDT
24606@@ -512,8 +594,11 @@ setup_once:
24607 * relocation. Manually set base address in stack canary
24608 * segment descriptor.
24609 */
24610- movl $gdt_page,%eax
24611+ movl $cpu_gdt_table,%eax
24612 movl $stack_canary,%ecx
24613+#ifdef CONFIG_SMP
24614+ addl $__per_cpu_load,%ecx
24615+#endif
24616 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24617 shrl $16, %ecx
24618 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24619@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24620 cmpl $2,(%esp) # X86_TRAP_NMI
24621 je is_nmi # Ignore NMI
24622
24623- cmpl $2,%ss:early_recursion_flag
24624+ cmpl $1,%ss:early_recursion_flag
24625 je hlt_loop
24626 incl %ss:early_recursion_flag
24627
24628@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24629 pushl (20+6*4)(%esp) /* trapno */
24630 pushl $fault_msg
24631 call printk
24632-#endif
24633 call dump_stack
24634+#endif
24635 hlt_loop:
24636 hlt
24637 jmp hlt_loop
24638@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24639 /* This is the default interrupt "handler" :-) */
24640 ALIGN
24641 ignore_int:
24642- cld
24643 #ifdef CONFIG_PRINTK
24644+ cmpl $2,%ss:early_recursion_flag
24645+ je hlt_loop
24646+ incl %ss:early_recursion_flag
24647+ cld
24648 pushl %eax
24649 pushl %ecx
24650 pushl %edx
24651@@ -617,9 +705,6 @@ ignore_int:
24652 movl $(__KERNEL_DS),%eax
24653 movl %eax,%ds
24654 movl %eax,%es
24655- cmpl $2,early_recursion_flag
24656- je hlt_loop
24657- incl early_recursion_flag
24658 pushl 16(%esp)
24659 pushl 24(%esp)
24660 pushl 32(%esp)
24661@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24662 /*
24663 * BSS section
24664 */
24665-__PAGE_ALIGNED_BSS
24666- .align PAGE_SIZE
24667 #ifdef CONFIG_X86_PAE
24668+.section .initial_pg_pmd,"a",@progbits
24669 initial_pg_pmd:
24670 .fill 1024*KPMDS,4,0
24671 #else
24672+.section .initial_page_table,"a",@progbits
24673 ENTRY(initial_page_table)
24674 .fill 1024,4,0
24675 #endif
24676+.section .initial_pg_fixmap,"a",@progbits
24677 initial_pg_fixmap:
24678 .fill 1024,4,0
24679+.section .empty_zero_page,"a",@progbits
24680 ENTRY(empty_zero_page)
24681 .fill 4096,1,0
24682+.section .swapper_pg_dir,"a",@progbits
24683 ENTRY(swapper_pg_dir)
24684+#ifdef CONFIG_X86_PAE
24685+ .fill 4,8,0
24686+#else
24687 .fill 1024,4,0
24688+#endif
24689
24690 /*
24691 * This starts the data section.
24692 */
24693 #ifdef CONFIG_X86_PAE
24694-__PAGE_ALIGNED_DATA
24695- /* Page-aligned for the benefit of paravirt? */
24696- .align PAGE_SIZE
24697+.section .initial_page_table,"a",@progbits
24698 ENTRY(initial_page_table)
24699 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24700 # if KPMDS == 3
24701@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24702 # error "Kernel PMDs should be 1, 2 or 3"
24703 # endif
24704 .align PAGE_SIZE /* needs to be page-sized too */
24705+
24706+#ifdef CONFIG_PAX_PER_CPU_PGD
24707+ENTRY(cpu_pgd)
24708+ .rept 2*NR_CPUS
24709+ .fill 4,8,0
24710+ .endr
24711+#endif
24712+
24713 #endif
24714
24715 .data
24716 .balign 4
24717 ENTRY(stack_start)
24718- .long init_thread_union+THREAD_SIZE
24719+ .long init_thread_union+THREAD_SIZE-8
24720
24721 __INITRODATA
24722 int_msg:
24723@@ -727,7 +825,7 @@ fault_msg:
24724 * segment size, and 32-bit linear address value:
24725 */
24726
24727- .data
24728+.section .rodata,"a",@progbits
24729 .globl boot_gdt_descr
24730 .globl idt_descr
24731
24732@@ -736,7 +834,7 @@ fault_msg:
24733 .word 0 # 32 bit align gdt_desc.address
24734 boot_gdt_descr:
24735 .word __BOOT_DS+7
24736- .long boot_gdt - __PAGE_OFFSET
24737+ .long pa(boot_gdt)
24738
24739 .word 0 # 32-bit align idt_desc.address
24740 idt_descr:
24741@@ -747,7 +845,7 @@ idt_descr:
24742 .word 0 # 32 bit align gdt_desc.address
24743 ENTRY(early_gdt_descr)
24744 .word GDT_ENTRIES*8-1
24745- .long gdt_page /* Overwritten for secondary CPUs */
24746+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24747
24748 /*
24749 * The boot_gdt must mirror the equivalent in setup.S and is
24750@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24751 .align L1_CACHE_BYTES
24752 ENTRY(boot_gdt)
24753 .fill GDT_ENTRY_BOOT_CS,8,0
24754- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24755- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24756+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24757+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24758+
24759+ .align PAGE_SIZE_asm
24760+ENTRY(cpu_gdt_table)
24761+ .rept NR_CPUS
24762+ .quad 0x0000000000000000 /* NULL descriptor */
24763+ .quad 0x0000000000000000 /* 0x0b reserved */
24764+ .quad 0x0000000000000000 /* 0x13 reserved */
24765+ .quad 0x0000000000000000 /* 0x1b reserved */
24766+
24767+#ifdef CONFIG_PAX_KERNEXEC
24768+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24769+#else
24770+ .quad 0x0000000000000000 /* 0x20 unused */
24771+#endif
24772+
24773+ .quad 0x0000000000000000 /* 0x28 unused */
24774+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24775+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24776+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24777+ .quad 0x0000000000000000 /* 0x4b reserved */
24778+ .quad 0x0000000000000000 /* 0x53 reserved */
24779+ .quad 0x0000000000000000 /* 0x5b reserved */
24780+
24781+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24782+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24783+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24784+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24785+
24786+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24787+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24788+
24789+ /*
24790+ * Segments used for calling PnP BIOS have byte granularity.
24791+ * The code segments and data segments have fixed 64k limits,
24792+ * the transfer segment sizes are set at run time.
24793+ */
24794+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24795+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24796+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24797+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24798+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24799+
24800+ /*
24801+ * The APM segments have byte granularity and their bases
24802+ * are set at run time. All have 64k limits.
24803+ */
24804+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24805+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24806+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24807+
24808+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24809+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24810+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24811+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24812+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24813+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24814+
24815+ /* Be sure this is zeroed to avoid false validations in Xen */
24816+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24817+ .endr
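
The literal quadwords in cpu_gdt_table pack the usual segment-descriptor fields. Note also that the boot_gdt hunk flips 0x00cf9a.../0x00cf92... to 0x00cf9b.../0x00cf93...: that is the descriptor's accessed bit, preset so the CPU never needs to write it into a GDT this patch makes read-only. How the 8 bytes decompose, with a checkable example:

#include <assert.h>
#include <stdint.h>

static uint64_t gdt_entry_sketch(uint32_t base, uint32_t limit,
				 uint8_t access, uint8_t flags)
{
	uint64_t d = limit & 0xffff;                /* limit 15:0  */
	d |= (uint64_t)(base & 0xffffff) << 16;     /* base 23:0   */
	d |= (uint64_t)access << 40;                /* type/DPL/P  */
	d |= (uint64_t)((limit >> 16) & 0xf) << 48; /* limit 19:16 */
	d |= (uint64_t)(flags & 0xf) << 52;         /* G/D bits    */
	d |= (uint64_t)((base >> 24) & 0xff) << 56; /* base 31:24  */
	return d;
}

int main(void)
{
	/* kernel 4GB code: G=1,D=1 (0xc); present, DPL0, code,
	 * read/execute, accessed (0x9b); base 0; limit 0xfffff pages */
	assert(gdt_entry_sketch(0, 0xfffff, 0x9b, 0xc) == 0x00cf9b000000ffffULL);
	return 0;
}
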
24818diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24819index a468c0a..8b5a879 100644
24820--- a/arch/x86/kernel/head_64.S
24821+++ b/arch/x86/kernel/head_64.S
24822@@ -20,6 +20,8 @@
24823 #include <asm/processor-flags.h>
24824 #include <asm/percpu.h>
24825 #include <asm/nops.h>
24826+#include <asm/cpufeature.h>
24827+#include <asm/alternative-asm.h>
24828
24829 #ifdef CONFIG_PARAVIRT
24830 #include <asm/asm-offsets.h>
24831@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24832 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24833 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24834 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24835+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24836+L3_VMALLOC_START = pud_index(VMALLOC_START)
24837+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24838+L3_VMALLOC_END = pud_index(VMALLOC_END)
24839+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24840+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24841
24842 .text
24843 __HEAD
24844@@ -89,11 +97,24 @@ startup_64:
24845 * Fixup the physical addresses in the page table
24846 */
24847 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24848+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24849+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24850+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24851+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24852+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24853
24854- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24855- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24856+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24857+#ifndef CONFIG_XEN
24858+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24859+#endif
24860+
24861+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24862+
24863+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24864+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24865
24866 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24867+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24868
24869 /*
24870 * Set up the identity mapping for the switchover. These
24871@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24872 * after the boot processor executes this code.
24873 */
24874
24875+ orq $-1, %rbp
24876 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24877 1:
24878
24879- /* Enable PAE mode and PGE */
24880- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24881+ /* Enable PAE mode and PSE/PGE */
24882+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24883 movq %rcx, %cr4
24884
24885 /* Setup early boot stage 4 level pagetables. */
24886@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24887 movl $MSR_EFER, %ecx
24888 rdmsr
24889 btsl $_EFER_SCE, %eax /* Enable System Call */
24890- btl $20,%edi /* No Execute supported? */
24891+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24892 jnc 1f
24893 btsl $_EFER_NX, %eax
24894+ cmpq $-1, %rbp
24895+ je 1f
24896 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24897+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24898+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24899+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24900+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24901+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24902+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24903+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24904 1: wrmsr /* Make changes effective */
24905
24906 /* Setup cr0 */
24907@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
24908 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24909 * address given in m16:64.
24910 */
24911+ pax_set_fptr_mask
24912 movq initial_code(%rip),%rax
24913 pushq $0 # fake return address to stop unwinder
24914 pushq $__KERNEL_CS # set correct cs
24915@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
24916 .quad INIT_PER_CPU_VAR(irq_stack_union)
24917
24918 GLOBAL(stack_start)
24919- .quad init_thread_union+THREAD_SIZE-8
24920+ .quad init_thread_union+THREAD_SIZE-16
24921 .word 0
24922 __FINITDATA
24923
24924@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
24925 call dump_stack
24926 #ifdef CONFIG_KALLSYMS
24927 leaq early_idt_ripmsg(%rip),%rdi
24928- movq 40(%rsp),%rsi # %rip again
24929+ movq 88(%rsp),%rsi # %rip again
24930 call __print_symbol
24931 #endif
24932 #endif /* EARLY_PRINTK */
24933@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
24934 early_recursion_flag:
24935 .long 0
24936
24937+ .section .rodata,"a",@progbits
24938 #ifdef CONFIG_EARLY_PRINTK
24939 early_idt_msg:
24940 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24941@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
24942 NEXT_PAGE(early_dynamic_pgts)
24943 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24944
24945- .data
24946+ .section .rodata,"a",@progbits
24947
24948-#ifndef CONFIG_XEN
24949 NEXT_PAGE(init_level4_pgt)
24950- .fill 512,8,0
24951-#else
24952-NEXT_PAGE(init_level4_pgt)
24953- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24954 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24955 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24956+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24957+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24958+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24959+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24960+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24961+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24962 .org init_level4_pgt + L4_START_KERNEL*8, 0
24963 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24964 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24965
24966+#ifdef CONFIG_PAX_PER_CPU_PGD
24967+NEXT_PAGE(cpu_pgd)
24968+ .rept 2*NR_CPUS
24969+ .fill 512,8,0
24970+ .endr
24971+#endif
24972+
24973 NEXT_PAGE(level3_ident_pgt)
24974 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24975+#ifdef CONFIG_XEN
24976 .fill 511, 8, 0
24977+#else
24978+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24979+ .fill 510,8,0
24980+#endif
24981+
24982+NEXT_PAGE(level3_vmalloc_start_pgt)
24983+ .fill 512,8,0
24984+
24985+NEXT_PAGE(level3_vmalloc_end_pgt)
24986+ .fill 512,8,0
24987+
24988+NEXT_PAGE(level3_vmemmap_pgt)
24989+ .fill L3_VMEMMAP_START,8,0
24990+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24991+
24992 NEXT_PAGE(level2_ident_pgt)
24993- /* Since I easily can, map the first 1G.
24994+ /* Since I easily can, map the first 2G.
24995 * Don't set NX because code runs from these pages.
24996 */
24997- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24998-#endif
24999+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25000
25001 NEXT_PAGE(level3_kernel_pgt)
25002 .fill L3_START_KERNEL,8,0
25003@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25004 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25005 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25006
25007+NEXT_PAGE(level2_vmemmap_pgt)
25008+ .fill 512,8,0
25009+
25010 NEXT_PAGE(level2_kernel_pgt)
25011 /*
25012 * 512 MB kernel mapping. We spend a full page on this pagetable
25013@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25014 NEXT_PAGE(level2_fixmap_pgt)
25015 .fill 506,8,0
25016 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25017- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25018- .fill 5,8,0
25019+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25020+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25021+ .fill 4,8,0
25022
25023 NEXT_PAGE(level1_fixmap_pgt)
25024 .fill 512,8,0
25025
25026+NEXT_PAGE(level1_vsyscall_pgt)
25027+ .fill 512,8,0
25028+
25029 #undef PMDS
25030
25031- .data
25032+ .align PAGE_SIZE
25033+ENTRY(cpu_gdt_table)
25034+ .rept NR_CPUS
25035+ .quad 0x0000000000000000 /* NULL descriptor */
25036+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25037+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25038+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25039+ .quad 0x00cffb000000ffff /* __USER32_CS */
25040+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25041+ .quad 0x00affb000000ffff /* __USER_CS */
25042+
25043+#ifdef CONFIG_PAX_KERNEXEC
25044+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25045+#else
25046+ .quad 0x0 /* unused */
25047+#endif
25048+
25049+ .quad 0,0 /* TSS */
25050+ .quad 0,0 /* LDT */
25051+ .quad 0,0,0 /* three TLS descriptors */
25052+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25053+ /* asm/segment.h:GDT_ENTRIES must match this */
25054+
25055+#ifdef CONFIG_PAX_MEMORY_UDEREF
25056+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25057+#else
25058+ .quad 0x0 /* unused */
25059+#endif
25060+
25061+ /* zero the remaining page */
25062+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25063+ .endr
25064+
25065 .align 16
25066 .globl early_gdt_descr
25067 early_gdt_descr:
25068 .word GDT_ENTRIES*8-1
25069 early_gdt_descr_base:
25070- .quad INIT_PER_CPU_VAR(gdt_page)
25071+ .quad cpu_gdt_table
25072
25073 ENTRY(phys_base)
25074 /* This must match the first entry in level2_kernel_pgt */
25075 .quad 0x0000000000000000
25076
25077 #include "../../x86/xen/xen-head.S"
25078-
25079- __PAGE_ALIGNED_BSS
25080+
25081+ .section .rodata,"a",@progbits
25082 NEXT_PAGE(empty_zero_page)
25083 .skip PAGE_SIZE
25084diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25085index 05fd74f..c3548b1 100644
25086--- a/arch/x86/kernel/i386_ksyms_32.c
25087+++ b/arch/x86/kernel/i386_ksyms_32.c
25088@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25089 EXPORT_SYMBOL(cmpxchg8b_emu);
25090 #endif
25091
25092+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25093+
25094 /* Networking helper routines. */
25095 EXPORT_SYMBOL(csum_partial_copy_generic);
25096+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25097+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25098
25099 EXPORT_SYMBOL(__get_user_1);
25100 EXPORT_SYMBOL(__get_user_2);
25101@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25102 EXPORT_SYMBOL(___preempt_schedule_context);
25103 #endif
25104 #endif
25105+
25106+#ifdef CONFIG_PAX_KERNEXEC
25107+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25108+#endif
25109+
25110+#ifdef CONFIG_PAX_PER_CPU_PGD
25111+EXPORT_SYMBOL(cpu_pgd);
25112+#endif
25113diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25114index a9a4229..6f4d476 100644
25115--- a/arch/x86/kernel/i387.c
25116+++ b/arch/x86/kernel/i387.c
25117@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25118 static inline bool interrupted_user_mode(void)
25119 {
25120 struct pt_regs *regs = get_irq_regs();
25121- return regs && user_mode_vm(regs);
25122+ return regs && user_mode(regs);
25123 }
25124
25125 /*
25126diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25127index e7cc537..67d7372 100644
25128--- a/arch/x86/kernel/i8259.c
25129+++ b/arch/x86/kernel/i8259.c
25130@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25131 static void make_8259A_irq(unsigned int irq)
25132 {
25133 disable_irq_nosync(irq);
25134- io_apic_irqs &= ~(1<<irq);
25135+ io_apic_irqs &= ~(1UL<<irq);
25136 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25137 enable_irq(irq);
25138 }
25139@@ -208,7 +208,7 @@ spurious_8259A_irq:
25140 "spurious 8259A interrupt: IRQ%d.\n", irq);
25141 spurious_irq_mask |= irqmask;
25142 }
25143- atomic_inc(&irq_err_count);
25144+ atomic_inc_unchecked(&irq_err_count);
25145 /*
25146 * Theoretically we do not have to handle this IRQ,
25147 * but in Linux this does not cause problems and is
25148@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25149 /* (slave's support for AEOI in flat mode is to be investigated) */
25150 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25151
25152+ pax_open_kernel();
25153 if (auto_eoi)
25154 /*
25155 * In AEOI mode we just have to mask the interrupt
25156 * when acking.
25157 */
25158- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25159+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25160 else
25161- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25162+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25163+ pax_close_kernel();
25164
25165 udelay(100); /* wait for 8259A to initialize */
25166
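
The i8259 hunk shows the standard pattern for writing to structures that the constify plugin has made const and that KERNEXEC keeps in read-only memory: bracket the store with pax_open_kernel()/pax_close_kernel(), which toggle CR0.WP, and cast the lvalue through void ** to get past the const qualifier. The same shape, with hypothetical helpers:

extern void pax_open_kernel_sketch(void);   /* clears CR0.WP   */
extern void pax_close_kernel_sketch(void);  /* restores CR0.WP */

struct irq_chip_sketch { void (*mask_ack)(unsigned int irq); };
static const struct irq_chip_sketch chip;   /* constified, read-only */

static void set_mask_ack_sketch(void (*fn)(unsigned int))
{
	pax_open_kernel_sketch();
	*(void **)&chip.mask_ack = (void *)fn;  /* cast away const, as above */
	pax_close_kernel_sketch();
}
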
25167diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25168index a979b5b..1d6db75 100644
25169--- a/arch/x86/kernel/io_delay.c
25170+++ b/arch/x86/kernel/io_delay.c
25171@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25172 * Quirk table for systems that misbehave (lock up, etc.) if port
25173 * 0x80 is used:
25174 */
25175-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25176+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25177 {
25178 .callback = dmi_io_delay_0xed_port,
25179 .ident = "Compaq Presario V6000",
25180diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25181index 4ddaf66..49d5c18 100644
25182--- a/arch/x86/kernel/ioport.c
25183+++ b/arch/x86/kernel/ioport.c
25184@@ -6,6 +6,7 @@
25185 #include <linux/sched.h>
25186 #include <linux/kernel.h>
25187 #include <linux/capability.h>
25188+#include <linux/security.h>
25189 #include <linux/errno.h>
25190 #include <linux/types.h>
25191 #include <linux/ioport.h>
25192@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25193 return -EINVAL;
25194 if (turn_on && !capable(CAP_SYS_RAWIO))
25195 return -EPERM;
25196+#ifdef CONFIG_GRKERNSEC_IO
25197+ if (turn_on && grsec_disable_privio) {
25198+ gr_handle_ioperm();
25199+ return -ENODEV;
25200+ }
25201+#endif
25202
25203 /*
25204 * If it's the first ioperm() call in this thread's lifetime, set the
25205@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25206 * because the ->io_bitmap_max value must match the bitmap
25207 * contents:
25208 */
25209- tss = &per_cpu(init_tss, get_cpu());
25210+ tss = init_tss + get_cpu();
25211
25212 if (turn_on)
25213 bitmap_clear(t->io_bitmap_ptr, from, num);
25214@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25215 if (level > old) {
25216 if (!capable(CAP_SYS_RAWIO))
25217 return -EPERM;
25218+#ifdef CONFIG_GRKERNSEC_IO
25219+ if (grsec_disable_privio) {
25220+ gr_handle_iopl();
25221+ return -ENODEV;
25222+ }
25223+#endif
25224 }
25225 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25226 t->iopl = level << 12;
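
Both sys_ioperm() and sys_iopl() gain a CONFIG_GRKERNSEC_IO gate: when grsec_disable_privio is set, port-access privileges are refused with -ENODEV (not the usual -EPERM) even for CAP_SYS_RAWIO holders, and gr_handle_ioperm()/gr_handle_iopl() log the attempt. The init_tss change in the same file follows from process.c further down, where init_tss stops being a per-CPU variable and becomes a plain NR_CPUS array. A hypothetical userspace probe for the gate:

/* On a kernel built with CONFIG_GRKERNSEC_IO and privileged I/O
 * disabled, ioperm() is expected to fail with ENODEV even as root. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/io.h>

int main(void)
{
	if (ioperm(0x80, 1, 1) == -1)
		printf("ioperm: %s (errno=%d)\n", strerror(errno), errno);
	else
		printf("ioperm granted\n");
	return 0;
}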
25227diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25228index 705ef8d..8672c9d 100644
25229--- a/arch/x86/kernel/irq.c
25230+++ b/arch/x86/kernel/irq.c
25231@@ -22,7 +22,7 @@
25232 #define CREATE_TRACE_POINTS
25233 #include <asm/trace/irq_vectors.h>
25234
25235-atomic_t irq_err_count;
25236+atomic_unchecked_t irq_err_count;
25237
25238 /* Function pointer for generic interrupt vector handling */
25239 void (*x86_platform_ipi_callback)(void) = NULL;
25240@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25241 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25242 seq_puts(p, " Hypervisor callback interrupts\n");
25243 #endif
25244- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25245+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25246 #if defined(CONFIG_X86_IO_APIC)
25247- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25248+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25249 #endif
25250 return 0;
25251 }
25252@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25253
25254 u64 arch_irq_stat(void)
25255 {
25256- u64 sum = atomic_read(&irq_err_count);
25257+ u64 sum = atomic_read_unchecked(&irq_err_count);
25258 return sum;
25259 }
25260
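
irq_err_count illustrates the atomic_unchecked_t conversion that recurs all over this patch: under PaX REFCOUNT, ordinary atomic_t arithmetic traps on signed overflow to defeat reference-count-overflow exploits, so counters that are pure statistics and may legitimately wrap are switched to _unchecked variants with plain semantics. A reconstructed x86 sketch of the distinction (not the literal PaX implementation):

typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain increment, free to wrap */
	asm volatile("lock incl %0" : "+m" (v->counter));
}

static inline void atomic_inc_checked(atomic_t *v)
{
	asm volatile("lock incl %0\n\t"
		     "jno 1f\n\t"
		     "lock decl %0\n\t"	/* undo the overflowing step */
		     "int $4\n\t"	/* raise #OF, handled as an attack */
		     "1:"
		     : "+m" (v->counter));
}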
25261diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25262index 63ce838..2ea3e06 100644
25263--- a/arch/x86/kernel/irq_32.c
25264+++ b/arch/x86/kernel/irq_32.c
25265@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25266
25267 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25268
25269+extern void gr_handle_kernel_exploit(void);
25270+
25271 int sysctl_panic_on_stackoverflow __read_mostly;
25272
25273 /* Debugging check for stack overflow: is there less than 1KB free? */
25274@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25275 __asm__ __volatile__("andl %%esp,%0" :
25276 "=r" (sp) : "0" (THREAD_SIZE - 1));
25277
25278- return sp < (sizeof(struct thread_info) + STACK_WARN);
25279+ return sp < STACK_WARN;
25280 }
25281
25282 static void print_stack_overflow(void)
25283 {
25284 printk(KERN_WARNING "low stack detected by irq handler\n");
25285 dump_stack();
25286+ gr_handle_kernel_exploit();
25287 if (sysctl_panic_on_stackoverflow)
25288 panic("low stack detected by irq handler - check messages\n");
25289 }
25290@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25291 static inline int
25292 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25293 {
25294- struct irq_stack *curstk, *irqstk;
25295+ struct irq_stack *irqstk;
25296 u32 *isp, *prev_esp, arg1, arg2;
25297
25298- curstk = (struct irq_stack *) current_stack();
25299 irqstk = __this_cpu_read(hardirq_stack);
25300
25301 /*
25302@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25303 * handler) we can't do that and just have to keep using the
25304 * current stack (which is the irq stack already after all)
25305 */
25306- if (unlikely(curstk == irqstk))
25307+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25308 return 0;
25309
25310- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25311+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25312
25313 /* Save the next esp at the bottom of the stack */
25314 prev_esp = (u32 *)irqstk;
25315 *prev_esp = current_stack_pointer;
25316
25317+#ifdef CONFIG_PAX_MEMORY_UDEREF
25318+ __set_fs(MAKE_MM_SEG(0));
25319+#endif
25320+
25321 if (unlikely(overflow))
25322 call_on_stack(print_stack_overflow, isp);
25323
25324@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25325 : "0" (irq), "1" (desc), "2" (isp),
25326 "D" (desc->handle_irq)
25327 : "memory", "cc", "ecx");
25328+
25329+#ifdef CONFIG_PAX_MEMORY_UDEREF
25330+ __set_fs(current_thread_info()->addr_limit);
25331+#endif
25332+
25333 return 1;
25334 }
25335
25336@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25337 */
25338 void irq_ctx_init(int cpu)
25339 {
25340- struct irq_stack *irqstk;
25341-
25342 if (per_cpu(hardirq_stack, cpu))
25343 return;
25344
25345- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25346- THREADINFO_GFP,
25347- THREAD_SIZE_ORDER));
25348- per_cpu(hardirq_stack, cpu) = irqstk;
25349-
25350- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25351- THREADINFO_GFP,
25352- THREAD_SIZE_ORDER));
25353- per_cpu(softirq_stack, cpu) = irqstk;
25354-
25355- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25356- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25357+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25358+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25359 }
25360
25361 void do_softirq_own_stack(void)
25362 {
25363- struct thread_info *curstk;
25364 struct irq_stack *irqstk;
25365 u32 *isp, *prev_esp;
25366
25367- curstk = current_stack();
25368 irqstk = __this_cpu_read(softirq_stack);
25369
25370 /* build the stack frame on the softirq stack */
25371@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25372 prev_esp = (u32 *)irqstk;
25373 *prev_esp = current_stack_pointer;
25374
25375+#ifdef CONFIG_PAX_MEMORY_UDEREF
25376+ __set_fs(MAKE_MM_SEG(0));
25377+#endif
25378+
25379 call_on_stack(__do_softirq, isp);
25380+
25381+#ifdef CONFIG_PAX_MEMORY_UDEREF
25382+ __set_fs(current_thread_info()->addr_limit);
25383+#endif
25384+
25385 }
25386
25387 bool handle_irq(unsigned irq, struct pt_regs *regs)
25388@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25389 if (unlikely(!desc))
25390 return false;
25391
25392- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25393+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25394 if (unlikely(overflow))
25395 print_stack_overflow();
25396 desc->handle_irq(irq, desc);
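
Two things happen around the i386 IRQ/softirq stack switch. First, the curstk bookkeeping is replaced by a direct distance check against current_stack_pointer, and the initial stack pointer is dropped by 8 bytes to leave room for the saved linkage. Second, under PAX_MEMORY_UDEREF the handler window is bracketed by __set_fs(MAKE_MM_SEG(0)) / __set_fs(current_thread_info()->addr_limit): while running on the per-CPU interrupt stack no userland access is legitimate, so the access limit is clamped to zero and restored on the way out. A sketch of what the clamp amounts to (details reconstructed; on i386 UDEREF, set_fs also reprograms the segment limit used for userland accesses):

static inline void __set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	/* under UDEREF this additionally shrinks or restores the user
	 * data segment, so with fs.seg == 0 any copy_*_user()-style
	 * dereference faults immediately instead of silently reading
	 * attacker-controlled userland memory from irq context */
}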
25397diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25398index e4b503d..824fce8 100644
25399--- a/arch/x86/kernel/irq_64.c
25400+++ b/arch/x86/kernel/irq_64.c
25401@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25402 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25403 EXPORT_PER_CPU_SYMBOL(irq_regs);
25404
25405+extern void gr_handle_kernel_exploit(void);
25406+
25407 int sysctl_panic_on_stackoverflow;
25408
25409 /*
25410@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25411 u64 estack_top, estack_bottom;
25412 u64 curbase = (u64)task_stack_page(current);
25413
25414- if (user_mode_vm(regs))
25415+ if (user_mode(regs))
25416 return;
25417
25418 if (regs->sp >= curbase + sizeof(struct thread_info) +
25419@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25420 irq_stack_top, irq_stack_bottom,
25421 estack_top, estack_bottom);
25422
25423+ gr_handle_kernel_exploit();
25424+
25425 if (sysctl_panic_on_stackoverflow)
25426 panic("low stack detected by irq handler - check messages\n");
25427 #endif
25428diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25429index 26d5a55..a01160a 100644
25430--- a/arch/x86/kernel/jump_label.c
25431+++ b/arch/x86/kernel/jump_label.c
25432@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25433 * Jump label is enabled for the first time.
25434 * So we expect a default_nop...
25435 */
25436- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25437+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25438 != 0))
25439 bug_at((void *)entry->code, __LINE__);
25440 } else {
25441@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25442 * ...otherwise expect an ideal_nop. Otherwise
25443 * something went horribly wrong.
25444 */
25445- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25446+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25447 != 0))
25448 bug_at((void *)entry->code, __LINE__);
25449 }
25450@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25451 * are converting the default nop to the ideal nop.
25452 */
25453 if (init) {
25454- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25455+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25456 bug_at((void *)entry->code, __LINE__);
25457 } else {
25458 code.jump = 0xe9;
25459 code.offset = entry->target -
25460 (entry->code + JUMP_LABEL_NOP_SIZE);
25461- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25462+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25463 bug_at((void *)entry->code, __LINE__);
25464 }
25465 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
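
All the entry->code dereferences gain ktla_ktva(), the central KERNEXEC address helper: on i386 with KERNEXEC the kernel text is mapped twice, once executable (where it runs) and once writable (where it is read and patched), at a constant offset. ktla_ktva() maps a text address to its writable alias and ktva_ktla() inverts that; without KERNEXEC both are the identity, which is why the converted call sites stay correct on every configuration. The shape of the macros, reconstructed from their usage here (the exact offset lives in the PaX headers and may differ):

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
#else
#define ktla_ktva(addr)	(addr)
#define ktva_ktla(addr)	(addr)
#endif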
25466diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25467index 7ec1d5f..5a7d130 100644
25468--- a/arch/x86/kernel/kgdb.c
25469+++ b/arch/x86/kernel/kgdb.c
25470@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25471 #ifdef CONFIG_X86_32
25472 switch (regno) {
25473 case GDB_SS:
25474- if (!user_mode_vm(regs))
25475+ if (!user_mode(regs))
25476 *(unsigned long *)mem = __KERNEL_DS;
25477 break;
25478 case GDB_SP:
25479- if (!user_mode_vm(regs))
25480+ if (!user_mode(regs))
25481 *(unsigned long *)mem = kernel_stack_pointer(regs);
25482 break;
25483 case GDB_GS:
25484@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25485 bp->attr.bp_addr = breakinfo[breakno].addr;
25486 bp->attr.bp_len = breakinfo[breakno].len;
25487 bp->attr.bp_type = breakinfo[breakno].type;
25488- info->address = breakinfo[breakno].addr;
25489+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25490+ info->address = ktla_ktva(breakinfo[breakno].addr);
25491+ else
25492+ info->address = breakinfo[breakno].addr;
25493 info->len = breakinfo[breakno].len;
25494 info->type = breakinfo[breakno].type;
25495 val = arch_install_hw_breakpoint(bp);
25496@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25497 case 'k':
25498 /* clear the trace bit */
25499 linux_regs->flags &= ~X86_EFLAGS_TF;
25500- atomic_set(&kgdb_cpu_doing_single_step, -1);
25501+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25502
25503 /* set the trace bit if we're stepping */
25504 if (remcomInBuffer[0] == 's') {
25505 linux_regs->flags |= X86_EFLAGS_TF;
25506- atomic_set(&kgdb_cpu_doing_single_step,
25507+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25508 raw_smp_processor_id());
25509 }
25510
25511@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25512
25513 switch (cmd) {
25514 case DIE_DEBUG:
25515- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25516+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25517 if (user_mode(regs))
25518 return single_step_cont(regs, args);
25519 break;
25520@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25521 #endif /* CONFIG_DEBUG_RODATA */
25522
25523 bpt->type = BP_BREAKPOINT;
25524- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25525+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25526 BREAK_INSTR_SIZE);
25527 if (err)
25528 return err;
25529- err = probe_kernel_write((char *)bpt->bpt_addr,
25530+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25531 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25532 #ifdef CONFIG_DEBUG_RODATA
25533 if (!err)
25534@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25535 return -EBUSY;
25536 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25537 BREAK_INSTR_SIZE);
25538- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25539+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25540 if (err)
25541 return err;
25542 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25543@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25544 if (mutex_is_locked(&text_mutex))
25545 goto knl_write;
25546 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25547- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25548+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25549 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25550 goto knl_write;
25551 return err;
25552 knl_write:
25553 #endif /* CONFIG_DEBUG_RODATA */
25554- return probe_kernel_write((char *)bpt->bpt_addr,
25555+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25556 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25557 }
25558
25559diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25560index 98f654d..ac04352 100644
25561--- a/arch/x86/kernel/kprobes/core.c
25562+++ b/arch/x86/kernel/kprobes/core.c
25563@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25564 s32 raddr;
25565 } __packed *insn;
25566
25567- insn = (struct __arch_relative_insn *)from;
25568+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25569+
25570+ pax_open_kernel();
25571 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25572 insn->op = op;
25573+ pax_close_kernel();
25574 }
25575
25576 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25577@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25578 kprobe_opcode_t opcode;
25579 kprobe_opcode_t *orig_opcodes = opcodes;
25580
25581- if (search_exception_tables((unsigned long)opcodes))
25582+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25583 return 0; /* Page fault may occur on this address. */
25584
25585 retry:
25586@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25587 * for the first byte, we can recover the original instruction
25588 * from it and kp->opcode.
25589 */
25590- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25591+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25592 buf[0] = kp->opcode;
25593- return (unsigned long)buf;
25594+ return ktva_ktla((unsigned long)buf);
25595 }
25596
25597 /*
25598@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25599 /* Another subsystem puts a breakpoint, failed to recover */
25600 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25601 return 0;
25602+ pax_open_kernel();
25603 memcpy(dest, insn.kaddr, insn.length);
25604+ pax_close_kernel();
25605
25606 #ifdef CONFIG_X86_64
25607 if (insn_rip_relative(&insn)) {
25608@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25609 return 0;
25610 }
25611 disp = (u8 *) dest + insn_offset_displacement(&insn);
25612+ pax_open_kernel();
25613 *(s32 *) disp = (s32) newdisp;
25614+ pax_close_kernel();
25615 }
25616 #endif
25617 return insn.length;
25618@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25619 * nor set current_kprobe, because it doesn't use single
25620 * stepping.
25621 */
25622- regs->ip = (unsigned long)p->ainsn.insn;
25623+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25624 preempt_enable_no_resched();
25625 return;
25626 }
25627@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25628 regs->flags &= ~X86_EFLAGS_IF;
25629 /* single step inline if the instruction is an int3 */
25630 if (p->opcode == BREAKPOINT_INSTRUCTION)
25631- regs->ip = (unsigned long)p->addr;
25632+ regs->ip = ktla_ktva((unsigned long)p->addr);
25633 else
25634- regs->ip = (unsigned long)p->ainsn.insn;
25635+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25636 }
25637 NOKPROBE_SYMBOL(setup_singlestep);
25638
25639@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25640 struct kprobe *p;
25641 struct kprobe_ctlblk *kcb;
25642
25643- if (user_mode_vm(regs))
25644+ if (user_mode(regs))
25645 return 0;
25646
25647 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25648@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25649 setup_singlestep(p, regs, kcb, 0);
25650 return 1;
25651 }
25652- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25653+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25654 /*
25655 * The breakpoint instruction was removed right
25656 * after we hit it. Another cpu has removed
25657@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
25658 " movq %rax, 152(%rsp)\n"
25659 RESTORE_REGS_STRING
25660 " popfq\n"
25661+#ifdef KERNEXEC_PLUGIN
25662+ " btsq $63,(%rsp)\n"
25663+#endif
25664 #else
25665 " pushf\n"
25666 SAVE_REGS_STRING
25667@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25668 struct kprobe_ctlblk *kcb)
25669 {
25670 unsigned long *tos = stack_addr(regs);
25671- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25672+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25673 unsigned long orig_ip = (unsigned long)p->addr;
25674 kprobe_opcode_t *insn = p->ainsn.insn;
25675
25676@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25677 struct die_args *args = data;
25678 int ret = NOTIFY_DONE;
25679
25680- if (args->regs && user_mode_vm(args->regs))
25681+ if (args->regs && user_mode(args->regs))
25682 return ret;
25683
25684 if (val == DIE_GPF) {
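
The kprobes changes all instantiate one pattern: instruction bytes are read from and written to kernel text through its writable alias, and every store into otherwise read-only memory sits inside an open/close window. Condensed (patch_text_byte is a hypothetical helper combining the macros sketched above):

static void patch_text_byte(u8 *text_addr, u8 newbyte)
{
	u8 *alias = (u8 *)ktla_ktva((unsigned long)text_addr);

	pax_open_kernel();
	*alias = newbyte;	/* store via the writable alias */
	pax_close_kernel();
	/* icache/IPI maintenance omitted from this sketch */
}

The opposite conversion, ktva_ktla(), shows up wherever an address is about to be executed or matched against the executable mapping, e.g. the regs->ip assignments in setup_singlestep().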
25685diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25686index 7c523bb..01b051b 100644
25687--- a/arch/x86/kernel/kprobes/opt.c
25688+++ b/arch/x86/kernel/kprobes/opt.c
25689@@ -79,6 +79,7 @@ found:
25690 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25691 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25692 {
25693+ pax_open_kernel();
25694 #ifdef CONFIG_X86_64
25695 *addr++ = 0x48;
25696 *addr++ = 0xbf;
25697@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25698 *addr++ = 0xb8;
25699 #endif
25700 *(unsigned long *)addr = val;
25701+ pax_close_kernel();
25702 }
25703
25704 asm (
25705@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25706 * Verify if the address gap is in 2GB range, because this uses
25707 * a relative jump.
25708 */
25709- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25710+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25711 if (abs(rel) > 0x7fffffff) {
25712 __arch_remove_optimized_kprobe(op, 0);
25713 return -ERANGE;
25714@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25715 op->optinsn.size = ret;
25716
25717 /* Copy arch-dep-instance from template */
25718- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25719+ pax_open_kernel();
25720+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25721+ pax_close_kernel();
25722
25723 /* Set probe information */
25724 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25725
25726 /* Set probe function call */
25727- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25728+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25729
25730 /* Set returning jmp instruction at the tail of out-of-line buffer */
25731- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25732+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25733 (u8 *)op->kp.addr + op->optinsn.size);
25734
25735 flush_icache_range((unsigned long) buf,
25736@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25737 WARN_ON(kprobe_disabled(&op->kp));
25738
25739 /* Backup instructions which will be replaced by jump address */
25740- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25741+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25742 RELATIVE_ADDR_SIZE);
25743
25744 insn_buf[0] = RELATIVEJUMP_OPCODE;
25745@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25746 /* This kprobe is really able to run optimized path. */
25747 op = container_of(p, struct optimized_kprobe, kp);
25748 /* Detour through copied instructions */
25749- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25750+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25751 if (!reenter)
25752 reset_current_kprobe();
25753 preempt_enable_no_resched();
25754diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25755index c2bedae..25e7ab60 100644
25756--- a/arch/x86/kernel/ksysfs.c
25757+++ b/arch/x86/kernel/ksysfs.c
25758@@ -184,7 +184,7 @@ out:
25759
25760 static struct kobj_attribute type_attr = __ATTR_RO(type);
25761
25762-static struct bin_attribute data_attr = {
25763+static bin_attribute_no_const data_attr __read_only = {
25764 .attr = {
25765 .name = "data",
25766 .mode = S_IRUGO,
25767diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25768index c37886d..d851d32 100644
25769--- a/arch/x86/kernel/ldt.c
25770+++ b/arch/x86/kernel/ldt.c
25771@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25772 if (reload) {
25773 #ifdef CONFIG_SMP
25774 preempt_disable();
25775- load_LDT(pc);
25776+ load_LDT_nolock(pc);
25777 if (!cpumask_equal(mm_cpumask(current->mm),
25778 cpumask_of(smp_processor_id())))
25779 smp_call_function(flush_ldt, current->mm, 1);
25780 preempt_enable();
25781 #else
25782- load_LDT(pc);
25783+ load_LDT_nolock(pc);
25784 #endif
25785 }
25786 if (oldsize) {
25787@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25788 return err;
25789
25790 for (i = 0; i < old->size; i++)
25791- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25792+ write_ldt_entry(new->ldt, i, old->ldt + i);
25793 return 0;
25794 }
25795
25796@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25797 retval = copy_ldt(&mm->context, &old_mm->context);
25798 mutex_unlock(&old_mm->context.lock);
25799 }
25800+
25801+ if (tsk == current) {
25802+ mm->context.vdso = 0;
25803+
25804+#ifdef CONFIG_X86_32
25805+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25806+ mm->context.user_cs_base = 0UL;
25807+ mm->context.user_cs_limit = ~0UL;
25808+
25809+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25810+ cpus_clear(mm->context.cpu_user_cs_mask);
25811+#endif
25812+
25813+#endif
25814+#endif
25815+
25816+ }
25817+
25818 return retval;
25819 }
25820
25821@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25822 }
25823 }
25824
25825+#ifdef CONFIG_PAX_SEGMEXEC
25826+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25827+ error = -EINVAL;
25828+ goto out_unlock;
25829+ }
25830+#endif
25831+
25832 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25833 error = -EINVAL;
25834 goto out_unlock;
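
Beyond the load_LDT_nolock() and context-initialisation changes, write_ldt() learns to reject code descriptors for SEGMEXEC tasks: SEGMEXEC enforces non-executable pages by splitting the userland address space into separate data and code segments, and a user-installed LDT code segment would step around that split. From userland the refusal would look like this (hypothetical probe; on a non-SEGMEXEC task the call succeeds):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <asm/ldt.h>
#include <sys/syscall.h>

int main(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));	/* entry_number 0, base 0 */
	d.limit = 0xfffff;
	d.seg_32bit = 1;
	d.contents = MODIFY_LDT_CONTENTS_CODE;	/* the rejected bit */
	d.limit_in_pages = 1;
	d.useable = 1;

	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) == -1)
		printf("modify_ldt: %s\n", strerror(errno));	/* EINVAL */
	return 0;
}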
25835diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25836index 469b23d..5449cfe 100644
25837--- a/arch/x86/kernel/machine_kexec_32.c
25838+++ b/arch/x86/kernel/machine_kexec_32.c
25839@@ -26,7 +26,7 @@
25840 #include <asm/cacheflush.h>
25841 #include <asm/debugreg.h>
25842
25843-static void set_idt(void *newidt, __u16 limit)
25844+static void set_idt(struct desc_struct *newidt, __u16 limit)
25845 {
25846 struct desc_ptr curidt;
25847
25848@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25849 }
25850
25851
25852-static void set_gdt(void *newgdt, __u16 limit)
25853+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25854 {
25855 struct desc_ptr curgdt;
25856
25857@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25858 }
25859
25860 control_page = page_address(image->control_code_page);
25861- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25862+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25863
25864 relocate_kernel_ptr = control_page;
25865 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25866diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25867index 94ea120..4154cea 100644
25868--- a/arch/x86/kernel/mcount_64.S
25869+++ b/arch/x86/kernel/mcount_64.S
25870@@ -7,7 +7,7 @@
25871 #include <linux/linkage.h>
25872 #include <asm/ptrace.h>
25873 #include <asm/ftrace.h>
25874-
25875+#include <asm/alternative-asm.h>
25876
25877 .code64
25878 .section .entry.text, "ax"
25879@@ -148,8 +148,9 @@
25880 #ifdef CONFIG_DYNAMIC_FTRACE
25881
25882 ENTRY(function_hook)
25883+ pax_force_retaddr
25884 retq
25885-END(function_hook)
25886+ENDPROC(function_hook)
25887
25888 ENTRY(ftrace_caller)
25889 /* save_mcount_regs fills in first two parameters */
25890@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25891 #endif
25892
25893 GLOBAL(ftrace_stub)
25894+ pax_force_retaddr
25895 retq
25896-END(ftrace_caller)
25897+ENDPROC(ftrace_caller)
25898
25899 ENTRY(ftrace_regs_caller)
25900 /* Save the current flags before any operations that can change them */
25901@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25902
25903 jmp ftrace_return
25904
25905-END(ftrace_regs_caller)
25906+ENDPROC(ftrace_regs_caller)
25907
25908
25909 #else /* ! CONFIG_DYNAMIC_FTRACE */
25910@@ -272,18 +274,20 @@ fgraph_trace:
25911 #endif
25912
25913 GLOBAL(ftrace_stub)
25914+ pax_force_retaddr
25915 retq
25916
25917 trace:
25918 /* save_mcount_regs fills in first two parameters */
25919 save_mcount_regs
25920
25921+ pax_force_fptr ftrace_trace_function
25922 call *ftrace_trace_function
25923
25924 restore_mcount_regs
25925
25926 jmp fgraph_trace
25927-END(function_hook)
25928+ENDPROC(function_hook)
25929 #endif /* CONFIG_DYNAMIC_FTRACE */
25930 #endif /* CONFIG_FUNCTION_TRACER */
25931
25932@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25933
25934 restore_mcount_regs
25935
25936+ pax_force_retaddr
25937 retq
25938-END(ftrace_graph_caller)
25939+ENDPROC(ftrace_graph_caller)
25940
25941 GLOBAL(return_to_handler)
25942 subq $24, %rsp
25943@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25944 movq 8(%rsp), %rdx
25945 movq (%rsp), %rax
25946 addq $24, %rsp
25947+ pax_force_fptr %rdi
25948 jmp *%rdi
25949+ENDPROC(return_to_handler)
25950 #endif
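
pax_force_retaddr before each retq (and the btsq $63,(%rsp) in the kretprobe trampoline earlier) is the amd64 KERNEXEC-plugin discipline for return addresses: kernel text lives in the upper canonical half, so every legitimate saved return address already has bit 63 set and forcing the bit is a no-op for it, while a corrupted frame pointing into userland becomes non-canonical and the ret faults instead of pivoting there. A reconstructed sketch of what the macro amounts to (the plugin emits this before rets; the exact encoding may differ):

static __always_inline void pax_force_retaddr_sketch(void)
{
	/* set bit 63 of the saved return address on top of the stack */
	asm volatile("btsq $63, (%%rsp)" ::: "memory", "cc");
}

The END-to-ENDPROC conversions in the same hunks are hygiene: ENDPROC marks the symbol as a function, which the instrumentation relies on.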
25951diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25952index e69f988..72902b7 100644
25953--- a/arch/x86/kernel/module.c
25954+++ b/arch/x86/kernel/module.c
25955@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
25956 }
25957 #endif
25958
25959-void *module_alloc(unsigned long size)
25960+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25961 {
25962- if (PAGE_ALIGN(size) > MODULES_LEN)
25963+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25964 return NULL;
25965 return __vmalloc_node_range(size, 1,
25966 MODULES_VADDR + get_module_load_offset(),
25967- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25968- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
25969+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25970+ prot, NUMA_NO_NODE,
25971 __builtin_return_address(0));
25972 }
25973
25974+void *module_alloc(unsigned long size)
25975+{
25976+
25977+#ifdef CONFIG_PAX_KERNEXEC
25978+ return __module_alloc(size, PAGE_KERNEL);
25979+#else
25980+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25981+#endif
25982+
25983+}
25984+
25985+#ifdef CONFIG_PAX_KERNEXEC
25986+#ifdef CONFIG_X86_32
25987+void *module_alloc_exec(unsigned long size)
25988+{
25989+ struct vm_struct *area;
25990+
25991+ if (size == 0)
25992+ return NULL;
25993+
25994+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25995+ return area ? area->addr : NULL;
25996+}
25997+EXPORT_SYMBOL(module_alloc_exec);
25998+
25999+void module_memfree_exec(void *module_region)
26000+{
26001+ vunmap(module_region);
26002+}
26003+EXPORT_SYMBOL(module_memfree_exec);
26004+#else
26005+void module_memfree_exec(void *module_region)
26006+{
26007+ module_memfree(module_region);
26008+}
26009+EXPORT_SYMBOL(module_memfree_exec);
26010+
26011+void *module_alloc_exec(unsigned long size)
26012+{
26013+ return __module_alloc(size, PAGE_KERNEL_RX);
26014+}
26015+EXPORT_SYMBOL(module_alloc_exec);
26016+#endif
26017+#endif
26018+
26019 #ifdef CONFIG_X86_32
26020 int apply_relocate(Elf32_Shdr *sechdrs,
26021 const char *strtab,
26022@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26023 unsigned int i;
26024 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26025 Elf32_Sym *sym;
26026- uint32_t *location;
26027+ uint32_t *plocation, location;
26028
26029 DEBUGP("Applying relocate section %u to %u\n",
26030 relsec, sechdrs[relsec].sh_info);
26031 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26032 /* This is where to make the change */
26033- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26034- + rel[i].r_offset;
26035+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26036+ location = (uint32_t)plocation;
26037+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26038+ plocation = ktla_ktva((void *)plocation);
26039 /* This is the symbol it is referring to. Note that all
26040 undefined symbols have been resolved. */
26041 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26042@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26043 switch (ELF32_R_TYPE(rel[i].r_info)) {
26044 case R_386_32:
26045 /* We add the value into the location given */
26046- *location += sym->st_value;
26047+ pax_open_kernel();
26048+ *plocation += sym->st_value;
26049+ pax_close_kernel();
26050 break;
26051 case R_386_PC32:
26052 /* Add the value, subtract its position */
26053- *location += sym->st_value - (uint32_t)location;
26054+ pax_open_kernel();
26055+ *plocation += sym->st_value - location;
26056+ pax_close_kernel();
26057 break;
26058 default:
26059 pr_err("%s: Unknown relocation: %u\n",
26060@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26061 case R_X86_64_NONE:
26062 break;
26063 case R_X86_64_64:
26064+ pax_open_kernel();
26065 *(u64 *)loc = val;
26066+ pax_close_kernel();
26067 break;
26068 case R_X86_64_32:
26069+ pax_open_kernel();
26070 *(u32 *)loc = val;
26071+ pax_close_kernel();
26072 if (val != *(u32 *)loc)
26073 goto overflow;
26074 break;
26075 case R_X86_64_32S:
26076+ pax_open_kernel();
26077 *(s32 *)loc = val;
26078+ pax_close_kernel();
26079 if ((s64)val != *(s32 *)loc)
26080 goto overflow;
26081 break;
26082 case R_X86_64_PC32:
26083 val -= (u64)loc;
26084+ pax_open_kernel();
26085 *(u32 *)loc = val;
26086+ pax_close_kernel();
26087+
26088 #if 0
26089 if ((s64)val != *(s32 *)loc)
26090 goto overflow;
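
module_alloc() now returns non-executable memory under KERNEXEC, with a separate module_alloc_exec()/module_memfree_exec() pair managing the executable region, so loaded modules follow the same W^X split as core kernel text. The relocation code is adjusted to match: plocation is the pointer actually stored through (redirected via ktla_ktva() when the target section is executable), while location keeps the original numeric address, which R_386_PC32 needs for its PC-relative arithmetic. A condensed restatement of the 32-bit path (identifiers simplified):

uint32_t *plocation = (void *)sec_addr + rel_offset;
uint32_t location = (uint32_t)plocation;	/* VA used for the math */

if (section_is_executable)			/* SHF_EXECINSTR */
	plocation = (void *)ktla_ktva((unsigned long)plocation);

pax_open_kernel();
*plocation += sym_value - location;		/* R_386_PC32 */
pax_close_kernel();

Writing through the alias but subtracting the real VA is the whole point: using plocation in the arithmetic would skew every PC-relative target by the alias offset.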
26091diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26092index 113e707..0a690e1 100644
26093--- a/arch/x86/kernel/msr.c
26094+++ b/arch/x86/kernel/msr.c
26095@@ -39,6 +39,7 @@
26096 #include <linux/notifier.h>
26097 #include <linux/uaccess.h>
26098 #include <linux/gfp.h>
26099+#include <linux/grsecurity.h>
26100
26101 #include <asm/processor.h>
26102 #include <asm/msr.h>
26103@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26104 int err = 0;
26105 ssize_t bytes = 0;
26106
26107+#ifdef CONFIG_GRKERNSEC_KMEM
26108+ gr_handle_msr_write();
26109+ return -EPERM;
26110+#endif
26111+
26112 if (count % 8)
26113 return -EINVAL; /* Invalid chunk size */
26114
26115@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26116 err = -EBADF;
26117 break;
26118 }
26119+#ifdef CONFIG_GRKERNSEC_KMEM
26120+ gr_handle_msr_write();
26121+ return -EPERM;
26122+#endif
26123 if (copy_from_user(&regs, uregs, sizeof regs)) {
26124 err = -EFAULT;
26125 break;
26126@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26127 return notifier_from_errno(err);
26128 }
26129
26130-static struct notifier_block __refdata msr_class_cpu_notifier = {
26131+static struct notifier_block msr_class_cpu_notifier = {
26132 .notifier_call = msr_class_cpu_callback,
26133 };
26134
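
Under CONFIG_GRKERNSEC_KMEM both the write() and ioctl() paths of /dev/cpu/*/msr refuse up front, before any argument parsing, since writable MSRs (SYSENTER/LSTAR entry points, MTRRs, and the like) are a well-known way for root to redirect kernel control flow. Reads stay available. A hypothetical probe (intended for a grsec kernel, where the write is rejected before any MSR is touched):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_WRONLY);
	uint64_t val = 0;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the pwrite offset selects the MSR number */
	if (pwrite(fd, &val, sizeof(val), 0x10) != sizeof(val))
		printf("pwrite: %s\n", strerror(errno));	/* EPERM */
	close(fd);
	return 0;
}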
26135diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26136index c3e985d..110a36a 100644
26137--- a/arch/x86/kernel/nmi.c
26138+++ b/arch/x86/kernel/nmi.c
26139@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26140
26141 static void nmi_max_handler(struct irq_work *w)
26142 {
26143- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26144+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26145 int remainder_ns, decimal_msecs;
26146- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26147+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26148
26149 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26150 decimal_msecs = remainder_ns / 1000;
26151
26152 printk_ratelimited(KERN_INFO
26153 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26154- a->handler, whole_msecs, decimal_msecs);
26155+ n->action->handler, whole_msecs, decimal_msecs);
26156 }
26157
26158 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26159@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26160 delta = sched_clock() - delta;
26161 trace_nmi_handler(a->handler, (int)delta, thishandled);
26162
26163- if (delta < nmi_longest_ns || delta < a->max_duration)
26164+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26165 continue;
26166
26167- a->max_duration = delta;
26168- irq_work_queue(&a->irq_work);
26169+ a->work->max_duration = delta;
26170+ irq_work_queue(&a->work->irq_work);
26171 }
26172
26173 rcu_read_unlock();
26174@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26175 }
26176 NOKPROBE_SYMBOL(nmi_handle);
26177
26178-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26179+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26180 {
26181 struct nmi_desc *desc = nmi_to_desc(type);
26182 unsigned long flags;
26183@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26184 if (!action->handler)
26185 return -EINVAL;
26186
26187- init_irq_work(&action->irq_work, nmi_max_handler);
26188+ action->work->action = action;
26189+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26190
26191 spin_lock_irqsave(&desc->lock, flags);
26192
26193@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26194 * event confuses some handlers (kdump uses this flag)
26195 */
26196 if (action->flags & NMI_FLAG_FIRST)
26197- list_add_rcu(&action->list, &desc->head);
26198+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26199 else
26200- list_add_tail_rcu(&action->list, &desc->head);
26201+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26202
26203 spin_unlock_irqrestore(&desc->lock, flags);
26204 return 0;
26205@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26206 if (!strcmp(n->name, name)) {
26207 WARN(in_nmi(),
26208 "Trying to free NMI (%s) from NMI context!\n", n->name);
26209- list_del_rcu(&n->list);
26210+ pax_list_del_rcu((struct list_head *)&n->list);
26211 break;
26212 }
26213 }
26214@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26215 dotraplinkage notrace void
26216 do_nmi(struct pt_regs *regs, long error_code)
26217 {
26218+
26219+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26220+ if (!user_mode(regs)) {
26221+ unsigned long cs = regs->cs & 0xFFFF;
26222+ unsigned long ip = ktva_ktla(regs->ip);
26223+
26224+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26225+ regs->ip = ip;
26226+ }
26227+#endif
26228+
26229 nmi_nesting_preprocess(regs);
26230
26231 nmi_enter();
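
The NMI changes const-ify the handler descriptors: __register_nmi_handler() now takes a const struct nmiaction *, list manipulation goes through the pax_list_* helpers (which open a write window for the otherwise read-only nodes), and the mutable profiling state (max_duration plus its irq_work) is evicted into a companion struct nmiwork that points back at its action for the report. The implied shape, reconstructed (field order and exact wiring are assumptions):

struct nmiaction;

struct nmiwork {
	struct irq_work		irq_work;	/* mutable */
	u64			max_duration;	/* mutable */
	const struct nmiaction	*action;	/* back-pointer for reporting */
};

struct nmiaction {
	struct list_head	list;
	nmi_handler_t		handler;
	unsigned long		flags;
	const char		*name;
	struct nmiwork		*work;		/* the mutable half */
};

The do_nmi() hunk is separate: on i386 KERNEXEC an NMI can arrive while the alternative __KERNEXEC_KERNEL_CS mapping is live, so a kernel-text ip is normalised back with ktva_ktla() before the handler proceeds.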
26232diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26233index 6d9582e..f746287 100644
26234--- a/arch/x86/kernel/nmi_selftest.c
26235+++ b/arch/x86/kernel/nmi_selftest.c
26236@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26237 {
26238 /* trap all the unknown NMIs we may generate */
26239 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26240- __initdata);
26241+ __initconst);
26242 }
26243
26244 static void __init cleanup_nmi_testsuite(void)
26245@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26246 unsigned long timeout;
26247
26248 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26249- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26250+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26251 nmi_fail = FAILURE;
26252 return;
26253 }
26254diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26255index bbb6c73..24a58ef 100644
26256--- a/arch/x86/kernel/paravirt-spinlocks.c
26257+++ b/arch/x86/kernel/paravirt-spinlocks.c
26258@@ -8,7 +8,7 @@
26259
26260 #include <asm/paravirt.h>
26261
26262-struct pv_lock_ops pv_lock_ops = {
26263+struct pv_lock_ops pv_lock_ops __read_only = {
26264 #ifdef CONFIG_SMP
26265 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26266 .unlock_kick = paravirt_nop,
26267diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26268index 548d25f..f8fb99c 100644
26269--- a/arch/x86/kernel/paravirt.c
26270+++ b/arch/x86/kernel/paravirt.c
26271@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26272 {
26273 return x;
26274 }
26275+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26276+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26277+#endif
26278
26279 void __init default_banner(void)
26280 {
26281@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26282
26283 if (opfunc == NULL)
26284 /* If there's no function, patch it with a ud2a (BUG) */
26285- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26286- else if (opfunc == _paravirt_nop)
26287+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26288+ else if (opfunc == (void *)_paravirt_nop)
26289 /* If the operation is a nop, then nop the callsite */
26290 ret = paravirt_patch_nop();
26291
26292 /* identity functions just return their single argument */
26293- else if (opfunc == _paravirt_ident_32)
26294+ else if (opfunc == (void *)_paravirt_ident_32)
26295 ret = paravirt_patch_ident_32(insnbuf, len);
26296- else if (opfunc == _paravirt_ident_64)
26297+ else if (opfunc == (void *)_paravirt_ident_64)
26298 ret = paravirt_patch_ident_64(insnbuf, len);
26299+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26300+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26301+ ret = paravirt_patch_ident_64(insnbuf, len);
26302+#endif
26303
26304 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26305 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26306@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26307 if (insn_len > len || start == NULL)
26308 insn_len = len;
26309 else
26310- memcpy(insnbuf, start, insn_len);
26311+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26312
26313 return insn_len;
26314 }
26315@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26316 return this_cpu_read(paravirt_lazy_mode);
26317 }
26318
26319-struct pv_info pv_info = {
26320+struct pv_info pv_info __read_only = {
26321 .name = "bare hardware",
26322 .paravirt_enabled = 0,
26323 .kernel_rpl = 0,
26324@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26325 #endif
26326 };
26327
26328-struct pv_init_ops pv_init_ops = {
26329+struct pv_init_ops pv_init_ops __read_only = {
26330 .patch = native_patch,
26331 };
26332
26333-struct pv_time_ops pv_time_ops = {
26334+struct pv_time_ops pv_time_ops __read_only = {
26335 .sched_clock = native_sched_clock,
26336 .steal_clock = native_steal_clock,
26337 };
26338
26339-__visible struct pv_irq_ops pv_irq_ops = {
26340+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26341 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26342 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26343 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26344@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26345 #endif
26346 };
26347
26348-__visible struct pv_cpu_ops pv_cpu_ops = {
26349+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26350 .cpuid = native_cpuid,
26351 .get_debugreg = native_get_debugreg,
26352 .set_debugreg = native_set_debugreg,
26353@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26354 NOKPROBE_SYMBOL(native_set_debugreg);
26355 NOKPROBE_SYMBOL(native_load_idt);
26356
26357-struct pv_apic_ops pv_apic_ops = {
25358+struct pv_apic_ops pv_apic_ops __read_only = {
26359 #ifdef CONFIG_X86_LOCAL_APIC
26360 .startup_ipi_hook = paravirt_nop,
26361 #endif
26362 };
26363
26364-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26365+#ifdef CONFIG_X86_32
26366+#ifdef CONFIG_X86_PAE
26367+/* 64-bit pagetable entries */
26368+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26369+#else
26370 /* 32-bit pagetable entries */
26371 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26372+#endif
26373 #else
26374 /* 64-bit pagetable entries */
26375 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26376 #endif
26377
26378-struct pv_mmu_ops pv_mmu_ops = {
26379+struct pv_mmu_ops pv_mmu_ops __read_only = {
26380
26381 .read_cr2 = native_read_cr2,
26382 .write_cr2 = native_write_cr2,
26383@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26384 .make_pud = PTE_IDENT,
26385
26386 .set_pgd = native_set_pgd,
26387+ .set_pgd_batched = native_set_pgd_batched,
26388 #endif
26389 #endif /* PAGETABLE_LEVELS >= 3 */
26390
26391@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26392 },
26393
26394 .set_fixmap = native_set_fixmap,
26395+
26396+#ifdef CONFIG_PAX_KERNEXEC
26397+ .pax_open_kernel = native_pax_open_kernel,
26398+ .pax_close_kernel = native_pax_close_kernel,
26399+#endif
26400+
26401 };
26402
26403 EXPORT_SYMBOL_GPL(pv_time_ops);
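
Every pv_*_ops table (and pv_lock_ops in paravirt-spinlocks.c above) becomes __read_only: these are the kernel's largest sets of indirect-call targets, they are finalised during early boot, and on bare metal they never change afterwards, so KERNEXEC write-protects them wholesale. The annotation is conventionally something like the following (reconstructed; the real section name and non-KERNEXEC fallback are assumptions):

#ifdef CONFIG_PAX_KERNEXEC
/* collected in one section that boot code write-protects after init */
#define __read_only	__attribute__((__section__(".data..read_only")))
#else
#define __read_only	__read_mostly
#endif

The PAE-specific PTE_IDENT/PV_CALLEE_SAVE(_paravirt_ident_64) additions exist because 32-bit PAE page-table entries are 64-bit values: the identity helper needs a callee-save thunk, and the patcher above learns to recognise that thunk as patchable.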
26404diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26405index a1da673..b6f5831 100644
26406--- a/arch/x86/kernel/paravirt_patch_64.c
26407+++ b/arch/x86/kernel/paravirt_patch_64.c
26408@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26409 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26410 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26411 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26412+
26413+#ifndef CONFIG_PAX_MEMORY_UDEREF
26414 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26415+#endif
26416+
26417 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26418 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26419
26420@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26421 PATCH_SITE(pv_mmu_ops, read_cr3);
26422 PATCH_SITE(pv_mmu_ops, write_cr3);
26423 PATCH_SITE(pv_cpu_ops, clts);
26424+
26425+#ifndef CONFIG_PAX_MEMORY_UDEREF
26426 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26427+#endif
26428+
26429 PATCH_SITE(pv_cpu_ops, wbinvd);
26430
26431 patch_site:
26432diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26433index 0497f71..7186c0d 100644
26434--- a/arch/x86/kernel/pci-calgary_64.c
26435+++ b/arch/x86/kernel/pci-calgary_64.c
26436@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26437 tce_space = be64_to_cpu(readq(target));
26438 tce_space = tce_space & TAR_SW_BITS;
26439
26440- tce_space = tce_space & (~specified_table_size);
26441+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26442 info->tce_space = (u64 *)__va(tce_space);
26443 }
26444 }
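
The Calgary fix widens the complement before it is applied: ~specified_table_size is evaluated in the operand's own 32-bit width, and for an unsigned 32-bit operand the subsequent extension to 64 bits zero-fills, silently clearing the upper half of tce_space. (With the signed int actually used here, sign-extension happens to preserve the high bits, which is precisely why the explicit cast is the safer, intent-revealing idiom.) A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tce_space = 0x00000007fff00000ULL;
	unsigned int size = 0xfffff;	/* 32-bit operand */

	/* complement computed in 32 bits, zero-extended: high half lost */
	printf("%016llx\n", (unsigned long long)(tce_space & ~size));
	/* widen first: all 64 mask bits survive */
	printf("%016llx\n", (unsigned long long)(tce_space & ~(uint64_t)size));
	return 0;
}

This prints 00000000fff00000 followed by 00000007fff00000.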
26445diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26446index 35ccf75..7a15747 100644
26447--- a/arch/x86/kernel/pci-iommu_table.c
26448+++ b/arch/x86/kernel/pci-iommu_table.c
26449@@ -2,7 +2,7 @@
26450 #include <asm/iommu_table.h>
26451 #include <linux/string.h>
26452 #include <linux/kallsyms.h>
26453-
26454+#include <linux/sched.h>
26455
26456 #define DEBUG 1
26457
26458diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26459index 77dd0ad..9ec4723 100644
26460--- a/arch/x86/kernel/pci-swiotlb.c
26461+++ b/arch/x86/kernel/pci-swiotlb.c
26462@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26463 struct dma_attrs *attrs)
26464 {
26465 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26466- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26467+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26468 else
26469 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26470 }
26471diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26472index e127dda..94e384d 100644
26473--- a/arch/x86/kernel/process.c
26474+++ b/arch/x86/kernel/process.c
26475@@ -36,7 +36,8 @@
26476 * section. Since TSS's are completely CPU-local, we want them
26477 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26478 */
26479-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26480+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26481+EXPORT_SYMBOL(init_tss);
26482
26483 #ifdef CONFIG_X86_64
26484 static DEFINE_PER_CPU(unsigned char, is_idle);
26485@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
26486 task_xstate_cachep =
26487 kmem_cache_create("task_xstate", xstate_size,
26488 __alignof__(union thread_xstate),
26489- SLAB_PANIC | SLAB_NOTRACK, NULL);
26490+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26491 setup_xstate_comp();
26492 }
26493
26494@@ -108,7 +109,7 @@ void exit_thread(void)
26495 unsigned long *bp = t->io_bitmap_ptr;
26496
26497 if (bp) {
26498- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26499+ struct tss_struct *tss = init_tss + get_cpu();
26500
26501 t->io_bitmap_ptr = NULL;
26502 clear_thread_flag(TIF_IO_BITMAP);
26503@@ -128,6 +129,9 @@ void flush_thread(void)
26504 {
26505 struct task_struct *tsk = current;
26506
26507+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26508+ loadsegment(gs, 0);
26509+#endif
26510 flush_ptrace_hw_breakpoint(tsk);
26511 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26512 drop_init_fpu(tsk);
26513@@ -274,7 +278,7 @@ static void __exit_idle(void)
26514 void exit_idle(void)
26515 {
26516 /* idle loop has pid 0 */
26517- if (current->pid)
26518+ if (task_pid_nr(current))
26519 return;
26520 __exit_idle();
26521 }
26522@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26523 return ret;
26524 }
26525 #endif
26526-void stop_this_cpu(void *dummy)
26527+__noreturn void stop_this_cpu(void *dummy)
26528 {
26529 local_irq_disable();
26530 /*
26531@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26532 }
26533 early_param("idle", idle_setup);
26534
26535-unsigned long arch_align_stack(unsigned long sp)
26536+#ifdef CONFIG_PAX_RANDKSTACK
26537+void pax_randomize_kstack(struct pt_regs *regs)
26538 {
26539- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26540- sp -= get_random_int() % 8192;
26541- return sp & ~0xf;
26542-}
26543+ struct thread_struct *thread = &current->thread;
26544+ unsigned long time;
26545
26546-unsigned long arch_randomize_brk(struct mm_struct *mm)
26547-{
26548- unsigned long range_end = mm->brk + 0x02000000;
26549- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26550-}
26551+ if (!randomize_va_space)
26552+ return;
26553+
26554+ if (v8086_mode(regs))
26555+ return;
26556
26557+ rdtscl(time);
26558+
26559+ /* P4 seems to return a 0 LSB, ignore it */
26560+#ifdef CONFIG_MPENTIUM4
26561+ time &= 0x3EUL;
26562+ time <<= 2;
26563+#elif defined(CONFIG_X86_64)
26564+ time &= 0xFUL;
26565+ time <<= 4;
26566+#else
26567+ time &= 0x1FUL;
26568+ time <<= 3;
26569+#endif
26570+
26571+ thread->sp0 ^= time;
26572+ load_sp0(init_tss + smp_processor_id(), thread);
26573+
26574+#ifdef CONFIG_X86_64
26575+ this_cpu_write(kernel_stack, thread->sp0);
26576+#endif
26577+}
26578+#endif
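
pax_randomize_kstack() replaces the upstream arch_align_stack()/arch_randomize_brk() pair with RANDKSTACK: on each invocation (it is called from the syscall exit path elsewhere in this patch) a few low TSC bits are XORed into thread.sp0, so the kernel stack top lands at a slightly different, still aligned offset every time, frustrating attacks that rely on a predictable kernel stack layout. The amd64 variant keeps 4 bits shifted left by 4, i.e. 16 positions at 16-byte granularity spanning 256 bytes; the Pentium 4 variant discards the LSB that CPU reports as stuck at zero. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long tsc;

	for (tsc = 0; tsc < 16; tsc++) {
		unsigned long perturb = (tsc & 0xFUL) << 4;	/* amd64 case */
		printf("tsc bits %2lu -> sp0 ^= 0x%02lx\n", tsc, perturb);
	}
	return 0;
}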
26579diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26580index 8f3ebfe..cbc731b 100644
26581--- a/arch/x86/kernel/process_32.c
26582+++ b/arch/x86/kernel/process_32.c
26583@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26584 unsigned long thread_saved_pc(struct task_struct *tsk)
26585 {
26586 return ((unsigned long *)tsk->thread.sp)[3];
26587+//XXX return tsk->thread.eip;
26588 }
26589
26590 void __show_regs(struct pt_regs *regs, int all)
26591@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26592 unsigned long sp;
26593 unsigned short ss, gs;
26594
26595- if (user_mode_vm(regs)) {
26596+ if (user_mode(regs)) {
26597 sp = regs->sp;
26598 ss = regs->ss & 0xffff;
26599- gs = get_user_gs(regs);
26600 } else {
26601 sp = kernel_stack_pointer(regs);
26602 savesegment(ss, ss);
26603- savesegment(gs, gs);
26604 }
26605+ gs = get_user_gs(regs);
26606
26607 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26608 (u16)regs->cs, regs->ip, regs->flags,
26609- smp_processor_id());
26610+ raw_smp_processor_id());
26611 print_symbol("EIP is at %s\n", regs->ip);
26612
26613 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26614@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26615 int copy_thread(unsigned long clone_flags, unsigned long sp,
26616 unsigned long arg, struct task_struct *p)
26617 {
26618- struct pt_regs *childregs = task_pt_regs(p);
26619+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26620 struct task_struct *tsk;
26621 int err;
26622
26623 p->thread.sp = (unsigned long) childregs;
26624 p->thread.sp0 = (unsigned long) (childregs+1);
26625+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26626 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26627
26628 if (unlikely(p->flags & PF_KTHREAD)) {
26629 /* kernel thread */
26630 memset(childregs, 0, sizeof(struct pt_regs));
26631 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26632- task_user_gs(p) = __KERNEL_STACK_CANARY;
26633- childregs->ds = __USER_DS;
26634- childregs->es = __USER_DS;
26635+ savesegment(gs, childregs->gs);
26636+ childregs->ds = __KERNEL_DS;
26637+ childregs->es = __KERNEL_DS;
26638 childregs->fs = __KERNEL_PERCPU;
26639 childregs->bx = sp; /* function */
26640 childregs->bp = arg;
26641@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26642 struct thread_struct *prev = &prev_p->thread,
26643 *next = &next_p->thread;
26644 int cpu = smp_processor_id();
26645- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26646+ struct tss_struct *tss = init_tss + cpu;
26647 fpu_switch_t fpu;
26648
26649 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26650@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26651 */
26652 lazy_save_gs(prev->gs);
26653
26654+#ifdef CONFIG_PAX_MEMORY_UDEREF
26655+ __set_fs(task_thread_info(next_p)->addr_limit);
26656+#endif
26657+
26658 /*
26659 * Load the per-thread Thread-Local Storage descriptor.
26660 */
26661@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26662 */
26663 arch_end_context_switch(next_p);
26664
26665- this_cpu_write(kernel_stack,
26666- (unsigned long)task_stack_page(next_p) +
26667- THREAD_SIZE - KERNEL_STACK_OFFSET);
26668+ this_cpu_write(current_task, next_p);
26669+ this_cpu_write(current_tinfo, &next_p->tinfo);
26670+ this_cpu_write(kernel_stack, next->sp0);
26671
26672 /*
26673 * Restore %gs if needed (which is common)
26674@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26675
26676 switch_fpu_finish(next_p, fpu);
26677
26678- this_cpu_write(current_task, next_p);
26679-
26680 return prev_p;
26681 }
26682
26683@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26684 } while (count++ < 16);
26685 return 0;
26686 }
26687-
26688diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26689index 5a2c029..ec8611d 100644
26690--- a/arch/x86/kernel/process_64.c
26691+++ b/arch/x86/kernel/process_64.c
26692@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26693 struct pt_regs *childregs;
26694 struct task_struct *me = current;
26695
26696- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26697+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26698 childregs = task_pt_regs(p);
26699 p->thread.sp = (unsigned long) childregs;
26700 p->thread.usersp = me->thread.usersp;
26701+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26702 set_tsk_thread_flag(p, TIF_FORK);
26703 p->thread.io_bitmap_ptr = NULL;
26704
26705@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26706 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26707 savesegment(es, p->thread.es);
26708 savesegment(ds, p->thread.ds);
26709+ savesegment(ss, p->thread.ss);
26710+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26711 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26712
26713 if (unlikely(p->flags & PF_KTHREAD)) {
26714@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26715 struct thread_struct *prev = &prev_p->thread;
26716 struct thread_struct *next = &next_p->thread;
26717 int cpu = smp_processor_id();
26718- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26719+ struct tss_struct *tss = init_tss + cpu;
26720 unsigned fsindex, gsindex;
26721 fpu_switch_t fpu;
26722
26723@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26724 if (unlikely(next->ds | prev->ds))
26725 loadsegment(ds, next->ds);
26726
26727+ savesegment(ss, prev->ss);
26728+ if (unlikely(next->ss != prev->ss))
26729+ loadsegment(ss, next->ss);
26730+
26731 /*
26732 * Switch FS and GS.
26733 *
26734@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26735 prev->usersp = this_cpu_read(old_rsp);
26736 this_cpu_write(old_rsp, next->usersp);
26737 this_cpu_write(current_task, next_p);
26738+ this_cpu_write(current_tinfo, &next_p->tinfo);
26739
26740 /*
26741 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26742@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26743 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26744 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26745
26746- this_cpu_write(kernel_stack,
26747- (unsigned long)task_stack_page(next_p) +
26748- THREAD_SIZE - KERNEL_STACK_OFFSET);
26749+ this_cpu_write(kernel_stack, next->sp0);
26750
26751 /*
26752 * Now maybe reload the debug registers and handle I/O bitmaps
26753@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26754 if (!p || p == current || p->state == TASK_RUNNING)
26755 return 0;
26756 stack = (unsigned long)task_stack_page(p);
26757- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26758+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26759 return 0;
26760 fp = *(u64 *)(p->thread.sp);
26761 do {
26762- if (fp < (unsigned long)stack ||
26763- fp >= (unsigned long)stack+THREAD_SIZE)
26764+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26765 return 0;
26766 ip = *(u64 *)(fp+8);
26767 if (!in_sched_functions(ip))
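The two hunks above shrink the usable kernel stack by 16 bytes (sp0 now points at THREAD_SIZE - 16) and clamp get_wchan()'s frame-pointer walk to match. A minimal userspace sketch of the adjusted bounds test, with THREAD_SIZE and the addresses as illustrative assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE (16 * 1024UL)   /* illustrative; real value is config-dependent */

    /* A saved frame pointer is followed only while it stays inside
     * [stack, stack + THREAD_SIZE - 16 - sizeof(u64)], mirroring the
     * 16 bytes the patch reserves at the top of each kernel stack. */
    static int fp_in_bounds(uintptr_t stack, uintptr_t fp)
    {
        return fp >= stack && fp <= stack + THREAD_SIZE - 16 - sizeof(uint64_t);
    }

    int main(void)
    {
        uintptr_t stack = 0x100000;

        printf("%d\n", fp_in_bounds(stack, stack + THREAD_SIZE - 24)); /* 1: last usable slot */
        printf("%d\n", fp_in_bounds(stack, stack + THREAD_SIZE - 8));  /* 0: inside reserved area */
        return 0;
    }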
26768diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26769index e510618..5165ac0 100644
26770--- a/arch/x86/kernel/ptrace.c
26771+++ b/arch/x86/kernel/ptrace.c
26772@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26773 unsigned long sp = (unsigned long)&regs->sp;
26774 u32 *prev_esp;
26775
26776- if (context == (sp & ~(THREAD_SIZE - 1)))
26777+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26778 return sp;
26779
26780- prev_esp = (u32 *)(context);
26781+ prev_esp = *(u32 **)(context);
26782 if (prev_esp)
26783 return (unsigned long)prev_esp;
26784
26785@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26786 if (child->thread.gs != value)
26787 return do_arch_prctl(child, ARCH_SET_GS, value);
26788 return 0;
26789+
26790+ case offsetof(struct user_regs_struct,ip):
26791+ /*
26792+ * Protect against any attempt to set ip to an
26793+ * impossible address. There are dragons lurking if the
26794+ * address is noncanonical. (This explicitly allows
26795+ * setting ip to TASK_SIZE_MAX, because user code can do
26796+ * that all by itself by running off the end of its
26797+ * address space.
26798+ */
26799+ if (value > TASK_SIZE_MAX)
26800+ return -EIO;
26801+ break;
26802+
26803 #endif
26804 }
26805
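The TASK_SIZE_MAX check above guards against noncanonical instruction pointers: on x86-64 with 48-bit virtual addresses, bits 63..48 must be copies of bit 47, and returning to an address outside that shape faults in contexts the kernel handles badly. A small sketch of the canonical-address predicate (assuming classic 4-level paging and an arithmetic right shift, as on GCC/Clang):

    #include <stdint.h>
    #include <stdio.h>

    static int is_canonical(uint64_t addr)
    {
        /* Keep the low 48 bits and sign-extend bit 47 into bits 63..48. */
        return (uint64_t)((int64_t)(addr << 16) >> 16) == addr;
    }

    int main(void)
    {
        printf("%d\n", is_canonical(0x00007fffffffffffULL)); /* 1: top of user space */
        printf("%d\n", is_canonical(0x0000800000000000ULL)); /* 0: noncanonical hole */
        printf("%d\n", is_canonical(0xffff800000000000ULL)); /* 1: kernel half */
        return 0;
    }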
26806@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26807 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26808 {
26809 int i;
26810- int dr7 = 0;
26811+ unsigned long dr7 = 0;
26812 struct arch_hw_breakpoint *info;
26813
26814 for (i = 0; i < HBP_NUM; i++) {
26815@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26816 unsigned long addr, unsigned long data)
26817 {
26818 int ret;
26819- unsigned long __user *datap = (unsigned long __user *)data;
26820+ unsigned long __user *datap = (__force unsigned long __user *)data;
26821
26822 switch (request) {
26823 /* read the word at location addr in the USER area. */
26824@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26825 if ((int) addr < 0)
26826 return -EIO;
26827 ret = do_get_thread_area(child, addr,
26828- (struct user_desc __user *)data);
26829+ (__force struct user_desc __user *) data);
26830 break;
26831
26832 case PTRACE_SET_THREAD_AREA:
26833 if ((int) addr < 0)
26834 return -EIO;
26835 ret = do_set_thread_area(child, addr,
26836- (struct user_desc __user *)data, 0);
26837+ (__force struct user_desc __user *) data, 0);
26838 break;
26839 #endif
26840
26841@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26842
26843 #ifdef CONFIG_X86_64
26844
26845-static struct user_regset x86_64_regsets[] __read_mostly = {
26846+static user_regset_no_const x86_64_regsets[] __read_only = {
26847 [REGSET_GENERAL] = {
26848 .core_note_type = NT_PRSTATUS,
26849 .n = sizeof(struct user_regs_struct) / sizeof(long),
26850@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26851 #endif /* CONFIG_X86_64 */
26852
26853 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26854-static struct user_regset x86_32_regsets[] __read_mostly = {
26855+static user_regset_no_const x86_32_regsets[] __read_only = {
26856 [REGSET_GENERAL] = {
26857 .core_note_type = NT_PRSTATUS,
26858 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26859@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26860 */
26861 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26862
26863-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26864+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26865 {
26866 #ifdef CONFIG_X86_64
26867 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26868@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26869 memset(info, 0, sizeof(*info));
26870 info->si_signo = SIGTRAP;
26871 info->si_code = si_code;
26872- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26873+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26874 }
26875
26876 void user_single_step_siginfo(struct task_struct *tsk,
26877@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26878 }
26879 }
26880
26881+#ifdef CONFIG_GRKERNSEC_SETXID
26882+extern void gr_delayed_cred_worker(void);
26883+#endif
26884+
26885 /*
26886 * We can return 0 to resume the syscall or anything else to go to phase
26887 * 2. If we resume the syscall, we need to put something appropriate in
26888@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26889
26890 BUG_ON(regs != task_pt_regs(current));
26891
26892+#ifdef CONFIG_GRKERNSEC_SETXID
26893+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26894+ gr_delayed_cred_worker();
26895+#endif
26896+
26897 /*
26898 * If we stepped into a sysenter/syscall insn, it trapped in
26899 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26900@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26901 */
26902 user_exit();
26903
26904+#ifdef CONFIG_GRKERNSEC_SETXID
26905+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26906+ gr_delayed_cred_worker();
26907+#endif
26908+
26909 audit_syscall_exit(regs);
26910
26911 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26912diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26913index 2f355d2..e75ed0a 100644
26914--- a/arch/x86/kernel/pvclock.c
26915+++ b/arch/x86/kernel/pvclock.c
26916@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26917 reset_hung_task_detector();
26918 }
26919
26920-static atomic64_t last_value = ATOMIC64_INIT(0);
26921+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26922
26923 void pvclock_resume(void)
26924 {
26925- atomic64_set(&last_value, 0);
26926+ atomic64_set_unchecked(&last_value, 0);
26927 }
26928
26929 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26930@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26931 * updating at the same time, and one of them could be slightly behind,
26932 * making the assumption that last_value always goes forward fail to hold.
26933 */
26934- last = atomic64_read(&last_value);
26935+ last = atomic64_read_unchecked(&last_value);
26936 do {
26937 if (ret < last)
26938 return last;
26939- last = atomic64_cmpxchg(&last_value, last, ret);
26940+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26941 } while (unlikely(last != ret));
26942
26943 return ret;
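The pvclock hunk is a representative PAX_REFCOUNT conversion: atomic64_t operations are instrumented to detect overflow, and counters whose wraparound is harmless (last_value only ratchets time forward) are switched to *_unchecked variants to opt out. A userspace analogue of the two behaviours, with hypothetical function names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Checked add: traps on signed overflow, as instrumented atomics would. */
    static int64_t checked_add(int64_t a, int64_t b)
    {
        int64_t r;
        if (__builtin_add_overflow(a, b, &r)) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();   /* the kernel would kill the offending task instead */
        }
        return r;
    }

    /* Unchecked add: wraparound explicitly allowed. */
    static int64_t unchecked_add(int64_t a, int64_t b)
    {
        return (int64_t)((uint64_t)a + (uint64_t)b);
    }

    int main(void)
    {
        printf("%lld\n", (long long)unchecked_add(INT64_MAX, 1)); /* wraps */
        checked_add(INT64_MAX, 1);                                /* aborts */
        return 0;
    }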
26944diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26945index bae6c60..b438619 100644
26946--- a/arch/x86/kernel/reboot.c
26947+++ b/arch/x86/kernel/reboot.c
26948@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26949
26950 void __noreturn machine_real_restart(unsigned int type)
26951 {
26952+
26953+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26954+ struct desc_struct *gdt;
26955+#endif
26956+
26957 local_irq_disable();
26958
26959 /*
26960@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26961
26962 /* Jump to the identity-mapped low memory code */
26963 #ifdef CONFIG_X86_32
26964- asm volatile("jmpl *%0" : :
26965+
26966+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26967+ gdt = get_cpu_gdt_table(smp_processor_id());
26968+ pax_open_kernel();
26969+#ifdef CONFIG_PAX_MEMORY_UDEREF
26970+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26971+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26972+ loadsegment(ds, __KERNEL_DS);
26973+ loadsegment(es, __KERNEL_DS);
26974+ loadsegment(ss, __KERNEL_DS);
26975+#endif
26976+#ifdef CONFIG_PAX_KERNEXEC
26977+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26978+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26979+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26980+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26981+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26982+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26983+#endif
26984+ pax_close_kernel();
26985+#endif
26986+
26987+ asm volatile("ljmpl *%0" : :
26988 "rm" (real_mode_header->machine_real_restart_asm),
26989 "a" (type));
26990 #else
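Under KERNEXEC/UDEREF the kernel code and data descriptors in the GDT normally carry a reduced base or limit, so the hunk above restores flat 4 GiB segments before jumping into the 16-bit restart stub (and uses ljmpl so %cs is reloaded on the way). A userspace sketch of how the base-0/limit-0xfffff/G=1 values it writes decode to the full address space; the struct layout and field names here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct seg_desc {
        uint16_t limit0;                      /* limit bits 15..0 */
        uint16_t base0;                       /* base bits 15..0 */
        uint8_t  base1;                       /* base bits 23..16 */
        uint8_t  type : 4, s : 1, dpl : 2, p : 1;
        uint8_t  limit1 : 4, avl : 1, l : 1, d : 1, g : 1;
        uint8_t  base2;                       /* base bits 31..24 */
    };

    static uint32_t desc_limit_bytes(const struct seg_desc *d)
    {
        uint32_t limit = ((uint32_t)d->limit1 << 16) | d->limit0;
        return d->g ? ((limit << 12) | 0xfff) : limit;  /* G=1 scales by 4 KiB */
    }

    int main(void)
    {
        struct seg_desc flat = { .limit0 = 0xffff, .limit1 = 0xf, .g = 1 };
        printf("limit = 0x%x\n", desc_limit_bytes(&flat)); /* 0xffffffff */
        return 0;
    }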
26991@@ -501,7 +528,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26992 * This means that this function can never return, it can misbehave
26993 * by not rebooting properly and hanging.
26994 */
26995-static void native_machine_emergency_restart(void)
26996+static void __noreturn native_machine_emergency_restart(void)
26997 {
26998 int i;
26999 int attempt = 0;
27000@@ -621,13 +648,13 @@ void native_machine_shutdown(void)
27001 #endif
27002 }
27003
27004-static void __machine_emergency_restart(int emergency)
27005+static void __noreturn __machine_emergency_restart(int emergency)
27006 {
27007 reboot_emergency = emergency;
27008 machine_ops.emergency_restart();
27009 }
27010
27011-static void native_machine_restart(char *__unused)
27012+static void __noreturn native_machine_restart(char *__unused)
27013 {
27014 pr_notice("machine restart\n");
27015
27016@@ -636,7 +663,7 @@ static void native_machine_restart(char *__unused)
27017 __machine_emergency_restart(0);
27018 }
27019
27020-static void native_machine_halt(void)
27021+static void __noreturn native_machine_halt(void)
27022 {
27023 /* Stop other cpus and apics */
27024 machine_shutdown();
27025@@ -646,7 +673,7 @@ static void native_machine_halt(void)
27026 stop_this_cpu(NULL);
27027 }
27028
27029-static void native_machine_power_off(void)
27030+static void __noreturn native_machine_power_off(void)
27031 {
27032 if (pm_power_off) {
27033 if (!reboot_force)
27034@@ -655,9 +682,10 @@ static void native_machine_power_off(void)
27035 }
27036 /* A fallback in case there is no PM info available */
27037 tboot_shutdown(TB_SHUTDOWN_HALT);
27038+ unreachable();
27039 }
27040
27041-struct machine_ops machine_ops = {
27042+struct machine_ops machine_ops __read_only = {
27043 .power_off = native_machine_power_off,
27044 .shutdown = native_machine_shutdown,
27045 .emergency_restart = native_machine_emergency_restart,
27046diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27047index c8e41e9..64049ef 100644
27048--- a/arch/x86/kernel/reboot_fixups_32.c
27049+++ b/arch/x86/kernel/reboot_fixups_32.c
27050@@ -57,7 +57,7 @@ struct device_fixup {
27051 unsigned int vendor;
27052 unsigned int device;
27053 void (*reboot_fixup)(struct pci_dev *);
27054-};
27055+} __do_const;
27056
27057 /*
27058 * PCI ids solely used for fixups_table go here
27059diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27060index 3fd2c69..a444264 100644
27061--- a/arch/x86/kernel/relocate_kernel_64.S
27062+++ b/arch/x86/kernel/relocate_kernel_64.S
27063@@ -96,8 +96,7 @@ relocate_kernel:
27064
27065 /* jump to identity mapped page */
27066 addq $(identity_mapped - relocate_kernel), %r8
27067- pushq %r8
27068- ret
27069+ jmp *%r8
27070
27071 identity_mapped:
27072 /* set return address to 0 if not preserving context */
27073diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27074index ab4734e..c4ca0eb 100644
27075--- a/arch/x86/kernel/setup.c
27076+++ b/arch/x86/kernel/setup.c
27077@@ -110,6 +110,7 @@
27078 #include <asm/mce.h>
27079 #include <asm/alternative.h>
27080 #include <asm/prom.h>
27081+#include <asm/boot.h>
27082
27083 /*
27084 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27085@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27086 #endif
27087
27088
27089-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27090-__visible unsigned long mmu_cr4_features;
27091+#ifdef CONFIG_X86_64
27092+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27093+#elif defined(CONFIG_X86_PAE)
27094+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27095 #else
27096-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27097+__visible unsigned long mmu_cr4_features __read_only;
27098 #endif
27099
27100+void set_in_cr4(unsigned long mask)
27101+{
27102+ unsigned long cr4 = read_cr4();
27103+
27104+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27105+ return;
27106+
27107+ pax_open_kernel();
27108+ mmu_cr4_features |= mask;
27109+ pax_close_kernel();
27110+
27111+ if (trampoline_cr4_features)
27112+ *trampoline_cr4_features = mmu_cr4_features;
27113+ cr4 |= mask;
27114+ write_cr4(cr4);
27115+}
27116+EXPORT_SYMBOL(set_in_cr4);
27117+
27118+void clear_in_cr4(unsigned long mask)
27119+{
27120+ unsigned long cr4 = read_cr4();
27121+
27122+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27123+ return;
27124+
27125+ pax_open_kernel();
27126+ mmu_cr4_features &= ~mask;
27127+ pax_close_kernel();
27128+
27129+ if (trampoline_cr4_features)
27130+ *trampoline_cr4_features = mmu_cr4_features;
27131+ cr4 &= ~mask;
27132+ write_cr4(cr4);
27133+}
27134+EXPORT_SYMBOL(clear_in_cr4);
27135+
27136 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27137 int bootloader_type, bootloader_version;
27138
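set_in_cr4()/clear_in_cr4() above keep mmu_cr4_features writable only inside a pax_open_kernel()/pax_close_kernel() window, because the variable itself moves into a read-only section. A rough userspace analogue of that write-rarely pattern using mprotect() (the kernel version toggles CR0.WP instead; everything here is illustrative):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        unsigned long *features = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (features == MAP_FAILED)
            return 1;

        *features = 0x20;                                  /* initial value */
        mprotect(features, page, PROT_READ);               /* now read-only */

        mprotect(features, page, PROT_READ | PROT_WRITE);  /* "open kernel" */
        *features |= 0x40;
        mprotect(features, page, PROT_READ);               /* "close kernel" */

        printf("features = 0x%lx\n", *features);           /* 0x60 */
        return 0;
    }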
27139@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27140 * area (640->1Mb) as ram even though it is not.
27141 * take them out.
27142 */
27143- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27144+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27145
27146 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27147 }
27148@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27149 /* called before trim_bios_range() to spare extra sanitize */
27150 static void __init e820_add_kernel_range(void)
27151 {
27152- u64 start = __pa_symbol(_text);
27153+ u64 start = __pa_symbol(ktla_ktva(_text));
27154 u64 size = __pa_symbol(_end) - start;
27155
27156 /*
27157@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27158
27159 void __init setup_arch(char **cmdline_p)
27160 {
27161+#ifdef CONFIG_X86_32
27162+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27163+#else
27164 memblock_reserve(__pa_symbol(_text),
27165 (unsigned long)__bss_stop - (unsigned long)_text);
27166+#endif
27167
27168 early_reserve_initrd();
27169
27170@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
27171
27172 if (!boot_params.hdr.root_flags)
27173 root_mountflags &= ~MS_RDONLY;
27174- init_mm.start_code = (unsigned long) _text;
27175- init_mm.end_code = (unsigned long) _etext;
27176+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27177+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27178 init_mm.end_data = (unsigned long) _edata;
27179 init_mm.brk = _brk_end;
27180
27181 mpx_mm_init(&init_mm);
27182
27183- code_resource.start = __pa_symbol(_text);
27184- code_resource.end = __pa_symbol(_etext)-1;
27185- data_resource.start = __pa_symbol(_etext);
27186+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27187+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27188+ data_resource.start = __pa_symbol(_sdata);
27189 data_resource.end = __pa_symbol(_edata)-1;
27190 bss_resource.start = __pa_symbol(__bss_start);
27191 bss_resource.end = __pa_symbol(__bss_stop)-1;
27192diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27193index e4fcb87..9c06c55 100644
27194--- a/arch/x86/kernel/setup_percpu.c
27195+++ b/arch/x86/kernel/setup_percpu.c
27196@@ -21,19 +21,17 @@
27197 #include <asm/cpu.h>
27198 #include <asm/stackprotector.h>
27199
27200-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27201+#ifdef CONFIG_SMP
27202+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27203 EXPORT_PER_CPU_SYMBOL(cpu_number);
27204+#endif
27205
27206-#ifdef CONFIG_X86_64
27207 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27208-#else
27209-#define BOOT_PERCPU_OFFSET 0
27210-#endif
27211
27212 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27213 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27214
27215-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27216+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27217 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27218 };
27219 EXPORT_SYMBOL(__per_cpu_offset);
27220@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27221 {
27222 #ifdef CONFIG_NEED_MULTIPLE_NODES
27223 pg_data_t *last = NULL;
27224- unsigned int cpu;
27225+ int cpu;
27226
27227 for_each_possible_cpu(cpu) {
27228 int node = early_cpu_to_node(cpu);
27229@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27230 {
27231 #ifdef CONFIG_X86_32
27232 struct desc_struct gdt;
27233+ unsigned long base = per_cpu_offset(cpu);
27234
27235- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27236- 0x2 | DESCTYPE_S, 0x8);
27237- gdt.s = 1;
27238+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27239+ 0x83 | DESCTYPE_S, 0xC);
27240 write_gdt_entry(get_cpu_gdt_table(cpu),
27241 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27242 #endif
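The percpu descriptor hunk above replaces the old flat 4 GiB segment with one whose limit stops at VMALLOC_END, so a stray %fs-relative access cannot reach arbitrary kernel memory. With 4 KiB granularity a descriptor covers base .. base + ((limit + 1) << 12) - 1, which is why the limit is computed as (VMALLOC_END - base - 1) >> PAGE_SHIFT. A quick check with made-up constants:

    #include <stdio.h>

    int main(void)
    {
        unsigned long VMALLOC_END = 0xfffff000UL;  /* illustrative */
        unsigned long base = 0xc2000000UL;         /* hypothetical percpu base */
        unsigned long limit = (VMALLOC_END - base - 1) >> 12;
        unsigned long top = base + ((limit + 1) << 12) - 1;

        printf("top = 0x%lx, VMALLOC_END - 1 = 0x%lx\n", top, VMALLOC_END - 1);
        return 0;
    }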
27243@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27244 /* alrighty, percpu areas up and running */
27245 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27246 for_each_possible_cpu(cpu) {
27247+#ifdef CONFIG_CC_STACKPROTECTOR
27248+#ifdef CONFIG_X86_32
27249+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27250+#endif
27251+#endif
27252 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27253 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27254 per_cpu(cpu_number, cpu) = cpu;
27255@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27256 */
27257 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27258 #endif
27259+#ifdef CONFIG_CC_STACKPROTECTOR
27260+#ifdef CONFIG_X86_32
27261+ if (!cpu)
27262+ per_cpu(stack_canary.canary, cpu) = canary;
27263+#endif
27264+#endif
27265 /*
27266 * Up to this point, the boot CPU has been using .init.data
27267 * area. Reload any changed state for the boot CPU.
27268diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27269index ed37a76..39f936e 100644
27270--- a/arch/x86/kernel/signal.c
27271+++ b/arch/x86/kernel/signal.c
27272@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27273 * Align the stack pointer according to the i386 ABI,
27274 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27275 */
27276- sp = ((sp + 4) & -16ul) - 4;
27277+ sp = ((sp - 12) & -16ul) - 4;
27278 #else /* !CONFIG_X86_32 */
27279 sp = round_down(sp, 16) - 8;
27280 #endif
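Both the old and new expressions in align_sigframe() leave ((sp + 4) & 15) == 0, the i386 ABI condition on function entry; the patched form simply evaluates to exactly 16 bytes less than the original for every input, keeping one extra aligned slot of headroom below the signal frame. An exhaustive check over one alignment period:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
            unsigned long old = ((sp + 4) & -16UL) - 4;
            unsigned long new = ((sp - 12) & -16UL) - 4;

            assert(((old + 4) & 15) == 0 && ((new + 4) & 15) == 0);
            assert(new == old - 16);   /* always one 16-byte slot lower */
        }
        puts("ok");
        return 0;
    }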
27281@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27282 }
27283
27284 if (current->mm->context.vdso)
27285- restorer = current->mm->context.vdso +
27286- selected_vdso32->sym___kernel_sigreturn;
27287+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27288 else
27289- restorer = &frame->retcode;
27290+ restorer = (void __user *)&frame->retcode;
27291 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27292 restorer = ksig->ka.sa.sa_restorer;
27293
27294@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27295 * reasons and because gdb uses it as a signature to notice
27296 * signal handler stack frames.
27297 */
27298- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27299+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27300
27301 if (err)
27302 return -EFAULT;
27303@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27304 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27305
27306 /* Set up to return from userspace. */
27307- restorer = current->mm->context.vdso +
27308- selected_vdso32->sym___kernel_rt_sigreturn;
27309+ if (current->mm->context.vdso)
27310+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27311+ else
27312+ restorer = (void __user *)&frame->retcode;
27313 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27314 restorer = ksig->ka.sa.sa_restorer;
27315 put_user_ex(restorer, &frame->pretcode);
27316@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27317 * reasons and because gdb uses it as a signature to notice
27318 * signal handler stack frames.
27319 */
27320- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27321+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27322 } put_user_catch(err);
27323
27324 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27325@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27326 {
27327 int usig = signr_convert(ksig->sig);
27328 sigset_t *set = sigmask_to_save();
27329- compat_sigset_t *cset = (compat_sigset_t *) set;
27330+ sigset_t sigcopy;
27331+ compat_sigset_t *cset;
27332+
27333+ sigcopy = *set;
27334+
27335+ cset = (compat_sigset_t *) &sigcopy;
27336
27337 /* Set up the stack frame */
27338 if (is_ia32_frame()) {
27339@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27340 } else if (is_x32_frame()) {
27341 return x32_setup_rt_frame(ksig, cset, regs);
27342 } else {
27343- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27344+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27345 }
27346 }
27347
27348diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27349index be8e1bd..a3d93fa 100644
27350--- a/arch/x86/kernel/smp.c
27351+++ b/arch/x86/kernel/smp.c
27352@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27353
27354 __setup("nonmi_ipi", nonmi_ipi_setup);
27355
27356-struct smp_ops smp_ops = {
27357+struct smp_ops smp_ops __read_only = {
27358 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27359 .smp_prepare_cpus = native_smp_prepare_cpus,
27360 .smp_cpus_done = native_smp_cpus_done,
27361diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27362index 6d7022c..4feb6be 100644
27363--- a/arch/x86/kernel/smpboot.c
27364+++ b/arch/x86/kernel/smpboot.c
27365@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
27366
27367 enable_start_cpu0 = 0;
27368
27369-#ifdef CONFIG_X86_32
27370+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27371+ barrier();
27372+
27373 /* switch away from the initial page table */
27374+#ifdef CONFIG_PAX_PER_CPU_PGD
27375+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27376+#else
27377 load_cr3(swapper_pg_dir);
27378+#endif
27379 __flush_tlb_all();
27380-#endif
27381
27382- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27383- barrier();
27384 /*
27385 * Check TSC synchronization with the BP:
27386 */
27387@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27388 alternatives_enable_smp();
27389
27390 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27391- (THREAD_SIZE + task_stack_page(idle))) - 1);
27392+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27393 per_cpu(current_task, cpu) = idle;
27394+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27395
27396 #ifdef CONFIG_X86_32
27397 /* Stack for startup_32 can be just as for start_secondary onwards */
27398@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27399 clear_tsk_thread_flag(idle, TIF_FORK);
27400 initial_gs = per_cpu_offset(cpu);
27401 #endif
27402- per_cpu(kernel_stack, cpu) =
27403- (unsigned long)task_stack_page(idle) -
27404- KERNEL_STACK_OFFSET + THREAD_SIZE;
27405+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27406+ pax_open_kernel();
27407 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27408+ pax_close_kernel();
27409 initial_code = (unsigned long)start_secondary;
27410 stack_start = idle->thread.sp;
27411
27412@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27413 /* the FPU context is blank, nobody can own it */
27414 __cpu_disable_lazy_restore(cpu);
27415
27416+#ifdef CONFIG_PAX_PER_CPU_PGD
27417+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27418+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27419+ KERNEL_PGD_PTRS);
27420+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27421+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27422+ KERNEL_PGD_PTRS);
27423+#endif
27424+
27425 err = do_boot_cpu(apicid, cpu, tidle);
27426 if (err) {
27427 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
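The PAX_PER_CPU_PGD block above copies the kernel half of swapper_pg_dir into the new CPU's private kernel and user page directories before it boots, so kernel mappings created earlier are visible through the per-CPU PGDs. A toy rendering of the clone_pgd_range() calls with made-up sizes:

    #include <stdio.h>
    #include <string.h>

    #define PTRS_PER_PGD        8   /* illustrative */
    #define KERNEL_PGD_BOUNDARY 4   /* first kernel-half entry */
    #define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

    int main(void)
    {
        unsigned long swapper[PTRS_PER_PGD]    = { 0, 0, 0, 0, 10, 11, 12, 13 };
        unsigned long cpu_kernel[PTRS_PER_PGD] = { 0 };
        unsigned long cpu_user[PTRS_PER_PGD]   = { 0 };

        /* clone_pgd_range(dst + BOUNDARY, src + BOUNDARY, PTRS) */
        memcpy(cpu_kernel + KERNEL_PGD_BOUNDARY, swapper + KERNEL_PGD_BOUNDARY,
               KERNEL_PGD_PTRS * sizeof(unsigned long));
        memcpy(cpu_user + KERNEL_PGD_BOUNDARY, swapper + KERNEL_PGD_BOUNDARY,
               KERNEL_PGD_PTRS * sizeof(unsigned long));

        printf("%lu %lu\n", cpu_kernel[4], cpu_user[7]);  /* 10 13 */
        return 0;
    }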
27428diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27429index 9b4d51d..5d28b58 100644
27430--- a/arch/x86/kernel/step.c
27431+++ b/arch/x86/kernel/step.c
27432@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27433 struct desc_struct *desc;
27434 unsigned long base;
27435
27436- seg &= ~7UL;
27437+ seg >>= 3;
27438
27439 mutex_lock(&child->mm->context.lock);
27440- if (unlikely((seg >> 3) >= child->mm->context.size))
27441+ if (unlikely(seg >= child->mm->context.size))
27442 addr = -1L; /* bogus selector, access would fault */
27443 else {
27444 desc = child->mm->context.ldt + seg;
27445@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27446 addr += base;
27447 }
27448 mutex_unlock(&child->mm->context.lock);
27449- }
27450+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27451+ addr = ktla_ktva(addr);
27452
27453 return addr;
27454 }
27455@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27456 unsigned char opcode[15];
27457 unsigned long addr = convert_ip_to_linear(child, regs);
27458
27459+ if (addr == -EINVAL)
27460+ return 0;
27461+
27462 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27463 for (i = 0; i < copied; i++) {
27464 switch (opcode[i]) {
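The convert_ip_to_linear() change swaps "seg &= ~7; ... seg >> 3" for shifting once up front: a selector packs the descriptor index in bits 15..3, the table indicator (GDT/LDT) in bit 2, and the requested privilege level in bits 1..0. A short decomposition example:

    #include <stdio.h>

    int main(void)
    {
        unsigned short seg = 0x007b;  /* a typical 32-bit user %cs value */

        printf("index = %d\n", seg >> 3);                 /* 15 */
        printf("table = %s\n", (seg & 4) ? "LDT" : "GDT");
        printf("rpl   = %d\n", seg & 3);                  /* 3 = user */
        return 0;
    }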
27465diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27466new file mode 100644
27467index 0000000..5877189
27468--- /dev/null
27469+++ b/arch/x86/kernel/sys_i386_32.c
27470@@ -0,0 +1,189 @@
27471+/*
27472+ * This file contains various random system calls that
27473+ * have a non-standard calling sequence on the Linux/i386
27474+ * platform.
27475+ */
27476+
27477+#include <linux/errno.h>
27478+#include <linux/sched.h>
27479+#include <linux/mm.h>
27480+#include <linux/fs.h>
27481+#include <linux/smp.h>
27482+#include <linux/sem.h>
27483+#include <linux/msg.h>
27484+#include <linux/shm.h>
27485+#include <linux/stat.h>
27486+#include <linux/syscalls.h>
27487+#include <linux/mman.h>
27488+#include <linux/file.h>
27489+#include <linux/utsname.h>
27490+#include <linux/ipc.h>
27491+#include <linux/elf.h>
27492+
27493+#include <linux/uaccess.h>
27494+#include <linux/unistd.h>
27495+
27496+#include <asm/syscalls.h>
27497+
27498+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27499+{
27500+ unsigned long pax_task_size = TASK_SIZE;
27501+
27502+#ifdef CONFIG_PAX_SEGMEXEC
27503+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27504+ pax_task_size = SEGMEXEC_TASK_SIZE;
27505+#endif
27506+
27507+ if (flags & MAP_FIXED)
27508+ if (len > pax_task_size || addr > pax_task_size - len)
27509+ return -EINVAL;
27510+
27511+ return 0;
27512+}
27513+
27514+/*
27515+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27516+ */
27517+static unsigned long get_align_mask(void)
27518+{
27519+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27520+ return 0;
27521+
27522+ if (!(current->flags & PF_RANDOMIZE))
27523+ return 0;
27524+
27525+ return va_align.mask;
27526+}
27527+
27528+unsigned long
27529+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27530+ unsigned long len, unsigned long pgoff, unsigned long flags)
27531+{
27532+ struct mm_struct *mm = current->mm;
27533+ struct vm_area_struct *vma;
27534+ unsigned long pax_task_size = TASK_SIZE;
27535+ struct vm_unmapped_area_info info;
27536+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27537+
27538+#ifdef CONFIG_PAX_SEGMEXEC
27539+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27540+ pax_task_size = SEGMEXEC_TASK_SIZE;
27541+#endif
27542+
27543+ pax_task_size -= PAGE_SIZE;
27544+
27545+ if (len > pax_task_size)
27546+ return -ENOMEM;
27547+
27548+ if (flags & MAP_FIXED)
27549+ return addr;
27550+
27551+#ifdef CONFIG_PAX_RANDMMAP
27552+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27553+#endif
27554+
27555+ if (addr) {
27556+ addr = PAGE_ALIGN(addr);
27557+ if (pax_task_size - len >= addr) {
27558+ vma = find_vma(mm, addr);
27559+ if (check_heap_stack_gap(vma, addr, len, offset))
27560+ return addr;
27561+ }
27562+ }
27563+
27564+ info.flags = 0;
27565+ info.length = len;
27566+ info.align_mask = filp ? get_align_mask() : 0;
27567+ info.align_offset = pgoff << PAGE_SHIFT;
27568+ info.threadstack_offset = offset;
27569+
27570+#ifdef CONFIG_PAX_PAGEEXEC
27571+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27572+ info.low_limit = 0x00110000UL;
27573+ info.high_limit = mm->start_code;
27574+
27575+#ifdef CONFIG_PAX_RANDMMAP
27576+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27577+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27578+#endif
27579+
27580+ if (info.low_limit < info.high_limit) {
27581+ addr = vm_unmapped_area(&info);
27582+ if (!IS_ERR_VALUE(addr))
27583+ return addr;
27584+ }
27585+ } else
27586+#endif
27587+
27588+ info.low_limit = mm->mmap_base;
27589+ info.high_limit = pax_task_size;
27590+
27591+ return vm_unmapped_area(&info);
27592+}
27593+
27594+unsigned long
27595+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27596+ const unsigned long len, const unsigned long pgoff,
27597+ const unsigned long flags)
27598+{
27599+ struct vm_area_struct *vma;
27600+ struct mm_struct *mm = current->mm;
27601+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27602+ struct vm_unmapped_area_info info;
27603+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27604+
27605+#ifdef CONFIG_PAX_SEGMEXEC
27606+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27607+ pax_task_size = SEGMEXEC_TASK_SIZE;
27608+#endif
27609+
27610+ pax_task_size -= PAGE_SIZE;
27611+
27612+ /* requested length too big for entire address space */
27613+ if (len > pax_task_size)
27614+ return -ENOMEM;
27615+
27616+ if (flags & MAP_FIXED)
27617+ return addr;
27618+
27619+#ifdef CONFIG_PAX_PAGEEXEC
27620+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27621+ goto bottomup;
27622+#endif
27623+
27624+#ifdef CONFIG_PAX_RANDMMAP
27625+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27626+#endif
27627+
27628+ /* requesting a specific address */
27629+ if (addr) {
27630+ addr = PAGE_ALIGN(addr);
27631+ if (pax_task_size - len >= addr) {
27632+ vma = find_vma(mm, addr);
27633+ if (check_heap_stack_gap(vma, addr, len, offset))
27634+ return addr;
27635+ }
27636+ }
27637+
27638+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27639+ info.length = len;
27640+ info.low_limit = PAGE_SIZE;
27641+ info.high_limit = mm->mmap_base;
27642+ info.align_mask = filp ? get_align_mask() : 0;
27643+ info.align_offset = pgoff << PAGE_SHIFT;
27644+ info.threadstack_offset = offset;
27645+
27646+ addr = vm_unmapped_area(&info);
27647+ if (!(addr & ~PAGE_MASK))
27648+ return addr;
27649+ VM_BUG_ON(addr != -ENOMEM);
27650+
27651+bottomup:
27652+ /*
27653+ * A failed mmap() very likely causes application failure,
27654+ * so fall back to the bottom-up function here. This scenario
27655+ * can happen with large stack limits and large mmap()
27656+ * allocations.
27657+ */
27658+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27659+}
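Both allocators in this new file defer to check_heap_stack_gap() instead of the bare "!vma || addr + len <= vma->vm_start" test, and thread the randomized gr_rand_threadstack_offset() result through vm_unmapped_area(). A hypothetical sketch of the gap contract, with illustrative struct and field names:

    #include <stdio.h>

    struct vma { unsigned long start; int is_stack; };

    /* Accept [addr, addr + len) only if it ends below the next mapping,
     * demanding "gap" extra bytes of slack when that mapping is a stack. */
    static int gap_ok(const struct vma *next, unsigned long addr,
                      unsigned long len, unsigned long gap)
    {
        if (!next)
            return 1;
        if (next->is_stack)
            return addr + len + gap <= next->start;
        return addr + len <= next->start;
    }

    int main(void)
    {
        struct vma stack = { 0x20000, 1 };

        printf("%d\n", gap_ok(&stack, 0x10000, 0x8000, 0x1000)); /* 1 */
        printf("%d\n", gap_ok(&stack, 0x10000, 0xf800, 0x1000)); /* 0: too close */
        return 0;
    }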
27660diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27661index 30277e2..5664a29 100644
27662--- a/arch/x86/kernel/sys_x86_64.c
27663+++ b/arch/x86/kernel/sys_x86_64.c
27664@@ -81,8 +81,8 @@ out:
27665 return error;
27666 }
27667
27668-static void find_start_end(unsigned long flags, unsigned long *begin,
27669- unsigned long *end)
27670+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27671+ unsigned long *begin, unsigned long *end)
27672 {
27673 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27674 unsigned long new_begin;
27675@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27676 *begin = new_begin;
27677 }
27678 } else {
27679- *begin = current->mm->mmap_legacy_base;
27680+ *begin = mm->mmap_legacy_base;
27681 *end = TASK_SIZE;
27682 }
27683 }
27684@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27685 struct vm_area_struct *vma;
27686 struct vm_unmapped_area_info info;
27687 unsigned long begin, end;
27688+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27689
27690 if (flags & MAP_FIXED)
27691 return addr;
27692
27693- find_start_end(flags, &begin, &end);
27694+ find_start_end(mm, flags, &begin, &end);
27695
27696 if (len > end)
27697 return -ENOMEM;
27698
27699+#ifdef CONFIG_PAX_RANDMMAP
27700+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27701+#endif
27702+
27703 if (addr) {
27704 addr = PAGE_ALIGN(addr);
27705 vma = find_vma(mm, addr);
27706- if (end - len >= addr &&
27707- (!vma || addr + len <= vma->vm_start))
27708+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27709 return addr;
27710 }
27711
27712@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27713 info.high_limit = end;
27714 info.align_mask = filp ? get_align_mask() : 0;
27715 info.align_offset = pgoff << PAGE_SHIFT;
27716+ info.threadstack_offset = offset;
27717 return vm_unmapped_area(&info);
27718 }
27719
27720@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27721 struct mm_struct *mm = current->mm;
27722 unsigned long addr = addr0;
27723 struct vm_unmapped_area_info info;
27724+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27725
27726 /* requested length too big for entire address space */
27727 if (len > TASK_SIZE)
27728@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27729 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27730 goto bottomup;
27731
27732+#ifdef CONFIG_PAX_RANDMMAP
27733+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27734+#endif
27735+
27736 /* requesting a specific address */
27737 if (addr) {
27738 addr = PAGE_ALIGN(addr);
27739 vma = find_vma(mm, addr);
27740- if (TASK_SIZE - len >= addr &&
27741- (!vma || addr + len <= vma->vm_start))
27742+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27743 return addr;
27744 }
27745
27746@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27747 info.high_limit = mm->mmap_base;
27748 info.align_mask = filp ? get_align_mask() : 0;
27749 info.align_offset = pgoff << PAGE_SHIFT;
27750+ info.threadstack_offset = offset;
27751 addr = vm_unmapped_area(&info);
27752 if (!(addr & ~PAGE_MASK))
27753 return addr;
27754diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27755index 91a4496..bb87552 100644
27756--- a/arch/x86/kernel/tboot.c
27757+++ b/arch/x86/kernel/tboot.c
27758@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27759
27760 void tboot_shutdown(u32 shutdown_type)
27761 {
27762- void (*shutdown)(void);
27763+ void (* __noreturn shutdown)(void);
27764
27765 if (!tboot_enabled())
27766 return;
27767@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27768
27769 switch_to_tboot_pt();
27770
27771- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27772+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27773 shutdown();
27774
27775 /* should not reach here */
27776@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27777 return -ENODEV;
27778 }
27779
27780-static atomic_t ap_wfs_count;
27781+static atomic_unchecked_t ap_wfs_count;
27782
27783 static int tboot_wait_for_aps(int num_aps)
27784 {
27785@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27786 {
27787 switch (action) {
27788 case CPU_DYING:
27789- atomic_inc(&ap_wfs_count);
27790+ atomic_inc_unchecked(&ap_wfs_count);
27791 if (num_online_cpus() == 1)
27792- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27793+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27794 return NOTIFY_BAD;
27795 break;
27796 }
27797@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27798
27799 tboot_create_trampoline();
27800
27801- atomic_set(&ap_wfs_count, 0);
27802+ atomic_set_unchecked(&ap_wfs_count, 0);
27803 register_hotcpu_notifier(&tboot_cpu_notifier);
27804
27805 #ifdef CONFIG_DEBUG_FS
27806diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27807index 25adc0e..1df4349 100644
27808--- a/arch/x86/kernel/time.c
27809+++ b/arch/x86/kernel/time.c
27810@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27811 {
27812 unsigned long pc = instruction_pointer(regs);
27813
27814- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27815+ if (!user_mode(regs) && in_lock_functions(pc)) {
27816 #ifdef CONFIG_FRAME_POINTER
27817- return *(unsigned long *)(regs->bp + sizeof(long));
27818+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27819 #else
27820 unsigned long *sp =
27821 (unsigned long *)kernel_stack_pointer(regs);
27822@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27823 * or above a saved flags. Eflags has bits 22-31 zero,
27824 * kernel addresses don't.
27825 */
27826+
27827+#ifdef CONFIG_PAX_KERNEXEC
27828+ return ktla_ktva(sp[0]);
27829+#else
27830 if (sp[0] >> 22)
27831 return sp[0];
27832 if (sp[1] >> 22)
27833 return sp[1];
27834 #endif
27835+
27836+#endif
27837 }
27838 return pc;
27839 }
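profile_pc() above starts translating saved return addresses through ktla_ktva(). Under KERNEXEC on i386 the kernel text is linked and executed at two aliases a constant apart, so the helper pair amounts to an add/subtract; the direction and the constant below are illustrative only:

    #include <stdio.h>

    #define KERNEL_TEXT_OFFSET 0xc0400000UL  /* made-up constant */

    static unsigned long ktla_ktva(unsigned long addr) { return addr + KERNEL_TEXT_OFFSET; }
    static unsigned long ktva_ktla(unsigned long addr) { return addr - KERNEL_TEXT_OFFSET; }

    int main(void)
    {
        unsigned long la = 0x00401000UL;
        unsigned long va = ktla_ktva(la);

        printf("la = 0x%lx, va = 0x%lx, round-trip = 0x%lx\n",
               la, va, ktva_ktla(va));
        return 0;
    }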
27840diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27841index 7fc5e84..c6e445a 100644
27842--- a/arch/x86/kernel/tls.c
27843+++ b/arch/x86/kernel/tls.c
27844@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27845 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27846 return -EINVAL;
27847
27848+#ifdef CONFIG_PAX_SEGMEXEC
27849+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27850+ return -EINVAL;
27851+#endif
27852+
27853 set_tls_desc(p, idx, &info, 1);
27854
27855 return 0;
27856@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27857
27858 if (kbuf)
27859 info = kbuf;
27860- else if (__copy_from_user(infobuf, ubuf, count))
27861+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27862 return -EFAULT;
27863 else
27864 info = infobuf;
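The regset_tls_set() change is the standard fixed-buffer guard: validate the caller-supplied length against the destination before copying, rather than trusting the regset core. A userspace illustration with memcpy standing in for __copy_from_user():

    #include <stdio.h>
    #include <string.h>

    #define NENTRIES 4

    static int set_tls(const unsigned int *ubuf, size_t count)
    {
        unsigned int infobuf[NENTRIES];

        if (count > sizeof(infobuf))   /* the added check */
            return -1;                 /* -EFAULT in the kernel */
        memcpy(infobuf, ubuf, count);
        return 0;
    }

    int main(void)
    {
        unsigned int in[8] = { 0 };

        printf("%d\n", set_tls(in, 2 * sizeof(unsigned int))); /*  0 */
        printf("%d\n", set_tls(in, sizeof(in)));               /* -1 */
        return 0;
    }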
27865diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27866index 1c113db..287b42e 100644
27867--- a/arch/x86/kernel/tracepoint.c
27868+++ b/arch/x86/kernel/tracepoint.c
27869@@ -9,11 +9,11 @@
27870 #include <linux/atomic.h>
27871
27872 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27873-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27874+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27875 (unsigned long) trace_idt_table };
27876
27877 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27878-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27879+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27880
27881 static int trace_irq_vector_refcount;
27882 static DEFINE_MUTEX(irq_vector_mutex);
27883diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27884index 89f4e64..aa4149d 100644
27885--- a/arch/x86/kernel/traps.c
27886+++ b/arch/x86/kernel/traps.c
27887@@ -68,7 +68,7 @@
27888 #include <asm/proto.h>
27889
27890 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27891-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27892+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27893 #else
27894 #include <asm/processor-flags.h>
27895 #include <asm/setup.h>
27896@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27897 #endif
27898
27899 /* Must be page-aligned because the real IDT is used in a fixmap. */
27900-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27901+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27902
27903 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27904 EXPORT_SYMBOL_GPL(used_vectors);
27905@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27906 }
27907
27908 static nokprobe_inline int
27909-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27910+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27911 struct pt_regs *regs, long error_code)
27912 {
27913 #ifdef CONFIG_X86_32
27914- if (regs->flags & X86_VM_MASK) {
27915+ if (v8086_mode(regs)) {
27916 /*
27917 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27918 * On nmi (interrupt 2), do_trap should not be called.
27919@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27920 return -1;
27921 }
27922 #endif
27923- if (!user_mode(regs)) {
27924+ if (!user_mode_novm(regs)) {
27925 if (!fixup_exception(regs)) {
27926 tsk->thread.error_code = error_code;
27927 tsk->thread.trap_nr = trapnr;
27928+
27929+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27930+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27931+ str = "PAX: suspicious stack segment fault";
27932+#endif
27933+
27934 die(str, regs, error_code);
27935 }
27936+
27937+#ifdef CONFIG_PAX_REFCOUNT
27938+ if (trapnr == X86_TRAP_OF)
27939+ pax_report_refcount_overflow(regs);
27940+#endif
27941+
27942 return 0;
27943 }
27944
27945@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27946 }
27947
27948 static void
27949-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27950+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27951 long error_code, siginfo_t *info)
27952 {
27953 struct task_struct *tsk = current;
27954@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27955 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27956 printk_ratelimit()) {
27957 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27958- tsk->comm, tsk->pid, str,
27959+ tsk->comm, task_pid_nr(tsk), str,
27960 regs->ip, regs->sp, error_code);
27961 print_vma_addr(" in ", regs->ip);
27962 pr_cont("\n");
27963@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27964 tsk->thread.error_code = error_code;
27965 tsk->thread.trap_nr = X86_TRAP_DF;
27966
27967+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27968+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27969+ die("grsec: kernel stack overflow detected", regs, error_code);
27970+#endif
27971+
27972 #ifdef CONFIG_DOUBLEFAULT
27973 df_debug(regs, error_code);
27974 #endif
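The GRKERNSEC_KSTACKOVERFLOW test above relies on task->stack being the lowest address of the downward-growing kernel stack: a double fault whose %rsp sits within one page below that base is treated as an overrun rather than a random fault. The same predicate in isolation:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Unsigned subtraction: if sp is above the base (normal operation),
     * the difference wraps to a huge value and the test correctly fails. */
    static int looks_like_overflow(unsigned long stack_base, unsigned long sp)
    {
        return stack_base - sp <= PAGE_SIZE;
    }

    int main(void)
    {
        unsigned long stack_base = 0x100000UL;

        printf("%d\n", looks_like_overflow(stack_base, stack_base - 64));   /* 1 */
        printf("%d\n", looks_like_overflow(stack_base, stack_base - 8192)); /* 0 */
        return 0;
    }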
27975@@ -300,7 +317,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
27976 goto exit;
27977 conditional_sti(regs);
27978
27979- if (!user_mode_vm(regs))
27980+ if (!user_mode(regs))
27981 die("bounds", regs, error_code);
27982
27983 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
27984@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27985 conditional_sti(regs);
27986
27987 #ifdef CONFIG_X86_32
27988- if (regs->flags & X86_VM_MASK) {
27989+ if (v8086_mode(regs)) {
27990 local_irq_enable();
27991 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27992 goto exit;
27993@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27994 #endif
27995
27996 tsk = current;
27997- if (!user_mode(regs)) {
27998+ if (!user_mode_novm(regs)) {
27999 if (fixup_exception(regs))
28000 goto exit;
28001
28002 tsk->thread.error_code = error_code;
28003 tsk->thread.trap_nr = X86_TRAP_GP;
28004 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28005- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28006+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28007+
28008+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28009+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28010+ die("PAX: suspicious general protection fault", regs, error_code);
28011+ else
28012+#endif
28013+
28014 die("general protection fault", regs, error_code);
28015+ }
28016 goto exit;
28017 }
28018
28019+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28020+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28021+ struct mm_struct *mm = tsk->mm;
28022+ unsigned long limit;
28023+
28024+ down_write(&mm->mmap_sem);
28025+ limit = mm->context.user_cs_limit;
28026+ if (limit < TASK_SIZE) {
28027+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28028+ up_write(&mm->mmap_sem);
28029+ return;
28030+ }
28031+ up_write(&mm->mmap_sem);
28032+ }
28033+#endif
28034+
28035 tsk->thread.error_code = error_code;
28036 tsk->thread.trap_nr = X86_TRAP_GP;
28037
28038@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28039 container_of(task_pt_regs(current),
28040 struct bad_iret_stack, regs);
28041
28042+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28043+ new_stack = s;
28044+
28045 /* Copy the IRET target to the new stack. */
28046 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28047
28048 /* Copy the remainder of the stack from the current stack. */
28049 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28050
28051- BUG_ON(!user_mode_vm(&new_stack->regs));
28052+ BUG_ON(!user_mode(&new_stack->regs));
28053 return new_stack;
28054 }
28055 NOKPROBE_SYMBOL(fixup_bad_iret);
28056@@ -566,7 +610,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28057 * then it's very likely the result of an icebp/int01 trap.
28058 * User wants a sigtrap for that.
28059 */
28060- if (!dr6 && user_mode_vm(regs))
28061+ if (!dr6 && user_mode(regs))
28062 user_icebp = 1;
28063
28064 /* Catch kmemcheck conditions first of all! */
28065@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28066 /* It's safe to allow irq's after DR6 has been saved */
28067 preempt_conditional_sti(regs);
28068
28069- if (regs->flags & X86_VM_MASK) {
28070+ if (v8086_mode(regs)) {
28071 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28072 X86_TRAP_DB);
28073 preempt_conditional_cli(regs);
28074@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28075 * We already checked v86 mode above, so we can check for kernel mode
28076 * by just checking the CPL of CS.
28077 */
28078- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28079+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28080 tsk->thread.debugreg6 &= ~DR_STEP;
28081 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28082 regs->flags &= ~X86_EFLAGS_TF;
28083@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28084 return;
28085 conditional_sti(regs);
28086
28087- if (!user_mode_vm(regs))
28088+ if (!user_mode(regs))
28089 {
28090 if (!fixup_exception(regs)) {
28091 task->thread.error_code = error_code;
28092diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28093index 5054497..139f8f8 100644
28094--- a/arch/x86/kernel/tsc.c
28095+++ b/arch/x86/kernel/tsc.c
28096@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28097 */
28098 smp_wmb();
28099
28100- ACCESS_ONCE(c2n->head) = data;
28101+ ACCESS_ONCE_RW(c2n->head) = data;
28102 }
28103
28104 /*
28105diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28106index 8b96a94..792b410 100644
28107--- a/arch/x86/kernel/uprobes.c
28108+++ b/arch/x86/kernel/uprobes.c
28109@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28110 int ret = NOTIFY_DONE;
28111
28112 /* We are only interested in userspace traps */
28113- if (regs && !user_mode_vm(regs))
28114+ if (regs && !user_mode(regs))
28115 return NOTIFY_DONE;
28116
28117 switch (val) {
28118@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28119
28120 if (nleft != rasize) {
28121 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28122- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28123+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28124
28125 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28126 }
28127diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28128index b9242ba..50c5edd 100644
28129--- a/arch/x86/kernel/verify_cpu.S
28130+++ b/arch/x86/kernel/verify_cpu.S
28131@@ -20,6 +20,7 @@
28132 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28133 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28134 * arch/x86/kernel/head_32.S: processor startup
28135+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28136 *
28137 * verify_cpu, returns the status of longmode and SSE in register %eax.
28138 * 0: Success 1: Failure
28139diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28140index e8edcf5..27f9344 100644
28141--- a/arch/x86/kernel/vm86_32.c
28142+++ b/arch/x86/kernel/vm86_32.c
28143@@ -44,6 +44,7 @@
28144 #include <linux/ptrace.h>
28145 #include <linux/audit.h>
28146 #include <linux/stddef.h>
28147+#include <linux/grsecurity.h>
28148
28149 #include <asm/uaccess.h>
28150 #include <asm/io.h>
28151@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28152 do_exit(SIGSEGV);
28153 }
28154
28155- tss = &per_cpu(init_tss, get_cpu());
28156+ tss = init_tss + get_cpu();
28157 current->thread.sp0 = current->thread.saved_sp0;
28158 current->thread.sysenter_cs = __KERNEL_CS;
28159 load_sp0(tss, &current->thread);
28160@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28161
28162 if (tsk->thread.saved_sp0)
28163 return -EPERM;
28164+
28165+#ifdef CONFIG_GRKERNSEC_VM86
28166+ if (!capable(CAP_SYS_RAWIO)) {
28167+ gr_handle_vm86();
28168+ return -EPERM;
28169+ }
28170+#endif
28171+
28172 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28173 offsetof(struct kernel_vm86_struct, vm86plus) -
28174 sizeof(info.regs));
28175@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28176 int tmp;
28177 struct vm86plus_struct __user *v86;
28178
28179+#ifdef CONFIG_GRKERNSEC_VM86
28180+ if (!capable(CAP_SYS_RAWIO)) {
28181+ gr_handle_vm86();
28182+ return -EPERM;
28183+ }
28184+#endif
28185+
28186 tsk = current;
28187 switch (cmd) {
28188 case VM86_REQUEST_IRQ:
28189@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28190 tsk->thread.saved_fs = info->regs32->fs;
28191 tsk->thread.saved_gs = get_user_gs(info->regs32);
28192
28193- tss = &per_cpu(init_tss, get_cpu());
28194+ tss = init_tss + get_cpu();
28195 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28196 if (cpu_has_sep)
28197 tsk->thread.sysenter_cs = 0;
28198@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28199 goto cannot_handle;
28200 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28201 goto cannot_handle;
28202- intr_ptr = (unsigned long __user *) (i << 2);
28203+ intr_ptr = (__force unsigned long __user *) (i << 2);
28204 if (get_user(segoffs, intr_ptr))
28205 goto cannot_handle;
28206 if ((segoffs >> 16) == BIOSSEG)
28207diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28208index 00bf300..129df8e 100644
28209--- a/arch/x86/kernel/vmlinux.lds.S
28210+++ b/arch/x86/kernel/vmlinux.lds.S
28211@@ -26,6 +26,13 @@
28212 #include <asm/page_types.h>
28213 #include <asm/cache.h>
28214 #include <asm/boot.h>
28215+#include <asm/segment.h>
28216+
28217+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28218+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28219+#else
28220+#define __KERNEL_TEXT_OFFSET 0
28221+#endif
28222
28223 #undef i386 /* in case the preprocessor is a 32bit one */
28224
28225@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28226
28227 PHDRS {
28228 text PT_LOAD FLAGS(5); /* R_E */
28229+#ifdef CONFIG_X86_32
28230+ module PT_LOAD FLAGS(5); /* R_E */
28231+#endif
28232+#ifdef CONFIG_XEN
28233+ rodata PT_LOAD FLAGS(5); /* R_E */
28234+#else
28235+ rodata PT_LOAD FLAGS(4); /* R__ */
28236+#endif
28237 data PT_LOAD FLAGS(6); /* RW_ */
28238-#ifdef CONFIG_X86_64
28239+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28240 #ifdef CONFIG_SMP
28241 percpu PT_LOAD FLAGS(6); /* RW_ */
28242 #endif
28243+ text.init PT_LOAD FLAGS(5); /* R_E */
28244+ text.exit PT_LOAD FLAGS(5); /* R_E */
28245 init PT_LOAD FLAGS(7); /* RWE */
28246-#endif
28247 note PT_NOTE FLAGS(0); /* ___ */
28248 }
28249
28250 SECTIONS
28251 {
28252 #ifdef CONFIG_X86_32
28253- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28254- phys_startup_32 = startup_32 - LOAD_OFFSET;
28255+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28256 #else
28257- . = __START_KERNEL;
28258- phys_startup_64 = startup_64 - LOAD_OFFSET;
28259+ . = __START_KERNEL;
28260 #endif
28261
28262 /* Text and read-only data */
28263- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28264- _text = .;
28265+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28266 /* bootstrapping code */
28267+#ifdef CONFIG_X86_32
28268+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28269+#else
28270+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28271+#endif
28272+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28273+ _text = .;
28274 HEAD_TEXT
28275 . = ALIGN(8);
28276 _stext = .;
28277@@ -104,13 +124,47 @@ SECTIONS
28278 IRQENTRY_TEXT
28279 *(.fixup)
28280 *(.gnu.warning)
28281- /* End of text section */
28282- _etext = .;
28283 } :text = 0x9090
28284
28285- NOTES :text :note
28286+ . += __KERNEL_TEXT_OFFSET;
28287
28288- EXCEPTION_TABLE(16) :text = 0x9090
28289+#ifdef CONFIG_X86_32
28290+ . = ALIGN(PAGE_SIZE);
28291+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28292+
28293+#ifdef CONFIG_PAX_KERNEXEC
28294+ MODULES_EXEC_VADDR = .;
28295+ BYTE(0)
28296+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28297+ . = ALIGN(HPAGE_SIZE) - 1;
28298+ MODULES_EXEC_END = .;
28299+#endif
28300+
28301+ } :module
28302+#endif
28303+
28304+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28305+ /* End of text section */
28306+ BYTE(0)
28307+ _etext = . - __KERNEL_TEXT_OFFSET;
28308+ }
28309+
28310+#ifdef CONFIG_X86_32
28311+ . = ALIGN(PAGE_SIZE);
28312+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28313+ . = ALIGN(PAGE_SIZE);
28314+ *(.empty_zero_page)
28315+ *(.initial_pg_fixmap)
28316+ *(.initial_pg_pmd)
28317+ *(.initial_page_table)
28318+ *(.swapper_pg_dir)
28319+ } :rodata
28320+#endif
28321+
28322+ . = ALIGN(PAGE_SIZE);
28323+ NOTES :rodata :note
28324+
28325+ EXCEPTION_TABLE(16) :rodata
28326
28327 #if defined(CONFIG_DEBUG_RODATA)
28328 /* .text should occupy whole number of pages */
28329@@ -122,16 +176,20 @@ SECTIONS
28330
28331 /* Data */
28332 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28333+
28334+#ifdef CONFIG_PAX_KERNEXEC
28335+ . = ALIGN(HPAGE_SIZE);
28336+#else
28337+ . = ALIGN(PAGE_SIZE);
28338+#endif
28339+
28340 /* Start of data section */
28341 _sdata = .;
28342
28343 /* init_task */
28344 INIT_TASK_DATA(THREAD_SIZE)
28345
28346-#ifdef CONFIG_X86_32
28347- /* 32 bit has nosave before _edata */
28348 NOSAVE_DATA
28349-#endif
28350
28351 PAGE_ALIGNED_DATA(PAGE_SIZE)
28352
28353@@ -174,12 +232,19 @@ SECTIONS
28354 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28355
28356 /* Init code and data - will be freed after init */
28357- . = ALIGN(PAGE_SIZE);
28358 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28359+ BYTE(0)
28360+
28361+#ifdef CONFIG_PAX_KERNEXEC
28362+ . = ALIGN(HPAGE_SIZE);
28363+#else
28364+ . = ALIGN(PAGE_SIZE);
28365+#endif
28366+
28367 __init_begin = .; /* paired with __init_end */
28368- }
28369+ } :init.begin
28370
28371-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28372+#ifdef CONFIG_SMP
28373 /*
28374 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28375 * output PHDR, so the next output section - .init.text - should
28376@@ -190,12 +255,27 @@ SECTIONS
28377 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28378 #endif
28379
28380- INIT_TEXT_SECTION(PAGE_SIZE)
28381-#ifdef CONFIG_X86_64
28382- :init
28383-#endif
28384+ . = ALIGN(PAGE_SIZE);
28385+ init_begin = .;
28386+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28387+ VMLINUX_SYMBOL(_sinittext) = .;
28388+ INIT_TEXT
28389+ . = ALIGN(PAGE_SIZE);
28390+ } :text.init
28391
28392- INIT_DATA_SECTION(16)
28393+ /*
28394+ * .exit.text is discarded at runtime, not link time, to deal with
28395+ * references from .altinstructions and .eh_frame
28396+ */
28397+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28398+ EXIT_TEXT
28399+ VMLINUX_SYMBOL(_einittext) = .;
28400+ . = ALIGN(16);
28401+ } :text.exit
28402+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28403+
28404+ . = ALIGN(PAGE_SIZE);
28405+ INIT_DATA_SECTION(16) :init
28406
28407 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28408 __x86_cpu_dev_start = .;
28409@@ -266,19 +346,12 @@ SECTIONS
28410 }
28411
28412 . = ALIGN(8);
28413- /*
28414- * .exit.text is discard at runtime, not link time, to deal with
28415- * references from .altinstructions and .eh_frame
28416- */
28417- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28418- EXIT_TEXT
28419- }
28420
28421 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28422 EXIT_DATA
28423 }
28424
28425-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28426+#ifndef CONFIG_SMP
28427 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28428 #endif
28429
28430@@ -297,16 +370,10 @@ SECTIONS
28431 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28432 __smp_locks = .;
28433 *(.smp_locks)
28434- . = ALIGN(PAGE_SIZE);
28435 __smp_locks_end = .;
28436+ . = ALIGN(PAGE_SIZE);
28437 }
28438
28439-#ifdef CONFIG_X86_64
28440- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28441- NOSAVE_DATA
28442- }
28443-#endif
28444-
28445 /* BSS */
28446 . = ALIGN(PAGE_SIZE);
28447 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28448@@ -322,6 +389,7 @@ SECTIONS
28449 __brk_base = .;
28450 . += 64 * 1024; /* 64k alignment slop space */
28451 *(.brk_reservation) /* areas brk users have reserved */
28452+ . = ALIGN(HPAGE_SIZE);
28453 __brk_limit = .;
28454 }
28455
28456@@ -348,13 +416,12 @@ SECTIONS
28457 * for the boot processor.
28458 */
28459 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28460-INIT_PER_CPU(gdt_page);
28461 INIT_PER_CPU(irq_stack_union);
28462
28463 /*
28464 * Build-time check on the image size:
28465 */
28466-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28467+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28468 "kernel image bigger than KERNEL_IMAGE_SIZE");
28469
28470 #ifdef CONFIG_SMP
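
The linker-script rework above is the KERNEXEC layout change: kernel text is emitted at an address shifted by __KERNEL_TEXT_OFFSET so that executable code and writable data end up in disjoint, hugepage-alignable regions, and the final image-size ASSERT subtracts the offset so it still measures the real image. A toy sketch of moving between the two views of a text address, in the spirit of the ktla_ktva()/ktva_ktla() helpers this patch uses later (vmx.c, insn.c); the offset value and the bodies here are illustrative assumptions, not PaX's real definitions:

    #define __KERNEL_TEXT_OFFSET_DEMO (16UL * 1024 * 1024) /* assumed 16 MiB */

    /* linear view -> shifted text view */
    static inline unsigned long ktla_ktva_demo(unsigned long addr)
    {
        return addr + __KERNEL_TEXT_OFFSET_DEMO;
    }

    /* shifted text view -> linear view */
    static inline unsigned long ktva_ktla_demo(unsigned long addr)
    {
        return addr - __KERNEL_TEXT_OFFSET_DEMO;
    }
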
28471diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28472index 2dcc6ff..082dc7a 100644
28473--- a/arch/x86/kernel/vsyscall_64.c
28474+++ b/arch/x86/kernel/vsyscall_64.c
28475@@ -38,15 +38,13 @@
28476 #define CREATE_TRACE_POINTS
28477 #include "vsyscall_trace.h"
28478
28479-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28480+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28481
28482 static int __init vsyscall_setup(char *str)
28483 {
28484 if (str) {
28485 if (!strcmp("emulate", str))
28486 vsyscall_mode = EMULATE;
28487- else if (!strcmp("native", str))
28488- vsyscall_mode = NATIVE;
28489 else if (!strcmp("none", str))
28490 vsyscall_mode = NONE;
28491 else
28492@@ -264,8 +262,7 @@ do_ret:
28493 return true;
28494
28495 sigsegv:
28496- force_sig(SIGSEGV, current);
28497- return true;
28498+ do_group_exit(SIGKILL);
28499 }
28500
28501 /*
28502@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28503 static struct vm_area_struct gate_vma = {
28504 .vm_start = VSYSCALL_ADDR,
28505 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28506- .vm_page_prot = PAGE_READONLY_EXEC,
28507- .vm_flags = VM_READ | VM_EXEC,
28508+ .vm_page_prot = PAGE_READONLY,
28509+ .vm_flags = VM_READ,
28510 .vm_ops = &gate_vma_ops,
28511 };
28512
28513@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28514 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28515
28516 if (vsyscall_mode != NONE)
28517- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28518- vsyscall_mode == NATIVE
28519- ? PAGE_KERNEL_VSYSCALL
28520- : PAGE_KERNEL_VVAR);
28521+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28522
28523 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28524 (unsigned long)VSYSCALL_ADDR);
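
With the "native" mode deleted above, the vsyscall page is always mapped PAGE_KERNEL_VVAR, i.e. readable but never executable, so every legacy vsyscall traps into the emulation path, and a fault there now ends the whole task group rather than delivering a catchable SIGSEGV. A minimal sketch of recognising the three legacy entry points the emulation has to handle; the address is the real fixed one, the helper name is made up:

    #include <stdbool.h>
    #include <stdint.h>

    #define VSYSCALL_PAGE_ADDR 0xffffffffff600000ULL /* fixed legacy address */

    /* gettimeofday, time and getcpu sit 1024 bytes apart in the page */
    static bool is_vsyscall_entry(uint64_t rip)
    {
        uint64_t off = rip - VSYSCALL_PAGE_ADDR;

        return off == 0x000 || off == 0x400 || off == 0x800;
    }
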
28525diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28526index 04068192..4d75aa6 100644
28527--- a/arch/x86/kernel/x8664_ksyms_64.c
28528+++ b/arch/x86/kernel/x8664_ksyms_64.c
28529@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28530 EXPORT_SYMBOL(copy_user_generic_unrolled);
28531 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28532 EXPORT_SYMBOL(__copy_user_nocache);
28533-EXPORT_SYMBOL(_copy_from_user);
28534-EXPORT_SYMBOL(_copy_to_user);
28535
28536 EXPORT_SYMBOL(copy_page);
28537 EXPORT_SYMBOL(clear_page);
28538@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28539 EXPORT_SYMBOL(___preempt_schedule_context);
28540 #endif
28541 #endif
28542+
28543+#ifdef CONFIG_PAX_PER_CPU_PGD
28544+EXPORT_SYMBOL(cpu_pgd);
28545+#endif
28546diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28547index 234b072..b7ab191 100644
28548--- a/arch/x86/kernel/x86_init.c
28549+++ b/arch/x86/kernel/x86_init.c
28550@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28551 static void default_nmi_init(void) { };
28552 static int default_i8042_detect(void) { return 1; };
28553
28554-struct x86_platform_ops x86_platform = {
28555+struct x86_platform_ops x86_platform __read_only = {
28556 .calibrate_tsc = native_calibrate_tsc,
28557 .get_wallclock = mach_get_cmos_time,
28558 .set_wallclock = mach_set_rtc_mmss,
28559@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28560 EXPORT_SYMBOL_GPL(x86_platform);
28561
28562 #if defined(CONFIG_PCI_MSI)
28563-struct x86_msi_ops x86_msi = {
28564+struct x86_msi_ops x86_msi __read_only = {
28565 .setup_msi_irqs = native_setup_msi_irqs,
28566 .compose_msi_msg = native_compose_msi_msg,
28567 .teardown_msi_irq = native_teardown_msi_irq,
28568@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28569 }
28570 #endif
28571
28572-struct x86_io_apic_ops x86_io_apic_ops = {
28573+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28574 .init = native_io_apic_init_mappings,
28575 .read = native_io_apic_read,
28576 .write = native_io_apic_write,
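
Marking x86_platform, x86_msi and x86_io_apic_ops __read_only moves these ops tables into write-protected memory, so any later legitimate writer has to open a temporary write window first; that is the pax_open_kernel()/pax_close_kernel() pairing the KVM hunks below add around their pointer fix-ups. A minimal sketch of one way such a window can work, toggling CR0.WP the same way the mmx_32.c hunk at the end of this section does inline (not the real pax_* implementation; preemption and interrupts must be off for the duration):

    /* clear CR0.WP (bit 16): supervisor writes then ignore read-only PTEs */
    static inline unsigned long open_kernel_write_window(void)
    {
        unsigned long cr0;

        asm volatile("mov %%cr0, %0" : "=r" (cr0));
        asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~(1UL << 16)) : "memory");
        return cr0;
    }

    static inline void close_kernel_write_window(unsigned long cr0)
    {
        asm volatile("mov %0, %%cr0" : : "r" (cr0) : "memory"); /* restore WP */
    }
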
28577diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28578index 8be1e17..07dd990 100644
28579--- a/arch/x86/kernel/xsave.c
28580+++ b/arch/x86/kernel/xsave.c
28581@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28582
28583 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28584 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28585- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28586+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28587
28588 if (!use_xsave())
28589 return err;
28590
28591- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28592+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28593
28594 /*
28595 * Read the xstate_bv which we copied (directly from the cpu or
28596 * from the state in task struct) to the user buffers.
28597 */
28598- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28599+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28600
28601 /*
28602 * For legacy compatible, we always set FP/SSE bits in the bit
28603@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28604 */
28605 xstate_bv |= XSTATE_FPSSE;
28606
28607- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28608+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28609
28610 return err;
28611 }
28612@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28613 {
28614 int err;
28615
28616+ buf = (struct xsave_struct __user *)____m(buf);
28617 if (use_xsave())
28618 err = xsave_user(buf);
28619 else if (use_fxsr())
28620@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28621 */
28622 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28623 {
28624+ buf = (void __user *)____m(buf);
28625 if (use_xsave()) {
28626 if ((unsigned long)buf % 64 || fx_only) {
28627 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
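
The ____m() wrapping added above re-bases a user pointer into the UDEREF shadow mapping before the raw xsave/xrstor helpers dereference it, and the __user annotations make the address-space change visible to sparse. A sketch of that re-basing, mirroring the same "already above the shadow base?" test the getuser.S and copy_user_nocache_64.S hunks below perform in asm (the body is illustrative; only the name ____m comes from the patch):

    extern unsigned long pax_user_shadow_base; /* set up elsewhere by UDEREF */

    static inline void *uderef_rebase(const void *uaddr) /* sketch of ____m() */
    {
        unsigned long a = (unsigned long)uaddr;

        if (a < pax_user_shadow_base) /* not re-based yet */
            a += pax_user_shadow_base;
        return (void *)a;
    }
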
28628diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28629index 8a80737..bac4961 100644
28630--- a/arch/x86/kvm/cpuid.c
28631+++ b/arch/x86/kvm/cpuid.c
28632@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28633 struct kvm_cpuid2 *cpuid,
28634 struct kvm_cpuid_entry2 __user *entries)
28635 {
28636- int r;
28637+ int r, i;
28638
28639 r = -E2BIG;
28640 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28641 goto out;
28642 r = -EFAULT;
28643- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28644- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28645+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28646 goto out;
28647+ for (i = 0; i < cpuid->nent; ++i) {
28648+ struct kvm_cpuid_entry2 cpuid_entry;
28649+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28650+ goto out;
28651+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28652+ }
28653 vcpu->arch.cpuid_nent = cpuid->nent;
28654 kvm_apic_set_version(vcpu);
28655 kvm_x86_ops->cpuid_update(vcpu);
28656@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28657 struct kvm_cpuid2 *cpuid,
28658 struct kvm_cpuid_entry2 __user *entries)
28659 {
28660- int r;
28661+ int r, i;
28662
28663 r = -E2BIG;
28664 if (cpuid->nent < vcpu->arch.cpuid_nent)
28665 goto out;
28666 r = -EFAULT;
28667- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28668- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28669+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28670 goto out;
28671+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28672+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28673+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28674+ goto out;
28675+ }
28676 return 0;
28677
28678 out:
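
The rewrite above trades one bulk copy_from_user() into the fixed-size cpuid_entries array for a single access_ok() range check plus per-entry copies through a stack temporary, so no single copy can run past the destination array. The same shape reduced to a generic helper (names are placeholders, not KVM's):

    #include <errno.h>
    #include <stddef.h>

    struct rec { unsigned int a, b, c, d; };

    static int copy_in_bounded(struct rec *dst, size_t dst_max,
                               const struct rec *src, size_t n)
    {
        size_t i;

        if (n > dst_max)             /* reject before copying anything */
            return -E2BIG;
        for (i = 0; i < n; i++) {
            struct rec tmp = src[i]; /* stands in for __copy_from_user() */
            dst[i] = tmp;            /* each store is provably in bounds */
        }
        return 0;
    }
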
28679diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28680index b24c2d8..e1e4e259 100644
28681--- a/arch/x86/kvm/emulate.c
28682+++ b/arch/x86/kvm/emulate.c
28683@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28684 int cr = ctxt->modrm_reg;
28685 u64 efer = 0;
28686
28687- static u64 cr_reserved_bits[] = {
28688+ static const u64 cr_reserved_bits[] = {
28689 0xffffffff00000000ULL,
28690 0, 0, 0, /* CR3 checked later */
28691 CR4_RESERVED_BITS,
28692diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28693index d52dcf0..cec7e84 100644
28694--- a/arch/x86/kvm/lapic.c
28695+++ b/arch/x86/kvm/lapic.c
28696@@ -55,7 +55,7 @@
28697 #define APIC_BUS_CYCLE_NS 1
28698
28699 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28700-#define apic_debug(fmt, arg...)
28701+#define apic_debug(fmt, arg...) do {} while (0)
28702
28703 #define APIC_LVT_NUM 6
28704 /* 14 is the version for Xeon and Pentium 8.4.8*/
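
Redefining the disabled apic_debug() as do {} while (0) instead of an empty expansion keeps the macro behaving as exactly one statement:

    #define apic_debug(fmt, arg...) do {} while (0)

    void demo(int cond)
    {
        /* With a truly empty expansion this would collapse to "if (cond) ;",
         * an empty body that -Wempty-body warns about; the do/while (0)
         * form stays a single statement and pairs cleanly with the else. */
        if (cond)
            apic_debug("hit\n");
        else
            return;
    }
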
28705diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28706index fd49c86..77e1aa0 100644
28707--- a/arch/x86/kvm/paging_tmpl.h
28708+++ b/arch/x86/kvm/paging_tmpl.h
28709@@ -343,7 +343,7 @@ retry_walk:
28710 if (unlikely(kvm_is_error_hva(host_addr)))
28711 goto error;
28712
28713- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28714+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28715 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28716 goto error;
28717 walker->ptep_user[walker->level - 1] = ptep_user;
28718diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28719index 41dd038..de331cf 100644
28720--- a/arch/x86/kvm/svm.c
28721+++ b/arch/x86/kvm/svm.c
28722@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28723 int cpu = raw_smp_processor_id();
28724
28725 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28726+
28727+ pax_open_kernel();
28728 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28729+ pax_close_kernel();
28730+
28731 load_TR_desc();
28732 }
28733
28734@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28735 #endif
28736 #endif
28737
28738+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28739+ __set_fs(current_thread_info()->addr_limit);
28740+#endif
28741+
28742 reload_tss(vcpu);
28743
28744 local_irq_disable();
28745diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28746index d4c58d8..eaf2568 100644
28747--- a/arch/x86/kvm/vmx.c
28748+++ b/arch/x86/kvm/vmx.c
28749@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28750 #endif
28751 }
28752
28753-static void vmcs_clear_bits(unsigned long field, u32 mask)
28754+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28755 {
28756 vmcs_writel(field, vmcs_readl(field) & ~mask);
28757 }
28758
28759-static void vmcs_set_bits(unsigned long field, u32 mask)
28760+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28761 {
28762 vmcs_writel(field, vmcs_readl(field) | mask);
28763 }
28764@@ -1645,7 +1645,11 @@ static void reload_tss(void)
28765 struct desc_struct *descs;
28766
28767 descs = (void *)gdt->address;
28768+
28769+ pax_open_kernel();
28770 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28771+ pax_close_kernel();
28772+
28773 load_TR_desc();
28774 }
28775
28776@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28777 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28778 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28779
28780+#ifdef CONFIG_PAX_PER_CPU_PGD
28781+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28782+#endif
28783+
28784 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28785 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28786 vmx->loaded_vmcs->cpu = cpu;
28787@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28788 * reads and returns guest's timestamp counter "register"
28789 * guest_tsc = host_tsc + tsc_offset -- 21.3
28790 */
28791-static u64 guest_read_tsc(void)
28792+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28793 {
28794 u64 host_tsc, tsc_offset;
28795
28796@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28797 unsigned long cr4;
28798
28799 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28800+
28801+#ifndef CONFIG_PAX_PER_CPU_PGD
28802 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28803+#endif
28804
28805 /* Save the most likely value for this task's CR4 in the VMCS. */
28806 cr4 = read_cr4();
28807@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28808 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28809 vmx->host_idt_base = dt.address;
28810
28811- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28812+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28813
28814 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28815 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28816@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
28817 * page upon invalidation. No need to do anything if the
28818 * processor does not have the APIC_ACCESS_ADDR VMCS field.
28819 */
28820- kvm_x86_ops->set_apic_access_page_addr = NULL;
28821+ pax_open_kernel();
28822+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28823+ pax_close_kernel();
28824 }
28825
28826- if (!cpu_has_vmx_tpr_shadow())
28827- kvm_x86_ops->update_cr8_intercept = NULL;
28828+ if (!cpu_has_vmx_tpr_shadow()) {
28829+ pax_open_kernel();
28830+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28831+ pax_close_kernel();
28832+ }
28833
28834 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28835 kvm_disable_largepages();
28836@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
28837 if (!cpu_has_vmx_apicv())
28838 enable_apicv = 0;
28839
28840+ pax_open_kernel();
28841 if (enable_apicv)
28842- kvm_x86_ops->update_cr8_intercept = NULL;
28843+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28844 else {
28845- kvm_x86_ops->hwapic_irr_update = NULL;
28846- kvm_x86_ops->deliver_posted_interrupt = NULL;
28847- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28848+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28849+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28850+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28851 }
28852+ pax_close_kernel();
28853
28854 if (nested)
28855 nested_vmx_setup_ctls_msrs();
28856@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28857 "jmp 2f \n\t"
28858 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28859 "2: "
28860+
28861+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28862+ "ljmp %[cs],$3f\n\t"
28863+ "3: "
28864+#endif
28865+
28866 /* Save guest registers, load host registers, keep flags */
28867 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28868 "pop %0 \n\t"
28869@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28870 #endif
28871 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28872 [wordsize]"i"(sizeof(ulong))
28873+
28874+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28875+ ,[cs]"i"(__KERNEL_CS)
28876+#endif
28877+
28878 : "cc", "memory"
28879 #ifdef CONFIG_X86_64
28880 , "rax", "rbx", "rdi", "rsi"
28881@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28882 if (debugctlmsr)
28883 update_debugctlmsr(debugctlmsr);
28884
28885-#ifndef CONFIG_X86_64
28886+#ifdef CONFIG_X86_32
28887 /*
28888 * The sysexit path does not restore ds/es, so we must set them to
28889 * a reasonable value ourselves.
28890@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28891 * may be executed in interrupt context, which saves and restore segments
28892 * around it, nullifying its effect.
28893 */
28894- loadsegment(ds, __USER_DS);
28895- loadsegment(es, __USER_DS);
28896+ loadsegment(ds, __KERNEL_DS);
28897+ loadsegment(es, __KERNEL_DS);
28898+ loadsegment(ss, __KERNEL_DS);
28899+
28900+#ifdef CONFIG_PAX_KERNEXEC
28901+ loadsegment(fs, __KERNEL_PERCPU);
28902+#endif
28903+
28904+#ifdef CONFIG_PAX_MEMORY_UDEREF
28905+ __set_fs(current_thread_info()->addr_limit);
28906+#endif
28907+
28908 #endif
28909
28910 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
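
Since kvm_x86_ops now sits in read-only memory (the x86_init.c hunks above set the precedent), hardware_setup() can no longer NULL its hooks with a plain assignment: each fix-up is bracketed by pax_open_kernel()/pax_close_kernel() and written through a *(void **)& cast so the store is an ordinary pointer write. In miniature, reusing the hypothetical window helpers sketched after the x86_init.c hunk:

    unsigned long open_kernel_write_window(void); /* sketch from above */
    void close_kernel_write_window(unsigned long cr0);

    struct demo_ops {
        void (*hook)(void);
    };

    static struct demo_ops demo_ops; /* imagine this placed in an RO section */

    static void disable_hook(void)
    {
        unsigned long cr0 = open_kernel_write_window();

        *(void **)&demo_ops.hook = (void *)0; /* plain pointer store */
        close_kernel_write_window(cr0);
    }
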
28911diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28912index 64d76c1..e20a4c1 100644
28913--- a/arch/x86/kvm/x86.c
28914+++ b/arch/x86/kvm/x86.c
28915@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28916 {
28917 struct kvm *kvm = vcpu->kvm;
28918 int lm = is_long_mode(vcpu);
28919- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28920- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28921+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28922+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28923 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28924 : kvm->arch.xen_hvm_config.blob_size_32;
28925 u32 page_num = data & ~PAGE_MASK;
28926@@ -2809,6 +2809,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28927 if (n < msr_list.nmsrs)
28928 goto out;
28929 r = -EFAULT;
28930+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28931+ goto out;
28932 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28933 num_msrs_to_save * sizeof(u32)))
28934 goto out;
28935@@ -5745,7 +5747,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28936 };
28937 #endif
28938
28939-int kvm_arch_init(void *opaque)
28940+int kvm_arch_init(const void *opaque)
28941 {
28942 int r;
28943 struct kvm_x86_ops *ops = opaque;
28944diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28945index c1c1544..f90c9d5 100644
28946--- a/arch/x86/lguest/boot.c
28947+++ b/arch/x86/lguest/boot.c
28948@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28949 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28950 * Launcher to reboot us.
28951 */
28952-static void lguest_restart(char *reason)
28953+static __noreturn void lguest_restart(char *reason)
28954 {
28955 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28956+ BUG();
28957 }
28958
28959 /*G:050
28960diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28961index 00933d5..3a64af9 100644
28962--- a/arch/x86/lib/atomic64_386_32.S
28963+++ b/arch/x86/lib/atomic64_386_32.S
28964@@ -48,6 +48,10 @@ BEGIN(read)
28965 movl (v), %eax
28966 movl 4(v), %edx
28967 RET_ENDP
28968+BEGIN(read_unchecked)
28969+ movl (v), %eax
28970+ movl 4(v), %edx
28971+RET_ENDP
28972 #undef v
28973
28974 #define v %esi
28975@@ -55,6 +59,10 @@ BEGIN(set)
28976 movl %ebx, (v)
28977 movl %ecx, 4(v)
28978 RET_ENDP
28979+BEGIN(set_unchecked)
28980+ movl %ebx, (v)
28981+ movl %ecx, 4(v)
28982+RET_ENDP
28983 #undef v
28984
28985 #define v %esi
28986@@ -70,6 +78,20 @@ RET_ENDP
28987 BEGIN(add)
28988 addl %eax, (v)
28989 adcl %edx, 4(v)
28990+
28991+#ifdef CONFIG_PAX_REFCOUNT
28992+ jno 0f
28993+ subl %eax, (v)
28994+ sbbl %edx, 4(v)
28995+ int $4
28996+0:
28997+ _ASM_EXTABLE(0b, 0b)
28998+#endif
28999+
29000+RET_ENDP
29001+BEGIN(add_unchecked)
29002+ addl %eax, (v)
29003+ adcl %edx, 4(v)
29004 RET_ENDP
29005 #undef v
29006
29007@@ -77,6 +99,24 @@ RET_ENDP
29008 BEGIN(add_return)
29009 addl (v), %eax
29010 adcl 4(v), %edx
29011+
29012+#ifdef CONFIG_PAX_REFCOUNT
29013+ into
29014+1234:
29015+ _ASM_EXTABLE(1234b, 2f)
29016+#endif
29017+
29018+ movl %eax, (v)
29019+ movl %edx, 4(v)
29020+
29021+#ifdef CONFIG_PAX_REFCOUNT
29022+2:
29023+#endif
29024+
29025+RET_ENDP
29026+BEGIN(add_return_unchecked)
29027+ addl (v), %eax
29028+ adcl 4(v), %edx
29029 movl %eax, (v)
29030 movl %edx, 4(v)
29031 RET_ENDP
29032@@ -86,6 +126,20 @@ RET_ENDP
29033 BEGIN(sub)
29034 subl %eax, (v)
29035 sbbl %edx, 4(v)
29036+
29037+#ifdef CONFIG_PAX_REFCOUNT
29038+ jno 0f
29039+ addl %eax, (v)
29040+ adcl %edx, 4(v)
29041+ int $4
29042+0:
29043+ _ASM_EXTABLE(0b, 0b)
29044+#endif
29045+
29046+RET_ENDP
29047+BEGIN(sub_unchecked)
29048+ subl %eax, (v)
29049+ sbbl %edx, 4(v)
29050 RET_ENDP
29051 #undef v
29052
29053@@ -96,6 +150,27 @@ BEGIN(sub_return)
29054 sbbl $0, %edx
29055 addl (v), %eax
29056 adcl 4(v), %edx
29057+
29058+#ifdef CONFIG_PAX_REFCOUNT
29059+ into
29060+1234:
29061+ _ASM_EXTABLE(1234b, 2f)
29062+#endif
29063+
29064+ movl %eax, (v)
29065+ movl %edx, 4(v)
29066+
29067+#ifdef CONFIG_PAX_REFCOUNT
29068+2:
29069+#endif
29070+
29071+RET_ENDP
29072+BEGIN(sub_return_unchecked)
29073+ negl %edx
29074+ negl %eax
29075+ sbbl $0, %edx
29076+ addl (v), %eax
29077+ adcl 4(v), %edx
29078 movl %eax, (v)
29079 movl %edx, 4(v)
29080 RET_ENDP
29081@@ -105,6 +180,20 @@ RET_ENDP
29082 BEGIN(inc)
29083 addl $1, (v)
29084 adcl $0, 4(v)
29085+
29086+#ifdef CONFIG_PAX_REFCOUNT
29087+ jno 0f
29088+ subl $1, (v)
29089+ sbbl $0, 4(v)
29090+ int $4
29091+0:
29092+ _ASM_EXTABLE(0b, 0b)
29093+#endif
29094+
29095+RET_ENDP
29096+BEGIN(inc_unchecked)
29097+ addl $1, (v)
29098+ adcl $0, 4(v)
29099 RET_ENDP
29100 #undef v
29101
29102@@ -114,6 +203,26 @@ BEGIN(inc_return)
29103 movl 4(v), %edx
29104 addl $1, %eax
29105 adcl $0, %edx
29106+
29107+#ifdef CONFIG_PAX_REFCOUNT
29108+ into
29109+1234:
29110+ _ASM_EXTABLE(1234b, 2f)
29111+#endif
29112+
29113+ movl %eax, (v)
29114+ movl %edx, 4(v)
29115+
29116+#ifdef CONFIG_PAX_REFCOUNT
29117+2:
29118+#endif
29119+
29120+RET_ENDP
29121+BEGIN(inc_return_unchecked)
29122+ movl (v), %eax
29123+ movl 4(v), %edx
29124+ addl $1, %eax
29125+ adcl $0, %edx
29126 movl %eax, (v)
29127 movl %edx, 4(v)
29128 RET_ENDP
29129@@ -123,6 +232,20 @@ RET_ENDP
29130 BEGIN(dec)
29131 subl $1, (v)
29132 sbbl $0, 4(v)
29133+
29134+#ifdef CONFIG_PAX_REFCOUNT
29135+ jno 0f
29136+ addl $1, (v)
29137+ adcl $0, 4(v)
29138+ int $4
29139+0:
29140+ _ASM_EXTABLE(0b, 0b)
29141+#endif
29142+
29143+RET_ENDP
29144+BEGIN(dec_unchecked)
29145+ subl $1, (v)
29146+ sbbl $0, 4(v)
29147 RET_ENDP
29148 #undef v
29149
29150@@ -132,6 +255,26 @@ BEGIN(dec_return)
29151 movl 4(v), %edx
29152 subl $1, %eax
29153 sbbl $0, %edx
29154+
29155+#ifdef CONFIG_PAX_REFCOUNT
29156+ into
29157+1234:
29158+ _ASM_EXTABLE(1234b, 2f)
29159+#endif
29160+
29161+ movl %eax, (v)
29162+ movl %edx, 4(v)
29163+
29164+#ifdef CONFIG_PAX_REFCOUNT
29165+2:
29166+#endif
29167+
29168+RET_ENDP
29169+BEGIN(dec_return_unchecked)
29170+ movl (v), %eax
29171+ movl 4(v), %edx
29172+ subl $1, %eax
29173+ sbbl $0, %edx
29174 movl %eax, (v)
29175 movl %edx, 4(v)
29176 RET_ENDP
29177@@ -143,6 +286,13 @@ BEGIN(add_unless)
29178 adcl %edx, %edi
29179 addl (v), %eax
29180 adcl 4(v), %edx
29181+
29182+#ifdef CONFIG_PAX_REFCOUNT
29183+ into
29184+1234:
29185+ _ASM_EXTABLE(1234b, 2f)
29186+#endif
29187+
29188 cmpl %eax, %ecx
29189 je 3f
29190 1:
29191@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29192 1:
29193 addl $1, %eax
29194 adcl $0, %edx
29195+
29196+#ifdef CONFIG_PAX_REFCOUNT
29197+ into
29198+1234:
29199+ _ASM_EXTABLE(1234b, 2f)
29200+#endif
29201+
29202 movl %eax, (v)
29203 movl %edx, 4(v)
29204 movl $1, %eax
29205@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29206 movl 4(v), %edx
29207 subl $1, %eax
29208 sbbl $0, %edx
29209+
29210+#ifdef CONFIG_PAX_REFCOUNT
29211+ into
29212+1234:
29213+ _ASM_EXTABLE(1234b, 1f)
29214+#endif
29215+
29216 js 1f
29217 movl %eax, (v)
29218 movl %edx, 4(v)
29219diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29220index f5cc9eb..51fa319 100644
29221--- a/arch/x86/lib/atomic64_cx8_32.S
29222+++ b/arch/x86/lib/atomic64_cx8_32.S
29223@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29224 CFI_STARTPROC
29225
29226 read64 %ecx
29227+ pax_force_retaddr
29228 ret
29229 CFI_ENDPROC
29230 ENDPROC(atomic64_read_cx8)
29231
29232+ENTRY(atomic64_read_unchecked_cx8)
29233+ CFI_STARTPROC
29234+
29235+ read64 %ecx
29236+ pax_force_retaddr
29237+ ret
29238+ CFI_ENDPROC
29239+ENDPROC(atomic64_read_unchecked_cx8)
29240+
29241 ENTRY(atomic64_set_cx8)
29242 CFI_STARTPROC
29243
29244@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29245 cmpxchg8b (%esi)
29246 jne 1b
29247
29248+ pax_force_retaddr
29249 ret
29250 CFI_ENDPROC
29251 ENDPROC(atomic64_set_cx8)
29252
29253+ENTRY(atomic64_set_unchecked_cx8)
29254+ CFI_STARTPROC
29255+
29256+1:
29257+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29258+ * are atomic on 586 and newer */
29259+ cmpxchg8b (%esi)
29260+ jne 1b
29261+
29262+ pax_force_retaddr
29263+ ret
29264+ CFI_ENDPROC
29265+ENDPROC(atomic64_set_unchecked_cx8)
29266+
29267 ENTRY(atomic64_xchg_cx8)
29268 CFI_STARTPROC
29269
29270@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29271 cmpxchg8b (%esi)
29272 jne 1b
29273
29274+ pax_force_retaddr
29275 ret
29276 CFI_ENDPROC
29277 ENDPROC(atomic64_xchg_cx8)
29278
29279-.macro addsub_return func ins insc
29280-ENTRY(atomic64_\func\()_return_cx8)
29281+.macro addsub_return func ins insc unchecked=""
29282+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29283 CFI_STARTPROC
29284 SAVE ebp
29285 SAVE ebx
29286@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29287 movl %edx, %ecx
29288 \ins\()l %esi, %ebx
29289 \insc\()l %edi, %ecx
29290+
29291+.ifb \unchecked
29292+#ifdef CONFIG_PAX_REFCOUNT
29293+ into
29294+2:
29295+ _ASM_EXTABLE(2b, 3f)
29296+#endif
29297+.endif
29298+
29299 LOCK_PREFIX
29300 cmpxchg8b (%ebp)
29301 jne 1b
29302-
29303-10:
29304 movl %ebx, %eax
29305 movl %ecx, %edx
29306+
29307+.ifb \unchecked
29308+#ifdef CONFIG_PAX_REFCOUNT
29309+3:
29310+#endif
29311+.endif
29312+
29313 RESTORE edi
29314 RESTORE esi
29315 RESTORE ebx
29316 RESTORE ebp
29317+ pax_force_retaddr
29318 ret
29319 CFI_ENDPROC
29320-ENDPROC(atomic64_\func\()_return_cx8)
29321+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29322 .endm
29323
29324 addsub_return add add adc
29325 addsub_return sub sub sbb
29326+addsub_return add add adc _unchecked
29327+addsub_return sub sub sbb _unchecked
29328
29329-.macro incdec_return func ins insc
29330-ENTRY(atomic64_\func\()_return_cx8)
29331+.macro incdec_return func ins insc unchecked=""
29332+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29333 CFI_STARTPROC
29334 SAVE ebx
29335
29336@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29337 movl %edx, %ecx
29338 \ins\()l $1, %ebx
29339 \insc\()l $0, %ecx
29340+
29341+.ifb \unchecked
29342+#ifdef CONFIG_PAX_REFCOUNT
29343+ into
29344+2:
29345+ _ASM_EXTABLE(2b, 3f)
29346+#endif
29347+.endif
29348+
29349 LOCK_PREFIX
29350 cmpxchg8b (%esi)
29351 jne 1b
29352
29353-10:
29354 movl %ebx, %eax
29355 movl %ecx, %edx
29356+
29357+.ifb \unchecked
29358+#ifdef CONFIG_PAX_REFCOUNT
29359+3:
29360+#endif
29361+.endif
29362+
29363 RESTORE ebx
29364+ pax_force_retaddr
29365 ret
29366 CFI_ENDPROC
29367-ENDPROC(atomic64_\func\()_return_cx8)
29368+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29369 .endm
29370
29371 incdec_return inc add adc
29372 incdec_return dec sub sbb
29373+incdec_return inc add adc _unchecked
29374+incdec_return dec sub sbb _unchecked
29375
29376 ENTRY(atomic64_dec_if_positive_cx8)
29377 CFI_STARTPROC
29378@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29379 movl %edx, %ecx
29380 subl $1, %ebx
29381 sbb $0, %ecx
29382+
29383+#ifdef CONFIG_PAX_REFCOUNT
29384+ into
29385+1234:
29386+ _ASM_EXTABLE(1234b, 2f)
29387+#endif
29388+
29389 js 2f
29390 LOCK_PREFIX
29391 cmpxchg8b (%esi)
29392@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29393 movl %ebx, %eax
29394 movl %ecx, %edx
29395 RESTORE ebx
29396+ pax_force_retaddr
29397 ret
29398 CFI_ENDPROC
29399 ENDPROC(atomic64_dec_if_positive_cx8)
29400@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29401 movl %edx, %ecx
29402 addl %ebp, %ebx
29403 adcl %edi, %ecx
29404+
29405+#ifdef CONFIG_PAX_REFCOUNT
29406+ into
29407+1234:
29408+ _ASM_EXTABLE(1234b, 3f)
29409+#endif
29410+
29411 LOCK_PREFIX
29412 cmpxchg8b (%esi)
29413 jne 1b
29414@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29415 CFI_ADJUST_CFA_OFFSET -8
29416 RESTORE ebx
29417 RESTORE ebp
29418+ pax_force_retaddr
29419 ret
29420 4:
29421 cmpl %edx, 4(%esp)
29422@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29423 xorl %ecx, %ecx
29424 addl $1, %ebx
29425 adcl %edx, %ecx
29426+
29427+#ifdef CONFIG_PAX_REFCOUNT
29428+ into
29429+1234:
29430+ _ASM_EXTABLE(1234b, 3f)
29431+#endif
29432+
29433 LOCK_PREFIX
29434 cmpxchg8b (%esi)
29435 jne 1b
29436@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29437 movl $1, %eax
29438 3:
29439 RESTORE ebx
29440+ pax_force_retaddr
29441 ret
29442 CFI_ENDPROC
29443 ENDPROC(atomic64_inc_not_zero_cx8)
29444diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29445index e78b8eee..7e173a8 100644
29446--- a/arch/x86/lib/checksum_32.S
29447+++ b/arch/x86/lib/checksum_32.S
29448@@ -29,7 +29,8 @@
29449 #include <asm/dwarf2.h>
29450 #include <asm/errno.h>
29451 #include <asm/asm.h>
29452-
29453+#include <asm/segment.h>
29454+
29455 /*
29456 * computes a partial checksum, e.g. for TCP/UDP fragments
29457 */
29458@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29459
29460 #define ARGBASE 16
29461 #define FP 12
29462-
29463-ENTRY(csum_partial_copy_generic)
29464+
29465+ENTRY(csum_partial_copy_generic_to_user)
29466 CFI_STARTPROC
29467+
29468+#ifdef CONFIG_PAX_MEMORY_UDEREF
29469+ pushl_cfi %gs
29470+ popl_cfi %es
29471+ jmp csum_partial_copy_generic
29472+#endif
29473+
29474+ENTRY(csum_partial_copy_generic_from_user)
29475+
29476+#ifdef CONFIG_PAX_MEMORY_UDEREF
29477+ pushl_cfi %gs
29478+ popl_cfi %ds
29479+#endif
29480+
29481+ENTRY(csum_partial_copy_generic)
29482 subl $4,%esp
29483 CFI_ADJUST_CFA_OFFSET 4
29484 pushl_cfi %edi
29485@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29486 jmp 4f
29487 SRC(1: movw (%esi), %bx )
29488 addl $2, %esi
29489-DST( movw %bx, (%edi) )
29490+DST( movw %bx, %es:(%edi) )
29491 addl $2, %edi
29492 addw %bx, %ax
29493 adcl $0, %eax
29494@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29495 SRC(1: movl (%esi), %ebx )
29496 SRC( movl 4(%esi), %edx )
29497 adcl %ebx, %eax
29498-DST( movl %ebx, (%edi) )
29499+DST( movl %ebx, %es:(%edi) )
29500 adcl %edx, %eax
29501-DST( movl %edx, 4(%edi) )
29502+DST( movl %edx, %es:4(%edi) )
29503
29504 SRC( movl 8(%esi), %ebx )
29505 SRC( movl 12(%esi), %edx )
29506 adcl %ebx, %eax
29507-DST( movl %ebx, 8(%edi) )
29508+DST( movl %ebx, %es:8(%edi) )
29509 adcl %edx, %eax
29510-DST( movl %edx, 12(%edi) )
29511+DST( movl %edx, %es:12(%edi) )
29512
29513 SRC( movl 16(%esi), %ebx )
29514 SRC( movl 20(%esi), %edx )
29515 adcl %ebx, %eax
29516-DST( movl %ebx, 16(%edi) )
29517+DST( movl %ebx, %es:16(%edi) )
29518 adcl %edx, %eax
29519-DST( movl %edx, 20(%edi) )
29520+DST( movl %edx, %es:20(%edi) )
29521
29522 SRC( movl 24(%esi), %ebx )
29523 SRC( movl 28(%esi), %edx )
29524 adcl %ebx, %eax
29525-DST( movl %ebx, 24(%edi) )
29526+DST( movl %ebx, %es:24(%edi) )
29527 adcl %edx, %eax
29528-DST( movl %edx, 28(%edi) )
29529+DST( movl %edx, %es:28(%edi) )
29530
29531 lea 32(%esi), %esi
29532 lea 32(%edi), %edi
29533@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29534 shrl $2, %edx # This clears CF
29535 SRC(3: movl (%esi), %ebx )
29536 adcl %ebx, %eax
29537-DST( movl %ebx, (%edi) )
29538+DST( movl %ebx, %es:(%edi) )
29539 lea 4(%esi), %esi
29540 lea 4(%edi), %edi
29541 dec %edx
29542@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29543 jb 5f
29544 SRC( movw (%esi), %cx )
29545 leal 2(%esi), %esi
29546-DST( movw %cx, (%edi) )
29547+DST( movw %cx, %es:(%edi) )
29548 leal 2(%edi), %edi
29549 je 6f
29550 shll $16,%ecx
29551 SRC(5: movb (%esi), %cl )
29552-DST( movb %cl, (%edi) )
29553+DST( movb %cl, %es:(%edi) )
29554 6: addl %ecx, %eax
29555 adcl $0, %eax
29556 7:
29557@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29558
29559 6001:
29560 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29561- movl $-EFAULT, (%ebx)
29562+ movl $-EFAULT, %ss:(%ebx)
29563
29564 # zero the complete destination - computing the rest
29565 # is too much work
29566@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29567
29568 6002:
29569 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29570- movl $-EFAULT,(%ebx)
29571+ movl $-EFAULT,%ss:(%ebx)
29572 jmp 5000b
29573
29574 .previous
29575
29576+ pushl_cfi %ss
29577+ popl_cfi %ds
29578+ pushl_cfi %ss
29579+ popl_cfi %es
29580 popl_cfi %ebx
29581 CFI_RESTORE ebx
29582 popl_cfi %esi
29583@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29584 popl_cfi %ecx # equivalent to addl $4,%esp
29585 ret
29586 CFI_ENDPROC
29587-ENDPROC(csum_partial_copy_generic)
29588+ENDPROC(csum_partial_copy_generic_to_user)
29589
29590 #else
29591
29592 /* Version for PentiumII/PPro */
29593
29594 #define ROUND1(x) \
29595+ nop; nop; nop; \
29596 SRC(movl x(%esi), %ebx ) ; \
29597 addl %ebx, %eax ; \
29598- DST(movl %ebx, x(%edi) ) ;
29599+ DST(movl %ebx, %es:x(%edi)) ;
29600
29601 #define ROUND(x) \
29602+ nop; nop; nop; \
29603 SRC(movl x(%esi), %ebx ) ; \
29604 adcl %ebx, %eax ; \
29605- DST(movl %ebx, x(%edi) ) ;
29606+ DST(movl %ebx, %es:x(%edi)) ;
29607
29608 #define ARGBASE 12
29609-
29610-ENTRY(csum_partial_copy_generic)
29611+
29612+ENTRY(csum_partial_copy_generic_to_user)
29613 CFI_STARTPROC
29614+
29615+#ifdef CONFIG_PAX_MEMORY_UDEREF
29616+ pushl_cfi %gs
29617+ popl_cfi %es
29618+ jmp csum_partial_copy_generic
29619+#endif
29620+
29621+ENTRY(csum_partial_copy_generic_from_user)
29622+
29623+#ifdef CONFIG_PAX_MEMORY_UDEREF
29624+ pushl_cfi %gs
29625+ popl_cfi %ds
29626+#endif
29627+
29628+ENTRY(csum_partial_copy_generic)
29629 pushl_cfi %ebx
29630 CFI_REL_OFFSET ebx, 0
29631 pushl_cfi %edi
29632@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29633 subl %ebx, %edi
29634 lea -1(%esi),%edx
29635 andl $-32,%edx
29636- lea 3f(%ebx,%ebx), %ebx
29637+ lea 3f(%ebx,%ebx,2), %ebx
29638 testl %esi, %esi
29639 jmp *%ebx
29640 1: addl $64,%esi
29641@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29642 jb 5f
29643 SRC( movw (%esi), %dx )
29644 leal 2(%esi), %esi
29645-DST( movw %dx, (%edi) )
29646+DST( movw %dx, %es:(%edi) )
29647 leal 2(%edi), %edi
29648 je 6f
29649 shll $16,%edx
29650 5:
29651 SRC( movb (%esi), %dl )
29652-DST( movb %dl, (%edi) )
29653+DST( movb %dl, %es:(%edi) )
29654 6: addl %edx, %eax
29655 adcl $0, %eax
29656 7:
29657 .section .fixup, "ax"
29658 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29659- movl $-EFAULT, (%ebx)
29660+ movl $-EFAULT, %ss:(%ebx)
29661 # zero the complete destination (computing the rest is too much work)
29662 movl ARGBASE+8(%esp),%edi # dst
29663 movl ARGBASE+12(%esp),%ecx # len
29664@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29665 rep; stosb
29666 jmp 7b
29667 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29668- movl $-EFAULT, (%ebx)
29669+ movl $-EFAULT, %ss:(%ebx)
29670 jmp 7b
29671 .previous
29672
29673+#ifdef CONFIG_PAX_MEMORY_UDEREF
29674+ pushl_cfi %ss
29675+ popl_cfi %ds
29676+ pushl_cfi %ss
29677+ popl_cfi %es
29678+#endif
29679+
29680 popl_cfi %esi
29681 CFI_RESTORE esi
29682 popl_cfi %edi
29683@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29684 CFI_RESTORE ebx
29685 ret
29686 CFI_ENDPROC
29687-ENDPROC(csum_partial_copy_generic)
29688+ENDPROC(csum_partial_copy_generic_to_user)
29689
29690 #undef ROUND
29691 #undef ROUND1
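
A note on the PentiumII/PPro path above: it dispatches into the unrolled ROUND sequence with a computed jump, so every unrolled step must keep a fixed size. The %es: override adds a one-byte prefix to each store and the three nops pad the rest, growing each step by half, which appears to be why the jump-table stride changes from two to three:

    lea 3f(%ebx,%ebx), %ebx      # old: 3f + 2*%ebx
    lea 3f(%ebx,%ebx,2), %ebx    # new: 3f + 3*%ebx
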
29692diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29693index f2145cf..cea889d 100644
29694--- a/arch/x86/lib/clear_page_64.S
29695+++ b/arch/x86/lib/clear_page_64.S
29696@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29697 movl $4096/8,%ecx
29698 xorl %eax,%eax
29699 rep stosq
29700+ pax_force_retaddr
29701 ret
29702 CFI_ENDPROC
29703 ENDPROC(clear_page_c)
29704@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29705 movl $4096,%ecx
29706 xorl %eax,%eax
29707 rep stosb
29708+ pax_force_retaddr
29709 ret
29710 CFI_ENDPROC
29711 ENDPROC(clear_page_c_e)
29712@@ -43,6 +45,7 @@ ENTRY(clear_page)
29713 leaq 64(%rdi),%rdi
29714 jnz .Lloop
29715 nop
29716+ pax_force_retaddr
29717 ret
29718 CFI_ENDPROC
29719 .Lclear_page_end:
29720@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29721
29722 #include <asm/cpufeature.h>
29723
29724- .section .altinstr_replacement,"ax"
29725+ .section .altinstr_replacement,"a"
29726 1: .byte 0xeb /* jmp <disp8> */
29727 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29728 2: .byte 0xeb /* jmp <disp8> */
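
The "ax" to "a" change above drops SHF_EXECINSTR from .altinstr_replacement: the bytes in that section are templates that apply_alternatives() copies over the live code at boot, so the section only needs to be allocated, never mapped executable. A reduced sketch of that patch-by-copy idea (placeholder types, not the kernel's struct alt_instr):

    #include <string.h>

    struct alt_entry {
        unsigned char       *instr; /* live instruction bytes to patch */
        const unsigned char *repl;  /* template from .altinstr_replacement */
        unsigned char       len;
    };

    static void apply_alternative_demo(const struct alt_entry *a)
    {
        /* repl is read as data and never executed in place */
        memcpy(a->instr, a->repl, a->len);
    }
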
29729diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29730index 40a1725..5d12ac4 100644
29731--- a/arch/x86/lib/cmpxchg16b_emu.S
29732+++ b/arch/x86/lib/cmpxchg16b_emu.S
29733@@ -8,6 +8,7 @@
29734 #include <linux/linkage.h>
29735 #include <asm/dwarf2.h>
29736 #include <asm/percpu.h>
29737+#include <asm/alternative-asm.h>
29738
29739 .text
29740
29741@@ -46,12 +47,14 @@ CFI_STARTPROC
29742 CFI_REMEMBER_STATE
29743 popfq_cfi
29744 mov $1, %al
29745+ pax_force_retaddr
29746 ret
29747
29748 CFI_RESTORE_STATE
29749 .Lnot_same:
29750 popfq_cfi
29751 xor %al,%al
29752+ pax_force_retaddr
29753 ret
29754
29755 CFI_ENDPROC
29756diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29757index 176cca6..e0d658e 100644
29758--- a/arch/x86/lib/copy_page_64.S
29759+++ b/arch/x86/lib/copy_page_64.S
29760@@ -9,6 +9,7 @@ copy_page_rep:
29761 CFI_STARTPROC
29762 movl $4096/8, %ecx
29763 rep movsq
29764+ pax_force_retaddr
29765 ret
29766 CFI_ENDPROC
29767 ENDPROC(copy_page_rep)
29768@@ -24,8 +25,8 @@ ENTRY(copy_page)
29769 CFI_ADJUST_CFA_OFFSET 2*8
29770 movq %rbx, (%rsp)
29771 CFI_REL_OFFSET rbx, 0
29772- movq %r12, 1*8(%rsp)
29773- CFI_REL_OFFSET r12, 1*8
29774+ movq %r13, 1*8(%rsp)
29775+ CFI_REL_OFFSET r13, 1*8
29776
29777 movl $(4096/64)-5, %ecx
29778 .p2align 4
29779@@ -38,7 +39,7 @@ ENTRY(copy_page)
29780 movq 0x8*4(%rsi), %r9
29781 movq 0x8*5(%rsi), %r10
29782 movq 0x8*6(%rsi), %r11
29783- movq 0x8*7(%rsi), %r12
29784+ movq 0x8*7(%rsi), %r13
29785
29786 prefetcht0 5*64(%rsi)
29787
29788@@ -49,7 +50,7 @@ ENTRY(copy_page)
29789 movq %r9, 0x8*4(%rdi)
29790 movq %r10, 0x8*5(%rdi)
29791 movq %r11, 0x8*6(%rdi)
29792- movq %r12, 0x8*7(%rdi)
29793+ movq %r13, 0x8*7(%rdi)
29794
29795 leaq 64 (%rsi), %rsi
29796 leaq 64 (%rdi), %rdi
29797@@ -68,7 +69,7 @@ ENTRY(copy_page)
29798 movq 0x8*4(%rsi), %r9
29799 movq 0x8*5(%rsi), %r10
29800 movq 0x8*6(%rsi), %r11
29801- movq 0x8*7(%rsi), %r12
29802+ movq 0x8*7(%rsi), %r13
29803
29804 movq %rax, 0x8*0(%rdi)
29805 movq %rbx, 0x8*1(%rdi)
29806@@ -77,7 +78,7 @@ ENTRY(copy_page)
29807 movq %r9, 0x8*4(%rdi)
29808 movq %r10, 0x8*5(%rdi)
29809 movq %r11, 0x8*6(%rdi)
29810- movq %r12, 0x8*7(%rdi)
29811+ movq %r13, 0x8*7(%rdi)
29812
29813 leaq 64(%rdi), %rdi
29814 leaq 64(%rsi), %rsi
29815@@ -85,10 +86,11 @@ ENTRY(copy_page)
29816
29817 movq (%rsp), %rbx
29818 CFI_RESTORE rbx
29819- movq 1*8(%rsp), %r12
29820- CFI_RESTORE r12
29821+ movq 1*8(%rsp), %r13
29822+ CFI_RESTORE r13
29823 addq $2*8, %rsp
29824 CFI_ADJUST_CFA_OFFSET -2*8
29825+ pax_force_retaddr
29826 ret
29827 .Lcopy_page_end:
29828 CFI_ENDPROC
29829@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29830
29831 #include <asm/cpufeature.h>
29832
29833- .section .altinstr_replacement,"ax"
29834+ .section .altinstr_replacement,"a"
29835 1: .byte 0xeb /* jmp <disp8> */
29836 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29837 2:
29838diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29839index dee945d..a84067b 100644
29840--- a/arch/x86/lib/copy_user_64.S
29841+++ b/arch/x86/lib/copy_user_64.S
29842@@ -18,31 +18,7 @@
29843 #include <asm/alternative-asm.h>
29844 #include <asm/asm.h>
29845 #include <asm/smap.h>
29846-
29847-/*
29848- * By placing feature2 after feature1 in altinstructions section, we logically
29849- * implement:
29850- * If CPU has feature2, jmp to alt2 is used
29851- * else if CPU has feature1, jmp to alt1 is used
29852- * else jmp to orig is used.
29853- */
29854- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29855-0:
29856- .byte 0xe9 /* 32bit jump */
29857- .long \orig-1f /* by default jump to orig */
29858-1:
29859- .section .altinstr_replacement,"ax"
29860-2: .byte 0xe9 /* near jump with 32bit immediate */
29861- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29862-3: .byte 0xe9 /* near jump with 32bit immediate */
29863- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29864- .previous
29865-
29866- .section .altinstructions,"a"
29867- altinstruction_entry 0b,2b,\feature1,5,5
29868- altinstruction_entry 0b,3b,\feature2,5,5
29869- .previous
29870- .endm
29871+#include <asm/pgtable.h>
29872
29873 .macro ALIGN_DESTINATION
29874 #ifdef FIX_ALIGNMENT
29875@@ -70,52 +46,6 @@
29876 #endif
29877 .endm
29878
29879-/* Standard copy_to_user with segment limit checking */
29880-ENTRY(_copy_to_user)
29881- CFI_STARTPROC
29882- GET_THREAD_INFO(%rax)
29883- movq %rdi,%rcx
29884- addq %rdx,%rcx
29885- jc bad_to_user
29886- cmpq TI_addr_limit(%rax),%rcx
29887- ja bad_to_user
29888- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29889- copy_user_generic_unrolled,copy_user_generic_string, \
29890- copy_user_enhanced_fast_string
29891- CFI_ENDPROC
29892-ENDPROC(_copy_to_user)
29893-
29894-/* Standard copy_from_user with segment limit checking */
29895-ENTRY(_copy_from_user)
29896- CFI_STARTPROC
29897- GET_THREAD_INFO(%rax)
29898- movq %rsi,%rcx
29899- addq %rdx,%rcx
29900- jc bad_from_user
29901- cmpq TI_addr_limit(%rax),%rcx
29902- ja bad_from_user
29903- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29904- copy_user_generic_unrolled,copy_user_generic_string, \
29905- copy_user_enhanced_fast_string
29906- CFI_ENDPROC
29907-ENDPROC(_copy_from_user)
29908-
29909- .section .fixup,"ax"
29910- /* must zero dest */
29911-ENTRY(bad_from_user)
29912-bad_from_user:
29913- CFI_STARTPROC
29914- movl %edx,%ecx
29915- xorl %eax,%eax
29916- rep
29917- stosb
29918-bad_to_user:
29919- movl %edx,%eax
29920- ret
29921- CFI_ENDPROC
29922-ENDPROC(bad_from_user)
29923- .previous
29924-
29925 /*
29926 * copy_user_generic_unrolled - memory copy with exception handling.
29927 * This version is for CPUs like P4 that don't have efficient micro
29928@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29929 */
29930 ENTRY(copy_user_generic_unrolled)
29931 CFI_STARTPROC
29932+ ASM_PAX_OPEN_USERLAND
29933 ASM_STAC
29934 cmpl $8,%edx
29935 jb 20f /* less then 8 bytes, go to byte copy loop */
29936@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29937 jnz 21b
29938 23: xor %eax,%eax
29939 ASM_CLAC
29940+ ASM_PAX_CLOSE_USERLAND
29941+ pax_force_retaddr
29942 ret
29943
29944 .section .fixup,"ax"
29945@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29946 */
29947 ENTRY(copy_user_generic_string)
29948 CFI_STARTPROC
29949+ ASM_PAX_OPEN_USERLAND
29950 ASM_STAC
29951 cmpl $8,%edx
29952 jb 2f /* less than 8 bytes, go to byte copy loop */
29953@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29954 movsb
29955 xorl %eax,%eax
29956 ASM_CLAC
29957+ ASM_PAX_CLOSE_USERLAND
29958+ pax_force_retaddr
29959 ret
29960
29961 .section .fixup,"ax"
29962@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29963 */
29964 ENTRY(copy_user_enhanced_fast_string)
29965 CFI_STARTPROC
29966+ ASM_PAX_OPEN_USERLAND
29967 ASM_STAC
29968 movl %edx,%ecx
29969 1: rep
29970 movsb
29971 xorl %eax,%eax
29972 ASM_CLAC
29973+ ASM_PAX_CLOSE_USERLAND
29974+ pax_force_retaddr
29975 ret
29976
29977 .section .fixup,"ax"
29978diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29979index 6a4f43c..c70fb52 100644
29980--- a/arch/x86/lib/copy_user_nocache_64.S
29981+++ b/arch/x86/lib/copy_user_nocache_64.S
29982@@ -8,6 +8,7 @@
29983
29984 #include <linux/linkage.h>
29985 #include <asm/dwarf2.h>
29986+#include <asm/alternative-asm.h>
29987
29988 #define FIX_ALIGNMENT 1
29989
29990@@ -16,6 +17,7 @@
29991 #include <asm/thread_info.h>
29992 #include <asm/asm.h>
29993 #include <asm/smap.h>
29994+#include <asm/pgtable.h>
29995
29996 .macro ALIGN_DESTINATION
29997 #ifdef FIX_ALIGNMENT
29998@@ -49,6 +51,16 @@
29999 */
30000 ENTRY(__copy_user_nocache)
30001 CFI_STARTPROC
30002+
30003+#ifdef CONFIG_PAX_MEMORY_UDEREF
30004+ mov pax_user_shadow_base,%rcx
30005+ cmp %rcx,%rsi
30006+ jae 1f
30007+ add %rcx,%rsi
30008+1:
30009+#endif
30010+
30011+ ASM_PAX_OPEN_USERLAND
30012 ASM_STAC
30013 cmpl $8,%edx
30014 jb 20f /* less then 8 bytes, go to byte copy loop */
30015@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30016 jnz 21b
30017 23: xorl %eax,%eax
30018 ASM_CLAC
30019+ ASM_PAX_CLOSE_USERLAND
30020 sfence
30021+ pax_force_retaddr
30022 ret
30023
30024 .section .fixup,"ax"
30025diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30026index 2419d5f..fe52d0e 100644
30027--- a/arch/x86/lib/csum-copy_64.S
30028+++ b/arch/x86/lib/csum-copy_64.S
30029@@ -9,6 +9,7 @@
30030 #include <asm/dwarf2.h>
30031 #include <asm/errno.h>
30032 #include <asm/asm.h>
30033+#include <asm/alternative-asm.h>
30034
30035 /*
30036 * Checksum copy with exception handling.
30037@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30038 CFI_ADJUST_CFA_OFFSET 7*8
30039 movq %rbx, 2*8(%rsp)
30040 CFI_REL_OFFSET rbx, 2*8
30041- movq %r12, 3*8(%rsp)
30042- CFI_REL_OFFSET r12, 3*8
30043+ movq %r15, 3*8(%rsp)
30044+ CFI_REL_OFFSET r15, 3*8
30045 movq %r14, 4*8(%rsp)
30046 CFI_REL_OFFSET r14, 4*8
30047 movq %r13, 5*8(%rsp)
30048@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30049 movl %edx, %ecx
30050
30051 xorl %r9d, %r9d
30052- movq %rcx, %r12
30053+ movq %rcx, %r15
30054
30055- shrq $6, %r12
30056+ shrq $6, %r15
30057 jz .Lhandle_tail /* < 64 */
30058
30059 clc
30060
30061 /* main loop. clear in 64 byte blocks */
30062 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30063- /* r11: temp3, rdx: temp4, r12 loopcnt */
30064+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30065 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30066 .p2align 4
30067 .Lloop:
30068@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30069 adcq %r14, %rax
30070 adcq %r13, %rax
30071
30072- decl %r12d
30073+ decl %r15d
30074
30075 dest
30076 movq %rbx, (%rsi)
30077@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30078 .Lende:
30079 movq 2*8(%rsp), %rbx
30080 CFI_RESTORE rbx
30081- movq 3*8(%rsp), %r12
30082- CFI_RESTORE r12
30083+ movq 3*8(%rsp), %r15
30084+ CFI_RESTORE r15
30085 movq 4*8(%rsp), %r14
30086 CFI_RESTORE r14
30087 movq 5*8(%rsp), %r13
30088@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30089 CFI_RESTORE rbp
30090 addq $7*8, %rsp
30091 CFI_ADJUST_CFA_OFFSET -7*8
30092+ pax_force_retaddr
30093 ret
30094 CFI_RESTORE_STATE
30095
30096diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30097index 1318f75..44c30fd 100644
30098--- a/arch/x86/lib/csum-wrappers_64.c
30099+++ b/arch/x86/lib/csum-wrappers_64.c
30100@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30101 len -= 2;
30102 }
30103 }
30104+ pax_open_userland();
30105 stac();
30106- isum = csum_partial_copy_generic((__force const void *)src,
30107+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30108 dst, len, isum, errp, NULL);
30109 clac();
30110+ pax_close_userland();
30111 if (unlikely(*errp))
30112 goto out_err;
30113
30114@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30115 }
30116
30117 *errp = 0;
30118+ pax_open_userland();
30119 stac();
30120- ret = csum_partial_copy_generic(src, (void __force *)dst,
30121+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30122 len, isum, NULL, errp);
30123 clac();
30124+ pax_close_userland();
30125 return ret;
30126 }
30127 EXPORT_SYMBOL(csum_partial_copy_to_user);
30128diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30129index a451235..1daa956 100644
30130--- a/arch/x86/lib/getuser.S
30131+++ b/arch/x86/lib/getuser.S
30132@@ -33,17 +33,40 @@
30133 #include <asm/thread_info.h>
30134 #include <asm/asm.h>
30135 #include <asm/smap.h>
30136+#include <asm/segment.h>
30137+#include <asm/pgtable.h>
30138+#include <asm/alternative-asm.h>
30139+
30140+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30141+#define __copyuser_seg gs;
30142+#else
30143+#define __copyuser_seg
30144+#endif
30145
30146 .text
30147 ENTRY(__get_user_1)
30148 CFI_STARTPROC
30149+
30150+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30151 GET_THREAD_INFO(%_ASM_DX)
30152 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30153 jae bad_get_user
30154 ASM_STAC
30155-1: movzbl (%_ASM_AX),%edx
30156+
30157+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30158+ mov pax_user_shadow_base,%_ASM_DX
30159+ cmp %_ASM_DX,%_ASM_AX
30160+ jae 1234f
30161+ add %_ASM_DX,%_ASM_AX
30162+1234:
30163+#endif
30164+
30165+#endif
30166+
30167+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30168 xor %eax,%eax
30169 ASM_CLAC
30170+ pax_force_retaddr
30171 ret
30172 CFI_ENDPROC
30173 ENDPROC(__get_user_1)
30174@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30175 ENTRY(__get_user_2)
30176 CFI_STARTPROC
30177 add $1,%_ASM_AX
30178+
30179+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30180 jc bad_get_user
30181 GET_THREAD_INFO(%_ASM_DX)
30182 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30183 jae bad_get_user
30184 ASM_STAC
30185-2: movzwl -1(%_ASM_AX),%edx
30186+
30187+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30188+ mov pax_user_shadow_base,%_ASM_DX
30189+ cmp %_ASM_DX,%_ASM_AX
30190+ jae 1234f
30191+ add %_ASM_DX,%_ASM_AX
30192+1234:
30193+#endif
30194+
30195+#endif
30196+
30197+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30198 xor %eax,%eax
30199 ASM_CLAC
30200+ pax_force_retaddr
30201 ret
30202 CFI_ENDPROC
30203 ENDPROC(__get_user_2)
30204@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30205 ENTRY(__get_user_4)
30206 CFI_STARTPROC
30207 add $3,%_ASM_AX
30208+
30209+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30210 jc bad_get_user
30211 GET_THREAD_INFO(%_ASM_DX)
30212 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30213 jae bad_get_user
30214 ASM_STAC
30215-3: movl -3(%_ASM_AX),%edx
30216+
30217+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30218+ mov pax_user_shadow_base,%_ASM_DX
30219+ cmp %_ASM_DX,%_ASM_AX
30220+ jae 1234f
30221+ add %_ASM_DX,%_ASM_AX
30222+1234:
30223+#endif
30224+
30225+#endif
30226+
30227+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30228 xor %eax,%eax
30229 ASM_CLAC
30230+ pax_force_retaddr
30231 ret
30232 CFI_ENDPROC
30233 ENDPROC(__get_user_4)
30234@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30235 GET_THREAD_INFO(%_ASM_DX)
30236 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30237 jae bad_get_user
30238+
30239+#ifdef CONFIG_PAX_MEMORY_UDEREF
30240+ mov pax_user_shadow_base,%_ASM_DX
30241+ cmp %_ASM_DX,%_ASM_AX
30242+ jae 1234f
30243+ add %_ASM_DX,%_ASM_AX
30244+1234:
30245+#endif
30246+
30247 ASM_STAC
30248 4: movq -7(%_ASM_AX),%rdx
30249 xor %eax,%eax
30250 ASM_CLAC
30251+ pax_force_retaddr
30252 ret
30253 #else
30254 add $7,%_ASM_AX
30255@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30256 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30257 jae bad_get_user_8
30258 ASM_STAC
30259-4: movl -7(%_ASM_AX),%edx
30260-5: movl -3(%_ASM_AX),%ecx
30261+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30262+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30263 xor %eax,%eax
30264 ASM_CLAC
30265+ pax_force_retaddr
30266 ret
30267 #endif
30268 CFI_ENDPROC
30269@@ -113,6 +175,7 @@ bad_get_user:
30270 xor %edx,%edx
30271 mov $(-EFAULT),%_ASM_AX
30272 ASM_CLAC
30273+ pax_force_retaddr
30274 ret
30275 CFI_ENDPROC
30276 END(bad_get_user)
30277@@ -124,6 +187,7 @@ bad_get_user_8:
30278 xor %ecx,%ecx
30279 mov $(-EFAULT),%_ASM_AX
30280 ASM_CLAC
30281+ pax_force_retaddr
30282 ret
30283 CFI_ENDPROC
30284 END(bad_get_user_8)
30285diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30286index 1313ae6..84f25ea 100644
30287--- a/arch/x86/lib/insn.c
30288+++ b/arch/x86/lib/insn.c
30289@@ -20,8 +20,10 @@
30290
30291 #ifdef __KERNEL__
30292 #include <linux/string.h>
30293+#include <asm/pgtable_types.h>
30294 #else
30295 #include <string.h>
30296+#define ktla_ktva(addr) addr
30297 #endif
30298 #include <asm/inat.h>
30299 #include <asm/insn.h>
30300@@ -53,9 +55,9 @@
30301 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30302 {
30303 memset(insn, 0, sizeof(*insn));
30304- insn->kaddr = kaddr;
30305- insn->end_kaddr = kaddr + buf_len;
30306- insn->next_byte = kaddr;
30307+ insn->kaddr = ktla_ktva(kaddr);
30308+ insn->end_kaddr = insn->kaddr + buf_len;
30309+ insn->next_byte = insn->kaddr;
30310 insn->x86_64 = x86_64 ? 1 : 0;
30311 insn->opnd_bytes = 4;
30312 if (x86_64)
30313diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30314index 05a95e7..326f2fa 100644
30315--- a/arch/x86/lib/iomap_copy_64.S
30316+++ b/arch/x86/lib/iomap_copy_64.S
30317@@ -17,6 +17,7 @@
30318
30319 #include <linux/linkage.h>
30320 #include <asm/dwarf2.h>
30321+#include <asm/alternative-asm.h>
30322
30323 /*
30324 * override generic version in lib/iomap_copy.c
30325@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30326 CFI_STARTPROC
30327 movl %edx,%ecx
30328 rep movsd
30329+ pax_force_retaddr
30330 ret
30331 CFI_ENDPROC
30332 ENDPROC(__iowrite32_copy)
30333diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30334index 56313a3..0db417e 100644
30335--- a/arch/x86/lib/memcpy_64.S
30336+++ b/arch/x86/lib/memcpy_64.S
30337@@ -24,7 +24,7 @@
30338 * This gets patched over the unrolled variant (below) via the
30339 * alternative instructions framework:
30340 */
30341- .section .altinstr_replacement, "ax", @progbits
30342+ .section .altinstr_replacement, "a", @progbits
30343 .Lmemcpy_c:
30344 movq %rdi, %rax
30345 movq %rdx, %rcx
30346@@ -33,6 +33,7 @@
30347 rep movsq
30348 movl %edx, %ecx
30349 rep movsb
30350+ pax_force_retaddr
30351 ret
30352 .Lmemcpy_e:
30353 .previous
30354@@ -44,11 +45,12 @@
30355 * This gets patched over the unrolled variant (below) via the
30356 * alternative instructions framework:
30357 */
30358- .section .altinstr_replacement, "ax", @progbits
30359+ .section .altinstr_replacement, "a", @progbits
30360 .Lmemcpy_c_e:
30361 movq %rdi, %rax
30362 movq %rdx, %rcx
30363 rep movsb
30364+ pax_force_retaddr
30365 ret
30366 .Lmemcpy_e_e:
30367 .previous
30368@@ -136,6 +138,7 @@ ENTRY(memcpy)
30369 movq %r9, 1*8(%rdi)
30370 movq %r10, -2*8(%rdi, %rdx)
30371 movq %r11, -1*8(%rdi, %rdx)
30372+ pax_force_retaddr
30373 retq
30374 .p2align 4
30375 .Lless_16bytes:
30376@@ -148,6 +151,7 @@ ENTRY(memcpy)
30377 movq -1*8(%rsi, %rdx), %r9
30378 movq %r8, 0*8(%rdi)
30379 movq %r9, -1*8(%rdi, %rdx)
30380+ pax_force_retaddr
30381 retq
30382 .p2align 4
30383 .Lless_8bytes:
30384@@ -161,6 +165,7 @@ ENTRY(memcpy)
30385 movl -4(%rsi, %rdx), %r8d
30386 movl %ecx, (%rdi)
30387 movl %r8d, -4(%rdi, %rdx)
30388+ pax_force_retaddr
30389 retq
30390 .p2align 4
30391 .Lless_3bytes:
30392@@ -179,6 +184,7 @@ ENTRY(memcpy)
30393 movb %cl, (%rdi)
30394
30395 .Lend:
30396+ pax_force_retaddr
30397 retq
30398 CFI_ENDPROC
30399 ENDPROC(memcpy)
30400diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30401index 65268a6..dd1de11 100644
30402--- a/arch/x86/lib/memmove_64.S
30403+++ b/arch/x86/lib/memmove_64.S
30404@@ -202,14 +202,16 @@ ENTRY(memmove)
30405 movb (%rsi), %r11b
30406 movb %r11b, (%rdi)
30407 13:
30408+ pax_force_retaddr
30409 retq
30410 CFI_ENDPROC
30411
30412- .section .altinstr_replacement,"ax"
30413+ .section .altinstr_replacement,"a"
30414 .Lmemmove_begin_forward_efs:
30415 /* Forward moving data. */
30416 movq %rdx, %rcx
30417 rep movsb
30418+ pax_force_retaddr
30419 retq
30420 .Lmemmove_end_forward_efs:
30421 .previous
30422diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30423index 2dcb380..2eb79fe 100644
30424--- a/arch/x86/lib/memset_64.S
30425+++ b/arch/x86/lib/memset_64.S
30426@@ -16,7 +16,7 @@
30427 *
30428 * rax original destination
30429 */
30430- .section .altinstr_replacement, "ax", @progbits
30431+ .section .altinstr_replacement, "a", @progbits
30432 .Lmemset_c:
30433 movq %rdi,%r9
30434 movq %rdx,%rcx
30435@@ -30,6 +30,7 @@
30436 movl %edx,%ecx
30437 rep stosb
30438 movq %r9,%rax
30439+ pax_force_retaddr
30440 ret
30441 .Lmemset_e:
30442 .previous
30443@@ -45,13 +46,14 @@
30444 *
30445 * rax original destination
30446 */
30447- .section .altinstr_replacement, "ax", @progbits
30448+ .section .altinstr_replacement, "a", @progbits
30449 .Lmemset_c_e:
30450 movq %rdi,%r9
30451 movb %sil,%al
30452 movq %rdx,%rcx
30453 rep stosb
30454 movq %r9,%rax
30455+ pax_force_retaddr
30456 ret
30457 .Lmemset_e_e:
30458 .previous
30459@@ -118,6 +120,7 @@ ENTRY(__memset)
30460
30461 .Lende:
30462 movq %r10,%rax
30463+ pax_force_retaddr
30464 ret
30465
30466 CFI_RESTORE_STATE
30467diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30468index c9f2d9b..e7fd2c0 100644
30469--- a/arch/x86/lib/mmx_32.c
30470+++ b/arch/x86/lib/mmx_32.c
30471@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30472 {
30473 void *p;
30474 int i;
30475+ unsigned long cr0;
30476
30477 if (unlikely(in_interrupt()))
30478 return __memcpy(to, from, len);
30479@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30480 kernel_fpu_begin();
30481
30482 __asm__ __volatile__ (
30483- "1: prefetch (%0)\n" /* This set is 28 bytes */
30484- " prefetch 64(%0)\n"
30485- " prefetch 128(%0)\n"
30486- " prefetch 192(%0)\n"
30487- " prefetch 256(%0)\n"
30488+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30489+ " prefetch 64(%1)\n"
30490+ " prefetch 128(%1)\n"
30491+ " prefetch 192(%1)\n"
30492+ " prefetch 256(%1)\n"
30493 "2: \n"
30494 ".section .fixup, \"ax\"\n"
30495- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30496+ "3: \n"
30497+
30498+#ifdef CONFIG_PAX_KERNEXEC
30499+ " movl %%cr0, %0\n"
30500+ " movl %0, %%eax\n"
30501+ " andl $0xFFFEFFFF, %%eax\n"
30502+ " movl %%eax, %%cr0\n"
30503+#endif
30504+
30505+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30506+
30507+#ifdef CONFIG_PAX_KERNEXEC
30508+ " movl %0, %%cr0\n"
30509+#endif
30510+
30511 " jmp 2b\n"
30512 ".previous\n"
30513 _ASM_EXTABLE(1b, 3b)
30514- : : "r" (from));
30515+ : "=&r" (cr0) : "r" (from) : "ax");
30516
30517 for ( ; i > 5; i--) {
30518 __asm__ __volatile__ (
30519- "1: prefetch 320(%0)\n"
30520- "2: movq (%0), %%mm0\n"
30521- " movq 8(%0), %%mm1\n"
30522- " movq 16(%0), %%mm2\n"
30523- " movq 24(%0), %%mm3\n"
30524- " movq %%mm0, (%1)\n"
30525- " movq %%mm1, 8(%1)\n"
30526- " movq %%mm2, 16(%1)\n"
30527- " movq %%mm3, 24(%1)\n"
30528- " movq 32(%0), %%mm0\n"
30529- " movq 40(%0), %%mm1\n"
30530- " movq 48(%0), %%mm2\n"
30531- " movq 56(%0), %%mm3\n"
30532- " movq %%mm0, 32(%1)\n"
30533- " movq %%mm1, 40(%1)\n"
30534- " movq %%mm2, 48(%1)\n"
30535- " movq %%mm3, 56(%1)\n"
30536+ "1: prefetch 320(%1)\n"
30537+ "2: movq (%1), %%mm0\n"
30538+ " movq 8(%1), %%mm1\n"
30539+ " movq 16(%1), %%mm2\n"
30540+ " movq 24(%1), %%mm3\n"
30541+ " movq %%mm0, (%2)\n"
30542+ " movq %%mm1, 8(%2)\n"
30543+ " movq %%mm2, 16(%2)\n"
30544+ " movq %%mm3, 24(%2)\n"
30545+ " movq 32(%1), %%mm0\n"
30546+ " movq 40(%1), %%mm1\n"
30547+ " movq 48(%1), %%mm2\n"
30548+ " movq 56(%1), %%mm3\n"
30549+ " movq %%mm0, 32(%2)\n"
30550+ " movq %%mm1, 40(%2)\n"
30551+ " movq %%mm2, 48(%2)\n"
30552+ " movq %%mm3, 56(%2)\n"
30553 ".section .fixup, \"ax\"\n"
30554- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30555+ "3:\n"
30556+
30557+#ifdef CONFIG_PAX_KERNEXEC
30558+ " movl %%cr0, %0\n"
30559+ " movl %0, %%eax\n"
30560+ " andl $0xFFFEFFFF, %%eax\n"
30561+ " movl %%eax, %%cr0\n"
30562+#endif
30563+
30564+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30565+
30566+#ifdef CONFIG_PAX_KERNEXEC
30567+ " movl %0, %%cr0\n"
30568+#endif
30569+
30570 " jmp 2b\n"
30571 ".previous\n"
30572 _ASM_EXTABLE(1b, 3b)
30573- : : "r" (from), "r" (to) : "memory");
30574+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30575
30576 from += 64;
30577 to += 64;
30578@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30579 static void fast_copy_page(void *to, void *from)
30580 {
30581 int i;
30582+ unsigned long cr0;
30583
30584 kernel_fpu_begin();
30585
30586@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30587 * but that is for later. -AV
30588 */
30589 __asm__ __volatile__(
30590- "1: prefetch (%0)\n"
30591- " prefetch 64(%0)\n"
30592- " prefetch 128(%0)\n"
30593- " prefetch 192(%0)\n"
30594- " prefetch 256(%0)\n"
30595+ "1: prefetch (%1)\n"
30596+ " prefetch 64(%1)\n"
30597+ " prefetch 128(%1)\n"
30598+ " prefetch 192(%1)\n"
30599+ " prefetch 256(%1)\n"
30600 "2: \n"
30601 ".section .fixup, \"ax\"\n"
30602- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30603+ "3: \n"
30604+
30605+#ifdef CONFIG_PAX_KERNEXEC
30606+ " movl %%cr0, %0\n"
30607+ " movl %0, %%eax\n"
30608+ " andl $0xFFFEFFFF, %%eax\n"
30609+ " movl %%eax, %%cr0\n"
30610+#endif
30611+
30612+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30613+
30614+#ifdef CONFIG_PAX_KERNEXEC
30615+ " movl %0, %%cr0\n"
30616+#endif
30617+
30618 " jmp 2b\n"
30619 ".previous\n"
30620- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30621+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30622
30623 for (i = 0; i < (4096-320)/64; i++) {
30624 __asm__ __volatile__ (
30625- "1: prefetch 320(%0)\n"
30626- "2: movq (%0), %%mm0\n"
30627- " movntq %%mm0, (%1)\n"
30628- " movq 8(%0), %%mm1\n"
30629- " movntq %%mm1, 8(%1)\n"
30630- " movq 16(%0), %%mm2\n"
30631- " movntq %%mm2, 16(%1)\n"
30632- " movq 24(%0), %%mm3\n"
30633- " movntq %%mm3, 24(%1)\n"
30634- " movq 32(%0), %%mm4\n"
30635- " movntq %%mm4, 32(%1)\n"
30636- " movq 40(%0), %%mm5\n"
30637- " movntq %%mm5, 40(%1)\n"
30638- " movq 48(%0), %%mm6\n"
30639- " movntq %%mm6, 48(%1)\n"
30640- " movq 56(%0), %%mm7\n"
30641- " movntq %%mm7, 56(%1)\n"
30642+ "1: prefetch 320(%1)\n"
30643+ "2: movq (%1), %%mm0\n"
30644+ " movntq %%mm0, (%2)\n"
30645+ " movq 8(%1), %%mm1\n"
30646+ " movntq %%mm1, 8(%2)\n"
30647+ " movq 16(%1), %%mm2\n"
30648+ " movntq %%mm2, 16(%2)\n"
30649+ " movq 24(%1), %%mm3\n"
30650+ " movntq %%mm3, 24(%2)\n"
30651+ " movq 32(%1), %%mm4\n"
30652+ " movntq %%mm4, 32(%2)\n"
30653+ " movq 40(%1), %%mm5\n"
30654+ " movntq %%mm5, 40(%2)\n"
30655+ " movq 48(%1), %%mm6\n"
30656+ " movntq %%mm6, 48(%2)\n"
30657+ " movq 56(%1), %%mm7\n"
30658+ " movntq %%mm7, 56(%2)\n"
30659 ".section .fixup, \"ax\"\n"
30660- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30661+ "3:\n"
30662+
30663+#ifdef CONFIG_PAX_KERNEXEC
30664+ " movl %%cr0, %0\n"
30665+ " movl %0, %%eax\n"
30666+ " andl $0xFFFEFFFF, %%eax\n"
30667+ " movl %%eax, %%cr0\n"
30668+#endif
30669+
30670+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30671+
30672+#ifdef CONFIG_PAX_KERNEXEC
30673+ " movl %0, %%cr0\n"
30674+#endif
30675+
30676 " jmp 2b\n"
30677 ".previous\n"
30678- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30679+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30680
30681 from += 64;
30682 to += 64;
30683@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30684 static void fast_copy_page(void *to, void *from)
30685 {
30686 int i;
30687+ unsigned long cr0;
30688
30689 kernel_fpu_begin();
30690
30691 __asm__ __volatile__ (
30692- "1: prefetch (%0)\n"
30693- " prefetch 64(%0)\n"
30694- " prefetch 128(%0)\n"
30695- " prefetch 192(%0)\n"
30696- " prefetch 256(%0)\n"
30697+ "1: prefetch (%1)\n"
30698+ " prefetch 64(%1)\n"
30699+ " prefetch 128(%1)\n"
30700+ " prefetch 192(%1)\n"
30701+ " prefetch 256(%1)\n"
30702 "2: \n"
30703 ".section .fixup, \"ax\"\n"
30704- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30705+ "3: \n"
30706+
30707+#ifdef CONFIG_PAX_KERNEXEC
30708+ " movl %%cr0, %0\n"
30709+ " movl %0, %%eax\n"
30710+ " andl $0xFFFEFFFF, %%eax\n"
30711+ " movl %%eax, %%cr0\n"
30712+#endif
30713+
30714+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30715+
30716+#ifdef CONFIG_PAX_KERNEXEC
30717+ " movl %0, %%cr0\n"
30718+#endif
30719+
30720 " jmp 2b\n"
30721 ".previous\n"
30722- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30723+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30724
30725 for (i = 0; i < 4096/64; i++) {
30726 __asm__ __volatile__ (
30727- "1: prefetch 320(%0)\n"
30728- "2: movq (%0), %%mm0\n"
30729- " movq 8(%0), %%mm1\n"
30730- " movq 16(%0), %%mm2\n"
30731- " movq 24(%0), %%mm3\n"
30732- " movq %%mm0, (%1)\n"
30733- " movq %%mm1, 8(%1)\n"
30734- " movq %%mm2, 16(%1)\n"
30735- " movq %%mm3, 24(%1)\n"
30736- " movq 32(%0), %%mm0\n"
30737- " movq 40(%0), %%mm1\n"
30738- " movq 48(%0), %%mm2\n"
30739- " movq 56(%0), %%mm3\n"
30740- " movq %%mm0, 32(%1)\n"
30741- " movq %%mm1, 40(%1)\n"
30742- " movq %%mm2, 48(%1)\n"
30743- " movq %%mm3, 56(%1)\n"
30744+ "1: prefetch 320(%1)\n"
30745+ "2: movq (%1), %%mm0\n"
30746+ " movq 8(%1), %%mm1\n"
30747+ " movq 16(%1), %%mm2\n"
30748+ " movq 24(%1), %%mm3\n"
30749+ " movq %%mm0, (%2)\n"
30750+ " movq %%mm1, 8(%2)\n"
30751+ " movq %%mm2, 16(%2)\n"
30752+ " movq %%mm3, 24(%2)\n"
30753+ " movq 32(%1), %%mm0\n"
30754+ " movq 40(%1), %%mm1\n"
30755+ " movq 48(%1), %%mm2\n"
30756+ " movq 56(%1), %%mm3\n"
30757+ " movq %%mm0, 32(%2)\n"
30758+ " movq %%mm1, 40(%2)\n"
30759+ " movq %%mm2, 48(%2)\n"
30760+ " movq %%mm3, 56(%2)\n"
30761 ".section .fixup, \"ax\"\n"
30762- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30763+ "3:\n"
30764+
30765+#ifdef CONFIG_PAX_KERNEXEC
30766+ " movl %%cr0, %0\n"
30767+ " movl %0, %%eax\n"
30768+ " andl $0xFFFEFFFF, %%eax\n"
30769+ " movl %%eax, %%cr0\n"
30770+#endif
30771+
30772+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30773+
30774+#ifdef CONFIG_PAX_KERNEXEC
30775+ " movl %0, %%cr0\n"
30776+#endif
30777+
30778 " jmp 2b\n"
30779 ".previous\n"
30780 _ASM_EXTABLE(1b, 3b)
30781- : : "r" (from), "r" (to) : "memory");
30782+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30783
30784 from += 64;
30785 to += 64;
30786diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30787index f6d13ee..d789440 100644
30788--- a/arch/x86/lib/msr-reg.S
30789+++ b/arch/x86/lib/msr-reg.S
30790@@ -3,6 +3,7 @@
30791 #include <asm/dwarf2.h>
30792 #include <asm/asm.h>
30793 #include <asm/msr.h>
30794+#include <asm/alternative-asm.h>
30795
30796 #ifdef CONFIG_X86_64
30797 /*
30798@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30799 movl %edi, 28(%r10)
30800 popq_cfi %rbp
30801 popq_cfi %rbx
30802+ pax_force_retaddr
30803 ret
30804 3:
30805 CFI_RESTORE_STATE
30806diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30807index fc6ba17..d4d989d 100644
30808--- a/arch/x86/lib/putuser.S
30809+++ b/arch/x86/lib/putuser.S
30810@@ -16,7 +16,9 @@
30811 #include <asm/errno.h>
30812 #include <asm/asm.h>
30813 #include <asm/smap.h>
30814-
30815+#include <asm/segment.h>
30816+#include <asm/pgtable.h>
30817+#include <asm/alternative-asm.h>
30818
30819 /*
30820 * __put_user_X
30821@@ -30,57 +32,125 @@
30822 * as they get called from within inline assembly.
30823 */
30824
30825-#define ENTER CFI_STARTPROC ; \
30826- GET_THREAD_INFO(%_ASM_BX)
30827-#define EXIT ASM_CLAC ; \
30828- ret ; \
30829+#define ENTER CFI_STARTPROC
30830+#define EXIT ASM_CLAC ; \
30831+ pax_force_retaddr ; \
30832+ ret ; \
30833 CFI_ENDPROC
30834
30835+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30836+#define _DEST %_ASM_CX,%_ASM_BX
30837+#else
30838+#define _DEST %_ASM_CX
30839+#endif
30840+
30841+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30842+#define __copyuser_seg gs;
30843+#else
30844+#define __copyuser_seg
30845+#endif
30846+
30847 .text
30848 ENTRY(__put_user_1)
30849 ENTER
30850+
30851+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30852+ GET_THREAD_INFO(%_ASM_BX)
30853 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30854 jae bad_put_user
30855 ASM_STAC
30856-1: movb %al,(%_ASM_CX)
30857+
30858+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30859+ mov pax_user_shadow_base,%_ASM_BX
30860+ cmp %_ASM_BX,%_ASM_CX
30861+ jb 1234f
30862+ xor %ebx,%ebx
30863+1234:
30864+#endif
30865+
30866+#endif
30867+
30868+1: __copyuser_seg movb %al,(_DEST)
30869 xor %eax,%eax
30870 EXIT
30871 ENDPROC(__put_user_1)
30872
30873 ENTRY(__put_user_2)
30874 ENTER
30875+
30876+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30877+ GET_THREAD_INFO(%_ASM_BX)
30878 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30879 sub $1,%_ASM_BX
30880 cmp %_ASM_BX,%_ASM_CX
30881 jae bad_put_user
30882 ASM_STAC
30883-2: movw %ax,(%_ASM_CX)
30884+
30885+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30886+ mov pax_user_shadow_base,%_ASM_BX
30887+ cmp %_ASM_BX,%_ASM_CX
30888+ jb 1234f
30889+ xor %ebx,%ebx
30890+1234:
30891+#endif
30892+
30893+#endif
30894+
30895+2: __copyuser_seg movw %ax,(_DEST)
30896 xor %eax,%eax
30897 EXIT
30898 ENDPROC(__put_user_2)
30899
30900 ENTRY(__put_user_4)
30901 ENTER
30902+
30903+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30904+ GET_THREAD_INFO(%_ASM_BX)
30905 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30906 sub $3,%_ASM_BX
30907 cmp %_ASM_BX,%_ASM_CX
30908 jae bad_put_user
30909 ASM_STAC
30910-3: movl %eax,(%_ASM_CX)
30911+
30912+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30913+ mov pax_user_shadow_base,%_ASM_BX
30914+ cmp %_ASM_BX,%_ASM_CX
30915+ jb 1234f
30916+ xor %ebx,%ebx
30917+1234:
30918+#endif
30919+
30920+#endif
30921+
30922+3: __copyuser_seg movl %eax,(_DEST)
30923 xor %eax,%eax
30924 EXIT
30925 ENDPROC(__put_user_4)
30926
30927 ENTRY(__put_user_8)
30928 ENTER
30929+
30930+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30931+ GET_THREAD_INFO(%_ASM_BX)
30932 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30933 sub $7,%_ASM_BX
30934 cmp %_ASM_BX,%_ASM_CX
30935 jae bad_put_user
30936 ASM_STAC
30937-4: mov %_ASM_AX,(%_ASM_CX)
30938+
30939+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30940+ mov pax_user_shadow_base,%_ASM_BX
30941+ cmp %_ASM_BX,%_ASM_CX
30942+ jb 1234f
30943+ xor %ebx,%ebx
30944+1234:
30945+#endif
30946+
30947+#endif
30948+
30949+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30950 #ifdef CONFIG_X86_32
30951-5: movl %edx,4(%_ASM_CX)
30952+5: __copyuser_seg movl %edx,4(_DEST)
30953 #endif
30954 xor %eax,%eax
30955 EXIT
30956diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30957index 5dff5f0..cadebf4 100644
30958--- a/arch/x86/lib/rwsem.S
30959+++ b/arch/x86/lib/rwsem.S
30960@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30961 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30962 CFI_RESTORE __ASM_REG(dx)
30963 restore_common_regs
30964+ pax_force_retaddr
30965 ret
30966 CFI_ENDPROC
30967 ENDPROC(call_rwsem_down_read_failed)
30968@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30969 movq %rax,%rdi
30970 call rwsem_down_write_failed
30971 restore_common_regs
30972+ pax_force_retaddr
30973 ret
30974 CFI_ENDPROC
30975 ENDPROC(call_rwsem_down_write_failed)
30976@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30977 movq %rax,%rdi
30978 call rwsem_wake
30979 restore_common_regs
30980-1: ret
30981+1: pax_force_retaddr
30982+ ret
30983 CFI_ENDPROC
30984 ENDPROC(call_rwsem_wake)
30985
30986@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30987 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30988 CFI_RESTORE __ASM_REG(dx)
30989 restore_common_regs
30990+ pax_force_retaddr
30991 ret
30992 CFI_ENDPROC
30993 ENDPROC(call_rwsem_downgrade_wake)
30994diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
30995index b30b5eb..2b57052 100644
30996--- a/arch/x86/lib/thunk_64.S
30997+++ b/arch/x86/lib/thunk_64.S
30998@@ -9,6 +9,7 @@
30999 #include <asm/dwarf2.h>
31000 #include <asm/calling.h>
31001 #include <asm/asm.h>
31002+#include <asm/alternative-asm.h>
31003
31004 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31005 .macro THUNK name, func, put_ret_addr_in_rdi=0
31006@@ -16,11 +17,11 @@
31007 \name:
31008 CFI_STARTPROC
31009
31010- /* this one pushes 9 elems, the next one would be %rIP */
31011- SAVE_ARGS
31012+ /* this one pushes 15+1 elems, the next one would be %rIP */
31013+ SAVE_ARGS 8
31014
31015 .if \put_ret_addr_in_rdi
31016- movq_cfi_restore 9*8, rdi
31017+ movq_cfi_restore RIP, rdi
31018 .endif
31019
31020 call \func
31021@@ -47,9 +48,10 @@
31022
31023 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31024 CFI_STARTPROC
31025- SAVE_ARGS
31026+ SAVE_ARGS 8
31027 restore:
31028- RESTORE_ARGS
31029+ RESTORE_ARGS 1,8
31030+ pax_force_retaddr
31031 ret
31032 CFI_ENDPROC
31033 _ASM_NOKPROBE(restore)
31034diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31035index e2f5e21..4b22130 100644
31036--- a/arch/x86/lib/usercopy_32.c
31037+++ b/arch/x86/lib/usercopy_32.c
31038@@ -42,11 +42,13 @@ do { \
31039 int __d0; \
31040 might_fault(); \
31041 __asm__ __volatile__( \
31042+ __COPYUSER_SET_ES \
31043 ASM_STAC "\n" \
31044 "0: rep; stosl\n" \
31045 " movl %2,%0\n" \
31046 "1: rep; stosb\n" \
31047 "2: " ASM_CLAC "\n" \
31048+ __COPYUSER_RESTORE_ES \
31049 ".section .fixup,\"ax\"\n" \
31050 "3: lea 0(%2,%0,4),%0\n" \
31051 " jmp 2b\n" \
31052@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31053
31054 #ifdef CONFIG_X86_INTEL_USERCOPY
31055 static unsigned long
31056-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31057+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31058 {
31059 int d0, d1;
31060 __asm__ __volatile__(
31061@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31062 " .align 2,0x90\n"
31063 "3: movl 0(%4), %%eax\n"
31064 "4: movl 4(%4), %%edx\n"
31065- "5: movl %%eax, 0(%3)\n"
31066- "6: movl %%edx, 4(%3)\n"
31067+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31068+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31069 "7: movl 8(%4), %%eax\n"
31070 "8: movl 12(%4),%%edx\n"
31071- "9: movl %%eax, 8(%3)\n"
31072- "10: movl %%edx, 12(%3)\n"
31073+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31074+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31075 "11: movl 16(%4), %%eax\n"
31076 "12: movl 20(%4), %%edx\n"
31077- "13: movl %%eax, 16(%3)\n"
31078- "14: movl %%edx, 20(%3)\n"
31079+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31080+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31081 "15: movl 24(%4), %%eax\n"
31082 "16: movl 28(%4), %%edx\n"
31083- "17: movl %%eax, 24(%3)\n"
31084- "18: movl %%edx, 28(%3)\n"
31085+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31086+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31087 "19: movl 32(%4), %%eax\n"
31088 "20: movl 36(%4), %%edx\n"
31089- "21: movl %%eax, 32(%3)\n"
31090- "22: movl %%edx, 36(%3)\n"
31091+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31092+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31093 "23: movl 40(%4), %%eax\n"
31094 "24: movl 44(%4), %%edx\n"
31095- "25: movl %%eax, 40(%3)\n"
31096- "26: movl %%edx, 44(%3)\n"
31097+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31098+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31099 "27: movl 48(%4), %%eax\n"
31100 "28: movl 52(%4), %%edx\n"
31101- "29: movl %%eax, 48(%3)\n"
31102- "30: movl %%edx, 52(%3)\n"
31103+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31104+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31105 "31: movl 56(%4), %%eax\n"
31106 "32: movl 60(%4), %%edx\n"
31107- "33: movl %%eax, 56(%3)\n"
31108- "34: movl %%edx, 60(%3)\n"
31109+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31110+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31111 " addl $-64, %0\n"
31112 " addl $64, %4\n"
31113 " addl $64, %3\n"
31114@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31115 " shrl $2, %0\n"
31116 " andl $3, %%eax\n"
31117 " cld\n"
31118+ __COPYUSER_SET_ES
31119 "99: rep; movsl\n"
31120 "36: movl %%eax, %0\n"
31121 "37: rep; movsb\n"
31122 "100:\n"
31123+ __COPYUSER_RESTORE_ES
31124+ ".section .fixup,\"ax\"\n"
31125+ "101: lea 0(%%eax,%0,4),%0\n"
31126+ " jmp 100b\n"
31127+ ".previous\n"
31128+ _ASM_EXTABLE(1b,100b)
31129+ _ASM_EXTABLE(2b,100b)
31130+ _ASM_EXTABLE(3b,100b)
31131+ _ASM_EXTABLE(4b,100b)
31132+ _ASM_EXTABLE(5b,100b)
31133+ _ASM_EXTABLE(6b,100b)
31134+ _ASM_EXTABLE(7b,100b)
31135+ _ASM_EXTABLE(8b,100b)
31136+ _ASM_EXTABLE(9b,100b)
31137+ _ASM_EXTABLE(10b,100b)
31138+ _ASM_EXTABLE(11b,100b)
31139+ _ASM_EXTABLE(12b,100b)
31140+ _ASM_EXTABLE(13b,100b)
31141+ _ASM_EXTABLE(14b,100b)
31142+ _ASM_EXTABLE(15b,100b)
31143+ _ASM_EXTABLE(16b,100b)
31144+ _ASM_EXTABLE(17b,100b)
31145+ _ASM_EXTABLE(18b,100b)
31146+ _ASM_EXTABLE(19b,100b)
31147+ _ASM_EXTABLE(20b,100b)
31148+ _ASM_EXTABLE(21b,100b)
31149+ _ASM_EXTABLE(22b,100b)
31150+ _ASM_EXTABLE(23b,100b)
31151+ _ASM_EXTABLE(24b,100b)
31152+ _ASM_EXTABLE(25b,100b)
31153+ _ASM_EXTABLE(26b,100b)
31154+ _ASM_EXTABLE(27b,100b)
31155+ _ASM_EXTABLE(28b,100b)
31156+ _ASM_EXTABLE(29b,100b)
31157+ _ASM_EXTABLE(30b,100b)
31158+ _ASM_EXTABLE(31b,100b)
31159+ _ASM_EXTABLE(32b,100b)
31160+ _ASM_EXTABLE(33b,100b)
31161+ _ASM_EXTABLE(34b,100b)
31162+ _ASM_EXTABLE(35b,100b)
31163+ _ASM_EXTABLE(36b,100b)
31164+ _ASM_EXTABLE(37b,100b)
31165+ _ASM_EXTABLE(99b,101b)
31166+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31167+ : "1"(to), "2"(from), "0"(size)
31168+ : "eax", "edx", "memory");
31169+ return size;
31170+}
31171+
31172+static unsigned long
31173+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31174+{
31175+ int d0, d1;
31176+ __asm__ __volatile__(
31177+ " .align 2,0x90\n"
31178+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31179+ " cmpl $67, %0\n"
31180+ " jbe 3f\n"
31181+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31182+ " .align 2,0x90\n"
31183+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31184+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31185+ "5: movl %%eax, 0(%3)\n"
31186+ "6: movl %%edx, 4(%3)\n"
31187+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31188+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31189+ "9: movl %%eax, 8(%3)\n"
31190+ "10: movl %%edx, 12(%3)\n"
31191+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31192+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31193+ "13: movl %%eax, 16(%3)\n"
31194+ "14: movl %%edx, 20(%3)\n"
31195+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31196+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31197+ "17: movl %%eax, 24(%3)\n"
31198+ "18: movl %%edx, 28(%3)\n"
31199+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31200+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31201+ "21: movl %%eax, 32(%3)\n"
31202+ "22: movl %%edx, 36(%3)\n"
31203+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31204+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31205+ "25: movl %%eax, 40(%3)\n"
31206+ "26: movl %%edx, 44(%3)\n"
31207+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31208+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31209+ "29: movl %%eax, 48(%3)\n"
31210+ "30: movl %%edx, 52(%3)\n"
31211+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31212+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31213+ "33: movl %%eax, 56(%3)\n"
31214+ "34: movl %%edx, 60(%3)\n"
31215+ " addl $-64, %0\n"
31216+ " addl $64, %4\n"
31217+ " addl $64, %3\n"
31218+ " cmpl $63, %0\n"
31219+ " ja 1b\n"
31220+ "35: movl %0, %%eax\n"
31221+ " shrl $2, %0\n"
31222+ " andl $3, %%eax\n"
31223+ " cld\n"
31224+ "99: rep; "__copyuser_seg" movsl\n"
31225+ "36: movl %%eax, %0\n"
31226+ "37: rep; "__copyuser_seg" movsb\n"
31227+ "100:\n"
31228 ".section .fixup,\"ax\"\n"
31229 "101: lea 0(%%eax,%0,4),%0\n"
31230 " jmp 100b\n"
31231@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31232 int d0, d1;
31233 __asm__ __volatile__(
31234 " .align 2,0x90\n"
31235- "0: movl 32(%4), %%eax\n"
31236+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31237 " cmpl $67, %0\n"
31238 " jbe 2f\n"
31239- "1: movl 64(%4), %%eax\n"
31240+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31241 " .align 2,0x90\n"
31242- "2: movl 0(%4), %%eax\n"
31243- "21: movl 4(%4), %%edx\n"
31244+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31245+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31246 " movl %%eax, 0(%3)\n"
31247 " movl %%edx, 4(%3)\n"
31248- "3: movl 8(%4), %%eax\n"
31249- "31: movl 12(%4),%%edx\n"
31250+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31251+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31252 " movl %%eax, 8(%3)\n"
31253 " movl %%edx, 12(%3)\n"
31254- "4: movl 16(%4), %%eax\n"
31255- "41: movl 20(%4), %%edx\n"
31256+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31257+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31258 " movl %%eax, 16(%3)\n"
31259 " movl %%edx, 20(%3)\n"
31260- "10: movl 24(%4), %%eax\n"
31261- "51: movl 28(%4), %%edx\n"
31262+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31263+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31264 " movl %%eax, 24(%3)\n"
31265 " movl %%edx, 28(%3)\n"
31266- "11: movl 32(%4), %%eax\n"
31267- "61: movl 36(%4), %%edx\n"
31268+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31269+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31270 " movl %%eax, 32(%3)\n"
31271 " movl %%edx, 36(%3)\n"
31272- "12: movl 40(%4), %%eax\n"
31273- "71: movl 44(%4), %%edx\n"
31274+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31275+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31276 " movl %%eax, 40(%3)\n"
31277 " movl %%edx, 44(%3)\n"
31278- "13: movl 48(%4), %%eax\n"
31279- "81: movl 52(%4), %%edx\n"
31280+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31281+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31282 " movl %%eax, 48(%3)\n"
31283 " movl %%edx, 52(%3)\n"
31284- "14: movl 56(%4), %%eax\n"
31285- "91: movl 60(%4), %%edx\n"
31286+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31287+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31288 " movl %%eax, 56(%3)\n"
31289 " movl %%edx, 60(%3)\n"
31290 " addl $-64, %0\n"
31291@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31292 " shrl $2, %0\n"
31293 " andl $3, %%eax\n"
31294 " cld\n"
31295- "6: rep; movsl\n"
31296+ "6: rep; "__copyuser_seg" movsl\n"
31297 " movl %%eax,%0\n"
31298- "7: rep; movsb\n"
31299+ "7: rep; "__copyuser_seg" movsb\n"
31300 "8:\n"
31301 ".section .fixup,\"ax\"\n"
31302 "9: lea 0(%%eax,%0,4),%0\n"
31303@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31304
31305 __asm__ __volatile__(
31306 " .align 2,0x90\n"
31307- "0: movl 32(%4), %%eax\n"
31308+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31309 " cmpl $67, %0\n"
31310 " jbe 2f\n"
31311- "1: movl 64(%4), %%eax\n"
31312+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31313 " .align 2,0x90\n"
31314- "2: movl 0(%4), %%eax\n"
31315- "21: movl 4(%4), %%edx\n"
31316+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31317+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31318 " movnti %%eax, 0(%3)\n"
31319 " movnti %%edx, 4(%3)\n"
31320- "3: movl 8(%4), %%eax\n"
31321- "31: movl 12(%4),%%edx\n"
31322+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31323+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31324 " movnti %%eax, 8(%3)\n"
31325 " movnti %%edx, 12(%3)\n"
31326- "4: movl 16(%4), %%eax\n"
31327- "41: movl 20(%4), %%edx\n"
31328+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31329+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31330 " movnti %%eax, 16(%3)\n"
31331 " movnti %%edx, 20(%3)\n"
31332- "10: movl 24(%4), %%eax\n"
31333- "51: movl 28(%4), %%edx\n"
31334+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31335+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31336 " movnti %%eax, 24(%3)\n"
31337 " movnti %%edx, 28(%3)\n"
31338- "11: movl 32(%4), %%eax\n"
31339- "61: movl 36(%4), %%edx\n"
31340+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31341+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31342 " movnti %%eax, 32(%3)\n"
31343 " movnti %%edx, 36(%3)\n"
31344- "12: movl 40(%4), %%eax\n"
31345- "71: movl 44(%4), %%edx\n"
31346+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31347+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31348 " movnti %%eax, 40(%3)\n"
31349 " movnti %%edx, 44(%3)\n"
31350- "13: movl 48(%4), %%eax\n"
31351- "81: movl 52(%4), %%edx\n"
31352+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31353+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31354 " movnti %%eax, 48(%3)\n"
31355 " movnti %%edx, 52(%3)\n"
31356- "14: movl 56(%4), %%eax\n"
31357- "91: movl 60(%4), %%edx\n"
31358+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31359+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31360 " movnti %%eax, 56(%3)\n"
31361 " movnti %%edx, 60(%3)\n"
31362 " addl $-64, %0\n"
31363@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31364 " shrl $2, %0\n"
31365 " andl $3, %%eax\n"
31366 " cld\n"
31367- "6: rep; movsl\n"
31368+ "6: rep; "__copyuser_seg" movsl\n"
31369 " movl %%eax,%0\n"
31370- "7: rep; movsb\n"
31371+ "7: rep; "__copyuser_seg" movsb\n"
31372 "8:\n"
31373 ".section .fixup,\"ax\"\n"
31374 "9: lea 0(%%eax,%0,4),%0\n"
31375@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31376
31377 __asm__ __volatile__(
31378 " .align 2,0x90\n"
31379- "0: movl 32(%4), %%eax\n"
31380+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31381 " cmpl $67, %0\n"
31382 " jbe 2f\n"
31383- "1: movl 64(%4), %%eax\n"
31384+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31385 " .align 2,0x90\n"
31386- "2: movl 0(%4), %%eax\n"
31387- "21: movl 4(%4), %%edx\n"
31388+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31389+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31390 " movnti %%eax, 0(%3)\n"
31391 " movnti %%edx, 4(%3)\n"
31392- "3: movl 8(%4), %%eax\n"
31393- "31: movl 12(%4),%%edx\n"
31394+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31395+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31396 " movnti %%eax, 8(%3)\n"
31397 " movnti %%edx, 12(%3)\n"
31398- "4: movl 16(%4), %%eax\n"
31399- "41: movl 20(%4), %%edx\n"
31400+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31401+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31402 " movnti %%eax, 16(%3)\n"
31403 " movnti %%edx, 20(%3)\n"
31404- "10: movl 24(%4), %%eax\n"
31405- "51: movl 28(%4), %%edx\n"
31406+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31407+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31408 " movnti %%eax, 24(%3)\n"
31409 " movnti %%edx, 28(%3)\n"
31410- "11: movl 32(%4), %%eax\n"
31411- "61: movl 36(%4), %%edx\n"
31412+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31413+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31414 " movnti %%eax, 32(%3)\n"
31415 " movnti %%edx, 36(%3)\n"
31416- "12: movl 40(%4), %%eax\n"
31417- "71: movl 44(%4), %%edx\n"
31418+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31419+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31420 " movnti %%eax, 40(%3)\n"
31421 " movnti %%edx, 44(%3)\n"
31422- "13: movl 48(%4), %%eax\n"
31423- "81: movl 52(%4), %%edx\n"
31424+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31425+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31426 " movnti %%eax, 48(%3)\n"
31427 " movnti %%edx, 52(%3)\n"
31428- "14: movl 56(%4), %%eax\n"
31429- "91: movl 60(%4), %%edx\n"
31430+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31431+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31432 " movnti %%eax, 56(%3)\n"
31433 " movnti %%edx, 60(%3)\n"
31434 " addl $-64, %0\n"
31435@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31436 " shrl $2, %0\n"
31437 " andl $3, %%eax\n"
31438 " cld\n"
31439- "6: rep; movsl\n"
31440+ "6: rep; "__copyuser_seg" movsl\n"
31441 " movl %%eax,%0\n"
31442- "7: rep; movsb\n"
31443+ "7: rep; "__copyuser_seg" movsb\n"
31444 "8:\n"
31445 ".section .fixup,\"ax\"\n"
31446 "9: lea 0(%%eax,%0,4),%0\n"
31447@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31448 */
31449 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31450 unsigned long size);
31451-unsigned long __copy_user_intel(void __user *to, const void *from,
31452+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31453+ unsigned long size);
31454+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31455 unsigned long size);
31456 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31457 const void __user *from, unsigned long size);
31458 #endif /* CONFIG_X86_INTEL_USERCOPY */
31459
31460 /* Generic arbitrary sized copy. */
31461-#define __copy_user(to, from, size) \
31462+#define __copy_user(to, from, size, prefix, set, restore) \
31463 do { \
31464 int __d0, __d1, __d2; \
31465 __asm__ __volatile__( \
31466+ set \
31467 " cmp $7,%0\n" \
31468 " jbe 1f\n" \
31469 " movl %1,%0\n" \
31470 " negl %0\n" \
31471 " andl $7,%0\n" \
31472 " subl %0,%3\n" \
31473- "4: rep; movsb\n" \
31474+ "4: rep; "prefix"movsb\n" \
31475 " movl %3,%0\n" \
31476 " shrl $2,%0\n" \
31477 " andl $3,%3\n" \
31478 " .align 2,0x90\n" \
31479- "0: rep; movsl\n" \
31480+ "0: rep; "prefix"movsl\n" \
31481 " movl %3,%0\n" \
31482- "1: rep; movsb\n" \
31483+ "1: rep; "prefix"movsb\n" \
31484 "2:\n" \
31485+ restore \
31486 ".section .fixup,\"ax\"\n" \
31487 "5: addl %3,%0\n" \
31488 " jmp 2b\n" \
31489@@ -538,14 +650,14 @@ do { \
31490 " negl %0\n" \
31491 " andl $7,%0\n" \
31492 " subl %0,%3\n" \
31493- "4: rep; movsb\n" \
31494+ "4: rep; "__copyuser_seg"movsb\n" \
31495 " movl %3,%0\n" \
31496 " shrl $2,%0\n" \
31497 " andl $3,%3\n" \
31498 " .align 2,0x90\n" \
31499- "0: rep; movsl\n" \
31500+ "0: rep; "__copyuser_seg"movsl\n" \
31501 " movl %3,%0\n" \
31502- "1: rep; movsb\n" \
31503+ "1: rep; "__copyuser_seg"movsb\n" \
31504 "2:\n" \
31505 ".section .fixup,\"ax\"\n" \
31506 "5: addl %3,%0\n" \
31507@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31508 {
31509 stac();
31510 if (movsl_is_ok(to, from, n))
31511- __copy_user(to, from, n);
31512+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31513 else
31514- n = __copy_user_intel(to, from, n);
31515+ n = __generic_copy_to_user_intel(to, from, n);
31516 clac();
31517 return n;
31518 }
31519@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31520 {
31521 stac();
31522 if (movsl_is_ok(to, from, n))
31523- __copy_user(to, from, n);
31524+ __copy_user(to, from, n, __copyuser_seg, "", "");
31525 else
31526- n = __copy_user_intel((void __user *)to,
31527- (const void *)from, n);
31528+ n = __generic_copy_from_user_intel(to, from, n);
31529 clac();
31530 return n;
31531 }
31532@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31533 if (n > 64 && cpu_has_xmm2)
31534 n = __copy_user_intel_nocache(to, from, n);
31535 else
31536- __copy_user(to, from, n);
31537+ __copy_user(to, from, n, __copyuser_seg, "", "");
31538 #else
31539- __copy_user(to, from, n);
31540+ __copy_user(to, from, n, __copyuser_seg, "", "");
31541 #endif
31542 clac();
31543 return n;
31544 }
31545 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31546
31547-/**
31548- * copy_to_user: - Copy a block of data into user space.
31549- * @to: Destination address, in user space.
31550- * @from: Source address, in kernel space.
31551- * @n: Number of bytes to copy.
31552- *
31553- * Context: User context only. This function may sleep.
31554- *
31555- * Copy data from kernel space to user space.
31556- *
31557- * Returns number of bytes that could not be copied.
31558- * On success, this will be zero.
31559- */
31560-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31561+#ifdef CONFIG_PAX_MEMORY_UDEREF
31562+void __set_fs(mm_segment_t x)
31563 {
31564- if (access_ok(VERIFY_WRITE, to, n))
31565- n = __copy_to_user(to, from, n);
31566- return n;
31567+ switch (x.seg) {
31568+ case 0:
31569+ loadsegment(gs, 0);
31570+ break;
31571+ case TASK_SIZE_MAX:
31572+ loadsegment(gs, __USER_DS);
31573+ break;
31574+ case -1UL:
31575+ loadsegment(gs, __KERNEL_DS);
31576+ break;
31577+ default:
31578+ BUG();
31579+ }
31580 }
31581-EXPORT_SYMBOL(_copy_to_user);
31582+EXPORT_SYMBOL(__set_fs);
31583
31584-/**
31585- * copy_from_user: - Copy a block of data from user space.
31586- * @to: Destination address, in kernel space.
31587- * @from: Source address, in user space.
31588- * @n: Number of bytes to copy.
31589- *
31590- * Context: User context only. This function may sleep.
31591- *
31592- * Copy data from user space to kernel space.
31593- *
31594- * Returns number of bytes that could not be copied.
31595- * On success, this will be zero.
31596- *
31597- * If some data could not be copied, this function will pad the copied
31598- * data to the requested size using zero bytes.
31599- */
31600-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31601+void set_fs(mm_segment_t x)
31602 {
31603- if (access_ok(VERIFY_READ, from, n))
31604- n = __copy_from_user(to, from, n);
31605- else
31606- memset(to, 0, n);
31607- return n;
31608+ current_thread_info()->addr_limit = x;
31609+ __set_fs(x);
31610 }
31611-EXPORT_SYMBOL(_copy_from_user);
31612+EXPORT_SYMBOL(set_fs);
31613+#endif
31614diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31615index c905e89..01ab928 100644
31616--- a/arch/x86/lib/usercopy_64.c
31617+++ b/arch/x86/lib/usercopy_64.c
31618@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31619 might_fault();
31620 /* no memory constraint because it doesn't change any memory gcc knows
31621 about */
31622+ pax_open_userland();
31623 stac();
31624 asm volatile(
31625 " testq %[size8],%[size8]\n"
31626@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31627 _ASM_EXTABLE(0b,3b)
31628 _ASM_EXTABLE(1b,2b)
31629 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31630- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31631+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31632 [zero] "r" (0UL), [eight] "r" (8UL));
31633 clac();
31634+ pax_close_userland();
31635 return size;
31636 }
31637 EXPORT_SYMBOL(__clear_user);
31638@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31639 }
31640 EXPORT_SYMBOL(clear_user);
31641
31642-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31643+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31644 {
31645- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31646- return copy_user_generic((__force void *)to, (__force void *)from, len);
31647- }
31648- return len;
31649+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31650+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31651+ return len;
31652 }
31653 EXPORT_SYMBOL(copy_in_user);
31654
31655@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31656 * it is not necessary to optimize tail handling.
31657 */
31658 __visible unsigned long
31659-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31660+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31661 {
31662 char c;
31663 unsigned zero_len;
31664
31665+ clac();
31666+ pax_close_userland();
31667 for (; len; --len, to++) {
31668 if (__get_user_nocheck(c, from++, sizeof(char)))
31669 break;
31670@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31671 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31672 if (__put_user_nocheck(c, to++, sizeof(char)))
31673 break;
31674- clac();
31675 return len;
31676 }
31677diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31678index ecfdc46..55b9309 100644
31679--- a/arch/x86/mm/Makefile
31680+++ b/arch/x86/mm/Makefile
31681@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31682 obj-$(CONFIG_MEMTEST) += memtest.o
31683
31684 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31685+
31686+quote:="
31687+obj-$(CONFIG_X86_64) += uderef_64.o
31688+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31689diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31690index 903ec1e..c4166b2 100644
31691--- a/arch/x86/mm/extable.c
31692+++ b/arch/x86/mm/extable.c
31693@@ -6,12 +6,24 @@
31694 static inline unsigned long
31695 ex_insn_addr(const struct exception_table_entry *x)
31696 {
31697- return (unsigned long)&x->insn + x->insn;
31698+ unsigned long reloc = 0;
31699+
31700+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31701+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31702+#endif
31703+
31704+ return (unsigned long)&x->insn + x->insn + reloc;
31705 }
31706 static inline unsigned long
31707 ex_fixup_addr(const struct exception_table_entry *x)
31708 {
31709- return (unsigned long)&x->fixup + x->fixup;
31710+ unsigned long reloc = 0;
31711+
31712+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31713+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31714+#endif
31715+
31716+ return (unsigned long)&x->fixup + x->fixup + reloc;
31717 }
31718
31719 int fixup_exception(struct pt_regs *regs)
31720@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31721 unsigned long new_ip;
31722
31723 #ifdef CONFIG_PNPBIOS
31724- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31725+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31726 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31727 extern u32 pnp_bios_is_utter_crap;
31728 pnp_bios_is_utter_crap = 1;
31729@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31730 i += 4;
31731 p->fixup -= i;
31732 i += 4;
31733+
31734+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31735+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31736+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31737+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31738+#endif
31739+
31740 }
31741 }
31742
31743diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31744index e3ff27a..f38f7c0 100644
31745--- a/arch/x86/mm/fault.c
31746+++ b/arch/x86/mm/fault.c
31747@@ -13,12 +13,19 @@
31748 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31749 #include <linux/prefetch.h> /* prefetchw */
31750 #include <linux/context_tracking.h> /* exception_enter(), ... */
31751+#include <linux/unistd.h>
31752+#include <linux/compiler.h>
31753
31754 #include <asm/traps.h> /* dotraplinkage, ... */
31755 #include <asm/pgalloc.h> /* pgd_*(), ... */
31756 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31757 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31758 #include <asm/vsyscall.h> /* emulate_vsyscall */
31759+#include <asm/tlbflush.h>
31760+
31761+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31762+#include <asm/stacktrace.h>
31763+#endif
31764
31765 #define CREATE_TRACE_POINTS
31766 #include <asm/trace/exceptions.h>
31767@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31768 int ret = 0;
31769
31770 /* kprobe_running() needs smp_processor_id() */
31771- if (kprobes_built_in() && !user_mode_vm(regs)) {
31772+ if (kprobes_built_in() && !user_mode(regs)) {
31773 preempt_disable();
31774 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31775 ret = 1;
31776@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31777 return !instr_lo || (instr_lo>>1) == 1;
31778 case 0x00:
31779 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31780- if (probe_kernel_address(instr, opcode))
31781+ if (user_mode(regs)) {
31782+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31783+ return 0;
31784+ } else if (probe_kernel_address(instr, opcode))
31785 return 0;
31786
31787 *prefetch = (instr_lo == 0xF) &&
31788@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31789 while (instr < max_instr) {
31790 unsigned char opcode;
31791
31792- if (probe_kernel_address(instr, opcode))
31793+ if (user_mode(regs)) {
31794+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31795+ break;
31796+ } else if (probe_kernel_address(instr, opcode))
31797 break;
31798
31799 instr++;
31800@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31801 force_sig_info(si_signo, &info, tsk);
31802 }
31803
31804+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31805+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31806+#endif
31807+
31808+#ifdef CONFIG_PAX_EMUTRAMP
31809+static int pax_handle_fetch_fault(struct pt_regs *regs);
31810+#endif
31811+
31812+#ifdef CONFIG_PAX_PAGEEXEC
31813+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31814+{
31815+ pgd_t *pgd;
31816+ pud_t *pud;
31817+ pmd_t *pmd;
31818+
31819+ pgd = pgd_offset(mm, address);
31820+ if (!pgd_present(*pgd))
31821+ return NULL;
31822+ pud = pud_offset(pgd, address);
31823+ if (!pud_present(*pud))
31824+ return NULL;
31825+ pmd = pmd_offset(pud, address);
31826+ if (!pmd_present(*pmd))
31827+ return NULL;
31828+ return pmd;
31829+}
31830+#endif
31831+
31832 DEFINE_SPINLOCK(pgd_lock);
31833 LIST_HEAD(pgd_list);
31834
31835@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31836 for (address = VMALLOC_START & PMD_MASK;
31837 address >= TASK_SIZE && address < FIXADDR_TOP;
31838 address += PMD_SIZE) {
31839+
31840+#ifdef CONFIG_PAX_PER_CPU_PGD
31841+ unsigned long cpu;
31842+#else
31843 struct page *page;
31844+#endif
31845
31846 spin_lock(&pgd_lock);
31847+
31848+#ifdef CONFIG_PAX_PER_CPU_PGD
31849+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31850+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31851+ pmd_t *ret;
31852+
31853+ ret = vmalloc_sync_one(pgd, address);
31854+ if (!ret)
31855+ break;
31856+ pgd = get_cpu_pgd(cpu, kernel);
31857+#else
31858 list_for_each_entry(page, &pgd_list, lru) {
31859+ pgd_t *pgd;
31860 spinlock_t *pgt_lock;
31861 pmd_t *ret;
31862
31863@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31864 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31865
31866 spin_lock(pgt_lock);
31867- ret = vmalloc_sync_one(page_address(page), address);
31868+ pgd = page_address(page);
31869+#endif
31870+
31871+ ret = vmalloc_sync_one(pgd, address);
31872+
31873+#ifndef CONFIG_PAX_PER_CPU_PGD
31874 spin_unlock(pgt_lock);
31875+#endif
31876
31877 if (!ret)
31878 break;
31879@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31880 * an interrupt in the middle of a task switch..
31881 */
31882 pgd_paddr = read_cr3();
31883+
31884+#ifdef CONFIG_PAX_PER_CPU_PGD
31885+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31886+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31887+#endif
31888+
31889 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31890 if (!pmd_k)
31891 return -1;
31892@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31893 * happen within a race in page table update. In the later
31894 * case just flush:
31895 */
31896- pgd = pgd_offset(current->active_mm, address);
31897+
31898 pgd_ref = pgd_offset_k(address);
31899 if (pgd_none(*pgd_ref))
31900 return -1;
31901
31902+#ifdef CONFIG_PAX_PER_CPU_PGD
31903+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31904+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31905+ if (pgd_none(*pgd)) {
31906+ set_pgd(pgd, *pgd_ref);
31907+ arch_flush_lazy_mmu_mode();
31908+ } else {
31909+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31910+ }
31911+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31912+#else
31913+ pgd = pgd_offset(current->active_mm, address);
31914+#endif
31915+
31916 if (pgd_none(*pgd)) {
31917 set_pgd(pgd, *pgd_ref);
31918 arch_flush_lazy_mmu_mode();
31919@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31920 static int is_errata100(struct pt_regs *regs, unsigned long address)
31921 {
31922 #ifdef CONFIG_X86_64
31923- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31924+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31925 return 1;
31926 #endif
31927 return 0;
31928@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31929 }
31930
31931 static const char nx_warning[] = KERN_CRIT
31932-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31933+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31934 static const char smep_warning[] = KERN_CRIT
31935-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31936+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31937
31938 static void
31939 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31940@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31941 if (!oops_may_print())
31942 return;
31943
31944- if (error_code & PF_INSTR) {
31945+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31946 unsigned int level;
31947 pgd_t *pgd;
31948 pte_t *pte;
31949@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31950 pte = lookup_address_in_pgd(pgd, address, &level);
31951
31952 if (pte && pte_present(*pte) && !pte_exec(*pte))
31953- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31954+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31955 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31956 (pgd_flags(*pgd) & _PAGE_USER) &&
31957 (read_cr4() & X86_CR4_SMEP))
31958- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31959+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31960 }
31961
31962+#ifdef CONFIG_PAX_KERNEXEC
31963+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31964+ if (current->signal->curr_ip)
31965+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31966+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31967+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31968+ else
31969+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31970+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31971+ }
31972+#endif
31973+
31974 printk(KERN_ALERT "BUG: unable to handle kernel ");
31975 if (address < PAGE_SIZE)
31976 printk(KERN_CONT "NULL pointer dereference");
31977@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31978 return;
31979 }
31980 #endif
31981+
31982+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31983+ if (pax_is_fetch_fault(regs, error_code, address)) {
31984+
31985+#ifdef CONFIG_PAX_EMUTRAMP
31986+ switch (pax_handle_fetch_fault(regs)) {
31987+ case 2:
31988+ return;
31989+ }
31990+#endif
31991+
31992+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31993+ do_group_exit(SIGKILL);
31994+ }
31995+#endif
31996+
31997 /* Kernel addresses are always protection faults: */
31998 if (address >= TASK_SIZE)
31999 error_code |= PF_PROT;
32000@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32001 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32002 printk(KERN_ERR
32003 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32004- tsk->comm, tsk->pid, address);
32005+ tsk->comm, task_pid_nr(tsk), address);
32006 code = BUS_MCEERR_AR;
32007 }
32008 #endif
32009@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32010 return 1;
32011 }
32012
32013+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32014+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32015+{
32016+ pte_t *pte;
32017+ pmd_t *pmd;
32018+ spinlock_t *ptl;
32019+ unsigned char pte_mask;
32020+
32021+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32022+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32023+ return 0;
32024+
32025+ /* PaX: it's our fault, let's handle it if we can */
32026+
32027+ /* PaX: take a look at read faults before acquiring any locks */
32028+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32029+ /* instruction fetch attempt from a protected page in user mode */
32030+ up_read(&mm->mmap_sem);
32031+
32032+#ifdef CONFIG_PAX_EMUTRAMP
32033+ switch (pax_handle_fetch_fault(regs)) {
32034+ case 2:
32035+ return 1;
32036+ }
32037+#endif
32038+
32039+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32040+ do_group_exit(SIGKILL);
32041+ }
32042+
32043+ pmd = pax_get_pmd(mm, address);
32044+ if (unlikely(!pmd))
32045+ return 0;
32046+
32047+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32048+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32049+ pte_unmap_unlock(pte, ptl);
32050+ return 0;
32051+ }
32052+
32053+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32054+ /* write attempt to a protected page in user mode */
32055+ pte_unmap_unlock(pte, ptl);
32056+ return 0;
32057+ }
32058+
32059+#ifdef CONFIG_SMP
32060+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32061+#else
32062+ if (likely(address > get_limit(regs->cs)))
32063+#endif
32064+ {
32065+ set_pte(pte, pte_mkread(*pte));
32066+ __flush_tlb_one(address);
32067+ pte_unmap_unlock(pte, ptl);
32068+ up_read(&mm->mmap_sem);
32069+ return 1;
32070+ }
32071+
32072+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32073+
32074+ /*
32075+ * PaX: fill DTLB with user rights and retry
32076+ */
32077+ __asm__ __volatile__ (
32078+ "orb %2,(%1)\n"
32079+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32080+/*
32081+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32082+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32083+ * page fault when examined during a TLB load attempt. this is true not only
32084+ * for PTEs holding a non-present entry but also present entries that will
32085+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32086+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32087+ * for our target pages since their PTEs are simply not in the TLBs at all.
32088+
32089+ * the best thing in omitting it is that we gain around 15-20% speed in the
32090+ * fast path of the page fault handler and can get rid of tracing since we
32091+ * can no longer flush unintended entries.
32092+ */
32093+ "invlpg (%0)\n"
32094+#endif
32095+ __copyuser_seg"testb $0,(%0)\n"
32096+ "xorb %3,(%1)\n"
32097+ :
32098+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32099+ : "memory", "cc");
32100+ pte_unmap_unlock(pte, ptl);
32101+ up_read(&mm->mmap_sem);
32102+ return 1;
32103+}
32104+#endif
32105+
32106 /*
32107 * Handle a spurious fault caused by a stale TLB entry.
32108 *
32109@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32110 static inline int
32111 access_error(unsigned long error_code, struct vm_area_struct *vma)
32112 {
32113+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32114+ return 1;
32115+
32116 if (error_code & PF_WRITE) {
32117 /* write, present and write, not present: */
32118 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32119@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32120 if (error_code & PF_USER)
32121 return false;
32122
32123- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32124+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32125 return false;
32126
32127 return true;
32128@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32129 tsk = current;
32130 mm = tsk->mm;
32131
32132+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32133+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32134+ if (!search_exception_tables(regs->ip)) {
32135+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32136+ bad_area_nosemaphore(regs, error_code, address);
32137+ return;
32138+ }
32139+ if (address < pax_user_shadow_base) {
32140+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32141+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32142+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32143+ } else
32144+ address -= pax_user_shadow_base;
32145+ }
32146+#endif
32147+
32148 /*
32149 * Detect and handle instructions that would cause a page fault for
32150 * both a tracked kernel page and a userspace page.
32151@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32152 * User-mode registers count as a user access even for any
32153 * potential system fault or CPU buglet:
32154 */
32155- if (user_mode_vm(regs)) {
32156+ if (user_mode(regs)) {
32157 local_irq_enable();
32158 error_code |= PF_USER;
32159 flags |= FAULT_FLAG_USER;
32160@@ -1187,6 +1411,11 @@ retry:
32161 might_sleep();
32162 }
32163
32164+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32165+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32166+ return;
32167+#endif
32168+
32169 vma = find_vma(mm, address);
32170 if (unlikely(!vma)) {
32171 bad_area(regs, error_code, address);
32172@@ -1198,18 +1427,24 @@ retry:
32173 bad_area(regs, error_code, address);
32174 return;
32175 }
32176- if (error_code & PF_USER) {
32177- /*
32178- * Accessing the stack below %sp is always a bug.
32179- * The large cushion allows instructions like enter
32180- * and pusha to work. ("enter $65535, $31" pushes
32181- * 32 pointers and then decrements %sp by 65535.)
32182- */
32183- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32184- bad_area(regs, error_code, address);
32185- return;
32186- }
32187+ /*
32188+ * Accessing the stack below %sp is always a bug.
32189+ * The large cushion allows instructions like enter
32190+ * and pusha to work. ("enter $65535, $31" pushes
32191+ * 32 pointers and then decrements %sp by 65535.)
32192+ */
32193+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32194+ bad_area(regs, error_code, address);
32195+ return;
32196 }
32197+
32198+#ifdef CONFIG_PAX_SEGMEXEC
32199+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32200+ bad_area(regs, error_code, address);
32201+ return;
32202+ }
32203+#endif
32204+
32205 if (unlikely(expand_stack(vma, address))) {
32206 bad_area(regs, error_code, address);
32207 return;
32208@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32209 }
32210 NOKPROBE_SYMBOL(trace_do_page_fault);
32211 #endif /* CONFIG_TRACING */
32212+
32213+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32214+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32215+{
32216+ struct mm_struct *mm = current->mm;
32217+ unsigned long ip = regs->ip;
32218+
32219+ if (v8086_mode(regs))
32220+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32221+
32222+#ifdef CONFIG_PAX_PAGEEXEC
32223+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32224+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32225+ return true;
32226+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32227+ return true;
32228+ return false;
32229+ }
32230+#endif
32231+
32232+#ifdef CONFIG_PAX_SEGMEXEC
32233+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32234+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32235+ return true;
32236+ return false;
32237+ }
32238+#endif
32239+
32240+ return false;
32241+}
32242+#endif
32243+
32244+#ifdef CONFIG_PAX_EMUTRAMP
32245+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32246+{
32247+ int err;
32248+
32249+ do { /* PaX: libffi trampoline emulation */
32250+ unsigned char mov, jmp;
32251+ unsigned int addr1, addr2;
32252+
32253+#ifdef CONFIG_X86_64
32254+ if ((regs->ip + 9) >> 32)
32255+ break;
32256+#endif
32257+
32258+ err = get_user(mov, (unsigned char __user *)regs->ip);
32259+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32260+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32261+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32262+
32263+ if (err)
32264+ break;
32265+
32266+ if (mov == 0xB8 && jmp == 0xE9) {
32267+ regs->ax = addr1;
32268+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32269+ return 2;
32270+ }
32271+ } while (0);
32272+
32273+ do { /* PaX: gcc trampoline emulation #1 */
32274+ unsigned char mov1, mov2;
32275+ unsigned short jmp;
32276+ unsigned int addr1, addr2;
32277+
32278+#ifdef CONFIG_X86_64
32279+ if ((regs->ip + 11) >> 32)
32280+ break;
32281+#endif
32282+
32283+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32284+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32285+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32286+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32287+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32288+
32289+ if (err)
32290+ break;
32291+
32292+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32293+ regs->cx = addr1;
32294+ regs->ax = addr2;
32295+ regs->ip = addr2;
32296+ return 2;
32297+ }
32298+ } while (0);
32299+
32300+ do { /* PaX: gcc trampoline emulation #2 */
32301+ unsigned char mov, jmp;
32302+ unsigned int addr1, addr2;
32303+
32304+#ifdef CONFIG_X86_64
32305+ if ((regs->ip + 9) >> 32)
32306+ break;
32307+#endif
32308+
32309+ err = get_user(mov, (unsigned char __user *)regs->ip);
32310+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32311+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32312+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32313+
32314+ if (err)
32315+ break;
32316+
32317+ if (mov == 0xB9 && jmp == 0xE9) {
32318+ regs->cx = addr1;
32319+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32320+ return 2;
32321+ }
32322+ } while (0);
32323+
32324+ return 1; /* PaX in action */
32325+}
32326+
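
The first pattern above is the 10-byte i386 libffi closure trampoline: opcode 0xB8 is mov $imm32,%eax and 0xE9 is jmp rel32, whose target is relative to the end of the 5-byte jump, hence regs->ip + addr2 + 10. A hedged userspace sketch of the same match (helper name hypothetical; offsets as in the code above):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* b8 <imm32>  mov $imm32, %eax
     * e9 <rel32>  jmp rel32            target = insn end + rel32 */
    static int match_libffi_tramp(const uint8_t *p, uint32_t ip,
                                  uint32_t *eax, uint32_t *new_ip)
    {
        uint32_t imm, rel;

        if (p[0] != 0xB8 || p[5] != 0xE9)
            return 0;
        memcpy(&imm, p + 1, 4);   /* little-endian imm32 */
        memcpy(&rel, p + 6, 4);   /* little-endian rel32 */
        *eax = imm;
        *new_ip = ip + 10 + rel;  /* mirrors regs->ip + addr2 + 10 */
        return 1;
    }

    int main(void)
    {
        /* mov $0x11223344,%eax ; jmp .+0x10 (rel32 = 0x10 - 5 = 0x0b) */
        const uint8_t code[10] = { 0xB8, 0x44, 0x33, 0x22, 0x11,
                                   0xE9, 0x0B, 0x00, 0x00, 0x00 };
        uint32_t eax, nip;

        if (match_libffi_tramp(code, 0x1000, &eax, &nip))
            printf("eax=%#x new_ip=%#x\n", eax, nip);  /* 0x11223344 0x1015 */
        return 0;
    }

Emulating the two instructions in the register file is what lets EMUTRAMP keep the page itself non-executable.
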
32327+#ifdef CONFIG_X86_64
32328+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32329+{
32330+ int err;
32331+
32332+ do { /* PaX: libffi trampoline emulation */
32333+ unsigned short mov1, mov2, jmp1;
32334+ unsigned char stcclc, jmp2;
32335+ unsigned long addr1, addr2;
32336+
32337+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32338+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32339+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32340+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32341+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32342+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32343+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32344+
32345+ if (err)
32346+ break;
32347+
32348+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32349+ regs->r11 = addr1;
32350+ regs->r10 = addr2;
32351+ if (stcclc == 0xF8)
32352+ regs->flags &= ~X86_EFLAGS_CF;
32353+ else
32354+ regs->flags |= X86_EFLAGS_CF;
32355+ regs->ip = addr1;
32356+ return 2;
32357+ }
32358+ } while (0);
32359+
32360+ do { /* PaX: gcc trampoline emulation #1 */
32361+ unsigned short mov1, mov2, jmp1;
32362+ unsigned char jmp2;
32363+ unsigned int addr1;
32364+ unsigned long addr2;
32365+
32366+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32367+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32368+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32369+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32370+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32371+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32372+
32373+ if (err)
32374+ break;
32375+
32376+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32377+ regs->r11 = addr1;
32378+ regs->r10 = addr2;
32379+ regs->ip = addr1;
32380+ return 2;
32381+ }
32382+ } while (0);
32383+
32384+ do { /* PaX: gcc trampoline emulation #2 */
32385+ unsigned short mov1, mov2, jmp1;
32386+ unsigned char jmp2;
32387+ unsigned long addr1, addr2;
32388+
32389+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32390+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32391+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32392+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32393+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32394+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32395+
32396+ if (err)
32397+ break;
32398+
32399+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32400+ regs->r11 = addr1;
32401+ regs->r10 = addr2;
32402+ regs->ip = addr1;
32403+ return 2;
32404+ }
32405+ } while (0);
32406+
32407+ return 1; /* PaX in action */
32408+}
32409+#endif
32410+
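
For reference, the byte patterns matched by pax_handle_fetch_fault_64() decode as below; the 16-bit get_user() loads read opcode pairs little-endian, which is why the constants appear byte-swapped:

    /*
     * libffi:  49 bb <addr1:8>  movabs $addr1, %r11   (u16 0xBB49)
     *          49 ba <addr2:8>  movabs $addr2, %r10   (u16 0xBA49)
     *          f8 | f9          clc | stc
     *          49 ff e3         jmp *%r11             (u16 0xFF49, u8 0xE3)
     *
     * gcc #1:  41 bb <addr1:4>  mov    $addr1, %r11d  (u16 0xBB41)
     *          49 ba <addr2:8>  movabs $addr2, %r10
     *          49 ff e3         jmp *%r11
     */
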
32411+/*
32412+ * PaX: decide what to do with offenders (regs->ip = fault address)
32413+ *
32414+ * returns 1 when the task should be killed
32415+ *         2 when a known (gcc or libffi) trampoline was detected and emulated
32416+ */
32417+static int pax_handle_fetch_fault(struct pt_regs *regs)
32418+{
32419+ if (v8086_mode(regs))
32420+ return 1;
32421+
32422+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32423+ return 1;
32424+
32425+#ifdef CONFIG_X86_32
32426+ return pax_handle_fetch_fault_32(regs);
32427+#else
32428+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32429+ return pax_handle_fetch_fault_32(regs);
32430+ else
32431+ return pax_handle_fetch_fault_64(regs);
32432+#endif
32433+}
32434+#endif
32435+
32436+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32437+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32438+{
32439+ long i;
32440+
32441+ printk(KERN_ERR "PAX: bytes at PC: ");
32442+ for (i = 0; i < 20; i++) {
32443+ unsigned char c;
32444+ if (get_user(c, (unsigned char __force_user *)pc+i))
32445+ printk(KERN_CONT "?? ");
32446+ else
32447+ printk(KERN_CONT "%02x ", c);
32448+ }
32449+ printk("\n");
32450+
32451+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32452+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32453+ unsigned long c;
32454+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32455+#ifdef CONFIG_X86_32
32456+ printk(KERN_CONT "???????? ");
32457+#else
32458+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32459+ printk(KERN_CONT "???????? ???????? ");
32460+ else
32461+ printk(KERN_CONT "???????????????? ");
32462+#endif
32463+ } else {
32464+#ifdef CONFIG_X86_64
32465+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32466+ printk(KERN_CONT "%08x ", (unsigned int)c);
32467+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32468+ } else
32469+#endif
32470+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32471+ }
32472+ }
32473+ printk("\n");
32474+}
32475+#endif
32476+
32477+/**
32478+ * probe_kernel_write(): safely attempt to write to a location
32479+ * @dst: address to write to
32480+ * @src: pointer to the data that shall be written
32481+ * @size: size of the data chunk
32482+ *
32483+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32484+ * happens, handle that and return -EFAULT.
32485+ */
32486+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32487+{
32488+ long ret;
32489+ mm_segment_t old_fs = get_fs();
32490+
32491+ set_fs(KERNEL_DS);
32492+ pagefault_disable();
32493+ pax_open_kernel();
32494+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32495+ pax_close_kernel();
32496+ pagefault_enable();
32497+ set_fs(old_fs);
32498+
32499+ return ret ? -EFAULT : 0;
32500+}
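
A hedged usage sketch for the helper above (caller name hypothetical): because the copy runs with page faults disabled and with pax_open_kernel() lifting the KERNEXEC write protection, a bad or unmapped destination comes back as -EFAULT instead of an oops, which is what makes the helper suitable for code patching and debugger-style pokes:

    static int patch_byte(void *addr, unsigned char val)
    {
        /* 0 on success, -EFAULT if addr is unmapped or unwritable */
        return probe_kernel_write(addr, &val, sizeof(val));
    }
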
32501diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32502index 224b142..c2c9423 100644
32503--- a/arch/x86/mm/gup.c
32504+++ b/arch/x86/mm/gup.c
32505@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32506 addr = start;
32507 len = (unsigned long) nr_pages << PAGE_SHIFT;
32508 end = start + len;
32509- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32510+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32511 (void __user *)start, len)))
32512 return 0;
32513
32514@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32515 goto slow_irqon;
32516 #endif
32517
32518+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32519+ (void __user *)start, len)))
32520+ return 0;
32521+
32522 /*
32523 * XXX: batch / limit 'nr', to avoid large irq off latency
32524 * needs some instrumenting to determine the common sizes used by
32525diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32526index 4500142..53a363c 100644
32527--- a/arch/x86/mm/highmem_32.c
32528+++ b/arch/x86/mm/highmem_32.c
32529@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32530 idx = type + KM_TYPE_NR*smp_processor_id();
32531 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32532 BUG_ON(!pte_none(*(kmap_pte-idx)));
32533+
32534+ pax_open_kernel();
32535 set_pte(kmap_pte-idx, mk_pte(page, prot));
32536+ pax_close_kernel();
32537+
32538 arch_flush_lazy_mmu_mode();
32539
32540 return (void *)vaddr;
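
The pax_open_kernel()/pax_close_kernel() pair bracketing set_pte() is the KERNEXEC write-enable window for otherwise read-only page tables. A simplified sketch of the x86 mechanism (an assumption: the real implementation also handles preemption and paravirt, omitted here; function names hypothetical):

    static inline unsigned long open_kernel_writes(void)
    {
        unsigned long cr0 = read_cr0();

        write_cr0(cr0 & ~X86_CR0_WP);  /* ring 0 may now write R/O pages */
        return cr0;
    }

    static inline void close_kernel_writes(unsigned long cr0)
    {
        write_cr0(cr0);                /* restore CR0.WP */
    }
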
32541diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32542index 006cc91..bf05a83 100644
32543--- a/arch/x86/mm/hugetlbpage.c
32544+++ b/arch/x86/mm/hugetlbpage.c
32545@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
32546 #ifdef CONFIG_HUGETLB_PAGE
32547 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32548 unsigned long addr, unsigned long len,
32549- unsigned long pgoff, unsigned long flags)
32550+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32551 {
32552 struct hstate *h = hstate_file(file);
32553 struct vm_unmapped_area_info info;
32554-
32555+
32556 info.flags = 0;
32557 info.length = len;
32558 info.low_limit = current->mm->mmap_legacy_base;
32559 info.high_limit = TASK_SIZE;
32560 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32561 info.align_offset = 0;
32562+ info.threadstack_offset = offset;
32563 return vm_unmapped_area(&info);
32564 }
32565
32566 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32567 unsigned long addr0, unsigned long len,
32568- unsigned long pgoff, unsigned long flags)
32569+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32570 {
32571 struct hstate *h = hstate_file(file);
32572 struct vm_unmapped_area_info info;
32573@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32574 info.high_limit = current->mm->mmap_base;
32575 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32576 info.align_offset = 0;
32577+ info.threadstack_offset = offset;
32578 addr = vm_unmapped_area(&info);
32579
32580 /*
32581@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32582 VM_BUG_ON(addr != -ENOMEM);
32583 info.flags = 0;
32584 info.low_limit = TASK_UNMAPPED_BASE;
32585+
32586+#ifdef CONFIG_PAX_RANDMMAP
32587+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32588+ info.low_limit += current->mm->delta_mmap;
32589+#endif
32590+
32591 info.high_limit = TASK_SIZE;
32592 addr = vm_unmapped_area(&info);
32593 }
32594@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32595 struct hstate *h = hstate_file(file);
32596 struct mm_struct *mm = current->mm;
32597 struct vm_area_struct *vma;
32598+ unsigned long pax_task_size = TASK_SIZE;
32599+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32600
32601 if (len & ~huge_page_mask(h))
32602 return -EINVAL;
32603- if (len > TASK_SIZE)
32604+
32605+#ifdef CONFIG_PAX_SEGMEXEC
32606+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32607+ pax_task_size = SEGMEXEC_TASK_SIZE;
32608+#endif
32609+
32610+ pax_task_size -= PAGE_SIZE;
32611+
32612+ if (len > pax_task_size)
32613 return -ENOMEM;
32614
32615 if (flags & MAP_FIXED) {
32616@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32617 return addr;
32618 }
32619
32620+#ifdef CONFIG_PAX_RANDMMAP
32621+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32622+#endif
32623+
32624 if (addr) {
32625 addr = ALIGN(addr, huge_page_size(h));
32626 vma = find_vma(mm, addr);
32627- if (TASK_SIZE - len >= addr &&
32628- (!vma || addr + len <= vma->vm_start))
32629+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32630 return addr;
32631 }
32632 if (mm->get_unmapped_area == arch_get_unmapped_area)
32633 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32634- pgoff, flags);
32635+ pgoff, flags, offset);
32636 else
32637 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32638- pgoff, flags);
32639+ pgoff, flags, offset);
32640 }
32641 #endif /* CONFIG_HUGETLB_PAGE */
32642
32643diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32644index 079c3b6..7069023 100644
32645--- a/arch/x86/mm/init.c
32646+++ b/arch/x86/mm/init.c
32647@@ -4,6 +4,7 @@
32648 #include <linux/swap.h>
32649 #include <linux/memblock.h>
32650 #include <linux/bootmem.h> /* for max_low_pfn */
32651+#include <linux/tboot.h>
32652
32653 #include <asm/cacheflush.h>
32654 #include <asm/e820.h>
32655@@ -17,6 +18,8 @@
32656 #include <asm/proto.h>
32657 #include <asm/dma.h> /* for MAX_DMA_PFN */
32658 #include <asm/microcode.h>
32659+#include <asm/desc.h>
32660+#include <asm/bios_ebda.h>
32661
32662 /*
32663 * We need to define the tracepoints somewhere, and tlb.c
32664@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
32665 early_ioremap_page_table_range_init();
32666 #endif
32667
32668+#ifdef CONFIG_PAX_PER_CPU_PGD
32669+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32670+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32671+ KERNEL_PGD_PTRS);
32672+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32673+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32674+ KERNEL_PGD_PTRS);
32675+ load_cr3(get_cpu_pgd(0, kernel));
32676+#else
32677 load_cr3(swapper_pg_dir);
32678+#endif
32679+
32680 __flush_tlb_all();
32681
32682 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32683@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
32684 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32685 * mmio resources as well as potential bios/acpi data regions.
32686 */
32687+
32688+#ifdef CONFIG_GRKERNSEC_KMEM
32689+static unsigned int ebda_start __read_only;
32690+static unsigned int ebda_end __read_only;
32691+#endif
32692+
32693 int devmem_is_allowed(unsigned long pagenr)
32694 {
32695- if (pagenr < 256)
32696+#ifdef CONFIG_GRKERNSEC_KMEM
32697+ /* allow BDA */
32698+ if (!pagenr)
32699 return 1;
32700+ /* allow EBDA */
32701+ if (pagenr >= ebda_start && pagenr < ebda_end)
32702+ return 1;
32703+ /* if tboot is in use, allow access to its hardcoded serial log range */
32704+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32705+ return 1;
32706+#else
32707+ if (!pagenr)
32708+ return 1;
32709+#ifdef CONFIG_VM86
32710+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32711+ return 1;
32712+#endif
32713+#endif
32714+
32715+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32716+ return 1;
32717+#ifdef CONFIG_GRKERNSEC_KMEM
32718+ /* throw out everything else below 1MB */
32719+ if (pagenr <= 256)
32720+ return 0;
32721+#endif
32722 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32723 return 0;
32724 if (!page_is_ram(pagenr))
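
All the comparisons above are on page frame numbers (physical address >> PAGE_SHIFT): with 4 KiB pages the tboot log window 0x60000-0x68000 is pagenr 0x60-0x67, and the ISA hole 0xa0000-0x100000 is pagenr 0xa0-0xff. A userspace sketch of one check with the kernel constants inlined:

    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define ISA_START_ADDRESS 0xa0000UL
    #define ISA_END_ADDRESS   0x100000UL

    static int in_isa_hole(unsigned long pagenr)
    {
        return (ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr &&
               pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT);
    }

    int main(void)
    {
        printf("%d %d\n", in_isa_hole(0xa0), in_isa_hole(0x100));  /* 1 0 */
        return 0;
    }
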
32725@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32726 #endif
32727 }
32728
32729+#ifdef CONFIG_GRKERNSEC_KMEM
32730+static inline void gr_init_ebda(void)
32731+{
32732+ unsigned int ebda_addr;
32733+ unsigned int ebda_size = 0;
32734+
32735+ ebda_addr = get_bios_ebda();
32736+ if (ebda_addr) {
32737+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32738+ ebda_size <<= 10;
32739+ }
32740+ if (ebda_addr && ebda_size) {
32741+ ebda_start = ebda_addr >> PAGE_SHIFT;
32742+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32743+ } else {
32744+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32745+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32746+ }
32747+}
32748+#else
32749+static inline void gr_init_ebda(void) { }
32750+#endif
32751+
32752 void free_initmem(void)
32753 {
32754+#ifdef CONFIG_PAX_KERNEXEC
32755+#ifdef CONFIG_X86_32
32756+ /* PaX: limit KERNEL_CS to actual size */
32757+ unsigned long addr, limit;
32758+ struct desc_struct d;
32759+ int cpu;
32760+#else
32761+ pgd_t *pgd;
32762+ pud_t *pud;
32763+ pmd_t *pmd;
32764+ unsigned long addr, end;
32765+#endif
32766+#endif
32767+
32768+ gr_init_ebda();
32769+
32770+#ifdef CONFIG_PAX_KERNEXEC
32771+#ifdef CONFIG_X86_32
32772+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32773+ limit = (limit - 1UL) >> PAGE_SHIFT;
32774+
32775+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32776+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32777+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32778+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32779+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32780+ }
32781+
32782+ /* PaX: make KERNEL_CS read-only */
32783+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32784+ if (!paravirt_enabled())
32785+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32786+/*
32787+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32788+ pgd = pgd_offset_k(addr);
32789+ pud = pud_offset(pgd, addr);
32790+ pmd = pmd_offset(pud, addr);
32791+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32792+ }
32793+*/
32794+#ifdef CONFIG_X86_PAE
32795+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32796+/*
32797+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32798+ pgd = pgd_offset_k(addr);
32799+ pud = pud_offset(pgd, addr);
32800+ pmd = pmd_offset(pud, addr);
32801+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32802+ }
32803+*/
32804+#endif
32805+
32806+#ifdef CONFIG_MODULES
32807+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32808+#endif
32809+
32810+#else
32811+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32812+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32813+ pgd = pgd_offset_k(addr);
32814+ pud = pud_offset(pgd, addr);
32815+ pmd = pmd_offset(pud, addr);
32816+ if (!pmd_present(*pmd))
32817+ continue;
32818+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32819+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32820+ else
32821+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32822+ }
32823+
32824+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32825+ end = addr + KERNEL_IMAGE_SIZE;
32826+ for (; addr < end; addr += PMD_SIZE) {
32827+ pgd = pgd_offset_k(addr);
32828+ pud = pud_offset(pgd, addr);
32829+ pmd = pmd_offset(pud, addr);
32830+ if (!pmd_present(*pmd))
32831+ continue;
32832+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32833+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32834+ }
32835+#endif
32836+
32837+ flush_tlb_all();
32838+#endif
32839+
32840 free_init_pages("unused kernel",
32841 (unsigned long)(&__init_begin),
32842 (unsigned long)(&__init_end));
32843diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32844index c8140e1..59257fc 100644
32845--- a/arch/x86/mm/init_32.c
32846+++ b/arch/x86/mm/init_32.c
32847@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32848 bool __read_mostly __vmalloc_start_set = false;
32849
32850 /*
32851- * Creates a middle page table and puts a pointer to it in the
32852- * given global directory entry. This only returns the gd entry
32853- * in non-PAE compilation mode, since the middle layer is folded.
32854- */
32855-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32856-{
32857- pud_t *pud;
32858- pmd_t *pmd_table;
32859-
32860-#ifdef CONFIG_X86_PAE
32861- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32862- pmd_table = (pmd_t *)alloc_low_page();
32863- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32864- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32865- pud = pud_offset(pgd, 0);
32866- BUG_ON(pmd_table != pmd_offset(pud, 0));
32867-
32868- return pmd_table;
32869- }
32870-#endif
32871- pud = pud_offset(pgd, 0);
32872- pmd_table = pmd_offset(pud, 0);
32873-
32874- return pmd_table;
32875-}
32876-
32877-/*
32878 * Create a page table and place a pointer to it in a middle page
32879 * directory entry:
32880 */
32881@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32882 pte_t *page_table = (pte_t *)alloc_low_page();
32883
32884 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32885+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32886+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32887+#else
32888 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32889+#endif
32890 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32891 }
32892
32893 return pte_offset_kernel(pmd, 0);
32894 }
32895
32896+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32897+{
32898+ pud_t *pud;
32899+ pmd_t *pmd_table;
32900+
32901+ pud = pud_offset(pgd, 0);
32902+ pmd_table = pmd_offset(pud, 0);
32903+
32904+ return pmd_table;
32905+}
32906+
32907 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32908 {
32909 int pgd_idx = pgd_index(vaddr);
32910@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32911 int pgd_idx, pmd_idx;
32912 unsigned long vaddr;
32913 pgd_t *pgd;
32914+ pud_t *pud;
32915 pmd_t *pmd;
32916 pte_t *pte = NULL;
32917 unsigned long count = page_table_range_init_count(start, end);
32918@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32919 pgd = pgd_base + pgd_idx;
32920
32921 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32922- pmd = one_md_table_init(pgd);
32923- pmd = pmd + pmd_index(vaddr);
32924+ pud = pud_offset(pgd, vaddr);
32925+ pmd = pmd_offset(pud, vaddr);
32926+
32927+#ifdef CONFIG_X86_PAE
32928+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32929+#endif
32930+
32931 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32932 pmd++, pmd_idx++) {
32933 pte = page_table_kmap_check(one_page_table_init(pmd),
32934@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32935 }
32936 }
32937
32938-static inline int is_kernel_text(unsigned long addr)
32939+static inline int is_kernel_text(unsigned long start, unsigned long end)
32940 {
32941- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32942- return 1;
32943- return 0;
32944+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32945+ end <= ktla_ktva((unsigned long)_stext)) &&
32946+ (start >= ktla_ktva((unsigned long)_einittext) ||
32947+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32948+
32949+#ifdef CONFIG_ACPI_SLEEP
32950+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32951+#endif
32952+
32953+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32954+ return 0;
32955+ return 1;
32956 }
32957
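
The rewritten is_kernel_text() takes a [start, end) range and reports whether it intersects any protected region; each clause is the standard non-overlap test (start >= hi || end <= lo), and only when every region fails to overlap does the function return 0. The generic predicate it builds on, as a sketch:

    /* [start, end) and [lo, hi) intersect iff neither lies wholly
     * before the other */
    static int overlaps(unsigned long start, unsigned long end,
                        unsigned long lo, unsigned long hi)
    {
        return start < hi && end > lo;
    }
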
32958 /*
32959@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32960 unsigned long last_map_addr = end;
32961 unsigned long start_pfn, end_pfn;
32962 pgd_t *pgd_base = swapper_pg_dir;
32963- int pgd_idx, pmd_idx, pte_ofs;
32964+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32965 unsigned long pfn;
32966 pgd_t *pgd;
32967+ pud_t *pud;
32968 pmd_t *pmd;
32969 pte_t *pte;
32970 unsigned pages_2m, pages_4k;
32971@@ -291,8 +295,13 @@ repeat:
32972 pfn = start_pfn;
32973 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32974 pgd = pgd_base + pgd_idx;
32975- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32976- pmd = one_md_table_init(pgd);
32977+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32978+ pud = pud_offset(pgd, 0);
32979+ pmd = pmd_offset(pud, 0);
32980+
32981+#ifdef CONFIG_X86_PAE
32982+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32983+#endif
32984
32985 if (pfn >= end_pfn)
32986 continue;
32987@@ -304,14 +313,13 @@ repeat:
32988 #endif
32989 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32990 pmd++, pmd_idx++) {
32991- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32992+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32993
32994 /*
32995 * Map with big pages if possible, otherwise
32996 * create normal page tables:
32997 */
32998 if (use_pse) {
32999- unsigned int addr2;
33000 pgprot_t prot = PAGE_KERNEL_LARGE;
33001 /*
33002 * first pass will use the same initial
33003@@ -322,11 +330,7 @@ repeat:
33004 _PAGE_PSE);
33005
33006 pfn &= PMD_MASK >> PAGE_SHIFT;
33007- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33008- PAGE_OFFSET + PAGE_SIZE-1;
33009-
33010- if (is_kernel_text(addr) ||
33011- is_kernel_text(addr2))
33012+ if (is_kernel_text(address, address + PMD_SIZE))
33013 prot = PAGE_KERNEL_LARGE_EXEC;
33014
33015 pages_2m++;
33016@@ -343,7 +347,7 @@ repeat:
33017 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33018 pte += pte_ofs;
33019 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33020- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33021+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33022 pgprot_t prot = PAGE_KERNEL;
33023 /*
33024 * first pass will use the same initial
33025@@ -351,7 +355,7 @@ repeat:
33026 */
33027 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33028
33029- if (is_kernel_text(addr))
33030+ if (is_kernel_text(address, address + PAGE_SIZE))
33031 prot = PAGE_KERNEL_EXEC;
33032
33033 pages_4k++;
33034@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33035
33036 pud = pud_offset(pgd, va);
33037 pmd = pmd_offset(pud, va);
33038- if (!pmd_present(*pmd))
33039+	if (!pmd_present(*pmd)) /* PAX TODO: || pmd_large(*pmd) */
33040 break;
33041
33042 /* should not be large page here */
33043@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33044
33045 static void __init pagetable_init(void)
33046 {
33047- pgd_t *pgd_base = swapper_pg_dir;
33048-
33049- permanent_kmaps_init(pgd_base);
33050+ permanent_kmaps_init(swapper_pg_dir);
33051 }
33052
33053-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33054+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33055 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33056
33057 /* user-defined highmem size */
33058@@ -787,10 +789,10 @@ void __init mem_init(void)
33059 ((unsigned long)&__init_end -
33060 (unsigned long)&__init_begin) >> 10,
33061
33062- (unsigned long)&_etext, (unsigned long)&_edata,
33063- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33064+ (unsigned long)&_sdata, (unsigned long)&_edata,
33065+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33066
33067- (unsigned long)&_text, (unsigned long)&_etext,
33068+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33069 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33070
33071 /*
33072@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33073 if (!kernel_set_to_readonly)
33074 return;
33075
33076+ start = ktla_ktva(start);
33077 pr_debug("Set kernel text: %lx - %lx for read write\n",
33078 start, start+size);
33079
33080@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33081 if (!kernel_set_to_readonly)
33082 return;
33083
33084+ start = ktla_ktva(start);
33085 pr_debug("Set kernel text: %lx - %lx for read only\n",
33086 start, start+size);
33087
33088@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33089 unsigned long start = PFN_ALIGN(_text);
33090 unsigned long size = PFN_ALIGN(_etext) - start;
33091
33092+ start = ktla_ktva(start);
33093 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33094 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33095 size >> 10);
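
ktla_ktva(), used throughout this file, maps a kernel-text linear (link-time) address to the alias the kernel actually executes from; under KERNEXEC on i386 the two differ by a constant, elsewhere the macro is an identity. A sketch of the shape of the definitions, which the patch provides elsewhere (offset name per PaX convention; treat as an assumption here):

    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
    #define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)
    #define ktva_ktla(addr) (addr)
    #endif
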
33096diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33097index 30eb05a..ae671ac 100644
33098--- a/arch/x86/mm/init_64.c
33099+++ b/arch/x86/mm/init_64.c
33100@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33101 * around without checking the pgd every time.
33102 */
33103
33104-pteval_t __supported_pte_mask __read_mostly = ~0;
33105+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33106 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33107
33108 int force_personality32;
33109@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33110
33111 for (address = start; address <= end; address += PGDIR_SIZE) {
33112 const pgd_t *pgd_ref = pgd_offset_k(address);
33113+
33114+#ifdef CONFIG_PAX_PER_CPU_PGD
33115+ unsigned long cpu;
33116+#else
33117 struct page *page;
33118+#endif
33119
33120 /*
33121 * When it is called after memory hot remove, pgd_none()
33122@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33123 continue;
33124
33125 spin_lock(&pgd_lock);
33126+
33127+#ifdef CONFIG_PAX_PER_CPU_PGD
33128+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33129+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33130+
33131+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33132+ BUG_ON(pgd_page_vaddr(*pgd)
33133+ != pgd_page_vaddr(*pgd_ref));
33134+
33135+ if (removed) {
33136+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33137+ pgd_clear(pgd);
33138+ } else {
33139+ if (pgd_none(*pgd))
33140+ set_pgd(pgd, *pgd_ref);
33141+ }
33142+
33143+ pgd = pgd_offset_cpu(cpu, kernel, address);
33144+#else
33145 list_for_each_entry(page, &pgd_list, lru) {
33146 pgd_t *pgd;
33147 spinlock_t *pgt_lock;
33148@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33149 /* the pgt_lock only for Xen */
33150 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33151 spin_lock(pgt_lock);
33152+#endif
33153
33154 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33155 BUG_ON(pgd_page_vaddr(*pgd)
33156@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33157 set_pgd(pgd, *pgd_ref);
33158 }
33159
33160+#ifndef CONFIG_PAX_PER_CPU_PGD
33161 spin_unlock(pgt_lock);
33162+#endif
33163+
33164 }
33165 spin_unlock(&pgd_lock);
33166 }
33167@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33168 {
33169 if (pgd_none(*pgd)) {
33170 pud_t *pud = (pud_t *)spp_getpage();
33171- pgd_populate(&init_mm, pgd, pud);
33172+ pgd_populate_kernel(&init_mm, pgd, pud);
33173 if (pud != pud_offset(pgd, 0))
33174 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33175 pud, pud_offset(pgd, 0));
33176@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33177 {
33178 if (pud_none(*pud)) {
33179 pmd_t *pmd = (pmd_t *) spp_getpage();
33180- pud_populate(&init_mm, pud, pmd);
33181+ pud_populate_kernel(&init_mm, pud, pmd);
33182 if (pmd != pmd_offset(pud, 0))
33183 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33184 pmd, pmd_offset(pud, 0));
33185@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33186 pmd = fill_pmd(pud, vaddr);
33187 pte = fill_pte(pmd, vaddr);
33188
33189+ pax_open_kernel();
33190 set_pte(pte, new_pte);
33191+ pax_close_kernel();
33192
33193 /*
33194 * It's enough to flush this one mapping.
33195@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33196 pgd = pgd_offset_k((unsigned long)__va(phys));
33197 if (pgd_none(*pgd)) {
33198 pud = (pud_t *) spp_getpage();
33199- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33200- _PAGE_USER));
33201+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33202 }
33203 pud = pud_offset(pgd, (unsigned long)__va(phys));
33204 if (pud_none(*pud)) {
33205 pmd = (pmd_t *) spp_getpage();
33206- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33207- _PAGE_USER));
33208+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33209 }
33210 pmd = pmd_offset(pud, phys);
33211 BUG_ON(!pmd_none(*pmd));
33212@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33213 prot);
33214
33215 spin_lock(&init_mm.page_table_lock);
33216- pud_populate(&init_mm, pud, pmd);
33217+ pud_populate_kernel(&init_mm, pud, pmd);
33218 spin_unlock(&init_mm.page_table_lock);
33219 }
33220 __flush_tlb_all();
33221@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33222 page_size_mask);
33223
33224 spin_lock(&init_mm.page_table_lock);
33225- pgd_populate(&init_mm, pgd, pud);
33226+ pgd_populate_kernel(&init_mm, pgd, pud);
33227 spin_unlock(&init_mm.page_table_lock);
33228 pgd_changed = true;
33229 }
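
Under PAX_PER_CPU_PGD each CPU owns a (kernel, user) pair of page-global directories, so the sync_global_pgds() hunk above swaps the shared pgd_list walk for a per-CPU loop. Reduced to its essence (a sketch: the BUG_ON consistency checks and the hot-remove path are omitted):

    for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
        pgd_t *pgd = pgd_offset_cpu(cpu, user, address);

        if (pgd_none(*pgd))
            set_pgd(pgd, *pgd_ref);  /* propagate the new kernel mapping */

        pgd = pgd_offset_cpu(cpu, kernel, address);
        if (pgd_none(*pgd))
            set_pgd(pgd, *pgd_ref);
    }
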
33230diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33231index 9ca35fc..4b2b7b7 100644
33232--- a/arch/x86/mm/iomap_32.c
33233+++ b/arch/x86/mm/iomap_32.c
33234@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33235 type = kmap_atomic_idx_push();
33236 idx = type + KM_TYPE_NR * smp_processor_id();
33237 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33238+
33239+ pax_open_kernel();
33240 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33241+ pax_close_kernel();
33242+
33243 arch_flush_lazy_mmu_mode();
33244
33245 return (void *)vaddr;
33246diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33247index fdf617c..b9e85bc 100644
33248--- a/arch/x86/mm/ioremap.c
33249+++ b/arch/x86/mm/ioremap.c
33250@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33251 unsigned long i;
33252
33253 for (i = 0; i < nr_pages; ++i)
33254- if (pfn_valid(start_pfn + i) &&
33255- !PageReserved(pfn_to_page(start_pfn + i)))
33256+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33257+ !PageReserved(pfn_to_page(start_pfn + i))))
33258 return 1;
33259
33260 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33261@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33262 *
33263 * Caller must ensure there is only one unmapping for the same pointer.
33264 */
33265-void iounmap(volatile void __iomem *addr)
33266+void iounmap(const volatile void __iomem *addr)
33267 {
33268 struct vm_struct *p, *o;
33269
33270@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33271 */
33272 void *xlate_dev_mem_ptr(phys_addr_t phys)
33273 {
33274- void *addr;
33275- unsigned long start = phys & PAGE_MASK;
33276-
33277 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33278- if (page_is_ram(start >> PAGE_SHIFT))
33279+ if (page_is_ram(phys >> PAGE_SHIFT))
33280+#ifdef CONFIG_HIGHMEM
33281+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33282+#endif
33283 return __va(phys);
33284
33285- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33286- if (addr)
33287- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33288-
33289- return addr;
33290+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33291 }
33292
33293 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33294 {
33295 if (page_is_ram(phys >> PAGE_SHIFT))
33296+#ifdef CONFIG_HIGHMEM
33297+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33298+#endif
33299 return;
33300
33301 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33302 return;
33303 }
33304
33305-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33306+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33307
33308 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33309 {
33310@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33311 early_ioremap_setup();
33312
33313 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33314- memset(bm_pte, 0, sizeof(bm_pte));
33315- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33316+ pmd_populate_user(&init_mm, pmd, bm_pte);
33317
33318 /*
33319 * The boot-ioremap range spans multiple pmds, for which
33320diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33321index b4f2e7e..96c9c3e 100644
33322--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33323+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33324@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33325 * memory (e.g. tracked pages)? For now, we need this to avoid
33326 * invoking kmemcheck for PnP BIOS calls.
33327 */
33328- if (regs->flags & X86_VM_MASK)
33329+ if (v8086_mode(regs))
33330 return false;
33331- if (regs->cs != __KERNEL_CS)
33332+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33333 return false;
33334
33335 pte = kmemcheck_pte_lookup(address);
33336diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33337index df4552b..12c129c 100644
33338--- a/arch/x86/mm/mmap.c
33339+++ b/arch/x86/mm/mmap.c
33340@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33341 * Leave an at least ~128 MB hole with possible stack randomization.
33342 */
33343 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33344-#define MAX_GAP (TASK_SIZE/6*5)
33345+#define MAX_GAP (pax_task_size/6*5)
33346
33347 static int mmap_is_legacy(void)
33348 {
33349@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33350 return rnd << PAGE_SHIFT;
33351 }
33352
33353-static unsigned long mmap_base(void)
33354+static unsigned long mmap_base(struct mm_struct *mm)
33355 {
33356 unsigned long gap = rlimit(RLIMIT_STACK);
33357+ unsigned long pax_task_size = TASK_SIZE;
33358+
33359+#ifdef CONFIG_PAX_SEGMEXEC
33360+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33361+ pax_task_size = SEGMEXEC_TASK_SIZE;
33362+#endif
33363
33364 if (gap < MIN_GAP)
33365 gap = MIN_GAP;
33366 else if (gap > MAX_GAP)
33367 gap = MAX_GAP;
33368
33369- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33370+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33371 }
33372
33373 /*
33374 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33375 * does, but not when emulating X86_32
33376 */
33377-static unsigned long mmap_legacy_base(void)
33378+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33379 {
33380- if (mmap_is_ia32())
33381+ if (mmap_is_ia32()) {
33382+
33383+#ifdef CONFIG_PAX_SEGMEXEC
33384+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33385+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33386+ else
33387+#endif
33388+
33389 return TASK_UNMAPPED_BASE;
33390- else
33391+ } else
33392 return TASK_UNMAPPED_BASE + mmap_rnd();
33393 }
33394
33395@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33396 */
33397 void arch_pick_mmap_layout(struct mm_struct *mm)
33398 {
33399- mm->mmap_legacy_base = mmap_legacy_base();
33400- mm->mmap_base = mmap_base();
33401+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33402+ mm->mmap_base = mmap_base(mm);
33403+
33404+#ifdef CONFIG_PAX_RANDMMAP
33405+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33406+ mm->mmap_legacy_base += mm->delta_mmap;
33407+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33408+ }
33409+#endif
33410
33411 if (mmap_is_legacy()) {
33412 mm->mmap_base = mm->mmap_legacy_base;
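
The layout math in mmap_base() is a clamp: the stack gap derived from RLIMIT_STACK is bounded to [MIN_GAP, MAX_GAP] and then subtracted, together with the random offset, from the (possibly SEGMEXEC-halved) end of userspace. A userspace sketch with i386 numbers:

    #include <stdio.h>

    static unsigned long clamp_gap(unsigned long gap, unsigned long lo,
                                   unsigned long hi)
    {
        return gap < lo ? lo : gap > hi ? hi : gap;
    }

    int main(void)
    {
        unsigned long task_size = 0xc0000000UL;    /* i386 TASK_SIZE */
        unsigned long min_gap   = 128UL << 20;     /* ~128 MB + stack rnd */
        unsigned long max_gap   = task_size / 6 * 5;
        unsigned long gap       = clamp_gap(8UL << 20, min_gap, max_gap);

        printf("mmap_base ~ %#lx\n", task_size - gap);  /* mmap_rnd() omitted */
        return 0;
    }
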
33413diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33414index 0057a7a..95c7edd 100644
33415--- a/arch/x86/mm/mmio-mod.c
33416+++ b/arch/x86/mm/mmio-mod.c
33417@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33418 break;
33419 default:
33420 {
33421- unsigned char *ip = (unsigned char *)instptr;
33422+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33423 my_trace->opcode = MMIO_UNKNOWN_OP;
33424 my_trace->width = 0;
33425 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33426@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33427 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33428 void __iomem *addr)
33429 {
33430- static atomic_t next_id;
33431+ static atomic_unchecked_t next_id;
33432 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33433 /* These are page-unaligned. */
33434 struct mmiotrace_map map = {
33435@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33436 .private = trace
33437 },
33438 .phys = offset,
33439- .id = atomic_inc_return(&next_id)
33440+ .id = atomic_inc_return_unchecked(&next_id)
33441 };
33442 map.map_id = trace->id;
33443
33444@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33445 ioremap_trace_core(offset, size, addr);
33446 }
33447
33448-static void iounmap_trace_core(volatile void __iomem *addr)
33449+static void iounmap_trace_core(const volatile void __iomem *addr)
33450 {
33451 struct mmiotrace_map map = {
33452 .phys = 0,
33453@@ -328,7 +328,7 @@ not_enabled:
33454 }
33455 }
33456
33457-void mmiotrace_iounmap(volatile void __iomem *addr)
33458+void mmiotrace_iounmap(const volatile void __iomem *addr)
33459 {
33460 might_sleep();
33461 if (is_enabled()) /* recheck and proper locking in *_core() */
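
The atomic_t to atomic_unchecked_t conversion above is the usual PAX_REFCOUNT opt-out: with that feature enabled, plain atomic operations trap on overflow to stop refcount-overflow exploits, so counters that are allowed to wrap, like this trace ID, switch to the _unchecked variants. The resulting pattern:

    static atomic_unchecked_t next_id;

    static int new_trace_id(void)
    {
        /* wraparound is harmless for a display-only identifier */
        return atomic_inc_return_unchecked(&next_id);
    }
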
33462diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33463index 1a88370..3f598b5 100644
33464--- a/arch/x86/mm/numa.c
33465+++ b/arch/x86/mm/numa.c
33466@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33467 }
33468 }
33469
33470-static int __init numa_register_memblks(struct numa_meminfo *mi)
33471+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33472 {
33473 unsigned long uninitialized_var(pfn_align);
33474 int i, nid;
33475diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33476index 536ea2f..f42c293 100644
33477--- a/arch/x86/mm/pageattr.c
33478+++ b/arch/x86/mm/pageattr.c
33479@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33480 */
33481 #ifdef CONFIG_PCI_BIOS
33482 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33483- pgprot_val(forbidden) |= _PAGE_NX;
33484+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33485 #endif
33486
33487 /*
33488@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33489 * Does not cover __inittext since that is gone later on. On
33490 * 64bit we do not enforce !NX on the low mapping
33491 */
33492- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33493- pgprot_val(forbidden) |= _PAGE_NX;
33494+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33495+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33496
33497+#ifdef CONFIG_DEBUG_RODATA
33498 /*
33499 * The .rodata section needs to be read-only. Using the pfn
33500 * catches all aliases.
33501@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33502 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33503 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33504 pgprot_val(forbidden) |= _PAGE_RW;
33505+#endif
33506
33507 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33508 /*
33509@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33510 }
33511 #endif
33512
33513+#ifdef CONFIG_PAX_KERNEXEC
33514+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33515+ pgprot_val(forbidden) |= _PAGE_RW;
33516+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33517+ }
33518+#endif
33519+
33520 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33521
33522 return prot;
33523@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33524 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33525 {
33526 /* change init_mm */
33527+ pax_open_kernel();
33528 set_pte_atomic(kpte, pte);
33529+
33530 #ifdef CONFIG_X86_32
33531 if (!SHARED_KERNEL_PMD) {
33532+
33533+#ifdef CONFIG_PAX_PER_CPU_PGD
33534+ unsigned long cpu;
33535+#else
33536 struct page *page;
33537+#endif
33538
33539+#ifdef CONFIG_PAX_PER_CPU_PGD
33540+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33541+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33542+#else
33543 list_for_each_entry(page, &pgd_list, lru) {
33544- pgd_t *pgd;
33545+ pgd_t *pgd = (pgd_t *)page_address(page);
33546+#endif
33547+
33548 pud_t *pud;
33549 pmd_t *pmd;
33550
33551- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33552+ pgd += pgd_index(address);
33553 pud = pud_offset(pgd, address);
33554 pmd = pmd_offset(pud, address);
33555 set_pte_atomic((pte_t *)pmd, pte);
33556 }
33557 }
33558 #endif
33559+ pax_close_kernel();
33560 }
33561
33562 static int
33563diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33564index 7ac6869..c0ba541 100644
33565--- a/arch/x86/mm/pat.c
33566+++ b/arch/x86/mm/pat.c
33567@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33568 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33569
33570 if (pg_flags == _PGMT_DEFAULT)
33571- return -1;
33572+ return _PAGE_CACHE_MODE_NUM;
33573 else if (pg_flags == _PGMT_WC)
33574 return _PAGE_CACHE_MODE_WC;
33575 else if (pg_flags == _PGMT_UC_MINUS)
33576@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33577
33578 page = pfn_to_page(pfn);
33579 type = get_page_memtype(page);
33580- if (type != -1) {
33581+ if (type != _PAGE_CACHE_MODE_NUM) {
33582 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33583 start, end - 1, type, req_type);
33584 if (new_type)
33585@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33586
33587 if (!entry) {
33588 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33589- current->comm, current->pid, start, end - 1);
33590+ current->comm, task_pid_nr(current), start, end - 1);
33591 return -EINVAL;
33592 }
33593
33594@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33595 page = pfn_to_page(paddr >> PAGE_SHIFT);
33596 rettype = get_page_memtype(page);
33597 /*
33598- * -1 from get_page_memtype() implies RAM page is in its
33599+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33600 * default state and not reserved, and hence of type WB
33601 */
33602- if (rettype == -1)
33603+ if (rettype == _PAGE_CACHE_MODE_NUM)
33604 rettype = _PAGE_CACHE_MODE_WB;
33605
33606 return rettype;
33607@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33608
33609 while (cursor < to) {
33610 if (!devmem_is_allowed(pfn)) {
33611- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33612- current->comm, from, to - 1);
33613+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33614+ current->comm, from, to - 1, cursor);
33615 return 0;
33616 }
33617 cursor += PAGE_SIZE;
33618@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33619 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33620 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33621 "for [mem %#010Lx-%#010Lx]\n",
33622- current->comm, current->pid,
33623+ current->comm, task_pid_nr(current),
33624 cattr_name(pcm),
33625 base, (unsigned long long)(base + size-1));
33626 return -EINVAL;
33627@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33628 pcm = lookup_memtype(paddr);
33629 if (want_pcm != pcm) {
33630 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33631- current->comm, current->pid,
33632+ current->comm, task_pid_nr(current),
33633 cattr_name(want_pcm),
33634 (unsigned long long)paddr,
33635 (unsigned long long)(paddr + size - 1),
33636@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33637 free_memtype(paddr, paddr + size);
33638 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33639 " for [mem %#010Lx-%#010Lx], got %s\n",
33640- current->comm, current->pid,
33641+ current->comm, task_pid_nr(current),
33642 cattr_name(want_pcm),
33643 (unsigned long long)paddr,
33644 (unsigned long long)(paddr + size - 1),
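
The current->pid to task_pid_nr(current) substitutions in this file follow the kernel's PID-namespace convention: task_pid_nr() is the sanctioned accessor for a task's PID as seen from the initial namespace, while open-coded ->pid reads have been discouraged since PID namespaces were introduced, even though the numeric value is the same for the global view. Illustrative form of the pattern (message text abbreviated):

    printk(KERN_WARNING "%s:%d map pfn range conflict\n",
           current->comm, task_pid_nr(current));
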
33645diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33646index 6582adc..fcc5d0b 100644
33647--- a/arch/x86/mm/pat_rbtree.c
33648+++ b/arch/x86/mm/pat_rbtree.c
33649@@ -161,7 +161,7 @@ success:
33650
33651 failure:
33652 printk(KERN_INFO "%s:%d conflicting memory types "
33653- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33654+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33655 end, cattr_name(found_type), cattr_name(match->type));
33656 return -EBUSY;
33657 }
33658diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33659index 9f0614d..92ae64a 100644
33660--- a/arch/x86/mm/pf_in.c
33661+++ b/arch/x86/mm/pf_in.c
33662@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33663 int i;
33664 enum reason_type rv = OTHERS;
33665
33666- p = (unsigned char *)ins_addr;
33667+ p = (unsigned char *)ktla_ktva(ins_addr);
33668 p += skip_prefix(p, &prf);
33669 p += get_opcode(p, &opcode);
33670
33671@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33672 struct prefix_bits prf;
33673 int i;
33674
33675- p = (unsigned char *)ins_addr;
33676+ p = (unsigned char *)ktla_ktva(ins_addr);
33677 p += skip_prefix(p, &prf);
33678 p += get_opcode(p, &opcode);
33679
33680@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33681 struct prefix_bits prf;
33682 int i;
33683
33684- p = (unsigned char *)ins_addr;
33685+ p = (unsigned char *)ktla_ktva(ins_addr);
33686 p += skip_prefix(p, &prf);
33687 p += get_opcode(p, &opcode);
33688
33689@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33690 struct prefix_bits prf;
33691 int i;
33692
33693- p = (unsigned char *)ins_addr;
33694+ p = (unsigned char *)ktla_ktva(ins_addr);
33695 p += skip_prefix(p, &prf);
33696 p += get_opcode(p, &opcode);
33697 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33698@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33699 struct prefix_bits prf;
33700 int i;
33701
33702- p = (unsigned char *)ins_addr;
33703+ p = (unsigned char *)ktla_ktva(ins_addr);
33704 p += skip_prefix(p, &prf);
33705 p += get_opcode(p, &opcode);
33706 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33707diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33708index 6fb6927..4fc13c0 100644
33709--- a/arch/x86/mm/pgtable.c
33710+++ b/arch/x86/mm/pgtable.c
33711@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33712 list_del(&page->lru);
33713 }
33714
33715-#define UNSHARED_PTRS_PER_PGD \
33716- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33717+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33718+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33719
33720+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33721+{
33722+ unsigned int count = USER_PGD_PTRS;
33723
33724+ if (!pax_user_shadow_base)
33725+ return;
33726+
33727+ while (count--)
33728+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33729+}
33730+#endif
33731+
33732+#ifdef CONFIG_PAX_PER_CPU_PGD
33733+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33734+{
33735+ unsigned int count = USER_PGD_PTRS;
33736+
33737+ while (count--) {
33738+ pgd_t pgd;
33739+
33740+#ifdef CONFIG_X86_64
33741+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33742+#else
33743+ pgd = *src++;
33744+#endif
33745+
33746+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33747+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33748+#endif
33749+
33750+ *dst++ = pgd;
33751+ }
33752+
33753+}
33754+#endif
33755+
33756+#ifdef CONFIG_X86_64
33757+#define pxd_t pud_t
33758+#define pyd_t pgd_t
33759+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33760+#define pgtable_pxd_page_ctor(page) true
33761+#define pgtable_pxd_page_dtor(page)
33762+#define pxd_free(mm, pud) pud_free((mm), (pud))
33763+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33764+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33765+#define PYD_SIZE PGDIR_SIZE
33766+#else
33767+#define pxd_t pmd_t
33768+#define pyd_t pud_t
33769+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33770+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33771+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33772+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33773+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33774+#define pyd_offset(mm, address) pud_offset((mm), (address))
33775+#define PYD_SIZE PUD_SIZE
33776+#endif
33777+
33778+#ifdef CONFIG_PAX_PER_CPU_PGD
33779+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33780+static inline void pgd_dtor(pgd_t *pgd) {}
33781+#else
33782 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33783 {
33784 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33785@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33786 pgd_list_del(pgd);
33787 spin_unlock(&pgd_lock);
33788 }
33789+#endif
33790
33791 /*
33792 * List of all pgd's needed for non-PAE so it can invalidate entries
33793@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33794 * -- nyc
33795 */
33796
33797-#ifdef CONFIG_X86_PAE
33798+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33799 /*
33800 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33801 * updating the top-level pagetable entries to guarantee the
33802@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33803 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33804 * and initialize the kernel pmds here.
33805 */
33806-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33807+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33808
33809 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33810 {
33811@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33812 */
33813 flush_tlb_mm(mm);
33814 }
33815+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33816+#define PREALLOCATED_PXDS USER_PGD_PTRS
33817 #else /* !CONFIG_X86_PAE */
33818
33819 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33820-#define PREALLOCATED_PMDS 0
33821+#define PREALLOCATED_PXDS 0
33822
33823 #endif /* CONFIG_X86_PAE */
33824
33825-static void free_pmds(pmd_t *pmds[])
33826+static void free_pxds(pxd_t *pxds[])
33827 {
33828 int i;
33829
33830- for(i = 0; i < PREALLOCATED_PMDS; i++)
33831- if (pmds[i]) {
33832- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33833- free_page((unsigned long)pmds[i]);
33834+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33835+ if (pxds[i]) {
33836+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33837+ free_page((unsigned long)pxds[i]);
33838 }
33839 }
33840
33841-static int preallocate_pmds(pmd_t *pmds[])
33842+static int preallocate_pxds(pxd_t *pxds[])
33843 {
33844 int i;
33845 bool failed = false;
33846
33847- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33848- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33849- if (!pmd)
33850+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33851+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33852+ if (!pxd)
33853 failed = true;
33854- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33855- free_page((unsigned long)pmd);
33856- pmd = NULL;
33857+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33858+ free_page((unsigned long)pxd);
33859+ pxd = NULL;
33860 failed = true;
33861 }
33862- pmds[i] = pmd;
33863+ pxds[i] = pxd;
33864 }
33865
33866 if (failed) {
33867- free_pmds(pmds);
33868+ free_pxds(pxds);
33869 return -ENOMEM;
33870 }
33871
33872@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33873 * preallocate which never got a corresponding vma will need to be
33874 * freed manually.
33875 */
33876-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33877+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33878 {
33879 int i;
33880
33881- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33882+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33883 pgd_t pgd = pgdp[i];
33884
33885 if (pgd_val(pgd) != 0) {
33886- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33887+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33888
33889- pgdp[i] = native_make_pgd(0);
33890+ set_pgd(pgdp + i, native_make_pgd(0));
33891
33892- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33893- pmd_free(mm, pmd);
33894+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33895+ pxd_free(mm, pxd);
33896 }
33897 }
33898 }
33899
33900-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33901+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33902 {
33903- pud_t *pud;
33904+ pyd_t *pyd;
33905 int i;
33906
33907- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33908+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33909 return;
33910
33911- pud = pud_offset(pgd, 0);
33912-
33913- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33914- pmd_t *pmd = pmds[i];
33915+#ifdef CONFIG_X86_64
33916+ pyd = pyd_offset(mm, 0L);
33917+#else
33918+ pyd = pyd_offset(pgd, 0L);
33919+#endif
33920
33921+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33922+ pxd_t *pxd = pxds[i];
33923 if (i >= KERNEL_PGD_BOUNDARY)
33924- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33925- sizeof(pmd_t) * PTRS_PER_PMD);
33926+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33927+ sizeof(pxd_t) * PTRS_PER_PMD);
33928
33929- pud_populate(mm, pud, pmd);
33930+ pyd_populate(mm, pyd, pxd);
33931 }
33932 }
33933
33934 pgd_t *pgd_alloc(struct mm_struct *mm)
33935 {
33936 pgd_t *pgd;
33937- pmd_t *pmds[PREALLOCATED_PMDS];
33938+ pxd_t *pxds[PREALLOCATED_PXDS];
33939
33940 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33941
33942@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33943
33944 mm->pgd = pgd;
33945
33946- if (preallocate_pmds(pmds) != 0)
33947+ if (preallocate_pxds(pxds) != 0)
33948 goto out_free_pgd;
33949
33950 if (paravirt_pgd_alloc(mm) != 0)
33951- goto out_free_pmds;
33952+ goto out_free_pxds;
33953
33954 /*
33955 * Make sure that pre-populating the pmds is atomic with
33956@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33957 spin_lock(&pgd_lock);
33958
33959 pgd_ctor(mm, pgd);
33960- pgd_prepopulate_pmd(mm, pgd, pmds);
33961+ pgd_prepopulate_pxd(mm, pgd, pxds);
33962
33963 spin_unlock(&pgd_lock);
33964
33965 return pgd;
33966
33967-out_free_pmds:
33968- free_pmds(pmds);
33969+out_free_pxds:
33970+ free_pxds(pxds);
33971 out_free_pgd:
33972 free_page((unsigned long)pgd);
33973 out:
33974@@ -313,7 +380,7 @@ out:
33975
33976 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33977 {
33978- pgd_mop_up_pmds(mm, pgd);
33979+ pgd_mop_up_pxds(mm, pgd);
33980 pgd_dtor(pgd);
33981 paravirt_pgd_free(mm, pgd);
33982 free_page((unsigned long)pgd);
33983diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
33984index 75cc097..79a097f 100644
33985--- a/arch/x86/mm/pgtable_32.c
33986+++ b/arch/x86/mm/pgtable_32.c
33987@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
33988 return;
33989 }
33990 pte = pte_offset_kernel(pmd, vaddr);
33991+
33992+ pax_open_kernel();
33993 if (pte_val(pteval))
33994 set_pte_at(&init_mm, vaddr, pte, pteval);
33995 else
33996 pte_clear(&init_mm, vaddr, pte);
33997+ pax_close_kernel();
33998
33999 /*
34000 * It's enough to flush this one mapping.
34001diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34002index e666cbb..61788c45 100644
34003--- a/arch/x86/mm/physaddr.c
34004+++ b/arch/x86/mm/physaddr.c
34005@@ -10,7 +10,7 @@
34006 #ifdef CONFIG_X86_64
34007
34008 #ifdef CONFIG_DEBUG_VIRTUAL
34009-unsigned long __phys_addr(unsigned long x)
34010+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34011 {
34012 unsigned long y = x - __START_KERNEL_map;
34013
34014@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34015 #else
34016
34017 #ifdef CONFIG_DEBUG_VIRTUAL
34018-unsigned long __phys_addr(unsigned long x)
34019+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34020 {
34021 unsigned long phys_addr = x - PAGE_OFFSET;
34022 /* VMALLOC_* aren't constants */
34023diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34024index 90555bf..f5f1828 100644
34025--- a/arch/x86/mm/setup_nx.c
34026+++ b/arch/x86/mm/setup_nx.c
34027@@ -5,8 +5,10 @@
34028 #include <asm/pgtable.h>
34029 #include <asm/proto.h>
34030
34031+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34032 static int disable_nx;
34033
34034+#ifndef CONFIG_PAX_PAGEEXEC
34035 /*
34036 * noexec = on|off
34037 *
34038@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34039 return 0;
34040 }
34041 early_param("noexec", noexec_setup);
34042+#endif
34043+
34044+#endif
34045
34046 void x86_configure_nx(void)
34047 {
34048+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34049 if (cpu_has_nx && !disable_nx)
34050 __supported_pte_mask |= _PAGE_NX;
34051 else
34052+#endif
34053 __supported_pte_mask &= ~_PAGE_NX;
34054 }
34055
34056diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34057index ee61c36..e6fedeb 100644
34058--- a/arch/x86/mm/tlb.c
34059+++ b/arch/x86/mm/tlb.c
34060@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34061 BUG();
34062 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34063 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34064+
34065+#ifndef CONFIG_PAX_PER_CPU_PGD
34066 load_cr3(swapper_pg_dir);
34067+#endif
34068+
34069 /*
34070 * This gets called in the idle path where RCU
34071 * functions differently. Tracing normally
34072diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34073new file mode 100644
34074index 0000000..dace51c
34075--- /dev/null
34076+++ b/arch/x86/mm/uderef_64.c
34077@@ -0,0 +1,37 @@
34078+#include <linux/mm.h>
34079+#include <asm/pgtable.h>
34080+#include <asm/uaccess.h>
34081+
34082+#ifdef CONFIG_PAX_MEMORY_UDEREF
34083+/* PaX: due to the special call convention these functions must
34084+ * - remain leaf functions under all configurations,
34085+ * - never be called directly, only dereferenced from the wrappers.
34086+ */
34087+void __pax_open_userland(void)
34088+{
34089+ unsigned int cpu;
34090+
34091+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34092+ return;
34093+
34094+ cpu = raw_get_cpu();
34095+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34096+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34097+ raw_put_cpu_no_resched();
34098+}
34099+EXPORT_SYMBOL(__pax_open_userland);
34100+
34101+void __pax_close_userland(void)
34102+{
34103+ unsigned int cpu;
34104+
34105+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34106+ return;
34107+
34108+ cpu = raw_get_cpu();
34109+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34110+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34111+ raw_put_cpu_no_resched();
34112+}
34113+EXPORT_SYMBOL(__pax_close_userland);
34114+#endif
34115diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34116index 6440221..f84b5c7 100644
34117--- a/arch/x86/net/bpf_jit.S
34118+++ b/arch/x86/net/bpf_jit.S
34119@@ -9,6 +9,7 @@
34120 */
34121 #include <linux/linkage.h>
34122 #include <asm/dwarf2.h>
34123+#include <asm/alternative-asm.h>
34124
34125 /*
34126 * Calling convention :
34127@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34128 jle bpf_slow_path_word
34129 mov (SKBDATA,%rsi),%eax
34130 bswap %eax /* ntohl() */
34131+ pax_force_retaddr
34132 ret
34133
34134 sk_load_half:
34135@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34136 jle bpf_slow_path_half
34137 movzwl (SKBDATA,%rsi),%eax
34138 rol $8,%ax # ntohs()
34139+ pax_force_retaddr
34140 ret
34141
34142 sk_load_byte:
34143@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34144 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34145 jle bpf_slow_path_byte
34146 movzbl (SKBDATA,%rsi),%eax
34147+ pax_force_retaddr
34148 ret
34149
34150 /* rsi contains offset and can be scratched */
34151@@ -90,6 +94,7 @@ bpf_slow_path_word:
34152 js bpf_error
34153 mov - MAX_BPF_STACK + 32(%rbp),%eax
34154 bswap %eax
34155+ pax_force_retaddr
34156 ret
34157
34158 bpf_slow_path_half:
34159@@ -98,12 +103,14 @@ bpf_slow_path_half:
34160 mov - MAX_BPF_STACK + 32(%rbp),%ax
34161 rol $8,%ax
34162 movzwl %ax,%eax
34163+ pax_force_retaddr
34164 ret
34165
34166 bpf_slow_path_byte:
34167 bpf_slow_path_common(1)
34168 js bpf_error
34169 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34170+ pax_force_retaddr
34171 ret
34172
34173 #define sk_negative_common(SIZE) \
34174@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34175 sk_negative_common(4)
34176 mov (%rax), %eax
34177 bswap %eax
34178+ pax_force_retaddr
34179 ret
34180
34181 bpf_slow_path_half_neg:
34182@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34183 mov (%rax),%ax
34184 rol $8,%ax
34185 movzwl %ax,%eax
34186+ pax_force_retaddr
34187 ret
34188
34189 bpf_slow_path_byte_neg:
34190@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34191 .globl sk_load_byte_negative_offset
34192 sk_negative_common(1)
34193 movzbl (%rax), %eax
34194+ pax_force_retaddr
34195 ret
34196
34197 bpf_error:
34198@@ -156,4 +166,5 @@ bpf_error:
34199 mov - MAX_BPF_STACK + 16(%rbp),%r14
34200 mov - MAX_BPF_STACK + 24(%rbp),%r15
34201 leaveq
34202+ pax_force_retaddr
34203 ret
34204diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34205index 9875143..00f6656 100644
34206--- a/arch/x86/net/bpf_jit_comp.c
34207+++ b/arch/x86/net/bpf_jit_comp.c
34208@@ -13,7 +13,11 @@
34209 #include <linux/if_vlan.h>
34210 #include <asm/cacheflush.h>
34211
34212+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34213+int bpf_jit_enable __read_only;
34214+#else
34215 int bpf_jit_enable __read_mostly;
34216+#endif
34217
34218 /*
34219 * assembly code in arch/x86/net/bpf_jit.S
34220@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34221 static void jit_fill_hole(void *area, unsigned int size)
34222 {
34223 /* fill whole space with int3 instructions */
34224+ pax_open_kernel();
34225 memset(area, 0xcc, size);
34226+ pax_close_kernel();
34227 }
34228
34229 struct jit_context {
34230@@ -896,7 +902,9 @@ common_load:
34231 pr_err("bpf_jit_compile fatal error\n");
34232 return -EFAULT;
34233 }
34234+ pax_open_kernel();
34235 memcpy(image + proglen, temp, ilen);
34236+ pax_close_kernel();
34237 }
34238 proglen += ilen;
34239 addrs[i] = proglen;
34240@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34241
34242 if (image) {
34243 bpf_flush_icache(header, image + proglen);
34244- set_memory_ro((unsigned long)header, header->pages);
34245 prog->bpf_func = (void *)image;
34246 prog->jited = true;
34247 }
34248@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34249 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34250 struct bpf_binary_header *header = (void *)addr;
34251
34252- if (!fp->jited)
34253- goto free_filter;
34254+ if (fp->jited)
34255+ bpf_jit_binary_free(header);
34256
34257- set_memory_rw(addr, header->pages);
34258- bpf_jit_binary_free(header);
34259-
34260-free_filter:
34261 bpf_prog_unlock_free(fp);
34262 }
34263diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34264index 5d04be5..2beeaa2 100644
34265--- a/arch/x86/oprofile/backtrace.c
34266+++ b/arch/x86/oprofile/backtrace.c
34267@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34268 struct stack_frame_ia32 *fp;
34269 unsigned long bytes;
34270
34271- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34272+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34273 if (bytes != 0)
34274 return NULL;
34275
34276- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34277+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34278
34279 oprofile_add_trace(bufhead[0].return_address);
34280
34281@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34282 struct stack_frame bufhead[2];
34283 unsigned long bytes;
34284
34285- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34286+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34287 if (bytes != 0)
34288 return NULL;
34289
34290@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34291 {
34292 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34293
34294- if (!user_mode_vm(regs)) {
34295+ if (!user_mode(regs)) {
34296 unsigned long stack = kernel_stack_pointer(regs);
34297 if (depth)
34298 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34299diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34300index 1d2e639..f6ef82a 100644
34301--- a/arch/x86/oprofile/nmi_int.c
34302+++ b/arch/x86/oprofile/nmi_int.c
34303@@ -23,6 +23,7 @@
34304 #include <asm/nmi.h>
34305 #include <asm/msr.h>
34306 #include <asm/apic.h>
34307+#include <asm/pgtable.h>
34308
34309 #include "op_counter.h"
34310 #include "op_x86_model.h"
34311@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34312 if (ret)
34313 return ret;
34314
34315- if (!model->num_virt_counters)
34316- model->num_virt_counters = model->num_counters;
34317+ if (!model->num_virt_counters) {
34318+ pax_open_kernel();
34319+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34320+ pax_close_kernel();
34321+ }
34322
34323 mux_init(ops);
34324
34325diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34326index 50d86c0..7985318 100644
34327--- a/arch/x86/oprofile/op_model_amd.c
34328+++ b/arch/x86/oprofile/op_model_amd.c
34329@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34330 num_counters = AMD64_NUM_COUNTERS;
34331 }
34332
34333- op_amd_spec.num_counters = num_counters;
34334- op_amd_spec.num_controls = num_counters;
34335- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34336+ pax_open_kernel();
34337+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34338+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34339+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34340+ pax_close_kernel();
34341
34342 return 0;
34343 }
34344diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34345index d90528e..0127e2b 100644
34346--- a/arch/x86/oprofile/op_model_ppro.c
34347+++ b/arch/x86/oprofile/op_model_ppro.c
34348@@ -19,6 +19,7 @@
34349 #include <asm/msr.h>
34350 #include <asm/apic.h>
34351 #include <asm/nmi.h>
34352+#include <asm/pgtable.h>
34353
34354 #include "op_x86_model.h"
34355 #include "op_counter.h"
34356@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34357
34358 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34359
34360- op_arch_perfmon_spec.num_counters = num_counters;
34361- op_arch_perfmon_spec.num_controls = num_counters;
34362+ pax_open_kernel();
34363+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34364+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34365+ pax_close_kernel();
34366 }
34367
34368 static int arch_perfmon_init(struct oprofile_operations *ignore)
34369diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34370index 71e8a67..6a313bb 100644
34371--- a/arch/x86/oprofile/op_x86_model.h
34372+++ b/arch/x86/oprofile/op_x86_model.h
34373@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34374 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34375 struct op_msrs const * const msrs);
34376 #endif
34377-};
34378+} __do_const;
34379
34380 struct op_counter_config;
34381
34382diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34383index 44b9271..4c5a988 100644
34384--- a/arch/x86/pci/intel_mid_pci.c
34385+++ b/arch/x86/pci/intel_mid_pci.c
34386@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34387 pci_mmcfg_late_init();
34388 pcibios_enable_irq = intel_mid_pci_irq_enable;
34389 pcibios_disable_irq = intel_mid_pci_irq_disable;
34390- pci_root_ops = intel_mid_pci_ops;
34391+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34392 pci_soc_mode = 1;
34393 /* Continue with standard init */
34394 return 1;
34395diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34396index 5dc6ca5..25c03f5 100644
34397--- a/arch/x86/pci/irq.c
34398+++ b/arch/x86/pci/irq.c
34399@@ -51,7 +51,7 @@ struct irq_router {
34400 struct irq_router_handler {
34401 u16 vendor;
34402 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34403-};
34404+} __do_const;
34405
34406 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34407 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34408@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34409 return 0;
34410 }
34411
34412-static __initdata struct irq_router_handler pirq_routers[] = {
34413+static __initconst const struct irq_router_handler pirq_routers[] = {
34414 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34415 { PCI_VENDOR_ID_AL, ali_router_probe },
34416 { PCI_VENDOR_ID_ITE, ite_router_probe },
34417@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34418 static void __init pirq_find_router(struct irq_router *r)
34419 {
34420 struct irq_routing_table *rt = pirq_table;
34421- struct irq_router_handler *h;
34422+ const struct irq_router_handler *h;
34423
34424 #ifdef CONFIG_PCI_BIOS
34425 if (!rt->signature) {
34426@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34427 return 0;
34428 }
34429
34430-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34431+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34432 {
34433 .callback = fix_broken_hp_bios_irq9,
34434 .ident = "HP Pavilion N5400 Series Laptop",
34435diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34436index 9b83b90..4112152 100644
34437--- a/arch/x86/pci/pcbios.c
34438+++ b/arch/x86/pci/pcbios.c
34439@@ -79,7 +79,7 @@ union bios32 {
34440 static struct {
34441 unsigned long address;
34442 unsigned short segment;
34443-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34444+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34445
34446 /*
34447 * Returns the entry point for the given service, NULL on error
34448@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34449 unsigned long length; /* %ecx */
34450 unsigned long entry; /* %edx */
34451 unsigned long flags;
34452+ struct desc_struct d, *gdt;
34453
34454 local_irq_save(flags);
34455- __asm__("lcall *(%%edi); cld"
34456+
34457+ gdt = get_cpu_gdt_table(smp_processor_id());
34458+
34459+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34460+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34461+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34462+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34463+
34464+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34465 : "=a" (return_code),
34466 "=b" (address),
34467 "=c" (length),
34468 "=d" (entry)
34469 : "0" (service),
34470 "1" (0),
34471- "D" (&bios32_indirect));
34472+ "D" (&bios32_indirect),
34473+ "r"(__PCIBIOS_DS)
34474+ : "memory");
34475+
34476+ pax_open_kernel();
34477+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34478+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34479+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34480+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34481+ pax_close_kernel();
34482+
34483 local_irq_restore(flags);
34484
34485 switch (return_code) {
34486- case 0:
34487- return address + entry;
34488- case 0x80: /* Not present */
34489- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34490- return 0;
34491- default: /* Shouldn't happen */
34492- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34493- service, return_code);
34494+ case 0: {
34495+ int cpu;
34496+ unsigned char flags;
34497+
34498+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34499+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34500+ printk(KERN_WARNING "bios32_service: not valid\n");
34501 return 0;
34502+ }
34503+ address = address + PAGE_OFFSET;
34504+ length += 16UL; /* some BIOSs underreport this... */
34505+ flags = 4;
34506+ if (length >= 64*1024*1024) {
34507+ length >>= PAGE_SHIFT;
34508+ flags |= 8;
34509+ }
34510+
34511+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34512+ gdt = get_cpu_gdt_table(cpu);
34513+ pack_descriptor(&d, address, length, 0x9b, flags);
34514+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34515+ pack_descriptor(&d, address, length, 0x93, flags);
34516+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34517+ }
34518+ return entry;
34519+ }
34520+ case 0x80: /* Not present */
34521+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34522+ return 0;
34523+ default: /* Shouldn't happen */
34524+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34525+ service, return_code);
34526+ return 0;
34527 }
34528 }
34529
34530 static struct {
34531 unsigned long address;
34532 unsigned short segment;
34533-} pci_indirect = { 0, __KERNEL_CS };
34534+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34535
34536-static int pci_bios_present;
34537+static int pci_bios_present __read_only;
34538
34539 static int __init check_pcibios(void)
34540 {
34541@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34542 unsigned long flags, pcibios_entry;
34543
34544 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34545- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34546+ pci_indirect.address = pcibios_entry;
34547
34548 local_irq_save(flags);
34549- __asm__(
34550- "lcall *(%%edi); cld\n\t"
34551+ __asm__("movw %w6, %%ds\n\t"
34552+ "lcall *%%ss:(%%edi); cld\n\t"
34553+ "push %%ss\n\t"
34554+ "pop %%ds\n\t"
34555 "jc 1f\n\t"
34556 "xor %%ah, %%ah\n"
34557 "1:"
34558@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34559 "=b" (ebx),
34560 "=c" (ecx)
34561 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34562- "D" (&pci_indirect)
34563+ "D" (&pci_indirect),
34564+ "r" (__PCIBIOS_DS)
34565 : "memory");
34566 local_irq_restore(flags);
34567
34568@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34569
34570 switch (len) {
34571 case 1:
34572- __asm__("lcall *(%%esi); cld\n\t"
34573+ __asm__("movw %w6, %%ds\n\t"
34574+ "lcall *%%ss:(%%esi); cld\n\t"
34575+ "push %%ss\n\t"
34576+ "pop %%ds\n\t"
34577 "jc 1f\n\t"
34578 "xor %%ah, %%ah\n"
34579 "1:"
34580@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34581 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34582 "b" (bx),
34583 "D" ((long)reg),
34584- "S" (&pci_indirect));
34585+ "S" (&pci_indirect),
34586+ "r" (__PCIBIOS_DS));
34587 /*
34588 * Zero-extend the result beyond 8 bits, do not trust the
34589 * BIOS having done it:
34590@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34591 *value &= 0xff;
34592 break;
34593 case 2:
34594- __asm__("lcall *(%%esi); cld\n\t"
34595+ __asm__("movw %w6, %%ds\n\t"
34596+ "lcall *%%ss:(%%esi); cld\n\t"
34597+ "push %%ss\n\t"
34598+ "pop %%ds\n\t"
34599 "jc 1f\n\t"
34600 "xor %%ah, %%ah\n"
34601 "1:"
34602@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34603 : "1" (PCIBIOS_READ_CONFIG_WORD),
34604 "b" (bx),
34605 "D" ((long)reg),
34606- "S" (&pci_indirect));
34607+ "S" (&pci_indirect),
34608+ "r" (__PCIBIOS_DS));
34609 /*
34610 * Zero-extend the result beyond 16 bits, do not trust the
34611 * BIOS having done it:
34612@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34613 *value &= 0xffff;
34614 break;
34615 case 4:
34616- __asm__("lcall *(%%esi); cld\n\t"
34617+ __asm__("movw %w6, %%ds\n\t"
34618+ "lcall *%%ss:(%%esi); cld\n\t"
34619+ "push %%ss\n\t"
34620+ "pop %%ds\n\t"
34621 "jc 1f\n\t"
34622 "xor %%ah, %%ah\n"
34623 "1:"
34624@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34625 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34626 "b" (bx),
34627 "D" ((long)reg),
34628- "S" (&pci_indirect));
34629+ "S" (&pci_indirect),
34630+ "r" (__PCIBIOS_DS));
34631 break;
34632 }
34633
34634@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34635
34636 switch (len) {
34637 case 1:
34638- __asm__("lcall *(%%esi); cld\n\t"
34639+ __asm__("movw %w6, %%ds\n\t"
34640+ "lcall *%%ss:(%%esi); cld\n\t"
34641+ "push %%ss\n\t"
34642+ "pop %%ds\n\t"
34643 "jc 1f\n\t"
34644 "xor %%ah, %%ah\n"
34645 "1:"
34646@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34647 "c" (value),
34648 "b" (bx),
34649 "D" ((long)reg),
34650- "S" (&pci_indirect));
34651+ "S" (&pci_indirect),
34652+ "r" (__PCIBIOS_DS));
34653 break;
34654 case 2:
34655- __asm__("lcall *(%%esi); cld\n\t"
34656+ __asm__("movw %w6, %%ds\n\t"
34657+ "lcall *%%ss:(%%esi); cld\n\t"
34658+ "push %%ss\n\t"
34659+ "pop %%ds\n\t"
34660 "jc 1f\n\t"
34661 "xor %%ah, %%ah\n"
34662 "1:"
34663@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34664 "c" (value),
34665 "b" (bx),
34666 "D" ((long)reg),
34667- "S" (&pci_indirect));
34668+ "S" (&pci_indirect),
34669+ "r" (__PCIBIOS_DS));
34670 break;
34671 case 4:
34672- __asm__("lcall *(%%esi); cld\n\t"
34673+ __asm__("movw %w6, %%ds\n\t"
34674+ "lcall *%%ss:(%%esi); cld\n\t"
34675+ "push %%ss\n\t"
34676+ "pop %%ds\n\t"
34677 "jc 1f\n\t"
34678 "xor %%ah, %%ah\n"
34679 "1:"
34680@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34681 "c" (value),
34682 "b" (bx),
34683 "D" ((long)reg),
34684- "S" (&pci_indirect));
34685+ "S" (&pci_indirect),
34686+ "r" (__PCIBIOS_DS));
34687 break;
34688 }
34689
34690@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34691
34692 DBG("PCI: Fetching IRQ routing table... ");
34693 __asm__("push %%es\n\t"
34694+ "movw %w8, %%ds\n\t"
34695 "push %%ds\n\t"
34696 "pop %%es\n\t"
34697- "lcall *(%%esi); cld\n\t"
34698+ "lcall *%%ss:(%%esi); cld\n\t"
34699 "pop %%es\n\t"
34700+ "push %%ss\n\t"
34701+ "pop %%ds\n"
34702 "jc 1f\n\t"
34703 "xor %%ah, %%ah\n"
34704 "1:"
34705@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34706 "1" (0),
34707 "D" ((long) &opt),
34708 "S" (&pci_indirect),
34709- "m" (opt)
34710+ "m" (opt),
34711+ "r" (__PCIBIOS_DS)
34712 : "memory");
34713 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34714 if (ret & 0xff00)
34715@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34716 {
34717 int ret;
34718
34719- __asm__("lcall *(%%esi); cld\n\t"
34720+ __asm__("movw %w5, %%ds\n\t"
34721+ "lcall *%%ss:(%%esi); cld\n\t"
34722+ "push %%ss\n\t"
34723+ "pop %%ds\n"
34724 "jc 1f\n\t"
34725 "xor %%ah, %%ah\n"
34726 "1:"
34727@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34728 : "0" (PCIBIOS_SET_PCI_HW_INT),
34729 "b" ((dev->bus->number << 8) | dev->devfn),
34730 "c" ((irq << 8) | (pin + 10)),
34731- "S" (&pci_indirect));
34732+ "S" (&pci_indirect),
34733+ "r" (__PCIBIOS_DS));
34734 return !(ret & 0xff00);
34735 }
34736 EXPORT_SYMBOL(pcibios_set_irq_routing);
34737diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34738index 40e7cda..c7e6672 100644
34739--- a/arch/x86/platform/efi/efi_32.c
34740+++ b/arch/x86/platform/efi/efi_32.c
34741@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34742 {
34743 struct desc_ptr gdt_descr;
34744
34745+#ifdef CONFIG_PAX_KERNEXEC
34746+ struct desc_struct d;
34747+#endif
34748+
34749 local_irq_save(efi_rt_eflags);
34750
34751 load_cr3(initial_page_table);
34752 __flush_tlb_all();
34753
34754+#ifdef CONFIG_PAX_KERNEXEC
34755+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34756+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34757+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34758+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34759+#endif
34760+
34761 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34762 gdt_descr.size = GDT_SIZE - 1;
34763 load_gdt(&gdt_descr);
34764@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34765 {
34766 struct desc_ptr gdt_descr;
34767
34768+#ifdef CONFIG_PAX_KERNEXEC
34769+ struct desc_struct d;
34770+
34771+ memset(&d, 0, sizeof d);
34772+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34773+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34774+#endif
34775+
34776 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34777 gdt_descr.size = GDT_SIZE - 1;
34778 load_gdt(&gdt_descr);
34779
34780+#ifdef CONFIG_PAX_PER_CPU_PGD
34781+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34782+#else
34783 load_cr3(swapper_pg_dir);
34784+#endif
34785+
34786 __flush_tlb_all();
34787
34788 local_irq_restore(efi_rt_eflags);
34789diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34790index 17e80d8..9fa6e41 100644
34791--- a/arch/x86/platform/efi/efi_64.c
34792+++ b/arch/x86/platform/efi/efi_64.c
34793@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34794 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34795 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34796 }
34797+
34798+#ifdef CONFIG_PAX_PER_CPU_PGD
34799+ load_cr3(swapper_pg_dir);
34800+#endif
34801+
34802 __flush_tlb_all();
34803 }
34804
34805@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34806 for (pgd = 0; pgd < n_pgds; pgd++)
34807 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34808 kfree(save_pgd);
34809+
34810+#ifdef CONFIG_PAX_PER_CPU_PGD
34811+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34812+#endif
34813+
34814 __flush_tlb_all();
34815 local_irq_restore(efi_flags);
34816 early_code_mapping_set_exec(0);
34817@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34818 unsigned npages;
34819 pgd_t *pgd;
34820
34821- if (efi_enabled(EFI_OLD_MEMMAP))
34822+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34823+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34824+ * able to execute the EFI services.
34825+ */
34826+ if (__supported_pte_mask & _PAGE_NX) {
34827+ unsigned long addr = (unsigned long) __va(0);
34828+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34829+
34830+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34831+#ifdef CONFIG_PAX_PER_CPU_PGD
34832+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34833+#endif
34834+ set_pgd(pgd_offset_k(addr), pe);
34835+ }
34836+
34837 return 0;
34838+ }
34839
34840 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34841 pgd = __va(efi_scratch.efi_pgt);
34842diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34843index 040192b..7d3300f 100644
34844--- a/arch/x86/platform/efi/efi_stub_32.S
34845+++ b/arch/x86/platform/efi/efi_stub_32.S
34846@@ -6,7 +6,9 @@
34847 */
34848
34849 #include <linux/linkage.h>
34850+#include <linux/init.h>
34851 #include <asm/page_types.h>
34852+#include <asm/segment.h>
34853
34854 /*
34855 * efi_call_phys(void *, ...) is a function with variable parameters.
34856@@ -20,7 +22,7 @@
34857 * service functions will comply with gcc calling convention, too.
34858 */
34859
34860-.text
34861+__INIT
34862 ENTRY(efi_call_phys)
34863 /*
34864 * 0. The function can only be called in Linux kernel. So CS has been
34865@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34866 * The mapping of lower virtual memory has been created in prolog and
34867 * epilog.
34868 */
34869- movl $1f, %edx
34870- subl $__PAGE_OFFSET, %edx
34871- jmp *%edx
34872+#ifdef CONFIG_PAX_KERNEXEC
34873+ movl $(__KERNEXEC_EFI_DS), %edx
34874+ mov %edx, %ds
34875+ mov %edx, %es
34876+ mov %edx, %ss
34877+ addl $2f,(1f)
34878+ ljmp *(1f)
34879+
34880+__INITDATA
34881+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34882+.previous
34883+
34884+2:
34885+ subl $2b,(1b)
34886+#else
34887+ jmp 1f-__PAGE_OFFSET
34888 1:
34889+#endif
34890
34891 /*
34892 * 2. Now on the top of stack is the return
34893@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34894 * parameter 2, ..., param n. To make things easy, we save the return
34895 * address of efi_call_phys in a global variable.
34896 */
34897- popl %edx
34898- movl %edx, saved_return_addr
34899- /* get the function pointer into ECX*/
34900- popl %ecx
34901- movl %ecx, efi_rt_function_ptr
34902- movl $2f, %edx
34903- subl $__PAGE_OFFSET, %edx
34904- pushl %edx
34905+ popl (saved_return_addr)
34906+ popl (efi_rt_function_ptr)
34907
34908 /*
34909 * 3. Clear PG bit in %CR0.
34910@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34911 /*
34912 * 5. Call the physical function.
34913 */
34914- jmp *%ecx
34915+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34916
34917-2:
34918 /*
34919 * 6. After EFI runtime service returns, control will return to
34920 * following instruction. We'd better readjust stack pointer first.
34921@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34922 movl %cr0, %edx
34923 orl $0x80000000, %edx
34924 movl %edx, %cr0
34925- jmp 1f
34926-1:
34927+
34928 /*
34929 * 8. Now restore the virtual mode from flat mode by
34930 * adding EIP with PAGE_OFFSET.
34931 */
34932- movl $1f, %edx
34933- jmp *%edx
34934+#ifdef CONFIG_PAX_KERNEXEC
34935+ movl $(__KERNEL_DS), %edx
34936+ mov %edx, %ds
34937+ mov %edx, %es
34938+ mov %edx, %ss
34939+ ljmp $(__KERNEL_CS),$1f
34940+#else
34941+ jmp 1f+__PAGE_OFFSET
34942+#endif
34943 1:
34944
34945 /*
34946 * 9. Balance the stack. And because EAX contain the return value,
34947 * we'd better not clobber it.
34948 */
34949- leal efi_rt_function_ptr, %edx
34950- movl (%edx), %ecx
34951- pushl %ecx
34952+ pushl (efi_rt_function_ptr)
34953
34954 /*
34955- * 10. Push the saved return address onto the stack and return.
34956+ * 10. Return to the saved return address.
34957 */
34958- leal saved_return_addr, %edx
34959- movl (%edx), %ecx
34960- pushl %ecx
34961- ret
34962+ jmpl *(saved_return_addr)
34963 ENDPROC(efi_call_phys)
34964 .previous
34965
34966-.data
34967+__INITDATA
34968 saved_return_addr:
34969 .long 0
34970 efi_rt_function_ptr:
34971diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34972index 86d0f9e..6d499f4 100644
34973--- a/arch/x86/platform/efi/efi_stub_64.S
34974+++ b/arch/x86/platform/efi/efi_stub_64.S
34975@@ -11,6 +11,7 @@
34976 #include <asm/msr.h>
34977 #include <asm/processor-flags.h>
34978 #include <asm/page_types.h>
34979+#include <asm/alternative-asm.h>
34980
34981 #define SAVE_XMM \
34982 mov %rsp, %rax; \
34983@@ -88,6 +89,7 @@ ENTRY(efi_call)
34984 RESTORE_PGT
34985 addq $48, %rsp
34986 RESTORE_XMM
34987+ pax_force_retaddr 0, 1
34988 ret
34989 ENDPROC(efi_call)
34990
34991diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34992index 1bbedc4..eb795b5 100644
34993--- a/arch/x86/platform/intel-mid/intel-mid.c
34994+++ b/arch/x86/platform/intel-mid/intel-mid.c
34995@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
34996 {
34997 };
34998
34999-static void intel_mid_reboot(void)
35000+static void __noreturn intel_mid_reboot(void)
35001 {
35002 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35003+ BUG();
35004 }
35005
35006 static unsigned long __init intel_mid_calibrate_tsc(void)
35007diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35008index 3c1c386..59a68ed 100644
35009--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35010+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35011@@ -13,6 +13,6 @@
35012 /* For every CPU addition a new get_<cpuname>_ops interface needs
35013 * to be added.
35014 */
35015-extern void *get_penwell_ops(void);
35016-extern void *get_cloverview_ops(void);
35017-extern void *get_tangier_ops(void);
35018+extern const void *get_penwell_ops(void);
35019+extern const void *get_cloverview_ops(void);
35020+extern const void *get_tangier_ops(void);
35021diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35022index 23381d2..8ddc10e 100644
35023--- a/arch/x86/platform/intel-mid/mfld.c
35024+++ b/arch/x86/platform/intel-mid/mfld.c
35025@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35026 pm_power_off = mfld_power_off;
35027 }
35028
35029-void *get_penwell_ops(void)
35030+const void *get_penwell_ops(void)
35031 {
35032 return &penwell_ops;
35033 }
35034
35035-void *get_cloverview_ops(void)
35036+const void *get_cloverview_ops(void)
35037 {
35038 return &penwell_ops;
35039 }
35040diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35041index aaca917..66eadbc 100644
35042--- a/arch/x86/platform/intel-mid/mrfl.c
35043+++ b/arch/x86/platform/intel-mid/mrfl.c
35044@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35045 .arch_setup = tangier_arch_setup,
35046 };
35047
35048-void *get_tangier_ops(void)
35049+const void *get_tangier_ops(void)
35050 {
35051 return &tangier_ops;
35052 }
35053diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35054index d6ee929..3637cb5 100644
35055--- a/arch/x86/platform/olpc/olpc_dt.c
35056+++ b/arch/x86/platform/olpc/olpc_dt.c
35057@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35058 return res;
35059 }
35060
35061-static struct of_pdt_ops prom_olpc_ops __initdata = {
35062+static struct of_pdt_ops prom_olpc_ops __initconst = {
35063 .nextprop = olpc_dt_nextprop,
35064 .getproplen = olpc_dt_getproplen,
35065 .getproperty = olpc_dt_getproperty,
35066diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35067index 6ec7910..ecdbb11 100644
35068--- a/arch/x86/power/cpu.c
35069+++ b/arch/x86/power/cpu.c
35070@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35071 static void fix_processor_context(void)
35072 {
35073 int cpu = smp_processor_id();
35074- struct tss_struct *t = &per_cpu(init_tss, cpu);
35075-#ifdef CONFIG_X86_64
35076- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35077- tss_desc tss;
35078-#endif
35079+ struct tss_struct *t = init_tss + cpu;
35080+
35081 set_tss_desc(cpu, t); /*
35082 * This just modifies memory; should not be
35083 * necessary. But... This is necessary, because
35084@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35085 */
35086
35087 #ifdef CONFIG_X86_64
35088- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35089- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35090- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35091-
35092 syscall_init(); /* This sets MSR_*STAR and related */
35093 #endif
35094 load_TR_desc(); /* This does ltr */
35095diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35096index bad628a..a102610 100644
35097--- a/arch/x86/realmode/init.c
35098+++ b/arch/x86/realmode/init.c
35099@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35100 __va(real_mode_header->trampoline_header);
35101
35102 #ifdef CONFIG_X86_32
35103- trampoline_header->start = __pa_symbol(startup_32_smp);
35104+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35105+
35106+#ifdef CONFIG_PAX_KERNEXEC
35107+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35108+#endif
35109+
35110+ trampoline_header->boot_cs = __BOOT_CS;
35111 trampoline_header->gdt_limit = __BOOT_DS + 7;
35112 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35113 #else
35114@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35115 *trampoline_cr4_features = read_cr4();
35116
35117 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35118- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35119+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35120 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35121 #endif
35122 }
35123diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35124index 7c0d7be..d24dc88 100644
35125--- a/arch/x86/realmode/rm/Makefile
35126+++ b/arch/x86/realmode/rm/Makefile
35127@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35128
35129 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35130 -I$(srctree)/arch/x86/boot
35131+ifdef CONSTIFY_PLUGIN
35132+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35133+endif
35134 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35135 GCOV_PROFILE := n
35136diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35137index a28221d..93c40f1 100644
35138--- a/arch/x86/realmode/rm/header.S
35139+++ b/arch/x86/realmode/rm/header.S
35140@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35141 #endif
35142 /* APM/BIOS reboot */
35143 .long pa_machine_real_restart_asm
35144-#ifdef CONFIG_X86_64
35145+#ifdef CONFIG_X86_32
35146+ .long __KERNEL_CS
35147+#else
35148 .long __KERNEL32_CS
35149 #endif
35150 END(real_mode_header)
35151diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35152index 48ddd76..c26749f 100644
35153--- a/arch/x86/realmode/rm/trampoline_32.S
35154+++ b/arch/x86/realmode/rm/trampoline_32.S
35155@@ -24,6 +24,12 @@
35156 #include <asm/page_types.h>
35157 #include "realmode.h"
35158
35159+#ifdef CONFIG_PAX_KERNEXEC
35160+#define ta(X) (X)
35161+#else
35162+#define ta(X) (pa_ ## X)
35163+#endif
35164+
35165 .text
35166 .code16
35167
35168@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35169
35170 cli # We should be safe anyway
35171
35172- movl tr_start, %eax # where we need to go
35173-
35174 movl $0xA5A5A5A5, trampoline_status
35175 # write marker for master knows we're running
35176
35177@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35178 movw $1, %dx # protected mode (PE) bit
35179 lmsw %dx # into protected mode
35180
35181- ljmpl $__BOOT_CS, $pa_startup_32
35182+ ljmpl *(trampoline_header)
35183
35184 .section ".text32","ax"
35185 .code32
35186@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35187 .balign 8
35188 GLOBAL(trampoline_header)
35189 tr_start: .space 4
35190- tr_gdt_pad: .space 2
35191+ tr_boot_cs: .space 2
35192 tr_gdt: .space 6
35193 END(trampoline_header)
35194
35195diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35196index dac7b20..72dbaca 100644
35197--- a/arch/x86/realmode/rm/trampoline_64.S
35198+++ b/arch/x86/realmode/rm/trampoline_64.S
35199@@ -93,6 +93,7 @@ ENTRY(startup_32)
35200 movl %edx, %gs
35201
35202 movl pa_tr_cr4, %eax
35203+ andl $~X86_CR4_PCIDE, %eax
35204 movl %eax, %cr4 # Enable PAE mode
35205
35206 # Setup trampoline 4 level pagetables
35207@@ -106,7 +107,7 @@ ENTRY(startup_32)
35208 wrmsr
35209
35210 # Enable paging and in turn activate Long Mode
35211- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35212+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35213 movl %eax, %cr0
35214
35215 /*
35216diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35217index 9e7e147..25a4158 100644
35218--- a/arch/x86/realmode/rm/wakeup_asm.S
35219+++ b/arch/x86/realmode/rm/wakeup_asm.S
35220@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35221 lgdtl pmode_gdt
35222
35223 /* This really couldn't... */
35224- movl pmode_entry, %eax
35225 movl pmode_cr0, %ecx
35226 movl %ecx, %cr0
35227- ljmpl $__KERNEL_CS, $pa_startup_32
35228- /* -> jmp *%eax in trampoline_32.S */
35229+
35230+ ljmpl *pmode_entry
35231 #else
35232 jmp trampoline_start
35233 #endif
35234diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35235index 604a37e..e49702a 100644
35236--- a/arch/x86/tools/Makefile
35237+++ b/arch/x86/tools/Makefile
35238@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35239
35240 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35241
35242-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35243+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35244 hostprogs-y += relocs
35245 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35246 PHONY += relocs
35247diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35248index 0c2fae8..88036b7 100644
35249--- a/arch/x86/tools/relocs.c
35250+++ b/arch/x86/tools/relocs.c
35251@@ -1,5 +1,7 @@
35252 /* This is included from relocs_32/64.c */
35253
35254+#include "../../../include/generated/autoconf.h"
35255+
35256 #define ElfW(type) _ElfW(ELF_BITS, type)
35257 #define _ElfW(bits, type) __ElfW(bits, type)
35258 #define __ElfW(bits, type) Elf##bits##_##type
35259@@ -11,6 +13,7 @@
35260 #define Elf_Sym ElfW(Sym)
35261
35262 static Elf_Ehdr ehdr;
35263+static Elf_Phdr *phdr;
35264
35265 struct relocs {
35266 uint32_t *offset;
35267@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35268 }
35269 }
35270
35271+static void read_phdrs(FILE *fp)
35272+{
35273+ unsigned int i;
35274+
35275+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35276+ if (!phdr) {
35277+ die("Unable to allocate %d program headers\n",
35278+ ehdr.e_phnum);
35279+ }
35280+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35281+ die("Seek to %d failed: %s\n",
35282+ ehdr.e_phoff, strerror(errno));
35283+ }
35284+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35285+ die("Cannot read ELF program headers: %s\n",
35286+ strerror(errno));
35287+ }
35288+ for(i = 0; i < ehdr.e_phnum; i++) {
35289+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35290+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35291+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35292+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35293+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35294+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35295+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35296+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35297+ }
35298+
35299+}
35300+
35301 static void read_shdrs(FILE *fp)
35302 {
35303- int i;
35304+ unsigned int i;
35305 Elf_Shdr shdr;
35306
35307 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35308@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35309
35310 static void read_strtabs(FILE *fp)
35311 {
35312- int i;
35313+ unsigned int i;
35314 for (i = 0; i < ehdr.e_shnum; i++) {
35315 struct section *sec = &secs[i];
35316 if (sec->shdr.sh_type != SHT_STRTAB) {
35317@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35318
35319 static void read_symtabs(FILE *fp)
35320 {
35321- int i,j;
35322+ unsigned int i,j;
35323 for (i = 0; i < ehdr.e_shnum; i++) {
35324 struct section *sec = &secs[i];
35325 if (sec->shdr.sh_type != SHT_SYMTAB) {
35326@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35327 }
35328
35329
35330-static void read_relocs(FILE *fp)
35331+static void read_relocs(FILE *fp, int use_real_mode)
35332 {
35333- int i,j;
35334+ unsigned int i,j;
35335+ uint32_t base;
35336+
35337 for (i = 0; i < ehdr.e_shnum; i++) {
35338 struct section *sec = &secs[i];
35339 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35340@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35341 die("Cannot read symbol table: %s\n",
35342 strerror(errno));
35343 }
35344+ base = 0;
35345+
35346+#ifdef CONFIG_X86_32
35347+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35348+ if (phdr[j].p_type != PT_LOAD )
35349+ continue;
35350+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35351+ continue;
35352+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35353+ break;
35354+ }
35355+#endif
35356+
35357 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35358 Elf_Rel *rel = &sec->reltab[j];
35359- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35360+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35361 rel->r_info = elf_xword_to_cpu(rel->r_info);
35362 #if (SHT_REL_TYPE == SHT_RELA)
35363 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35364@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35365
35366 static void print_absolute_symbols(void)
35367 {
35368- int i;
35369+ unsigned int i;
35370 const char *format;
35371
35372 if (ELF_BITS == 64)
35373@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35374 for (i = 0; i < ehdr.e_shnum; i++) {
35375 struct section *sec = &secs[i];
35376 char *sym_strtab;
35377- int j;
35378+ unsigned int j;
35379
35380 if (sec->shdr.sh_type != SHT_SYMTAB) {
35381 continue;
35382@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35383
35384 static void print_absolute_relocs(void)
35385 {
35386- int i, printed = 0;
35387+ unsigned int i, printed = 0;
35388 const char *format;
35389
35390 if (ELF_BITS == 64)
35391@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35392 struct section *sec_applies, *sec_symtab;
35393 char *sym_strtab;
35394 Elf_Sym *sh_symtab;
35395- int j;
35396+ unsigned int j;
35397 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35398 continue;
35399 }
35400@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35401 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35402 Elf_Sym *sym, const char *symname))
35403 {
35404- int i;
35405+ unsigned int i;
35406 /* Walk through the relocations */
35407 for (i = 0; i < ehdr.e_shnum; i++) {
35408 char *sym_strtab;
35409 Elf_Sym *sh_symtab;
35410 struct section *sec_applies, *sec_symtab;
35411- int j;
35412+ unsigned int j;
35413 struct section *sec = &secs[i];
35414
35415 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35416@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35417 {
35418 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35419 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35420+ char *sym_strtab = sec->link->link->strtab;
35421+
35422+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35423+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35424+ return 0;
35425+
35426+#ifdef CONFIG_PAX_KERNEXEC
35427+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35428+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35429+ return 0;
35430+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35431+ return 0;
35432+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35433+ return 0;
35434+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35435+ return 0;
35436+#endif
35437
35438 switch (r_type) {
35439 case R_386_NONE:
35440@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35441
35442 static void emit_relocs(int as_text, int use_real_mode)
35443 {
35444- int i;
35445+ unsigned int i;
35446 int (*write_reloc)(uint32_t, FILE *) = write32;
35447 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35448 const char *symname);
35449@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35450 {
35451 regex_init(use_real_mode);
35452 read_ehdr(fp);
35453+ read_phdrs(fp);
35454 read_shdrs(fp);
35455 read_strtabs(fp);
35456 read_symtabs(fp);
35457- read_relocs(fp);
35458+ read_relocs(fp, use_real_mode);
35459 if (ELF_BITS == 64)
35460 percpu_init();
35461 if (show_absolute_syms) {
35462diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35463index f40281e..92728c9 100644
35464--- a/arch/x86/um/mem_32.c
35465+++ b/arch/x86/um/mem_32.c
35466@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35467 gate_vma.vm_start = FIXADDR_USER_START;
35468 gate_vma.vm_end = FIXADDR_USER_END;
35469 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35470- gate_vma.vm_page_prot = __P101;
35471+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35472
35473 return 0;
35474 }
35475diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35476index 80ffa5b..a33bd15 100644
35477--- a/arch/x86/um/tls_32.c
35478+++ b/arch/x86/um/tls_32.c
35479@@ -260,7 +260,7 @@ out:
35480 if (unlikely(task == current &&
35481 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35482 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35483- "without flushed TLS.", current->pid);
35484+ "without flushed TLS.", task_pid_nr(current));
35485 }
35486
35487 return 0;
35488diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35489index 5a4affe..9e2d522 100644
35490--- a/arch/x86/vdso/Makefile
35491+++ b/arch/x86/vdso/Makefile
35492@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
35493 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35494 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35495
35496-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35497+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35498 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35499 GCOV_PROFILE := n
35500
35501diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35502index 0224987..c7d65a5 100644
35503--- a/arch/x86/vdso/vdso2c.h
35504+++ b/arch/x86/vdso/vdso2c.h
35505@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35506 unsigned long load_size = -1; /* Work around bogus warning */
35507 unsigned long mapping_size;
35508 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35509- int i;
35510+ unsigned int i;
35511 unsigned long j;
35512 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35513 *alt_sec = NULL;
35514diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35515index e904c27..b9eaa03 100644
35516--- a/arch/x86/vdso/vdso32-setup.c
35517+++ b/arch/x86/vdso/vdso32-setup.c
35518@@ -14,6 +14,7 @@
35519 #include <asm/cpufeature.h>
35520 #include <asm/processor.h>
35521 #include <asm/vdso.h>
35522+#include <asm/mman.h>
35523
35524 #ifdef CONFIG_COMPAT_VDSO
35525 #define VDSO_DEFAULT 0
35526diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35527index 1c9f750..cfddb1a 100644
35528--- a/arch/x86/vdso/vma.c
35529+++ b/arch/x86/vdso/vma.c
35530@@ -19,10 +19,7 @@
35531 #include <asm/page.h>
35532 #include <asm/hpet.h>
35533 #include <asm/desc.h>
35534-
35535-#if defined(CONFIG_X86_64)
35536-unsigned int __read_mostly vdso64_enabled = 1;
35537-#endif
35538+#include <asm/mman.h>
35539
35540 void __init init_vdso_image(const struct vdso_image *image)
35541 {
35542@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35543 .pages = no_pages,
35544 };
35545
35546+#ifdef CONFIG_PAX_RANDMMAP
35547+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35548+ calculate_addr = false;
35549+#endif
35550+
35551 if (calculate_addr) {
35552 addr = vdso_addr(current->mm->start_stack,
35553 image->size - image->sym_vvar_start);
35554@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35555 down_write(&mm->mmap_sem);
35556
35557 addr = get_unmapped_area(NULL, addr,
35558- image->size - image->sym_vvar_start, 0, 0);
35559+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35560 if (IS_ERR_VALUE(addr)) {
35561 ret = addr;
35562 goto up_fail;
35563 }
35564
35565 text_start = addr - image->sym_vvar_start;
35566- current->mm->context.vdso = (void __user *)text_start;
35567+ mm->context.vdso = text_start;
35568
35569 /*
35570 * MAYWRITE to allow gdb to COW and set breakpoints
35571@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35572 hpet_address >> PAGE_SHIFT,
35573 PAGE_SIZE,
35574 pgprot_noncached(PAGE_READONLY));
35575-
35576- if (ret)
35577- goto up_fail;
35578 }
35579 #endif
35580
35581 up_fail:
35582 if (ret)
35583- current->mm->context.vdso = NULL;
35584+ current->mm->context.vdso = 0;
35585
35586 up_write(&mm->mmap_sem);
35587 return ret;
35588@@ -191,8 +190,8 @@ static int load_vdso32(void)
35589
35590 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35591 current_thread_info()->sysenter_return =
35592- current->mm->context.vdso +
35593- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35594+ (void __force_user *)(current->mm->context.vdso +
35595+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35596
35597 return 0;
35598 }
35599@@ -201,9 +200,6 @@ static int load_vdso32(void)
35600 #ifdef CONFIG_X86_64
35601 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35602 {
35603- if (!vdso64_enabled)
35604- return 0;
35605-
35606 return map_vdso(&vdso_image_64, true);
35607 }
35608
35609@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35610 int uses_interp)
35611 {
35612 #ifdef CONFIG_X86_X32_ABI
35613- if (test_thread_flag(TIF_X32)) {
35614- if (!vdso64_enabled)
35615- return 0;
35616-
35617+ if (test_thread_flag(TIF_X32))
35618 return map_vdso(&vdso_image_x32, true);
35619- }
35620 #endif
35621
35622 return load_vdso32();
35623@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35624 #endif
35625
35626 #ifdef CONFIG_X86_64
35627-static __init int vdso_setup(char *s)
35628-{
35629- vdso64_enabled = simple_strtoul(s, NULL, 0);
35630- return 0;
35631-}
35632-__setup("vdso=", vdso_setup);
35633-#endif
35634-
35635-#ifdef CONFIG_X86_64
35636 static void vgetcpu_cpu_init(void *arg)
35637 {
35638 int cpu = smp_processor_id();
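A sketch of the vDSO placement policy after the vma.c hunk above (the helper name pick_vdso_base is hypothetical; vdso_addr(), MF_PAX_RANDMMAP and the MAP_EXECUTABLE hint are from the hunk): with RANDMMAP active the stack-relative hint is skipped entirely, so the vDSO falls through to get_unmapped_area() and inherits the randomized mmap base.

static unsigned long pick_vdso_base(struct mm_struct *mm, unsigned long size)
{
	unsigned long hint = 0;

#ifdef CONFIG_PAX_RANDMMAP
	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
#endif
		hint = vdso_addr(mm->start_stack, size); /* legacy hint */

	return get_unmapped_area(NULL, hint, size, 0, MAP_EXECUTABLE);
}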
35639diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35640index e88fda8..76ce7ce 100644
35641--- a/arch/x86/xen/Kconfig
35642+++ b/arch/x86/xen/Kconfig
35643@@ -9,6 +9,7 @@ config XEN
35644 select XEN_HAVE_PVMMU
35645 depends on X86_64 || (X86_32 && X86_PAE)
35646 depends on X86_TSC
35647+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35648 help
35649 This is the Linux Xen port. Enabling this will allow the
35650 kernel to boot in a paravirtualized environment under the
35651diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35652index 78a881b..9994bbb 100644
35653--- a/arch/x86/xen/enlighten.c
35654+++ b/arch/x86/xen/enlighten.c
35655@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35656
35657 struct shared_info xen_dummy_shared_info;
35658
35659-void *xen_initial_gdt;
35660-
35661 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35662 __read_mostly int xen_have_vector_callback;
35663 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35664@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35665 {
35666 unsigned long va = dtr->address;
35667 unsigned int size = dtr->size + 1;
35668- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35669- unsigned long frames[pages];
35670+ unsigned long frames[65536 / PAGE_SIZE];
35671 int f;
35672
35673 /*
35674@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35675 {
35676 unsigned long va = dtr->address;
35677 unsigned int size = dtr->size + 1;
35678- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35679- unsigned long frames[pages];
35680+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35681 int f;
35682
35683 /*
35684@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35685 * 8-byte entries, or 16 4k pages..
35686 */
35687
35688- BUG_ON(size > 65536);
35689+ BUG_ON(size > GDT_SIZE);
35690 BUG_ON(va & ~PAGE_MASK);
35691
35692 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35693@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35694 return 0;
35695 }
35696
35697-static void set_xen_basic_apic_ops(void)
35698+static void __init set_xen_basic_apic_ops(void)
35699 {
35700 apic->read = xen_apic_read;
35701 apic->write = xen_apic_write;
35702@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35703 #endif
35704 };
35705
35706-static void xen_reboot(int reason)
35707+static __noreturn void xen_reboot(int reason)
35708 {
35709 struct sched_shutdown r = { .reason = reason };
35710
35711- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35712- BUG();
35713+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35714+ BUG();
35715 }
35716
35717-static void xen_restart(char *msg)
35718+static __noreturn void xen_restart(char *msg)
35719 {
35720 xen_reboot(SHUTDOWN_reboot);
35721 }
35722
35723-static void xen_emergency_restart(void)
35724+static __noreturn void xen_emergency_restart(void)
35725 {
35726 xen_reboot(SHUTDOWN_reboot);
35727 }
35728
35729-static void xen_machine_halt(void)
35730+static __noreturn void xen_machine_halt(void)
35731 {
35732 xen_reboot(SHUTDOWN_poweroff);
35733 }
35734
35735-static void xen_machine_power_off(void)
35736+static __noreturn void xen_machine_power_off(void)
35737 {
35738 if (pm_power_off)
35739 pm_power_off();
35740@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
35741 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35742 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35743
35744- setup_stack_canary_segment(0);
35745- switch_to_new_gdt(0);
35746+ setup_stack_canary_segment(cpu);
35747+#ifdef CONFIG_X86_64
35748+ load_percpu_segment(cpu);
35749+#endif
35750+ switch_to_new_gdt(cpu);
35751
35752 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35753 pv_cpu_ops.load_gdt = xen_load_gdt;
35754@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35755 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35756
35757 /* Work out if we support NX */
35758- x86_configure_nx();
35759+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35760+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35761+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35762+ unsigned l, h;
35763+
35764+ __supported_pte_mask |= _PAGE_NX;
35765+ rdmsr(MSR_EFER, l, h);
35766+ l |= EFER_NX;
35767+ wrmsr(MSR_EFER, l, h);
35768+ }
35769+#endif
35770
35771 /* Get mfn list */
35772 xen_build_dynamic_phys_to_machine();
35773@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35774
35775 machine_ops = xen_machine_ops;
35776
35777- /*
35778- * The only reliable way to retain the initial address of the
35779- * percpu gdt_page is to remember it here, so we can go and
35780- * mark it RW later, when the initial percpu area is freed.
35781- */
35782- xen_initial_gdt = &per_cpu(gdt_page, 0);
35783-
35784 xen_smp_init();
35785
35786 #ifdef CONFIG_ACPI_NUMA
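The open-coded NX enable above replaces the call to x86_configure_nx() with the raw CPUID/MSR sequence. For reference, a commented sketch of the same steps using the rdmsrl/wrmsrl variants (EFER is MSR 0xC0000080 with NXE at bit 11; X86_FEATURE_NX is CPUID leaf 0x80000001, EDX bit 20):

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	/* 1. is extended leaf 0x80000001 implemented at all? */
	if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
	    /* 2. does the CPU report NX? */
	    (cpuid_edx(0x80000001) & (1U << 20))) {
		u64 efer;

		/* 3. turn on EFER.NXE and let the pte helpers emit NX */
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer | EFER_NX);
		__supported_pte_mask |= _PAGE_NX;
	}
#endif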
35787diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35788index 5c1f9ac..0e15f5c 100644
35789--- a/arch/x86/xen/mmu.c
35790+++ b/arch/x86/xen/mmu.c
35791@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35792 return val;
35793 }
35794
35795-static pteval_t pte_pfn_to_mfn(pteval_t val)
35796+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35797 {
35798 if (val & _PAGE_PRESENT) {
35799 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35800@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35801 * L3_k[511] -> level2_fixmap_pgt */
35802 convert_pfn_mfn(level3_kernel_pgt);
35803
35804+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35805+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35806+ convert_pfn_mfn(level3_vmemmap_pgt);
35807 /* L3_k[511][506] -> level1_fixmap_pgt */
35808+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35809 convert_pfn_mfn(level2_fixmap_pgt);
35810 }
35811 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35812@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35813 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35814 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35815 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35816+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35817+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35818+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35819 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35820 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35821+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35822 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35823 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35824 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35825+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35826
35827 /* Pin down new L4 */
35828 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35829@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
35830 pv_mmu_ops.set_pud = xen_set_pud;
35831 #if PAGETABLE_LEVELS == 4
35832 pv_mmu_ops.set_pgd = xen_set_pgd;
35833+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35834 #endif
35835
35836 /* This will work as long as patching hasn't happened yet
35837@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35838 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35839 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35840 .set_pgd = xen_set_pgd_hyper,
35841+ .set_pgd_batched = xen_set_pgd_hyper,
35842
35843 .alloc_pud = xen_alloc_pmd_init,
35844 .release_pud = xen_release_pmd_init,
35845diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35846index 4c071ae..00e7049 100644
35847--- a/arch/x86/xen/smp.c
35848+++ b/arch/x86/xen/smp.c
35849@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35850
35851 if (xen_pv_domain()) {
35852 if (!xen_feature(XENFEAT_writable_page_tables))
35853- /* We've switched to the "real" per-cpu gdt, so make
35854- * sure the old memory can be recycled. */
35855- make_lowmem_page_readwrite(xen_initial_gdt);
35856-
35857 #ifdef CONFIG_X86_32
35858 /*
35859 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35860 * expects __USER_DS
35861 */
35862- loadsegment(ds, __USER_DS);
35863- loadsegment(es, __USER_DS);
35864+ loadsegment(ds, __KERNEL_DS);
35865+ loadsegment(es, __KERNEL_DS);
35866 #endif
35867
35868 xen_filter_cpu_maps();
35869@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35870 #ifdef CONFIG_X86_32
35871 /* Note: PVH is not yet supported on x86_32. */
35872 ctxt->user_regs.fs = __KERNEL_PERCPU;
35873- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35874+ savesegment(gs, ctxt->user_regs.gs);
35875 #endif
35876 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35877
35878@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35879 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35880 ctxt->flags = VGCF_IN_KERNEL;
35881 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35882- ctxt->user_regs.ds = __USER_DS;
35883- ctxt->user_regs.es = __USER_DS;
35884+ ctxt->user_regs.ds = __KERNEL_DS;
35885+ ctxt->user_regs.es = __KERNEL_DS;
35886 ctxt->user_regs.ss = __KERNEL_DS;
35887
35888 xen_copy_trap_info(ctxt->trap_ctxt);
35889@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35890 int rc;
35891
35892 per_cpu(current_task, cpu) = idle;
35893+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35894 #ifdef CONFIG_X86_32
35895 irq_ctx_init(cpu);
35896 #else
35897 clear_tsk_thread_flag(idle, TIF_FORK);
35898 #endif
35899- per_cpu(kernel_stack, cpu) =
35900- (unsigned long)task_stack_page(idle) -
35901- KERNEL_STACK_OFFSET + THREAD_SIZE;
35902+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35903
35904 xen_setup_runstate_info(cpu);
35905 xen_setup_timer(cpu);
35906@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35907
35908 void __init xen_smp_init(void)
35909 {
35910- smp_ops = xen_smp_ops;
35911+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35912 xen_fill_possible_map();
35913 }
35914
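The xen_smp_init() change above trades a plain structure assignment for memcpy() through a cast. Plausibly this is the constify plugin at work: once smp_ops is treated as read-only after init, 'smp_ops = xen_smp_ops' no longer compiles, and the one-shot boot-time copy has to be spelled as:

	/* sketch: install Xen's ops despite the enforced const */
	memcpy((void *)&smp_ops, &xen_smp_ops, sizeof(smp_ops));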
35915diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35916index fd92a64..1f72641 100644
35917--- a/arch/x86/xen/xen-asm_32.S
35918+++ b/arch/x86/xen/xen-asm_32.S
35919@@ -99,7 +99,7 @@ ENTRY(xen_iret)
35920 pushw %fs
35921 movl $(__KERNEL_PERCPU), %eax
35922 movl %eax, %fs
35923- movl %fs:xen_vcpu, %eax
35924+ mov PER_CPU_VAR(xen_vcpu), %eax
35925 POP_FS
35926 #else
35927 movl %ss:xen_vcpu, %eax
35928diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35929index 674b2225..f1f5dc1 100644
35930--- a/arch/x86/xen/xen-head.S
35931+++ b/arch/x86/xen/xen-head.S
35932@@ -39,6 +39,17 @@ ENTRY(startup_xen)
35933 #ifdef CONFIG_X86_32
35934 mov %esi,xen_start_info
35935 mov $init_thread_union+THREAD_SIZE,%esp
35936+#ifdef CONFIG_SMP
35937+ movl $cpu_gdt_table,%edi
35938+ movl $__per_cpu_load,%eax
35939+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35940+ rorl $16,%eax
35941+ movb %al,__KERNEL_PERCPU + 4(%edi)
35942+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35943+ movl $__per_cpu_end - 1,%eax
35944+ subl $__per_cpu_start,%eax
35945+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35946+#endif
35947 #else
35948 mov %rsi,xen_start_info
35949 mov $init_thread_union+THREAD_SIZE,%rsp
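The startup_xen assembly added above hand-patches the __KERNEL_PERCPU descriptor in cpu_gdt_table so the boot CPU's per-cpu segment covers the per-cpu image. The byte offsets match the standard x86 segment-descriptor layout; a reference sketch (using <stdint.h> names for clarity):

struct gdt_desc_bytes {		/* little-endian byte view */
	uint16_t limit_lo;	/* +0: limit[15:0]  <- movw %ax, +0 */
	uint16_t base_lo;	/* +2: base[15:0]   <- movw %ax, +2 */
	uint8_t  base_mid;	/* +4: base[23:16]  <- movb %al, +4 (after rorl $16) */
	uint8_t  access;	/* +5: type/DPL/present */
	uint8_t  limit_hi_flags;/* +6: limit[19:16] | G/D/L/AVL */
	uint8_t  base_hi;	/* +7: base[31:24]  <- movb %ah, +7 */
};
/* base  = __per_cpu_load
 * limit = __per_cpu_end - 1 - __per_cpu_start (low 16 bits written here) */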
35950diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35951index 5686bd9..0c8b6ee 100644
35952--- a/arch/x86/xen/xen-ops.h
35953+++ b/arch/x86/xen/xen-ops.h
35954@@ -10,8 +10,6 @@
35955 extern const char xen_hypervisor_callback[];
35956 extern const char xen_failsafe_callback[];
35957
35958-extern void *xen_initial_gdt;
35959-
35960 struct trap_info;
35961 void xen_copy_trap_info(struct trap_info *traps);
35962
35963diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35964index 525bd3d..ef888b1 100644
35965--- a/arch/xtensa/variants/dc232b/include/variant/core.h
35966+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35967@@ -119,9 +119,9 @@
35968 ----------------------------------------------------------------------*/
35969
35970 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35971-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35972 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35973 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35974+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35975
35976 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35977 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35978diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35979index 2f33760..835e50a 100644
35980--- a/arch/xtensa/variants/fsf/include/variant/core.h
35981+++ b/arch/xtensa/variants/fsf/include/variant/core.h
35982@@ -11,6 +11,7 @@
35983 #ifndef _XTENSA_CORE_H
35984 #define _XTENSA_CORE_H
35985
35986+#include <linux/const.h>
35987
35988 /****************************************************************************
35989 Parameters Useful for Any Code, USER or PRIVILEGED
35990@@ -112,9 +113,9 @@
35991 ----------------------------------------------------------------------*/
35992
35993 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35994-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35995 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35996 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35997+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35998
35999 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36000 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
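Both xtensa hunks derive the D-cache line size from its log2 instead of keeping two constants in sync; _AC() (from <linux/const.h>, hence the new include) exists so the same expression works in both C and assembler:

/* _AC(X,Y) expands to X##Y in C but to plain X in assembly, so: */
#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)
/* C:   (1UL << 5) == 32UL      asm: (1 << 5) == 32 */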
36001diff --git a/block/bio.c b/block/bio.c
36002index 471d738..bd3da0d 100644
36003--- a/block/bio.c
36004+++ b/block/bio.c
36005@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36006 /*
36007 * Overflow, abort
36008 */
36009- if (end < start)
36010+ if (end < start || end - start > INT_MAX - nr_pages)
36011 return ERR_PTR(-EINVAL);
36012
36013 nr_pages += end - start;
36014@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
36015 /*
36016 * Overflow, abort
36017 */
36018- if (end < start)
36019+ if (end < start || end - start > INT_MAX - nr_pages)
36020 return ERR_PTR(-EINVAL);
36021
36022 nr_pages += end - start;
36023@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36024 const int read = bio_data_dir(bio) == READ;
36025 struct bio_map_data *bmd = bio->bi_private;
36026 int i;
36027- char *p = bmd->sgvecs[0].iov_base;
36028+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36029
36030 bio_for_each_segment_all(bvec, bio, i) {
36031 char *addr = page_address(bvec->bv_page);
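Both bio.c hunks harden the same pattern: nr_pages is an int accumulator fed by user-supplied iovec bounds, so 'nr_pages += end - start' can wrap. The fix tests the remaining headroom before adding; a sketch of the idiom (the helper name is hypothetical):

/* does acc + delta exceed INT_MAX? (never computes acc + delta) */
static inline int add_overflows_int(int acc, unsigned long delta)
{
	return delta > (unsigned long)(INT_MAX - acc);
}

	if (end < start || add_overflows_int(nr_pages, end - start))
		return ERR_PTR(-EINVAL);
	nr_pages += end - start;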
36032diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36033index 0736729..2ec3b48 100644
36034--- a/block/blk-iopoll.c
36035+++ b/block/blk-iopoll.c
36036@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36037 }
36038 EXPORT_SYMBOL(blk_iopoll_complete);
36039
36040-static void blk_iopoll_softirq(struct softirq_action *h)
36041+static __latent_entropy void blk_iopoll_softirq(void)
36042 {
36043 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36044 int rearm = 0, budget = blk_iopoll_budget;
36045diff --git a/block/blk-map.c b/block/blk-map.c
36046index f890d43..97b0482 100644
36047--- a/block/blk-map.c
36048+++ b/block/blk-map.c
36049@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36050 if (!len || !kbuf)
36051 return -EINVAL;
36052
36053- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36054+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36055 if (do_copy)
36056 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36057 else
36058diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36059index 53b1737..08177d2e 100644
36060--- a/block/blk-softirq.c
36061+++ b/block/blk-softirq.c
36062@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36063 * Softirq action handler - move entries to local list and loop over them
36064 * while passing them to the queue registered handler.
36065 */
36066-static void blk_done_softirq(struct softirq_action *h)
36067+static __latent_entropy void blk_done_softirq(void)
36068 {
36069 struct list_head *cpu_list, local_list;
36070
36071diff --git a/block/bsg.c b/block/bsg.c
36072index 276e869..6fe4c61 100644
36073--- a/block/bsg.c
36074+++ b/block/bsg.c
36075@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36076 struct sg_io_v4 *hdr, struct bsg_device *bd,
36077 fmode_t has_write_perm)
36078 {
36079+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36080+ unsigned char *cmdptr;
36081+
36082 if (hdr->request_len > BLK_MAX_CDB) {
36083 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36084 if (!rq->cmd)
36085 return -ENOMEM;
36086- }
36087+ cmdptr = rq->cmd;
36088+ } else
36089+ cmdptr = tmpcmd;
36090
36091- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36092+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36093 hdr->request_len))
36094 return -EFAULT;
36095
36096+ if (cmdptr != rq->cmd)
36097+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36098+
36099 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36100 if (blk_verify_command(rq->cmd, has_write_perm))
36101 return -EPERM;
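The blk_fill_sgv4_hdr_rq() change introduces a stack bounce buffer for the CDB: user bytes land in tmpcmd first and are copied into rq->cmd only once copy_from_user() has fully succeeded. Plausibly this is for PAX_USERCOPY, which objects to copy_from_user() writing directly into a field of a non-whitelisted slab object such as struct request. The shape of the pattern, also used in block/scsi_ioctl.c below (uses_big_cdb and user_cdb are illustrative names):

	unsigned char tmp[sizeof(rq->__cmd)];
	unsigned char *dst = uses_big_cdb ? rq->cmd : tmp; /* big CDBs are kzalloc'd */

	if (copy_from_user(dst, user_cdb, len))
		return -EFAULT;
	if (dst != rq->cmd)		/* small CDB: publish the bounce copy */
		memcpy(rq->cmd, dst, len);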
36102diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36103index f678c73..f35aa18 100644
36104--- a/block/compat_ioctl.c
36105+++ b/block/compat_ioctl.c
36106@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36107 cgc = compat_alloc_user_space(sizeof(*cgc));
36108 cgc32 = compat_ptr(arg);
36109
36110- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36111+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36112 get_user(data, &cgc32->buffer) ||
36113 put_user(compat_ptr(data), &cgc->buffer) ||
36114 copy_in_user(&cgc->buflen, &cgc32->buflen,
36115@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36116 err |= __get_user(f->spec1, &uf->spec1);
36117 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36118 err |= __get_user(name, &uf->name);
36119- f->name = compat_ptr(name);
36120+ f->name = (void __force_kernel *)compat_ptr(name);
36121 if (err) {
36122 err = -EFAULT;
36123 goto out;
36124diff --git a/block/genhd.c b/block/genhd.c
36125index 0a536dc..b8f7aca 100644
36126--- a/block/genhd.c
36127+++ b/block/genhd.c
36128@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36129
36130 /*
36131 * Register device numbers dev..(dev+range-1)
36132- * range must be nonzero
36133+ * Noop if @range is zero.
36134 * The hash chain is sorted on range, so that subranges can override.
36135 */
36136 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36137 struct kobject *(*probe)(dev_t, int *, void *),
36138 int (*lock)(dev_t, void *), void *data)
36139 {
36140- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36141+ if (range)
36142+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36143 }
36144
36145 EXPORT_SYMBOL(blk_register_region);
36146
36147+/* undo blk_register_region(), noop if @range is zero */
36148 void blk_unregister_region(dev_t devt, unsigned long range)
36149 {
36150- kobj_unmap(bdev_map, devt, range);
36151+ if (range)
36152+ kobj_unmap(bdev_map, devt, range);
36153 }
36154
36155 EXPORT_SYMBOL(blk_unregister_region);
36156diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36157index 56d08fd..2e07090 100644
36158--- a/block/partitions/efi.c
36159+++ b/block/partitions/efi.c
36160@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36161 if (!gpt)
36162 return NULL;
36163
36164+ if (!le32_to_cpu(gpt->num_partition_entries))
36165+ return NULL;
36166+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36167+ if (!pte)
36168+ return NULL;
36169+
36170 count = le32_to_cpu(gpt->num_partition_entries) *
36171 le32_to_cpu(gpt->sizeof_partition_entry);
36172- if (!count)
36173- return NULL;
36174- pte = kmalloc(count, GFP_KERNEL);
36175- if (!pte)
36176- return NULL;
36177-
36178 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36179 (u8 *) pte, count) < count) {
36180 kfree(pte);
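The efi.c hunk is an integer-overflow fix: num_partition_entries and sizeof_partition_entry both come from the on-disk GPT header, so their product can wrap before kmalloc() sees it, yielding an undersized buffer that read_lba() then overruns. kcalloc() performs the multiplication with an overflow check and returns NULL on wrap:

	/* hostile GPT: 0x01000000 entries of 0x100 bytes each */
	u32 n  = le32_to_cpu(gpt->num_partition_entries);
	u32 sz = le32_to_cpu(gpt->sizeof_partition_entry);
	/* n * sz == 0x100000000 truncates to 0 in 32-bit arithmetic */
	pte = kcalloc(n, sz, GFP_KERNEL);	/* NULL instead of a 0-byte buffer */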
36181diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36182index 28163fa..07190a06 100644
36183--- a/block/scsi_ioctl.c
36184+++ b/block/scsi_ioctl.c
36185@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36186 return put_user(0, p);
36187 }
36188
36189-static int sg_get_timeout(struct request_queue *q)
36190+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36191 {
36192 return jiffies_to_clock_t(q->sg_timeout);
36193 }
36194@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36195 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36196 struct sg_io_hdr *hdr, fmode_t mode)
36197 {
36198- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36199+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36200+ unsigned char *cmdptr;
36201+
36202+ if (rq->cmd != rq->__cmd)
36203+ cmdptr = rq->cmd;
36204+ else
36205+ cmdptr = tmpcmd;
36206+
36207+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36208 return -EFAULT;
36209+
36210+ if (cmdptr != rq->cmd)
36211+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36212+
36213 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36214 return -EPERM;
36215
36216@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36217 int err;
36218 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36219 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36220+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36221+ unsigned char *cmdptr;
36222
36223 if (!sic)
36224 return -EINVAL;
36225@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36226 */
36227 err = -EFAULT;
36228 rq->cmd_len = cmdlen;
36229- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36230+
36231+ if (rq->cmd != rq->__cmd)
36232+ cmdptr = rq->cmd;
36233+ else
36234+ cmdptr = tmpcmd;
36235+
36236+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36237 goto error;
36238
36239+ if (rq->cmd != cmdptr)
36240+ memcpy(rq->cmd, cmdptr, cmdlen);
36241+
36242 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36243 goto error;
36244
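The two scsi_ioctl.c hunks above apply the same bounce-buffer pattern sketched after the bsg.c hunk; the extra 'rq->cmd != rq->__cmd' test simply detects whether a separately allocated CDB buffer is already in use, in which case it can be written directly and the bounce copy is skipped.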
36245diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36246index 650afac1..f3307de 100644
36247--- a/crypto/cryptd.c
36248+++ b/crypto/cryptd.c
36249@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36250
36251 struct cryptd_blkcipher_request_ctx {
36252 crypto_completion_t complete;
36253-};
36254+} __no_const;
36255
36256 struct cryptd_hash_ctx {
36257 struct crypto_shash *child;
36258@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36259
36260 struct cryptd_aead_request_ctx {
36261 crypto_completion_t complete;
36262-};
36263+} __no_const;
36264
36265 static void cryptd_queue_worker(struct work_struct *work);
36266
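__no_const is the manual opt-out from the constify plugin: a struct whose only members are function pointers would otherwise be made read-only after init, but these request contexts store the caller's completion handler at runtime (cryptd stashes req->base.complete there and invokes it when the queued work finishes), so they must stay writable:

	struct cryptd_blkcipher_request_ctx {
		crypto_completion_t complete;
	} __no_const;				/* rewritten per request */

	rctx->complete = req->base.complete;	/* needs a writable struct */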
36267diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36268index c305d41..a96de79 100644
36269--- a/crypto/pcrypt.c
36270+++ b/crypto/pcrypt.c
36271@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36272 int ret;
36273
36274 pinst->kobj.kset = pcrypt_kset;
36275- ret = kobject_add(&pinst->kobj, NULL, name);
36276+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36277 if (!ret)
36278 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36279
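The pcrypt change is a classic format-string fix: kobject_add() takes a printf-style name, so passing 'name' in the format position would let any '%' conversions it contains be interpreted rather than printed:

	ret = kobject_add(&pinst->kobj, NULL, name);	   /* before: name parsed as a format */
	ret = kobject_add(&pinst->kobj, NULL, "%s", name); /* after: name taken literally     */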
36280diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36281index 6921c7f..78e1af7 100644
36282--- a/drivers/acpi/acpica/hwxfsleep.c
36283+++ b/drivers/acpi/acpica/hwxfsleep.c
36284@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36285 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36286
36287 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36288- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36289- acpi_hw_extended_sleep},
36290- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36291- acpi_hw_extended_wake_prep},
36292- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36293+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36294+ .extended_function = acpi_hw_extended_sleep},
36295+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36296+ .extended_function = acpi_hw_extended_wake_prep},
36297+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36298+ .extended_function = acpi_hw_extended_wake}
36299 };
36300
36301 /*
36302diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36303index 16129c7..8b675cd 100644
36304--- a/drivers/acpi/apei/apei-internal.h
36305+++ b/drivers/acpi/apei/apei-internal.h
36306@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36307 struct apei_exec_ins_type {
36308 u32 flags;
36309 apei_exec_ins_func_t run;
36310-};
36311+} __do_const;
36312
36313 struct apei_exec_context {
36314 u32 ip;
36315diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36316index e82d097..0c855c1 100644
36317--- a/drivers/acpi/apei/ghes.c
36318+++ b/drivers/acpi/apei/ghes.c
36319@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36320 const struct acpi_hest_generic *generic,
36321 const struct acpi_hest_generic_status *estatus)
36322 {
36323- static atomic_t seqno;
36324+ static atomic_unchecked_t seqno;
36325 unsigned int curr_seqno;
36326 char pfx_seq[64];
36327
36328@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36329 else
36330 pfx = KERN_ERR;
36331 }
36332- curr_seqno = atomic_inc_return(&seqno);
36333+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36334 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36335 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36336 pfx_seq, generic->header.source_id);
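atomic_unchecked_t, used here and throughout the rest of the patch (most visibly in the drivers/atm hunks below), is the escape hatch from PaX REFCOUNT: plain atomic_t operations gain overflow detection to stop reference-count-overflow use-after-free bugs, and a pure statistics counter like this sequence number would trip that detection spuriously, since wrap-around is harmless here:

	static atomic_unchecked_t seqno;		  /* stats only, may wrap */
	curr_seqno = atomic_inc_return_unchecked(&seqno); /* no overflow trap */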
36337diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36338index a83e3c6..c3d617f 100644
36339--- a/drivers/acpi/bgrt.c
36340+++ b/drivers/acpi/bgrt.c
36341@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36342 if (!bgrt_image)
36343 return -ENODEV;
36344
36345- bin_attr_image.private = bgrt_image;
36346- bin_attr_image.size = bgrt_image_size;
36347+ pax_open_kernel();
36348+ *(void **)&bin_attr_image.private = bgrt_image;
36349+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36350+ pax_close_kernel();
36351
36352 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36353 if (!bgrt_kobj)
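pax_open_kernel()/pax_close_kernel() bracket the few writes the patch still allows to otherwise read-only data: with KERNEXEC/constify, objects like bin_attr_image end up write-protected, so their fields are assigned through casts inside an open/close window. The same shape recurs in the libata-core.c and pata_arasan_cf.c hunks further down (sketch, with attr/image as stand-in names):

	pax_open_kernel();			/* lift kernel write protection */
	*(void **)&attr.private = image;	/* cast away the enforced const */
	*(size_t *)&attr.size   = image_size;
	pax_close_kernel();			/* re-protect */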
36354diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36355index 9b693d5..8953d54 100644
36356--- a/drivers/acpi/blacklist.c
36357+++ b/drivers/acpi/blacklist.c
36358@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36359 u32 is_critical_error;
36360 };
36361
36362-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36363+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36364
36365 /*
36366 * POLICY: If *anything* doesn't work, put it on the blacklist.
36367@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36368 return 0;
36369 }
36370
36371-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36372+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36373 {
36374 .callback = dmi_disable_osi_vista,
36375 .ident = "Fujitsu Siemens",
36376diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36377index c68e724..e863008 100644
36378--- a/drivers/acpi/custom_method.c
36379+++ b/drivers/acpi/custom_method.c
36380@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36381 struct acpi_table_header table;
36382 acpi_status status;
36383
36384+#ifdef CONFIG_GRKERNSEC_KMEM
36385+ return -EPERM;
36386+#endif
36387+
36388 if (!(*ppos)) {
36389 /* parse the table header to get the table length */
36390 if (count <= sizeof(struct acpi_table_header))
36391diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36392index c0d44d3..5ad8f9a 100644
36393--- a/drivers/acpi/device_pm.c
36394+++ b/drivers/acpi/device_pm.c
36395@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36396
36397 #endif /* CONFIG_PM_SLEEP */
36398
36399+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36400+
36401 static struct dev_pm_domain acpi_general_pm_domain = {
36402 .ops = {
36403 #ifdef CONFIG_PM
36404@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36405 #endif
36406 #endif
36407 },
36408+ .detach = acpi_dev_pm_detach
36409 };
36410
36411 /**
36412@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36413 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36414 }
36415
36416- dev->pm_domain->detach = acpi_dev_pm_detach;
36417 return 0;
36418 }
36419 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36420diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36421index 87b704e..2d1d0c1 100644
36422--- a/drivers/acpi/processor_idle.c
36423+++ b/drivers/acpi/processor_idle.c
36424@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36425 {
36426 int i, count = CPUIDLE_DRIVER_STATE_START;
36427 struct acpi_processor_cx *cx;
36428- struct cpuidle_state *state;
36429+ cpuidle_state_no_const *state;
36430 struct cpuidle_driver *drv = &acpi_idle_driver;
36431
36432 if (!pr->flags.power_setup_done)
36433diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36434index 13e577c..cef11ee 100644
36435--- a/drivers/acpi/sysfs.c
36436+++ b/drivers/acpi/sysfs.c
36437@@ -423,11 +423,11 @@ static u32 num_counters;
36438 static struct attribute **all_attrs;
36439 static u32 acpi_gpe_count;
36440
36441-static struct attribute_group interrupt_stats_attr_group = {
36442+static attribute_group_no_const interrupt_stats_attr_group = {
36443 .name = "interrupts",
36444 };
36445
36446-static struct kobj_attribute *counter_attrs;
36447+static kobj_attribute_no_const *counter_attrs;
36448
36449 static void delete_gpe_attr_array(void)
36450 {
36451diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36452index 61a9c07..ea98fa1 100644
36453--- a/drivers/ata/libahci.c
36454+++ b/drivers/ata/libahci.c
36455@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36456 }
36457 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36458
36459-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36460+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36461 struct ata_taskfile *tf, int is_cmd, u16 flags,
36462 unsigned long timeout_msec)
36463 {
36464diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36465index d1a05f9..eb70e10 100644
36466--- a/drivers/ata/libata-core.c
36467+++ b/drivers/ata/libata-core.c
36468@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36469 static void ata_dev_xfermask(struct ata_device *dev);
36470 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36471
36472-atomic_t ata_print_id = ATOMIC_INIT(0);
36473+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36474
36475 struct ata_force_param {
36476 const char *name;
36477@@ -4831,7 +4831,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36478 struct ata_port *ap;
36479 unsigned int tag;
36480
36481- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36482+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36483 ap = qc->ap;
36484
36485 qc->flags = 0;
36486@@ -4847,7 +4847,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36487 struct ata_port *ap;
36488 struct ata_link *link;
36489
36490- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36491+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36492 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36493 ap = qc->ap;
36494 link = qc->dev->link;
36495@@ -5951,6 +5951,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36496 return;
36497
36498 spin_lock(&lock);
36499+ pax_open_kernel();
36500
36501 for (cur = ops->inherits; cur; cur = cur->inherits) {
36502 void **inherit = (void **)cur;
36503@@ -5964,8 +5965,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36504 if (IS_ERR(*pp))
36505 *pp = NULL;
36506
36507- ops->inherits = NULL;
36508+ *(struct ata_port_operations **)&ops->inherits = NULL;
36509
36510+ pax_close_kernel();
36511 spin_unlock(&lock);
36512 }
36513
36514@@ -6161,7 +6163,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36515
36516 /* give ports names and add SCSI hosts */
36517 for (i = 0; i < host->n_ports; i++) {
36518- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36519+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36520 host->ports[i]->local_port_no = i + 1;
36521 }
36522
36523diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36524index 6abd17a..9961bf7 100644
36525--- a/drivers/ata/libata-scsi.c
36526+++ b/drivers/ata/libata-scsi.c
36527@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
36528
36529 if (rc)
36530 return rc;
36531- ap->print_id = atomic_inc_return(&ata_print_id);
36532+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36533 return 0;
36534 }
36535 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36536diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36537index 5f4e0cc..ff2c347 100644
36538--- a/drivers/ata/libata.h
36539+++ b/drivers/ata/libata.h
36540@@ -53,7 +53,7 @@ enum {
36541 ATA_DNXFER_QUIET = (1 << 31),
36542 };
36543
36544-extern atomic_t ata_print_id;
36545+extern atomic_unchecked_t ata_print_id;
36546 extern int atapi_passthru16;
36547 extern int libata_fua;
36548 extern int libata_noacpi;
36549diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36550index a9b0c82..207d97d 100644
36551--- a/drivers/ata/pata_arasan_cf.c
36552+++ b/drivers/ata/pata_arasan_cf.c
36553@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36554 /* Handle platform specific quirks */
36555 if (quirk) {
36556 if (quirk & CF_BROKEN_PIO) {
36557- ap->ops->set_piomode = NULL;
36558+ pax_open_kernel();
36559+ *(void **)&ap->ops->set_piomode = NULL;
36560+ pax_close_kernel();
36561 ap->pio_mask = 0;
36562 }
36563 if (quirk & CF_BROKEN_MWDMA)
36564diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36565index f9b983a..887b9d8 100644
36566--- a/drivers/atm/adummy.c
36567+++ b/drivers/atm/adummy.c
36568@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36569 vcc->pop(vcc, skb);
36570 else
36571 dev_kfree_skb_any(skb);
36572- atomic_inc(&vcc->stats->tx);
36573+ atomic_inc_unchecked(&vcc->stats->tx);
36574
36575 return 0;
36576 }
36577diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36578index f1a9198..f466a4a 100644
36579--- a/drivers/atm/ambassador.c
36580+++ b/drivers/atm/ambassador.c
36581@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36582 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36583
36584 // VC layer stats
36585- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36586+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36587
36588 // free the descriptor
36589 kfree (tx_descr);
36590@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36591 dump_skb ("<<<", vc, skb);
36592
36593 // VC layer stats
36594- atomic_inc(&atm_vcc->stats->rx);
36595+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36596 __net_timestamp(skb);
36597 // end of our responsibility
36598 atm_vcc->push (atm_vcc, skb);
36599@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36600 } else {
36601 PRINTK (KERN_INFO, "dropped over-size frame");
36602 // should we count this?
36603- atomic_inc(&atm_vcc->stats->rx_drop);
36604+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36605 }
36606
36607 } else {
36608@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36609 }
36610
36611 if (check_area (skb->data, skb->len)) {
36612- atomic_inc(&atm_vcc->stats->tx_err);
36613+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36614 return -ENOMEM; // ?
36615 }
36616
36617diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36618index 480fa6f..947067c 100644
36619--- a/drivers/atm/atmtcp.c
36620+++ b/drivers/atm/atmtcp.c
36621@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36622 if (vcc->pop) vcc->pop(vcc,skb);
36623 else dev_kfree_skb(skb);
36624 if (dev_data) return 0;
36625- atomic_inc(&vcc->stats->tx_err);
36626+ atomic_inc_unchecked(&vcc->stats->tx_err);
36627 return -ENOLINK;
36628 }
36629 size = skb->len+sizeof(struct atmtcp_hdr);
36630@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36631 if (!new_skb) {
36632 if (vcc->pop) vcc->pop(vcc,skb);
36633 else dev_kfree_skb(skb);
36634- atomic_inc(&vcc->stats->tx_err);
36635+ atomic_inc_unchecked(&vcc->stats->tx_err);
36636 return -ENOBUFS;
36637 }
36638 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36639@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36640 if (vcc->pop) vcc->pop(vcc,skb);
36641 else dev_kfree_skb(skb);
36642 out_vcc->push(out_vcc,new_skb);
36643- atomic_inc(&vcc->stats->tx);
36644- atomic_inc(&out_vcc->stats->rx);
36645+ atomic_inc_unchecked(&vcc->stats->tx);
36646+ atomic_inc_unchecked(&out_vcc->stats->rx);
36647 return 0;
36648 }
36649
36650@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36651 read_unlock(&vcc_sklist_lock);
36652 if (!out_vcc) {
36653 result = -EUNATCH;
36654- atomic_inc(&vcc->stats->tx_err);
36655+ atomic_inc_unchecked(&vcc->stats->tx_err);
36656 goto done;
36657 }
36658 skb_pull(skb,sizeof(struct atmtcp_hdr));
36659@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36660 __net_timestamp(new_skb);
36661 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36662 out_vcc->push(out_vcc,new_skb);
36663- atomic_inc(&vcc->stats->tx);
36664- atomic_inc(&out_vcc->stats->rx);
36665+ atomic_inc_unchecked(&vcc->stats->tx);
36666+ atomic_inc_unchecked(&out_vcc->stats->rx);
36667 done:
36668 if (vcc->pop) vcc->pop(vcc,skb);
36669 else dev_kfree_skb(skb);
36670diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36671index c7fab3e..68d0965 100644
36672--- a/drivers/atm/eni.c
36673+++ b/drivers/atm/eni.c
36674@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36675 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36676 vcc->dev->number);
36677 length = 0;
36678- atomic_inc(&vcc->stats->rx_err);
36679+ atomic_inc_unchecked(&vcc->stats->rx_err);
36680 }
36681 else {
36682 length = ATM_CELL_SIZE-1; /* no HEC */
36683@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36684 size);
36685 }
36686 eff = length = 0;
36687- atomic_inc(&vcc->stats->rx_err);
36688+ atomic_inc_unchecked(&vcc->stats->rx_err);
36689 }
36690 else {
36691 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36692@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36693 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36694 vcc->dev->number,vcc->vci,length,size << 2,descr);
36695 length = eff = 0;
36696- atomic_inc(&vcc->stats->rx_err);
36697+ atomic_inc_unchecked(&vcc->stats->rx_err);
36698 }
36699 }
36700 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36701@@ -770,7 +770,7 @@ rx_dequeued++;
36702 vcc->push(vcc,skb);
36703 pushed++;
36704 }
36705- atomic_inc(&vcc->stats->rx);
36706+ atomic_inc_unchecked(&vcc->stats->rx);
36707 }
36708 wake_up(&eni_dev->rx_wait);
36709 }
36710@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36711 PCI_DMA_TODEVICE);
36712 if (vcc->pop) vcc->pop(vcc,skb);
36713 else dev_kfree_skb_irq(skb);
36714- atomic_inc(&vcc->stats->tx);
36715+ atomic_inc_unchecked(&vcc->stats->tx);
36716 wake_up(&eni_dev->tx_wait);
36717 dma_complete++;
36718 }
36719diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36720index 82f2ae0..f205c02 100644
36721--- a/drivers/atm/firestream.c
36722+++ b/drivers/atm/firestream.c
36723@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36724 }
36725 }
36726
36727- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36728+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36729
36730 fs_dprintk (FS_DEBUG_TXMEM, "i");
36731 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36732@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36733 #endif
36734 skb_put (skb, qe->p1 & 0xffff);
36735 ATM_SKB(skb)->vcc = atm_vcc;
36736- atomic_inc(&atm_vcc->stats->rx);
36737+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36738 __net_timestamp(skb);
36739 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36740 atm_vcc->push (atm_vcc, skb);
36741@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36742 kfree (pe);
36743 }
36744 if (atm_vcc)
36745- atomic_inc(&atm_vcc->stats->rx_drop);
36746+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36747 break;
36748 case 0x1f: /* Reassembly abort: no buffers. */
36749 /* Silently increment error counter. */
36750 if (atm_vcc)
36751- atomic_inc(&atm_vcc->stats->rx_drop);
36752+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36753 break;
36754 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36755 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36756diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36757index d5d9eaf..65c0d53 100644
36758--- a/drivers/atm/fore200e.c
36759+++ b/drivers/atm/fore200e.c
36760@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36761 #endif
36762 /* check error condition */
36763 if (*entry->status & STATUS_ERROR)
36764- atomic_inc(&vcc->stats->tx_err);
36765+ atomic_inc_unchecked(&vcc->stats->tx_err);
36766 else
36767- atomic_inc(&vcc->stats->tx);
36768+ atomic_inc_unchecked(&vcc->stats->tx);
36769 }
36770 }
36771
36772@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36773 if (skb == NULL) {
36774 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36775
36776- atomic_inc(&vcc->stats->rx_drop);
36777+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36778 return -ENOMEM;
36779 }
36780
36781@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36782
36783 dev_kfree_skb_any(skb);
36784
36785- atomic_inc(&vcc->stats->rx_drop);
36786+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36787 return -ENOMEM;
36788 }
36789
36790 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36791
36792 vcc->push(vcc, skb);
36793- atomic_inc(&vcc->stats->rx);
36794+ atomic_inc_unchecked(&vcc->stats->rx);
36795
36796 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36797
36798@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36799 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36800 fore200e->atm_dev->number,
36801 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36802- atomic_inc(&vcc->stats->rx_err);
36803+ atomic_inc_unchecked(&vcc->stats->rx_err);
36804 }
36805 }
36806
36807@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36808 goto retry_here;
36809 }
36810
36811- atomic_inc(&vcc->stats->tx_err);
36812+ atomic_inc_unchecked(&vcc->stats->tx_err);
36813
36814 fore200e->tx_sat++;
36815 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36816diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36817index c39702b..785b73b 100644
36818--- a/drivers/atm/he.c
36819+++ b/drivers/atm/he.c
36820@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36821
36822 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36823 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36824- atomic_inc(&vcc->stats->rx_drop);
36825+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36826 goto return_host_buffers;
36827 }
36828
36829@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36830 RBRQ_LEN_ERR(he_dev->rbrq_head)
36831 ? "LEN_ERR" : "",
36832 vcc->vpi, vcc->vci);
36833- atomic_inc(&vcc->stats->rx_err);
36834+ atomic_inc_unchecked(&vcc->stats->rx_err);
36835 goto return_host_buffers;
36836 }
36837
36838@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36839 vcc->push(vcc, skb);
36840 spin_lock(&he_dev->global_lock);
36841
36842- atomic_inc(&vcc->stats->rx);
36843+ atomic_inc_unchecked(&vcc->stats->rx);
36844
36845 return_host_buffers:
36846 ++pdus_assembled;
36847@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36848 tpd->vcc->pop(tpd->vcc, tpd->skb);
36849 else
36850 dev_kfree_skb_any(tpd->skb);
36851- atomic_inc(&tpd->vcc->stats->tx_err);
36852+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36853 }
36854 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36855 return;
36856@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36857 vcc->pop(vcc, skb);
36858 else
36859 dev_kfree_skb_any(skb);
36860- atomic_inc(&vcc->stats->tx_err);
36861+ atomic_inc_unchecked(&vcc->stats->tx_err);
36862 return -EINVAL;
36863 }
36864
36865@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36866 vcc->pop(vcc, skb);
36867 else
36868 dev_kfree_skb_any(skb);
36869- atomic_inc(&vcc->stats->tx_err);
36870+ atomic_inc_unchecked(&vcc->stats->tx_err);
36871 return -EINVAL;
36872 }
36873 #endif
36874@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36875 vcc->pop(vcc, skb);
36876 else
36877 dev_kfree_skb_any(skb);
36878- atomic_inc(&vcc->stats->tx_err);
36879+ atomic_inc_unchecked(&vcc->stats->tx_err);
36880 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36881 return -ENOMEM;
36882 }
36883@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36884 vcc->pop(vcc, skb);
36885 else
36886 dev_kfree_skb_any(skb);
36887- atomic_inc(&vcc->stats->tx_err);
36888+ atomic_inc_unchecked(&vcc->stats->tx_err);
36889 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36890 return -ENOMEM;
36891 }
36892@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36893 __enqueue_tpd(he_dev, tpd, cid);
36894 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36895
36896- atomic_inc(&vcc->stats->tx);
36897+ atomic_inc_unchecked(&vcc->stats->tx);
36898
36899 return 0;
36900 }
36901diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36902index 1dc0519..1aadaf7 100644
36903--- a/drivers/atm/horizon.c
36904+++ b/drivers/atm/horizon.c
36905@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36906 {
36907 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36908 // VC layer stats
36909- atomic_inc(&vcc->stats->rx);
36910+ atomic_inc_unchecked(&vcc->stats->rx);
36911 __net_timestamp(skb);
36912 // end of our responsibility
36913 vcc->push (vcc, skb);
36914@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36915 dev->tx_iovec = NULL;
36916
36917 // VC layer stats
36918- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36919+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36920
36921 // free the skb
36922 hrz_kfree_skb (skb);
36923diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36924index 2b24ed0..b3d6acc 100644
36925--- a/drivers/atm/idt77252.c
36926+++ b/drivers/atm/idt77252.c
36927@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36928 else
36929 dev_kfree_skb(skb);
36930
36931- atomic_inc(&vcc->stats->tx);
36932+ atomic_inc_unchecked(&vcc->stats->tx);
36933 }
36934
36935 atomic_dec(&scq->used);
36936@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36937 if ((sb = dev_alloc_skb(64)) == NULL) {
36938 printk("%s: Can't allocate buffers for aal0.\n",
36939 card->name);
36940- atomic_add(i, &vcc->stats->rx_drop);
36941+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36942 break;
36943 }
36944 if (!atm_charge(vcc, sb->truesize)) {
36945 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36946 card->name);
36947- atomic_add(i - 1, &vcc->stats->rx_drop);
36948+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36949 dev_kfree_skb(sb);
36950 break;
36951 }
36952@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36953 ATM_SKB(sb)->vcc = vcc;
36954 __net_timestamp(sb);
36955 vcc->push(vcc, sb);
36956- atomic_inc(&vcc->stats->rx);
36957+ atomic_inc_unchecked(&vcc->stats->rx);
36958
36959 cell += ATM_CELL_PAYLOAD;
36960 }
36961@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36962 "(CDC: %08x)\n",
36963 card->name, len, rpp->len, readl(SAR_REG_CDC));
36964 recycle_rx_pool_skb(card, rpp);
36965- atomic_inc(&vcc->stats->rx_err);
36966+ atomic_inc_unchecked(&vcc->stats->rx_err);
36967 return;
36968 }
36969 if (stat & SAR_RSQE_CRC) {
36970 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36971 recycle_rx_pool_skb(card, rpp);
36972- atomic_inc(&vcc->stats->rx_err);
36973+ atomic_inc_unchecked(&vcc->stats->rx_err);
36974 return;
36975 }
36976 if (skb_queue_len(&rpp->queue) > 1) {
36977@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36978 RXPRINTK("%s: Can't alloc RX skb.\n",
36979 card->name);
36980 recycle_rx_pool_skb(card, rpp);
36981- atomic_inc(&vcc->stats->rx_err);
36982+ atomic_inc_unchecked(&vcc->stats->rx_err);
36983 return;
36984 }
36985 if (!atm_charge(vcc, skb->truesize)) {
36986@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36987 __net_timestamp(skb);
36988
36989 vcc->push(vcc, skb);
36990- atomic_inc(&vcc->stats->rx);
36991+ atomic_inc_unchecked(&vcc->stats->rx);
36992
36993 return;
36994 }
36995@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36996 __net_timestamp(skb);
36997
36998 vcc->push(vcc, skb);
36999- atomic_inc(&vcc->stats->rx);
37000+ atomic_inc_unchecked(&vcc->stats->rx);
37001
37002 if (skb->truesize > SAR_FB_SIZE_3)
37003 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37004@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37005 if (vcc->qos.aal != ATM_AAL0) {
37006 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37007 card->name, vpi, vci);
37008- atomic_inc(&vcc->stats->rx_drop);
37009+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37010 goto drop;
37011 }
37012
37013 if ((sb = dev_alloc_skb(64)) == NULL) {
37014 printk("%s: Can't allocate buffers for AAL0.\n",
37015 card->name);
37016- atomic_inc(&vcc->stats->rx_err);
37017+ atomic_inc_unchecked(&vcc->stats->rx_err);
37018 goto drop;
37019 }
37020
37021@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37022 ATM_SKB(sb)->vcc = vcc;
37023 __net_timestamp(sb);
37024 vcc->push(vcc, sb);
37025- atomic_inc(&vcc->stats->rx);
37026+ atomic_inc_unchecked(&vcc->stats->rx);
37027
37028 drop:
37029 skb_pull(queue, 64);
37030@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37031
37032 if (vc == NULL) {
37033 printk("%s: NULL connection in send().\n", card->name);
37034- atomic_inc(&vcc->stats->tx_err);
37035+ atomic_inc_unchecked(&vcc->stats->tx_err);
37036 dev_kfree_skb(skb);
37037 return -EINVAL;
37038 }
37039 if (!test_bit(VCF_TX, &vc->flags)) {
37040 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37041- atomic_inc(&vcc->stats->tx_err);
37042+ atomic_inc_unchecked(&vcc->stats->tx_err);
37043 dev_kfree_skb(skb);
37044 return -EINVAL;
37045 }
37046@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37047 break;
37048 default:
37049 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37050- atomic_inc(&vcc->stats->tx_err);
37051+ atomic_inc_unchecked(&vcc->stats->tx_err);
37052 dev_kfree_skb(skb);
37053 return -EINVAL;
37054 }
37055
37056 if (skb_shinfo(skb)->nr_frags != 0) {
37057 printk("%s: No scatter-gather yet.\n", card->name);
37058- atomic_inc(&vcc->stats->tx_err);
37059+ atomic_inc_unchecked(&vcc->stats->tx_err);
37060 dev_kfree_skb(skb);
37061 return -EINVAL;
37062 }
37063@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37064
37065 err = queue_skb(card, vc, skb, oam);
37066 if (err) {
37067- atomic_inc(&vcc->stats->tx_err);
37068+ atomic_inc_unchecked(&vcc->stats->tx_err);
37069 dev_kfree_skb(skb);
37070 return err;
37071 }
37072@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37073 skb = dev_alloc_skb(64);
37074 if (!skb) {
37075 printk("%s: Out of memory in send_oam().\n", card->name);
37076- atomic_inc(&vcc->stats->tx_err);
37077+ atomic_inc_unchecked(&vcc->stats->tx_err);
37078 return -ENOMEM;
37079 }
37080 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
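
The hunks above, and the matching ones in the iphase, lanai, nicstar, solos-pci, zatm and drbd code that follows, switch pure statistics counters from atomic_t to the grsecurity/PaX atomic_unchecked_t flavour. Under the REFCOUNT hardening, the ordinary atomic operations detect signed overflow to stop reference-count exploits; counters such as rx, tx and rx_err are expected to wrap harmlessly, so they opt out and keep the raw arithmetic. A minimal userspace sketch of the distinction, using C11 atomics (checked_inc and unchecked_inc are illustrative names, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

/* Hardened flavour: refuse to move past INT_MAX, the way a
 * REFCOUNT-style check would (the kernel version traps instead). */
static int checked_inc(atomic_int *v)
{
    int old = atomic_load(v);
    do {
        if (old == INT_MAX) {
            fprintf(stderr, "overflow refused at %d\n", old);
            return old;
        }
    } while (!atomic_compare_exchange_weak(v, &old, old + 1));
    return old + 1;
}

/* Statistics flavour: plain fetch-add; signed wrap-around is well
 * defined for C11 atomics and harmless for a packet counter. */
static void unchecked_inc(atomic_int *v)
{
    atomic_fetch_add(v, 1);
}

int main(void)
{
    atomic_int rx   = INT_MAX;   /* a very busy interface */
    atomic_int refs = INT_MAX;   /* an about-to-overflow refcount */

    unchecked_inc(&rx);
    printf("stats counter wrapped to %d\n", atomic_load(&rx));

    printf("refcount stays at %d\n", checked_inc(&refs));
    return 0;
}
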
37081diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37082index 4217f29..88f547a 100644
37083--- a/drivers/atm/iphase.c
37084+++ b/drivers/atm/iphase.c
37085@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37086 status = (u_short) (buf_desc_ptr->desc_mode);
37087 if (status & (RX_CER | RX_PTE | RX_OFL))
37088 {
37089- atomic_inc(&vcc->stats->rx_err);
37090+ atomic_inc_unchecked(&vcc->stats->rx_err);
37091 IF_ERR(printk("IA: bad packet, dropping it");)
37092 if (status & RX_CER) {
37093 IF_ERR(printk(" cause: packet CRC error\n");)
37094@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37095 len = dma_addr - buf_addr;
37096 if (len > iadev->rx_buf_sz) {
37097 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37098- atomic_inc(&vcc->stats->rx_err);
37099+ atomic_inc_unchecked(&vcc->stats->rx_err);
37100 goto out_free_desc;
37101 }
37102
37103@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37104 ia_vcc = INPH_IA_VCC(vcc);
37105 if (ia_vcc == NULL)
37106 {
37107- atomic_inc(&vcc->stats->rx_err);
37108+ atomic_inc_unchecked(&vcc->stats->rx_err);
37109 atm_return(vcc, skb->truesize);
37110 dev_kfree_skb_any(skb);
37111 goto INCR_DLE;
37112@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37113 if ((length > iadev->rx_buf_sz) || (length >
37114 (skb->len - sizeof(struct cpcs_trailer))))
37115 {
37116- atomic_inc(&vcc->stats->rx_err);
37117+ atomic_inc_unchecked(&vcc->stats->rx_err);
37118 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37119 length, skb->len);)
37120 atm_return(vcc, skb->truesize);
37121@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37122
37123 IF_RX(printk("rx_dle_intr: skb push");)
37124 vcc->push(vcc,skb);
37125- atomic_inc(&vcc->stats->rx);
37126+ atomic_inc_unchecked(&vcc->stats->rx);
37127 iadev->rx_pkt_cnt++;
37128 }
37129 INCR_DLE:
37130@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37131 {
37132 struct k_sonet_stats *stats;
37133 stats = &PRIV(_ia_dev[board])->sonet_stats;
37134- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37135- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37136- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37137- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37138- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37139- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37140- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37141- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37142- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37143+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37144+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37145+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37146+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37147+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37148+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37149+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37150+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37151+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37152 }
37153 ia_cmds.status = 0;
37154 break;
37155@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37156 if ((desc == 0) || (desc > iadev->num_tx_desc))
37157 {
37158 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37159- atomic_inc(&vcc->stats->tx);
37160+ atomic_inc_unchecked(&vcc->stats->tx);
37161 if (vcc->pop)
37162 vcc->pop(vcc, skb);
37163 else
37164@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37165 ATM_DESC(skb) = vcc->vci;
37166 skb_queue_tail(&iadev->tx_dma_q, skb);
37167
37168- atomic_inc(&vcc->stats->tx);
37169+ atomic_inc_unchecked(&vcc->stats->tx);
37170 iadev->tx_pkt_cnt++;
37171 /* Increment transaction counter */
37172 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37173
37174 #if 0
37175 /* add flow control logic */
37176- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37177+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37178 if (iavcc->vc_desc_cnt > 10) {
37179 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37180 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37181diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37182index 93eaf8d..b4ca7da 100644
37183--- a/drivers/atm/lanai.c
37184+++ b/drivers/atm/lanai.c
37185@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37186 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37187 lanai_endtx(lanai, lvcc);
37188 lanai_free_skb(lvcc->tx.atmvcc, skb);
37189- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37190+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37191 }
37192
37193 /* Try to fill the buffer - don't call unless there is backlog */
37194@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37195 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37196 __net_timestamp(skb);
37197 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37198- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37199+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37200 out:
37201 lvcc->rx.buf.ptr = end;
37202 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37203@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37204 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37205 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37206 lanai->stats.service_rxnotaal5++;
37207- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37208+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37209 return 0;
37210 }
37211 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37212@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37213 int bytes;
37214 read_unlock(&vcc_sklist_lock);
37215 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37216- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37217+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37218 lvcc->stats.x.aal5.service_trash++;
37219 bytes = (SERVICE_GET_END(s) * 16) -
37220 (((unsigned long) lvcc->rx.buf.ptr) -
37221@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37222 }
37223 if (s & SERVICE_STREAM) {
37224 read_unlock(&vcc_sklist_lock);
37225- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37226+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37227 lvcc->stats.x.aal5.service_stream++;
37228 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37229 "PDU on VCI %d!\n", lanai->number, vci);
37230@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37231 return 0;
37232 }
37233 DPRINTK("got rx crc error on vci %d\n", vci);
37234- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37235+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37236 lvcc->stats.x.aal5.service_rxcrc++;
37237 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37238 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37239diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37240index 9988ac9..7c52585 100644
37241--- a/drivers/atm/nicstar.c
37242+++ b/drivers/atm/nicstar.c
37243@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37244 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37245 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37246 card->index);
37247- atomic_inc(&vcc->stats->tx_err);
37248+ atomic_inc_unchecked(&vcc->stats->tx_err);
37249 dev_kfree_skb_any(skb);
37250 return -EINVAL;
37251 }
37252@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37253 if (!vc->tx) {
37254 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37255 card->index);
37256- atomic_inc(&vcc->stats->tx_err);
37257+ atomic_inc_unchecked(&vcc->stats->tx_err);
37258 dev_kfree_skb_any(skb);
37259 return -EINVAL;
37260 }
37261@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37262 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37263 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37264 card->index);
37265- atomic_inc(&vcc->stats->tx_err);
37266+ atomic_inc_unchecked(&vcc->stats->tx_err);
37267 dev_kfree_skb_any(skb);
37268 return -EINVAL;
37269 }
37270
37271 if (skb_shinfo(skb)->nr_frags != 0) {
37272 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37273- atomic_inc(&vcc->stats->tx_err);
37274+ atomic_inc_unchecked(&vcc->stats->tx_err);
37275 dev_kfree_skb_any(skb);
37276 return -EINVAL;
37277 }
37278@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37279 }
37280
37281 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37282- atomic_inc(&vcc->stats->tx_err);
37283+ atomic_inc_unchecked(&vcc->stats->tx_err);
37284 dev_kfree_skb_any(skb);
37285 return -EIO;
37286 }
37287- atomic_inc(&vcc->stats->tx);
37288+ atomic_inc_unchecked(&vcc->stats->tx);
37289
37290 return 0;
37291 }
37292@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37293 printk
37294 ("nicstar%d: Can't allocate buffers for aal0.\n",
37295 card->index);
37296- atomic_add(i, &vcc->stats->rx_drop);
37297+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37298 break;
37299 }
37300 if (!atm_charge(vcc, sb->truesize)) {
37301 RXPRINTK
37302 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37303 card->index);
37304- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37305+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37306 dev_kfree_skb_any(sb);
37307 break;
37308 }
37309@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37310 ATM_SKB(sb)->vcc = vcc;
37311 __net_timestamp(sb);
37312 vcc->push(vcc, sb);
37313- atomic_inc(&vcc->stats->rx);
37314+ atomic_inc_unchecked(&vcc->stats->rx);
37315 cell += ATM_CELL_PAYLOAD;
37316 }
37317
37318@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37319 if (iovb == NULL) {
37320 printk("nicstar%d: Out of iovec buffers.\n",
37321 card->index);
37322- atomic_inc(&vcc->stats->rx_drop);
37323+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37324 recycle_rx_buf(card, skb);
37325 return;
37326 }
37327@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37328 small or large buffer itself. */
37329 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37330 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37331- atomic_inc(&vcc->stats->rx_err);
37332+ atomic_inc_unchecked(&vcc->stats->rx_err);
37333 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37334 NS_MAX_IOVECS);
37335 NS_PRV_IOVCNT(iovb) = 0;
37336@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37337 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37338 card->index);
37339 which_list(card, skb);
37340- atomic_inc(&vcc->stats->rx_err);
37341+ atomic_inc_unchecked(&vcc->stats->rx_err);
37342 recycle_rx_buf(card, skb);
37343 vc->rx_iov = NULL;
37344 recycle_iov_buf(card, iovb);
37345@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37346 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37347 card->index);
37348 which_list(card, skb);
37349- atomic_inc(&vcc->stats->rx_err);
37350+ atomic_inc_unchecked(&vcc->stats->rx_err);
37351 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37352 NS_PRV_IOVCNT(iovb));
37353 vc->rx_iov = NULL;
37354@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37355 printk(" - PDU size mismatch.\n");
37356 else
37357 printk(".\n");
37358- atomic_inc(&vcc->stats->rx_err);
37359+ atomic_inc_unchecked(&vcc->stats->rx_err);
37360 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37361 NS_PRV_IOVCNT(iovb));
37362 vc->rx_iov = NULL;
37363@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37364 /* skb points to a small buffer */
37365 if (!atm_charge(vcc, skb->truesize)) {
37366 push_rxbufs(card, skb);
37367- atomic_inc(&vcc->stats->rx_drop);
37368+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37369 } else {
37370 skb_put(skb, len);
37371 dequeue_sm_buf(card, skb);
37372@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37373 ATM_SKB(skb)->vcc = vcc;
37374 __net_timestamp(skb);
37375 vcc->push(vcc, skb);
37376- atomic_inc(&vcc->stats->rx);
37377+ atomic_inc_unchecked(&vcc->stats->rx);
37378 }
37379 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37380 struct sk_buff *sb;
37381@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37382 if (len <= NS_SMBUFSIZE) {
37383 if (!atm_charge(vcc, sb->truesize)) {
37384 push_rxbufs(card, sb);
37385- atomic_inc(&vcc->stats->rx_drop);
37386+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37387 } else {
37388 skb_put(sb, len);
37389 dequeue_sm_buf(card, sb);
37390@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37391 ATM_SKB(sb)->vcc = vcc;
37392 __net_timestamp(sb);
37393 vcc->push(vcc, sb);
37394- atomic_inc(&vcc->stats->rx);
37395+ atomic_inc_unchecked(&vcc->stats->rx);
37396 }
37397
37398 push_rxbufs(card, skb);
37399@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37400
37401 if (!atm_charge(vcc, skb->truesize)) {
37402 push_rxbufs(card, skb);
37403- atomic_inc(&vcc->stats->rx_drop);
37404+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37405 } else {
37406 dequeue_lg_buf(card, skb);
37407 #ifdef NS_USE_DESTRUCTORS
37408@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37409 ATM_SKB(skb)->vcc = vcc;
37410 __net_timestamp(skb);
37411 vcc->push(vcc, skb);
37412- atomic_inc(&vcc->stats->rx);
37413+ atomic_inc_unchecked(&vcc->stats->rx);
37414 }
37415
37416 push_rxbufs(card, sb);
37417@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37418 printk
37419 ("nicstar%d: Out of huge buffers.\n",
37420 card->index);
37421- atomic_inc(&vcc->stats->rx_drop);
37422+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37423 recycle_iovec_rx_bufs(card,
37424 (struct iovec *)
37425 iovb->data,
37426@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37427 card->hbpool.count++;
37428 } else
37429 dev_kfree_skb_any(hb);
37430- atomic_inc(&vcc->stats->rx_drop);
37431+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37432 } else {
37433 /* Copy the small buffer to the huge buffer */
37434 sb = (struct sk_buff *)iov->iov_base;
37435@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37436 #endif /* NS_USE_DESTRUCTORS */
37437 __net_timestamp(hb);
37438 vcc->push(vcc, hb);
37439- atomic_inc(&vcc->stats->rx);
37440+ atomic_inc_unchecked(&vcc->stats->rx);
37441 }
37442 }
37443
37444diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37445index 21b0bc6..b5f40ba 100644
37446--- a/drivers/atm/solos-pci.c
37447+++ b/drivers/atm/solos-pci.c
37448@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37449 }
37450 atm_charge(vcc, skb->truesize);
37451 vcc->push(vcc, skb);
37452- atomic_inc(&vcc->stats->rx);
37453+ atomic_inc_unchecked(&vcc->stats->rx);
37454 break;
37455
37456 case PKT_STATUS:
37457@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37458 vcc = SKB_CB(oldskb)->vcc;
37459
37460 if (vcc) {
37461- atomic_inc(&vcc->stats->tx);
37462+ atomic_inc_unchecked(&vcc->stats->tx);
37463 solos_pop(vcc, oldskb);
37464 } else {
37465 dev_kfree_skb_irq(oldskb);
37466diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37467index 0215934..ce9f5b1 100644
37468--- a/drivers/atm/suni.c
37469+++ b/drivers/atm/suni.c
37470@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37471
37472
37473 #define ADD_LIMITED(s,v) \
37474- atomic_add((v),&stats->s); \
37475- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37476+ atomic_add_unchecked((v),&stats->s); \
37477+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37478
37479
37480 static void suni_hz(unsigned long from_timer)
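
ADD_LIMITED accumulates SONET error counters and clamps them at INT_MAX the moment the signed value wraps negative; the patch merely swaps in the _unchecked atomics so this deliberate wrap test is not itself flagged as an overflow. The same macro reappears in uPD98402.c just below. A plain-C sketch of the clamping idiom (non-atomic for brevity):

#include <limits.h>
#include <stdio.h>

/* Same shape as the driver macro: accumulate, then clamp to INT_MAX
 * once the signed value has wrapped negative. The addition is done in
 * unsigned arithmetic so the wrap stays well defined in portable C;
 * the kernel gets the equivalent guarantee from its atomic helpers. */
#define ADD_LIMITED(s, v) do {                              \
    (s) = (int)((unsigned int)(s) + (unsigned int)(v));     \
    if ((s) < 0)                                            \
        (s) = INT_MAX;                                      \
} while (0)

int main(void)
{
    int section_bip = INT_MAX - 2;

    ADD_LIMITED(section_bip, 5);                 /* wraps, then clamps */
    printf("section_bip = %d\n", section_bip);   /* prints INT_MAX */
    return 0;
}
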
37481diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37482index 5120a96..e2572bd 100644
37483--- a/drivers/atm/uPD98402.c
37484+++ b/drivers/atm/uPD98402.c
37485@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37486 struct sonet_stats tmp;
37487 int error = 0;
37488
37489- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37490+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37491 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37492 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37493 if (zero && !error) {
37494@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37495
37496
37497 #define ADD_LIMITED(s,v) \
37498- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37499- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37500- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37501+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37502+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37503+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37504
37505
37506 static void stat_event(struct atm_dev *dev)
37507@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37508 if (reason & uPD98402_INT_PFM) stat_event(dev);
37509 if (reason & uPD98402_INT_PCO) {
37510 (void) GET(PCOCR); /* clear interrupt cause */
37511- atomic_add(GET(HECCT),
37512+ atomic_add_unchecked(GET(HECCT),
37513 &PRIV(dev)->sonet_stats.uncorr_hcs);
37514 }
37515 if ((reason & uPD98402_INT_RFO) &&
37516@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37517 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37518 uPD98402_INT_LOS),PIMR); /* enable them */
37519 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37520- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37521- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37522- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37523+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37524+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37525+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37526 return 0;
37527 }
37528
37529diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37530index 969c3c2..9b72956 100644
37531--- a/drivers/atm/zatm.c
37532+++ b/drivers/atm/zatm.c
37533@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37534 }
37535 if (!size) {
37536 dev_kfree_skb_irq(skb);
37537- if (vcc) atomic_inc(&vcc->stats->rx_err);
37538+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37539 continue;
37540 }
37541 if (!atm_charge(vcc,skb->truesize)) {
37542@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37543 skb->len = size;
37544 ATM_SKB(skb)->vcc = vcc;
37545 vcc->push(vcc,skb);
37546- atomic_inc(&vcc->stats->rx);
37547+ atomic_inc_unchecked(&vcc->stats->rx);
37548 }
37549 zout(pos & 0xffff,MTA(mbx));
37550 #if 0 /* probably a stupid idea */
37551@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37552 skb_queue_head(&zatm_vcc->backlog,skb);
37553 break;
37554 }
37555- atomic_inc(&vcc->stats->tx);
37556+ atomic_inc_unchecked(&vcc->stats->tx);
37557 wake_up(&zatm_vcc->tx_wait);
37558 }
37559
37560diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37561index 876bae5..8978785 100644
37562--- a/drivers/base/bus.c
37563+++ b/drivers/base/bus.c
37564@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37565 return -EINVAL;
37566
37567 mutex_lock(&subsys->p->mutex);
37568- list_add_tail(&sif->node, &subsys->p->interfaces);
37569+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37570 if (sif->add_dev) {
37571 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37572 while ((dev = subsys_dev_iter_next(&iter)))
37573@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37574 subsys = sif->subsys;
37575
37576 mutex_lock(&subsys->p->mutex);
37577- list_del_init(&sif->node);
37578+ pax_list_del_init((struct list_head *)&sif->node);
37579 if (sif->remove_dev) {
37580 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37581 while ((dev = subsys_dev_iter_next(&iter)))
37582diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37583index 25798db..15f130e 100644
37584--- a/drivers/base/devtmpfs.c
37585+++ b/drivers/base/devtmpfs.c
37586@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37587 if (!thread)
37588 return 0;
37589
37590- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37591+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37592 if (err)
37593 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37594 else
37595@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37596 *err = sys_unshare(CLONE_NEWNS);
37597 if (*err)
37598 goto out;
37599- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37600+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37601 if (*err)
37602 goto out;
37603- sys_chdir("/.."); /* will traverse into overmounted root */
37604- sys_chroot(".");
37605+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37606+ sys_chroot((char __force_user *)".");
37607 complete(&setup_done);
37608 while (1) {
37609 spin_lock(&req_lock);
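
The devtmpfs hunks, like the later one in drivers/block/loop.c, cast kernel-resident strings to char __force_user * before handing them to syscall entry points such as sys_mount(). In the grsecurity build, user and kernel pointers are treated as distinct sparse address spaces, so a kernel-side caller of a user-facing interface has to make the conversion explicit and audited. A sketch of how such annotations are conventionally wired up for the sparse checker (the macro definitions follow the usual kernel pattern; __force_user itself is the grsecurity shorthand for __force __user):

#include <stdio.h>

/* When analysed with sparse (__CHECKER__ defined), __user places a
 * pointer in address space 1 and forbids dereferencing it from kernel
 * code; __force silences the warning for deliberate, audited casts.
 * Under a normal compile the annotations vanish entirely. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

static long fake_sys_mount(const char __user *dev_name)
{
    /* A real syscall would copy_from_user() here; just acknowledge. */
    (void)dev_name;
    printf("mount requested\n");
    return 0;
}

int main(void)
{
    const char *kernel_str = "devtmpfs";   /* kernel-space pointer */

    /* Without the explicit cast between spaces, sparse would warn: */
    return (int)fake_sys_mount((const char __force __user *)kernel_str);
}
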
37610diff --git a/drivers/base/node.c b/drivers/base/node.c
37611index a3b82e9..f90a8ce 100644
37612--- a/drivers/base/node.c
37613+++ b/drivers/base/node.c
37614@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37615 struct node_attr {
37616 struct device_attribute attr;
37617 enum node_states state;
37618-};
37619+} __do_const;
37620
37621 static ssize_t show_node_state(struct device *dev,
37622 struct device_attribute *attr, char *buf)
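
__do_const tags a structure made up (almost) entirely of function pointers so that grsecurity's constify gcc plugin forces every instance into read-only memory; the drbd data_cmd and asender_cmd tables further down receive the same annotation. The effect is roughly what declaring the table const by hand achieves, sketched below (struct and handler names are invented for illustration):

#include <stdio.h>

struct cmd_handler {
    const char *name;
    int (*fn)(void);        /* ops tables are mostly function pointers */
};

static int do_ping(void) { printf("ping\n"); return 0; }

/* Constified table: it lives in .rodata, so an attacker with a kernel
 * write primitive cannot redirect the function pointers. The plugin
 * applies this transformation automatically to __do_const structs. */
static const struct cmd_handler handlers[] = {
    { .name = "ping", .fn = do_ping },
};

int main(void)
{
    /* handlers[0].fn = do_ping;  <- would now be a compile error */
    return handlers[0].fn();
}
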
37623diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37624index 0d8780c..0b5df3f 100644
37625--- a/drivers/base/power/domain.c
37626+++ b/drivers/base/power/domain.c
37627@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37628 {
37629 struct cpuidle_driver *cpuidle_drv;
37630 struct gpd_cpuidle_data *cpuidle_data;
37631- struct cpuidle_state *idle_state;
37632+ cpuidle_state_no_const *idle_state;
37633 int ret = 0;
37634
37635 if (IS_ERR_OR_NULL(genpd) || state < 0)
37636@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37637 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37638 {
37639 struct gpd_cpuidle_data *cpuidle_data;
37640- struct cpuidle_state *idle_state;
37641+ cpuidle_state_no_const *idle_state;
37642 int ret = 0;
37643
37644 if (IS_ERR_OR_NULL(genpd))
37645@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
37646 return ret;
37647 }
37648
37649- dev->pm_domain->detach = genpd_dev_pm_detach;
37650+ pax_open_kernel();
37651+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37652+ pax_close_kernel();
37653+
37654 pm_genpd_poweron(pd);
37655
37656 return 0;
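
Once a structure has been constified, the rare legitimate store (here, installing genpd_dev_pm_detach at attach time) must be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86 this is typically done by clearing the CR0.WP bit), with the write going through a *(void **)& cast to bypass the const qualifier. The pax_list_* helpers seen earlier in drivers/base/bus.c, and in syscore.c below, wrap list manipulation in the same open/close sequence. A rough userspace analogue flips page permissions with mprotect() around a one-off write; a sketch, with error handling trimmed:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct pm_ops { void (*detach)(void); };

static void my_detach(void) { puts("detach called"); }

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);

    /* Allocate the ops table, then make it read-only, as the constify
     * plugin effectively does for __do_const structures. */
    struct pm_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    mprotect(ops, pg, PROT_READ);

    /* pax_open_kernel() analogue: briefly re-enable writes ... */
    mprotect(ops, pg, PROT_READ | PROT_WRITE);
    ops->detach = my_detach;              /* the one sanctioned store */
    /* ... pax_close_kernel() analogue: protection back on. */
    mprotect(ops, pg, PROT_READ);

    ops->detach();
    return 0;
}
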
37657diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37658index d2be3f9..0a3167a 100644
37659--- a/drivers/base/power/sysfs.c
37660+++ b/drivers/base/power/sysfs.c
37661@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37662 return -EIO;
37663 }
37664 }
37665- return sprintf(buf, p);
37666+ return sprintf(buf, "%s", p);
37667 }
37668
37669 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
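
sprintf(buf, p) hands a variable to sprintf as the format string. Here p only ever points at fixed status strings, but routing it through "%s" removes the latent format-string hazard and keeps -Wformat-security quiet. The difference in miniature (the attacker_controlled name is illustrative):

#include <stdio.h>

int main(void)
{
    char buf[64];
    const char *attacker_controlled = "100%s";  /* hostile input */

    /* Unsafe pattern: the data becomes the format string, so any '%'
     * directives in it are interpreted; here "%s" would read a garbage
     * pointer from the vararg area (undefined behaviour):
     *
     *     sprintf(buf, attacker_controlled);
     */

    /* Safe pattern: the data is only ever an argument to "%s". */
    snprintf(buf, sizeof(buf), "%s", attacker_controlled);
    printf("%s\n", buf);                        /* prints: 100%s */
    return 0;
}
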
37670diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37671index c2744b3..08fac19 100644
37672--- a/drivers/base/power/wakeup.c
37673+++ b/drivers/base/power/wakeup.c
37674@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37675 * They need to be modified together atomically, so it's better to use one
37676 * atomic variable to hold them both.
37677 */
37678-static atomic_t combined_event_count = ATOMIC_INIT(0);
37679+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37680
37681 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37682 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37683
37684 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37685 {
37686- unsigned int comb = atomic_read(&combined_event_count);
37687+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37688
37689 *cnt = (comb >> IN_PROGRESS_BITS);
37690 *inpr = comb & MAX_IN_PROGRESS;
37691@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37692 ws->start_prevent_time = ws->last_time;
37693
37694 /* Increment the counter of events in progress. */
37695- cec = atomic_inc_return(&combined_event_count);
37696+ cec = atomic_inc_return_unchecked(&combined_event_count);
37697
37698 trace_wakeup_source_activate(ws->name, cec);
37699 }
37700@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37701 * Increment the counter of registered wakeup events and decrement the
37702 * counter of wakeup events in progress simultaneously.
37703 */
37704- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37705+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37706 trace_wakeup_source_deactivate(ws->name, cec);
37707
37708 split_counters(&cnt, &inpr);
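
combined_event_count packs two counters into one atomic word: the number of registered wakeup events in the high bits and the number of events still in progress in the low IN_PROGRESS_BITS bits. Activation adds 1 (in-progress goes up); deactivation adds MAX_IN_PROGRESS, which in a single atomic addition increments the high half and decrements the low half. The patch only moves the variable to the unchecked atomic type, since wrap-around here is benign. The packing arithmetic, sketched with plain unsigned ints:

#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)            /* 16 in practice */
#define MAX_IN_PROGRESS  ((1U << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int comb,
                           unsigned int *cnt, unsigned int *inpr)
{
    *cnt  = comb >> IN_PROGRESS_BITS;   /* registered wakeup events */
    *inpr = comb & MAX_IN_PROGRESS;     /* events still in progress */
}

int main(void)
{
    unsigned int comb = 0, cnt, inpr;

    comb += 1;                /* wakeup_source_activate: in-progress++ */
    comb += 1;                /* a second event starts                 */

    /* wakeup_source_deactivate: adding MAX_IN_PROGRESS increments the
     * high half and decrements the low half in one addition.          */
    comb += MAX_IN_PROGRESS;

    split_counters(comb, &cnt, &inpr);
    printf("registered=%u in_progress=%u\n", cnt, inpr);  /* 1 and 1 */
    return 0;
}
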
37709diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37710index 8d98a32..61d3165 100644
37711--- a/drivers/base/syscore.c
37712+++ b/drivers/base/syscore.c
37713@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37714 void register_syscore_ops(struct syscore_ops *ops)
37715 {
37716 mutex_lock(&syscore_ops_lock);
37717- list_add_tail(&ops->node, &syscore_ops_list);
37718+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37719 mutex_unlock(&syscore_ops_lock);
37720 }
37721 EXPORT_SYMBOL_GPL(register_syscore_ops);
37722@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37723 void unregister_syscore_ops(struct syscore_ops *ops)
37724 {
37725 mutex_lock(&syscore_ops_lock);
37726- list_del(&ops->node);
37727+ pax_list_del((struct list_head *)&ops->node);
37728 mutex_unlock(&syscore_ops_lock);
37729 }
37730 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37731diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37732index ff20f19..018f1da 100644
37733--- a/drivers/block/cciss.c
37734+++ b/drivers/block/cciss.c
37735@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37736 while (!list_empty(&h->reqQ)) {
37737 c = list_entry(h->reqQ.next, CommandList_struct, list);
37738 /* can't do anything if fifo is full */
37739- if ((h->access.fifo_full(h))) {
37740+ if ((h->access->fifo_full(h))) {
37741 dev_warn(&h->pdev->dev, "fifo full\n");
37742 break;
37743 }
37744@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37745 h->Qdepth--;
37746
37747 /* Tell the controller execute command */
37748- h->access.submit_command(h, c);
37749+ h->access->submit_command(h, c);
37750
37751 /* Put job onto the completed Q */
37752 addQ(&h->cmpQ, c);
37753@@ -3444,17 +3444,17 @@ startio:
37754
37755 static inline unsigned long get_next_completion(ctlr_info_t *h)
37756 {
37757- return h->access.command_completed(h);
37758+ return h->access->command_completed(h);
37759 }
37760
37761 static inline int interrupt_pending(ctlr_info_t *h)
37762 {
37763- return h->access.intr_pending(h);
37764+ return h->access->intr_pending(h);
37765 }
37766
37767 static inline long interrupt_not_for_us(ctlr_info_t *h)
37768 {
37769- return ((h->access.intr_pending(h) == 0) ||
37770+ return ((h->access->intr_pending(h) == 0) ||
37771 (h->interrupts_enabled == 0));
37772 }
37773
37774@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37775 u32 a;
37776
37777 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37778- return h->access.command_completed(h);
37779+ return h->access->command_completed(h);
37780
37781 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37782 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37783@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37784 trans_support & CFGTBL_Trans_use_short_tags);
37785
37786 /* Change the access methods to the performant access methods */
37787- h->access = SA5_performant_access;
37788+ h->access = &SA5_performant_access;
37789 h->transMethod = CFGTBL_Trans_Performant;
37790
37791 return;
37792@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37793 if (prod_index < 0)
37794 return -ENODEV;
37795 h->product_name = products[prod_index].product_name;
37796- h->access = *(products[prod_index].access);
37797+ h->access = products[prod_index].access;
37798
37799 if (cciss_board_disabled(h)) {
37800 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37801@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37802 }
37803
37804 /* make sure the board interrupts are off */
37805- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37806+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37807 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37808 if (rc)
37809 goto clean2;
37810@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37811 * fake ones to scoop up any residual completions.
37812 */
37813 spin_lock_irqsave(&h->lock, flags);
37814- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37815+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37816 spin_unlock_irqrestore(&h->lock, flags);
37817 free_irq(h->intr[h->intr_mode], h);
37818 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37819@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37820 dev_info(&h->pdev->dev, "Board READY.\n");
37821 dev_info(&h->pdev->dev,
37822 "Waiting for stale completions to drain.\n");
37823- h->access.set_intr_mask(h, CCISS_INTR_ON);
37824+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37825 msleep(10000);
37826- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37827+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37828
37829 rc = controller_reset_failed(h->cfgtable);
37830 if (rc)
37831@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37832 cciss_scsi_setup(h);
37833
37834 /* Turn the interrupts on so we can service requests */
37835- h->access.set_intr_mask(h, CCISS_INTR_ON);
37836+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37837
37838 /* Get the firmware version */
37839 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37840@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37841 kfree(flush_buf);
37842 if (return_code != IO_OK)
37843 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37844- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37845+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37846 free_irq(h->intr[h->intr_mode], h);
37847 }
37848
37849diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37850index 7fda30e..2f27946 100644
37851--- a/drivers/block/cciss.h
37852+++ b/drivers/block/cciss.h
37853@@ -101,7 +101,7 @@ struct ctlr_info
37854 /* information about each logical volume */
37855 drive_info_struct *drv[CISS_MAX_LUN];
37856
37857- struct access_method access;
37858+ struct access_method *access;
37859
37860 /* queue and queue Info */
37861 struct list_head reqQ;
37862@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
37863 }
37864
37865 static struct access_method SA5_access = {
37866- SA5_submit_command,
37867- SA5_intr_mask,
37868- SA5_fifo_full,
37869- SA5_intr_pending,
37870- SA5_completed,
37871+ .submit_command = SA5_submit_command,
37872+ .set_intr_mask = SA5_intr_mask,
37873+ .fifo_full = SA5_fifo_full,
37874+ .intr_pending = SA5_intr_pending,
37875+ .command_completed = SA5_completed,
37876 };
37877
37878 static struct access_method SA5B_access = {
37879- SA5_submit_command,
37880- SA5B_intr_mask,
37881- SA5_fifo_full,
37882- SA5B_intr_pending,
37883- SA5_completed,
37884+ .submit_command = SA5_submit_command,
37885+ .set_intr_mask = SA5B_intr_mask,
37886+ .fifo_full = SA5_fifo_full,
37887+ .intr_pending = SA5B_intr_pending,
37888+ .command_completed = SA5_completed,
37889 };
37890
37891 static struct access_method SA5_performant_access = {
37892- SA5_submit_command,
37893- SA5_performant_intr_mask,
37894- SA5_fifo_full,
37895- SA5_performant_intr_pending,
37896- SA5_performant_completed,
37897+ .submit_command = SA5_submit_command,
37898+ .set_intr_mask = SA5_performant_intr_mask,
37899+ .fifo_full = SA5_fifo_full,
37900+ .intr_pending = SA5_performant_intr_pending,
37901+ .command_completed = SA5_performant_completed,
37902 };
37903
37904 struct board_type {
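
Two idioms land together in the cciss changes, and again in cpqarray and smart1,2.h below: the controller now holds a pointer to a shared access_method table instead of a by-value copy (so the table can live in read-only memory), and the tables are rewritten with designated initializers so each callback is bound to its slot by name rather than by position. In miniature (names shortened):

#include <stdio.h>

struct access_method {
    void (*submit_command)(void);
    int  (*command_completed)(void);
};

static void sa5_submit(void)    { puts("submit"); }
static int  sa5_completed(void) { return 42; }

/* Designated initializers: robust against field reordering, and the
 * table itself can now be const (and hence constified). */
static const struct access_method SA5_access = {
    .submit_command    = sa5_submit,
    .command_completed = sa5_completed,
};

struct ctlr_info {
    /* Pointer instead of a by-value copy: one shared read-only table
     * rather than a writable duplicate inside every controller. */
    const struct access_method *access;
};

int main(void)
{
    struct ctlr_info h = { .access = &SA5_access };

    h.access->submit_command();
    printf("completed: %d\n", h.access->command_completed());
    return 0;
}
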
37905diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37906index 2b94403..fd6ad1f 100644
37907--- a/drivers/block/cpqarray.c
37908+++ b/drivers/block/cpqarray.c
37909@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37910 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37911 goto Enomem4;
37912 }
37913- hba[i]->access.set_intr_mask(hba[i], 0);
37914+ hba[i]->access->set_intr_mask(hba[i], 0);
37915 if (request_irq(hba[i]->intr, do_ida_intr,
37916 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37917 {
37918@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37919 add_timer(&hba[i]->timer);
37920
37921 /* Enable IRQ now that spinlock and rate limit timer are set up */
37922- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37923+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37924
37925 for(j=0; j<NWD; j++) {
37926 struct gendisk *disk = ida_gendisk[i][j];
37927@@ -694,7 +694,7 @@ DBGINFO(
37928 for(i=0; i<NR_PRODUCTS; i++) {
37929 if (board_id == products[i].board_id) {
37930 c->product_name = products[i].product_name;
37931- c->access = *(products[i].access);
37932+ c->access = products[i].access;
37933 break;
37934 }
37935 }
37936@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37937 hba[ctlr]->intr = intr;
37938 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37939 hba[ctlr]->product_name = products[j].product_name;
37940- hba[ctlr]->access = *(products[j].access);
37941+ hba[ctlr]->access = products[j].access;
37942 hba[ctlr]->ctlr = ctlr;
37943 hba[ctlr]->board_id = board_id;
37944 hba[ctlr]->pci_dev = NULL; /* not PCI */
37945@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37946
37947 while((c = h->reqQ) != NULL) {
37948 /* Can't do anything if we're busy */
37949- if (h->access.fifo_full(h) == 0)
37950+ if (h->access->fifo_full(h) == 0)
37951 return;
37952
37953 /* Get the first entry from the request Q */
37954@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37955 h->Qdepth--;
37956
37957 /* Tell the controller to do our bidding */
37958- h->access.submit_command(h, c);
37959+ h->access->submit_command(h, c);
37960
37961 /* Get onto the completion Q */
37962 addQ(&h->cmpQ, c);
37963@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37964 unsigned long flags;
37965 __u32 a,a1;
37966
37967- istat = h->access.intr_pending(h);
37968+ istat = h->access->intr_pending(h);
37969 /* Is this interrupt for us? */
37970 if (istat == 0)
37971 return IRQ_NONE;
37972@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37973 */
37974 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37975 if (istat & FIFO_NOT_EMPTY) {
37976- while((a = h->access.command_completed(h))) {
37977+ while((a = h->access->command_completed(h))) {
37978 a1 = a; a &= ~3;
37979 if ((c = h->cmpQ) == NULL)
37980 {
37981@@ -1448,11 +1448,11 @@ static int sendcmd(
37982 /*
37983 * Disable interrupt
37984 */
37985- info_p->access.set_intr_mask(info_p, 0);
37986+ info_p->access->set_intr_mask(info_p, 0);
37987 /* Make sure there is room in the command FIFO */
37988 /* Actually it should be completely empty at this time. */
37989 for (i = 200000; i > 0; i--) {
37990- temp = info_p->access.fifo_full(info_p);
37991+ temp = info_p->access->fifo_full(info_p);
37992 if (temp != 0) {
37993 break;
37994 }
37995@@ -1465,7 +1465,7 @@ DBG(
37996 /*
37997 * Send the cmd
37998 */
37999- info_p->access.submit_command(info_p, c);
38000+ info_p->access->submit_command(info_p, c);
38001 complete = pollcomplete(ctlr);
38002
38003 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38004@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38005 * we check the new geometry. Then turn interrupts back on when
38006 * we're done.
38007 */
38008- host->access.set_intr_mask(host, 0);
38009+ host->access->set_intr_mask(host, 0);
38010 getgeometry(ctlr);
38011- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38012+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38013
38014 for(i=0; i<NWD; i++) {
38015 struct gendisk *disk = ida_gendisk[ctlr][i];
38016@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38017 /* Wait (up to 2 seconds) for a command to complete */
38018
38019 for (i = 200000; i > 0; i--) {
38020- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38021+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38022 if (done == 0) {
38023 udelay(10); /* a short fixed delay */
38024 } else
38025diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38026index be73e9d..7fbf140 100644
38027--- a/drivers/block/cpqarray.h
38028+++ b/drivers/block/cpqarray.h
38029@@ -99,7 +99,7 @@ struct ctlr_info {
38030 drv_info_t drv[NWD];
38031 struct proc_dir_entry *proc;
38032
38033- struct access_method access;
38034+ struct access_method *access;
38035
38036 cmdlist_t *reqQ;
38037 cmdlist_t *cmpQ;
38038diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38039index 434c77d..6d3219a 100644
38040--- a/drivers/block/drbd/drbd_bitmap.c
38041+++ b/drivers/block/drbd/drbd_bitmap.c
38042@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38043 submit_bio(rw, bio);
38044 /* this should not count as user activity and cause the
38045 * resync to throttle -- see drbd_rs_should_slow_down(). */
38046- atomic_add(len >> 9, &device->rs_sect_ev);
38047+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38048 }
38049 }
38050
38051diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38052index b905e98..0812ed8 100644
38053--- a/drivers/block/drbd/drbd_int.h
38054+++ b/drivers/block/drbd/drbd_int.h
38055@@ -385,7 +385,7 @@ struct drbd_epoch {
38056 struct drbd_connection *connection;
38057 struct list_head list;
38058 unsigned int barrier_nr;
38059- atomic_t epoch_size; /* increased on every request added. */
38060+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38061 atomic_t active; /* increased on every req. added, and dec on every finished. */
38062 unsigned long flags;
38063 };
38064@@ -946,7 +946,7 @@ struct drbd_device {
38065 unsigned int al_tr_number;
38066 int al_tr_cycle;
38067 wait_queue_head_t seq_wait;
38068- atomic_t packet_seq;
38069+ atomic_unchecked_t packet_seq;
38070 unsigned int peer_seq;
38071 spinlock_t peer_seq_lock;
38072 unsigned long comm_bm_set; /* communicated number of set bits. */
38073@@ -955,8 +955,8 @@ struct drbd_device {
38074 struct mutex own_state_mutex;
38075 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38076 char congestion_reason; /* Why we were congested... */
38077- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38078- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38079+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38080+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38081 int rs_last_sect_ev; /* counter to compare with */
38082 int rs_last_events; /* counter of read or write "events" (unit sectors)
38083 * on the lower level device when we last looked. */
38084diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38085index 1fc8342..7e7742b 100644
38086--- a/drivers/block/drbd/drbd_main.c
38087+++ b/drivers/block/drbd/drbd_main.c
38088@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38089 p->sector = sector;
38090 p->block_id = block_id;
38091 p->blksize = blksize;
38092- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38093+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38094 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38095 }
38096
38097@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38098 return -EIO;
38099 p->sector = cpu_to_be64(req->i.sector);
38100 p->block_id = (unsigned long)req;
38101- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38102+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38103 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38104 if (device->state.conn >= C_SYNC_SOURCE &&
38105 device->state.conn <= C_PAUSED_SYNC_T)
38106@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38107 atomic_set(&device->unacked_cnt, 0);
38108 atomic_set(&device->local_cnt, 0);
38109 atomic_set(&device->pp_in_use_by_net, 0);
38110- atomic_set(&device->rs_sect_in, 0);
38111- atomic_set(&device->rs_sect_ev, 0);
38112+ atomic_set_unchecked(&device->rs_sect_in, 0);
38113+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38114 atomic_set(&device->ap_in_flight, 0);
38115 atomic_set(&device->md_io.in_use, 0);
38116
38117@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38118 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38119 struct drbd_resource *resource = connection->resource;
38120
38121- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38122- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38123+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38124+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38125 kfree(connection->current_epoch);
38126
38127 idr_destroy(&connection->peer_devices);
38128diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38129index 74df8cf..e41fc24 100644
38130--- a/drivers/block/drbd/drbd_nl.c
38131+++ b/drivers/block/drbd/drbd_nl.c
38132@@ -3637,13 +3637,13 @@ finish:
38133
38134 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38135 {
38136- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38137+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38138 struct sk_buff *msg;
38139 struct drbd_genlmsghdr *d_out;
38140 unsigned seq;
38141 int err = -ENOMEM;
38142
38143- seq = atomic_inc_return(&drbd_genl_seq);
38144+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38145 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38146 if (!msg)
38147 goto failed;
38148diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38149index d169b4a..481463f 100644
38150--- a/drivers/block/drbd/drbd_receiver.c
38151+++ b/drivers/block/drbd/drbd_receiver.c
38152@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38153 struct drbd_device *device = peer_device->device;
38154 int err;
38155
38156- atomic_set(&device->packet_seq, 0);
38157+ atomic_set_unchecked(&device->packet_seq, 0);
38158 device->peer_seq = 0;
38159
38160 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38161@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38162 do {
38163 next_epoch = NULL;
38164
38165- epoch_size = atomic_read(&epoch->epoch_size);
38166+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38167
38168 switch (ev & ~EV_CLEANUP) {
38169 case EV_PUT:
38170@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38171 rv = FE_DESTROYED;
38172 } else {
38173 epoch->flags = 0;
38174- atomic_set(&epoch->epoch_size, 0);
38175+ atomic_set_unchecked(&epoch->epoch_size, 0);
38176 /* atomic_set(&epoch->active, 0); is already zero */
38177 if (rv == FE_STILL_LIVE)
38178 rv = FE_RECYCLED;
38179@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38180 conn_wait_active_ee_empty(connection);
38181 drbd_flush(connection);
38182
38183- if (atomic_read(&connection->current_epoch->epoch_size)) {
38184+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38185 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38186 if (epoch)
38187 break;
38188@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38189 }
38190
38191 epoch->flags = 0;
38192- atomic_set(&epoch->epoch_size, 0);
38193+ atomic_set_unchecked(&epoch->epoch_size, 0);
38194 atomic_set(&epoch->active, 0);
38195
38196 spin_lock(&connection->epoch_lock);
38197- if (atomic_read(&connection->current_epoch->epoch_size)) {
38198+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38199 list_add(&epoch->list, &connection->current_epoch->list);
38200 connection->current_epoch = epoch;
38201 connection->epochs++;
38202@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38203 list_add_tail(&peer_req->w.list, &device->sync_ee);
38204 spin_unlock_irq(&device->resource->req_lock);
38205
38206- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38207+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38208 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38209 return 0;
38210
38211@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38212 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38213 }
38214
38215- atomic_add(pi->size >> 9, &device->rs_sect_in);
38216+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38217
38218 return err;
38219 }
38220@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38221
38222 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38223 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38224- atomic_inc(&connection->current_epoch->epoch_size);
38225+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38226 err2 = drbd_drain_block(peer_device, pi->size);
38227 if (!err)
38228 err = err2;
38229@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38230
38231 spin_lock(&connection->epoch_lock);
38232 peer_req->epoch = connection->current_epoch;
38233- atomic_inc(&peer_req->epoch->epoch_size);
38234+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38235 atomic_inc(&peer_req->epoch->active);
38236 spin_unlock(&connection->epoch_lock);
38237
38238@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38239
38240 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38241 (int)part_stat_read(&disk->part0, sectors[1]) -
38242- atomic_read(&device->rs_sect_ev);
38243+ atomic_read_unchecked(&device->rs_sect_ev);
38244
38245 if (atomic_read(&device->ap_actlog_cnt)
38246 || curr_events - device->rs_last_events > 64) {
38247@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38248 device->use_csums = true;
38249 } else if (pi->cmd == P_OV_REPLY) {
38250 /* track progress, we may need to throttle */
38251- atomic_add(size >> 9, &device->rs_sect_in);
38252+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38253 peer_req->w.cb = w_e_end_ov_reply;
38254 dec_rs_pending(device);
38255 /* drbd_rs_begin_io done when we sent this request,
38256@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38257 goto out_free_e;
38258
38259 submit_for_resync:
38260- atomic_add(size >> 9, &device->rs_sect_ev);
38261+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38262
38263 submit:
38264 update_receiver_timing_details(connection, drbd_submit_peer_request);
38265@@ -4564,7 +4564,7 @@ struct data_cmd {
38266 int expect_payload;
38267 size_t pkt_size;
38268 int (*fn)(struct drbd_connection *, struct packet_info *);
38269-};
38270+} __do_const;
38271
38272 static struct data_cmd drbd_cmd_handler[] = {
38273 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38274@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38275 if (!list_empty(&connection->current_epoch->list))
38276 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38277 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38278- atomic_set(&connection->current_epoch->epoch_size, 0);
38279+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38280 connection->send.seen_any_write_yet = false;
38281
38282 drbd_info(connection, "Connection closed\n");
38283@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38284 put_ldev(device);
38285 }
38286 dec_rs_pending(device);
38287- atomic_add(blksize >> 9, &device->rs_sect_in);
38288+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38289
38290 return 0;
38291 }
38292@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38293 struct asender_cmd {
38294 size_t pkt_size;
38295 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38296-};
38297+} __do_const;
38298
38299 static struct asender_cmd asender_tbl[] = {
38300 [P_PING] = { 0, got_Ping },
38301diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38302index d0fae55..4469096 100644
38303--- a/drivers/block/drbd/drbd_worker.c
38304+++ b/drivers/block/drbd/drbd_worker.c
38305@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38306 list_add_tail(&peer_req->w.list, &device->read_ee);
38307 spin_unlock_irq(&device->resource->req_lock);
38308
38309- atomic_add(size >> 9, &device->rs_sect_ev);
38310+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38311 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38312 return 0;
38313
38314@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38315 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38316 int number, mxb;
38317
38318- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38319+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38320 device->rs_in_flight -= sect_in;
38321
38322 rcu_read_lock();
38323@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38324 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38325 struct fifo_buffer *plan;
38326
38327- atomic_set(&device->rs_sect_in, 0);
38328- atomic_set(&device->rs_sect_ev, 0);
38329+ atomic_set_unchecked(&device->rs_sect_in, 0);
38330+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38331 device->rs_in_flight = 0;
38332 device->rs_last_events =
38333 (int)part_stat_read(&disk->part0, sectors[0]) +
38334diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38335index 6cb1beb..bf490f7 100644
38336--- a/drivers/block/loop.c
38337+++ b/drivers/block/loop.c
38338@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38339
38340 file_start_write(file);
38341 set_fs(get_ds());
38342- bw = file->f_op->write(file, buf, len, &pos);
38343+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38344 set_fs(old_fs);
38345 file_end_write(file);
38346 if (likely(bw == len))
38347diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
38348index d826bf3..8eb406c 100644
38349--- a/drivers/block/nvme-core.c
38350+++ b/drivers/block/nvme-core.c
38351@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
38352 static struct task_struct *nvme_thread;
38353 static struct workqueue_struct *nvme_workq;
38354 static wait_queue_head_t nvme_kthread_wait;
38355-static struct notifier_block nvme_nb;
38356
38357 static void nvme_reset_failed_dev(struct work_struct *ws);
38358 static int nvme_process_cq(struct nvme_queue *nvmeq);
38359@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
38360 static void __exit nvme_exit(void)
38361 {
38362 pci_unregister_driver(&nvme_driver);
38363- unregister_hotcpu_notifier(&nvme_nb);
38364 unregister_blkdev(nvme_major, "nvme");
38365 destroy_workqueue(nvme_workq);
38366 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
38367diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38368index 09e628da..7607aaa 100644
38369--- a/drivers/block/pktcdvd.c
38370+++ b/drivers/block/pktcdvd.c
38371@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38372
38373 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38374 {
38375- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38376+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38377 }
38378
38379 /*
38380@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38381 return -EROFS;
38382 }
38383 pd->settings.fp = ti.fp;
38384- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38385+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38386
38387 if (ti.nwa_v) {
38388 pd->nwa = be32_to_cpu(ti.next_writable);
38389diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38390index 8a86b62..f54c87e 100644
38391--- a/drivers/block/rbd.c
38392+++ b/drivers/block/rbd.c
38393@@ -63,7 +63,7 @@
38394 * If the counter is already at its maximum value returns
38395 * -EINVAL without updating it.
38396 */
38397-static int atomic_inc_return_safe(atomic_t *v)
38398+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38399 {
38400 unsigned int counter;
38401
38402diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38403index e5565fb..71be10b4 100644
38404--- a/drivers/block/smart1,2.h
38405+++ b/drivers/block/smart1,2.h
38406@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38407 }
38408
38409 static struct access_method smart4_access = {
38410- smart4_submit_command,
38411- smart4_intr_mask,
38412- smart4_fifo_full,
38413- smart4_intr_pending,
38414- smart4_completed,
38415+ .submit_command = smart4_submit_command,
38416+ .set_intr_mask = smart4_intr_mask,
38417+ .fifo_full = smart4_fifo_full,
38418+ .intr_pending = smart4_intr_pending,
38419+ .command_completed = smart4_completed,
38420 };
38421
38422 /*
38423@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38424 }
38425
38426 static struct access_method smart2_access = {
38427- smart2_submit_command,
38428- smart2_intr_mask,
38429- smart2_fifo_full,
38430- smart2_intr_pending,
38431- smart2_completed,
38432+ .submit_command = smart2_submit_command,
38433+ .set_intr_mask = smart2_intr_mask,
38434+ .fifo_full = smart2_fifo_full,
38435+ .intr_pending = smart2_intr_pending,
38436+ .command_completed = smart2_completed,
38437 };
38438
38439 /*
38440@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38441 }
38442
38443 static struct access_method smart2e_access = {
38444- smart2e_submit_command,
38445- smart2e_intr_mask,
38446- smart2e_fifo_full,
38447- smart2e_intr_pending,
38448- smart2e_completed,
38449+ .submit_command = smart2e_submit_command,
38450+ .set_intr_mask = smart2e_intr_mask,
38451+ .fifo_full = smart2e_fifo_full,
38452+ .intr_pending = smart2e_intr_pending,
38453+ .command_completed = smart2e_completed,
38454 };
38455
38456 /*
38457@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38458 }
38459
38460 static struct access_method smart1_access = {
38461- smart1_submit_command,
38462- smart1_intr_mask,
38463- smart1_fifo_full,
38464- smart1_intr_pending,
38465- smart1_completed,
38466+ .submit_command = smart1_submit_command,
38467+ .set_intr_mask = smart1_intr_mask,
38468+ .fifo_full = smart1_fifo_full,
38469+ .intr_pending = smart1_intr_pending,
38470+ .command_completed = smart1_completed,
38471 };
38472diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38473index 55c135b..9f8d60c 100644
38474--- a/drivers/bluetooth/btwilink.c
38475+++ b/drivers/bluetooth/btwilink.c
38476@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38477
38478 static int bt_ti_probe(struct platform_device *pdev)
38479 {
38480- static struct ti_st *hst;
38481+ struct ti_st *hst;
38482 struct hci_dev *hdev;
38483 int err;
38484
38485diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38486index 5d28a45..a538f90 100644
38487--- a/drivers/cdrom/cdrom.c
38488+++ b/drivers/cdrom/cdrom.c
38489@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38490 ENSURE(reset, CDC_RESET);
38491 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38492 cdi->mc_flags = 0;
38493- cdo->n_minors = 0;
38494 cdi->options = CDO_USE_FFLAGS;
38495
38496 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38497@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38498 else
38499 cdi->cdda_method = CDDA_OLD;
38500
38501- if (!cdo->generic_packet)
38502- cdo->generic_packet = cdrom_dummy_generic_packet;
38503+ if (!cdo->generic_packet) {
38504+ pax_open_kernel();
38505+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38506+ pax_close_kernel();
38507+ }
38508
38509 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38510 mutex_lock(&cdrom_mutex);
38511@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38512 if (cdi->exit)
38513 cdi->exit(cdi);
38514
38515- cdi->ops->n_minors--;
38516 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38517 }
38518
38519@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38520 */
38521 nr = nframes;
38522 do {
38523- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38524+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38525 if (cgc.buffer)
38526 break;
38527
38528@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38529 struct cdrom_device_info *cdi;
38530 int ret;
38531
38532- ret = scnprintf(info + *pos, max_size - *pos, header);
38533+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38534 if (!ret)
38535 return 1;
38536
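
The final cdrom.c hunk is a classic format-string fix: header is data, so it is routed through a fixed "%s" format rather than being interpreted as a format itself. A user-space sketch of the safe pattern:

#include <stdio.h>

int main(void)
{
    char info[64];
    /* Table- or device-controlled text that happens to contain '%'. */
    const char *header = "Can read 100% of:";

    /* Unsafe: snprintf(info, sizeof(info), header) would treat the
     * '%' as a conversion and read a nonexistent argument.
     * Safe: pin the format, pass the text as data. */
    int ret = snprintf(info, sizeof(info), "%s", header);

    printf("wrote %d bytes: %s\n", ret, info);
    return 0;
}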
38537diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38538index 584bc31..e64a12c 100644
38539--- a/drivers/cdrom/gdrom.c
38540+++ b/drivers/cdrom/gdrom.c
38541@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38542 .audio_ioctl = gdrom_audio_ioctl,
38543 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38544 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38545- .n_minors = 1,
38546 };
38547
38548 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38549diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38550index efefd12..4f1d494 100644
38551--- a/drivers/char/Kconfig
38552+++ b/drivers/char/Kconfig
38553@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38554
38555 config DEVKMEM
38556 bool "/dev/kmem virtual device support"
38557- default y
38558+ default n
38559+ depends on !GRKERNSEC_KMEM
38560 help
38561 Say Y here if you want to support the /dev/kmem device. The
38562 /dev/kmem device is rarely used, but can be used for certain
38563@@ -577,6 +578,7 @@ config DEVPORT
38564 bool
38565 depends on !M68K
38566 depends on ISA || PCI
38567+ depends on !GRKERNSEC_KMEM
38568 default y
38569
38570 source "drivers/s390/char/Kconfig"
38571diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38572index a48e05b..6bac831 100644
38573--- a/drivers/char/agp/compat_ioctl.c
38574+++ b/drivers/char/agp/compat_ioctl.c
38575@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38576 return -ENOMEM;
38577 }
38578
38579- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38580+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38581 sizeof(*usegment) * ureserve.seg_count)) {
38582 kfree(usegment);
38583 kfree(ksegment);
38584diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38585index 09f17eb..8531d2f 100644
38586--- a/drivers/char/agp/frontend.c
38587+++ b/drivers/char/agp/frontend.c
38588@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38589 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38590 return -EFAULT;
38591
38592- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38593+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38594 return -EFAULT;
38595
38596 client = agp_find_client_by_pid(reserve.pid);
38597@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38598 if (segment == NULL)
38599 return -ENOMEM;
38600
38601- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38602+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38603 sizeof(struct agp_segment) * reserve.seg_count)) {
38604 kfree(segment);
38605 return -EFAULT;
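
In agpioc_reserve_wrap() the overflow guard is corrected to divide by the size of the type that is actually allocated, agp_segment_priv, not the smaller wire-format agp_segment. A sketch of why the divisor must match the allocated element; the two structs here are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct seg      { uint32_t start; };              /* wire format */
struct seg_priv { uint32_t start; void *owner; }; /* what is allocated */

/* Reject counts whose multiply would wrap, sized against the
 * allocated element as the agp_segment_priv fix above does. */
static void *alloc_segs(size_t count)
{
    if (count >= SIZE_MAX / sizeof(struct seg_priv))
        return NULL;
    return malloc(count * sizeof(struct seg_priv));
}

int main(void)
{
    void *p = alloc_segs(4);

    printf("small: %s\n", p ? "ok" : "rejected");
    free(p);
    printf("huge:  %s\n",
           alloc_segs(SIZE_MAX / sizeof(struct seg)) ? "ok" : "rejected");
    return 0;
}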
38606diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38607index 4f94375..413694e 100644
38608--- a/drivers/char/genrtc.c
38609+++ b/drivers/char/genrtc.c
38610@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38611 switch (cmd) {
38612
38613 case RTC_PLL_GET:
38614+ memset(&pll, 0, sizeof(pll));
38615 if (get_rtc_pll(&pll))
38616 return -EINVAL;
38617 else
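
The genrtc.c hunk zeroes the rtc_pll_info object before it is filled and copied out, so compiler-inserted padding cannot leak stale stack bytes to user space. A sketch with an illustrative struct layout, not the kernel's:

#include <stdio.h>
#include <string.h>

struct rtc_pll_demo {
    char type;               /* 7 padding bytes follow on LP64 */
    long freq;
};

int main(void)
{
    struct rtc_pll_demo pll;

    /* Zero the whole object first so padding never carries stale
     * stack bytes across a copy-out boundary. */
    memset(&pll, 0, sizeof(pll));
    pll.type = 1;
    pll.freq = 32768;

    printf("type=%d freq=%ld size=%zu\n", pll.type, pll.freq, sizeof(pll));
    return 0;
}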
38618diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38619index d5d4cd8..22d561d 100644
38620--- a/drivers/char/hpet.c
38621+++ b/drivers/char/hpet.c
38622@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38623 }
38624
38625 static int
38626-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38627+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38628 struct hpet_info *info)
38629 {
38630 struct hpet_timer __iomem *timer;
38631diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38632index 6b65fa4..8ebbc99 100644
38633--- a/drivers/char/ipmi/ipmi_msghandler.c
38634+++ b/drivers/char/ipmi/ipmi_msghandler.c
38635@@ -436,7 +436,7 @@ struct ipmi_smi {
38636 struct proc_dir_entry *proc_dir;
38637 char proc_dir_name[10];
38638
38639- atomic_t stats[IPMI_NUM_STATS];
38640+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38641
38642 /*
38643 * run_to_completion duplicate of smb_info, smi_info
38644@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38645 static DEFINE_MUTEX(smi_watchers_mutex);
38646
38647 #define ipmi_inc_stat(intf, stat) \
38648- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38649+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38650 #define ipmi_get_stat(intf, stat) \
38651- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38652+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38653
38654 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38655 "ACPI", "SMBIOS", "PCI",
38656@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38657 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38658 init_waitqueue_head(&intf->waitq);
38659 for (i = 0; i < IPMI_NUM_STATS; i++)
38660- atomic_set(&intf->stats[i], 0);
38661+ atomic_set_unchecked(&intf->stats[i], 0);
38662
38663 intf->proc_dir = NULL;
38664
38665diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38666index 967b73a..946e94c 100644
38667--- a/drivers/char/ipmi/ipmi_si_intf.c
38668+++ b/drivers/char/ipmi/ipmi_si_intf.c
38669@@ -284,7 +284,7 @@ struct smi_info {
38670 unsigned char slave_addr;
38671
38672 /* Counters and things for the proc filesystem. */
38673- atomic_t stats[SI_NUM_STATS];
38674+ atomic_unchecked_t stats[SI_NUM_STATS];
38675
38676 struct task_struct *thread;
38677
38678@@ -293,9 +293,9 @@ struct smi_info {
38679 };
38680
38681 #define smi_inc_stat(smi, stat) \
38682- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38683+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38684 #define smi_get_stat(smi, stat) \
38685- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38686+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38687
38688 #define SI_MAX_PARMS 4
38689
38690@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
38691 atomic_set(&new_smi->req_events, 0);
38692 new_smi->run_to_completion = false;
38693 for (i = 0; i < SI_NUM_STATS; i++)
38694- atomic_set(&new_smi->stats[i], 0);
38695+ atomic_set_unchecked(&new_smi->stats[i], 0);
38696
38697 new_smi->interrupt_disabled = true;
38698 atomic_set(&new_smi->need_watch, 0);
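
Both IPMI files convert pure statistics arrays from atomic_t to atomic_unchecked_t: under PaX's overflow-checked atomics, counters that may legitimately wrap are opted out explicitly. A rough user-space analog of the checked/unchecked split, assuming GCC or Clang for the overflow builtin; the names are illustrative, not the kernel API:

#include <limits.h>
#include <stdio.h>

/* Checked increment: report instead of wrapping (refcount style). */
static int checked_inc(int *v)
{
    int out;

    if (__builtin_add_overflow(*v, 1, &out))
        return -1;
    *v = out;
    return 0;
}

/* Unchecked increment: wrapping is harmless for a stat counter. */
static void unchecked_inc(unsigned int *v)
{
    (*v)++;
}

int main(void)
{
    int ref = INT_MAX;
    unsigned int stat = 0;

    unchecked_inc(&stat);
    printf("stat=%u checked_inc=%d\n", stat, checked_inc(&ref));
    return 0;
}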
38699diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38700index 4c58333..d5cca27 100644
38701--- a/drivers/char/mem.c
38702+++ b/drivers/char/mem.c
38703@@ -18,6 +18,7 @@
38704 #include <linux/raw.h>
38705 #include <linux/tty.h>
38706 #include <linux/capability.h>
38707+#include <linux/security.h>
38708 #include <linux/ptrace.h>
38709 #include <linux/device.h>
38710 #include <linux/highmem.h>
38711@@ -36,6 +37,10 @@
38712
38713 #define DEVPORT_MINOR 4
38714
38715+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38716+extern const struct file_operations grsec_fops;
38717+#endif
38718+
38719 static inline unsigned long size_inside_page(unsigned long start,
38720 unsigned long size)
38721 {
38722@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38723
38724 while (cursor < to) {
38725 if (!devmem_is_allowed(pfn)) {
38726+#ifdef CONFIG_GRKERNSEC_KMEM
38727+ gr_handle_mem_readwrite(from, to);
38728+#else
38729 printk(KERN_INFO
38730 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38731 current->comm, from, to);
38732+#endif
38733 return 0;
38734 }
38735 cursor += PAGE_SIZE;
38736@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38737 }
38738 return 1;
38739 }
38740+#elif defined(CONFIG_GRKERNSEC_KMEM)
38741+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38742+{
38743+ return 0;
38744+}
38745 #else
38746 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38747 {
38748@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38749 #endif
38750
38751 while (count > 0) {
38752- unsigned long remaining;
38753+ unsigned long remaining = 0;
38754+ char *temp;
38755
38756 sz = size_inside_page(p, count);
38757
38758@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38759 if (!ptr)
38760 return -EFAULT;
38761
38762- remaining = copy_to_user(buf, ptr, sz);
38763+#ifdef CONFIG_PAX_USERCOPY
38764+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38765+ if (!temp) {
38766+ unxlate_dev_mem_ptr(p, ptr);
38767+ return -ENOMEM;
38768+ }
38769+ remaining = probe_kernel_read(temp, ptr, sz);
38770+#else
38771+ temp = ptr;
38772+#endif
38773+
38774+ if (!remaining)
38775+ remaining = copy_to_user(buf, temp, sz);
38776+
38777+#ifdef CONFIG_PAX_USERCOPY
38778+ kfree(temp);
38779+#endif
38780+
38781 unxlate_dev_mem_ptr(p, ptr);
38782 if (remaining)
38783 return -EFAULT;
38784@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38785 size_t count, loff_t *ppos)
38786 {
38787 unsigned long p = *ppos;
38788- ssize_t low_count, read, sz;
38789+ ssize_t low_count, read, sz, err = 0;
38790 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38791- int err = 0;
38792
38793 read = 0;
38794 if (p < (unsigned long) high_memory) {
38795@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38796 }
38797 #endif
38798 while (low_count > 0) {
38799+ char *temp;
38800+
38801 sz = size_inside_page(p, low_count);
38802
38803 /*
38804@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38805 */
38806 kbuf = xlate_dev_kmem_ptr((void *)p);
38807
38808- if (copy_to_user(buf, kbuf, sz))
38809+#ifdef CONFIG_PAX_USERCOPY
38810+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38811+ if (!temp)
38812+ return -ENOMEM;
38813+ err = probe_kernel_read(temp, kbuf, sz);
38814+#else
38815+ temp = kbuf;
38816+#endif
38817+
38818+ if (!err)
38819+ err = copy_to_user(buf, temp, sz);
38820+
38821+#ifdef CONFIG_PAX_USERCOPY
38822+ kfree(temp);
38823+#endif
38824+
38825+ if (err)
38826 return -EFAULT;
38827 buf += sz;
38828 p += sz;
38829@@ -800,6 +849,9 @@ static const struct memdev {
38830 #ifdef CONFIG_PRINTK
38831 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38832 #endif
38833+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38834+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38835+#endif
38836 };
38837
38838 static int memory_open(struct inode *inode, struct file *filp)
38839@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
38840 continue;
38841
38842 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38843- NULL, devlist[minor].name);
38844+ NULL, "%s", devlist[minor].name);
38845 }
38846
38847 return tty_init();
38848diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38849index 9df78e2..01ba9ae 100644
38850--- a/drivers/char/nvram.c
38851+++ b/drivers/char/nvram.c
38852@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38853
38854 spin_unlock_irq(&rtc_lock);
38855
38856- if (copy_to_user(buf, contents, tmp - contents))
38857+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38858 return -EFAULT;
38859
38860 *ppos = i;
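
The nvram.c change adds an explicit upper bound before copy_to_user(): even if the producer's pointer arithmetic goes wrong, no more than sizeof(contents) bytes can leave the kernel buffer. The same defensive shape in plain C:

#include <stdio.h>
#include <string.h>

/* Refuse lengths larger than the source buffer instead of trusting
 * the caller's arithmetic, as the added nvram_read() check does. */
static int copy_out(char *dst, size_t dst_len,
                    const char *src, size_t src_size, size_t n)
{
    if (n > src_size || n > dst_len)
        return -1;
    memcpy(dst, src, n);
    return 0;
}

int main(void)
{
    char contents[16] = "nvram bytes";
    char out[16];

    if (copy_out(out, sizeof(out), contents, sizeof(contents), 12) == 0)
        puts(out);                       /* 11 chars + NUL */
    if (copy_out(out, sizeof(out), contents, sizeof(contents), 64) < 0)
        puts("rejected oversized copy");
    return 0;
}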
38861diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
38862index 0ea9986..e7b07e4 100644
38863--- a/drivers/char/pcmcia/synclink_cs.c
38864+++ b/drivers/char/pcmcia/synclink_cs.c
38865@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38866
38867 if (debug_level >= DEBUG_LEVEL_INFO)
38868 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
38869- __FILE__, __LINE__, info->device_name, port->count);
38870+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
38871
38872 if (tty_port_close_start(port, tty, filp) == 0)
38873 goto cleanup;
38874@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38875 cleanup:
38876 if (debug_level >= DEBUG_LEVEL_INFO)
38877 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
38878- tty->driver->name, port->count);
38879+ tty->driver->name, atomic_read(&port->count));
38880 }
38881
38882 /* Wait until the transmitter is empty.
38883@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38884
38885 if (debug_level >= DEBUG_LEVEL_INFO)
38886 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
38887- __FILE__, __LINE__, tty->driver->name, port->count);
38888+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
38889
38890 /* If port is closing, signal caller to try again */
38891 if (port->flags & ASYNC_CLOSING){
38892@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38893 goto cleanup;
38894 }
38895 spin_lock(&port->lock);
38896- port->count++;
38897+ atomic_inc(&port->count);
38898 spin_unlock(&port->lock);
38899 spin_unlock_irqrestore(&info->netlock, flags);
38900
38901- if (port->count == 1) {
38902+ if (atomic_read(&port->count) == 1) {
38903 /* 1st open on this device, init hardware */
38904 retval = startup(info, tty);
38905 if (retval < 0)
38906@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38907 unsigned short new_crctype;
38908
38909 /* return error if TTY interface open */
38910- if (info->port.count)
38911+ if (atomic_read(&info->port.count))
38912 return -EBUSY;
38913
38914 switch (encoding)
38915@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
38916
38917 /* arbitrate between network and tty opens */
38918 spin_lock_irqsave(&info->netlock, flags);
38919- if (info->port.count != 0 || info->netcount != 0) {
38920+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38921 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38922 spin_unlock_irqrestore(&info->netlock, flags);
38923 return -EBUSY;
38924@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38925 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
38926
38927 /* return error if TTY interface open */
38928- if (info->port.count)
38929+ if (atomic_read(&info->port.count))
38930 return -EBUSY;
38931
38932 if (cmd != SIOCWANDEV)
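
The synclink_cs.c hunks move tty_port.count accesses to atomic operations because the count is touched from both the tty and the HDLC network paths. A C11 user-space analog of the converted pattern:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int count = 0;

    atomic_fetch_add(&count, 1);         /* atomic_inc() analog */
    if (atomic_load(&count) == 1)        /* atomic_read() analog */
        puts("first open: init hardware");
    atomic_fetch_sub(&count, 1);

    printf("count=%d\n", atomic_load(&count));
    return 0;
}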
38933diff --git a/drivers/char/random.c b/drivers/char/random.c
38934index 9cd6968..6416f00 100644
38935--- a/drivers/char/random.c
38936+++ b/drivers/char/random.c
38937@@ -289,9 +289,6 @@
38938 /*
38939 * To allow fractional bits to be tracked, the entropy_count field is
38940 * denominated in units of 1/8th bits.
38941- *
38942- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
38943- * credit_entropy_bits() needs to be 64 bits wide.
38944 */
38945 #define ENTROPY_SHIFT 3
38946 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
38947@@ -439,9 +436,9 @@ struct entropy_store {
38948 };
38949
38950 static void push_to_pool(struct work_struct *work);
38951-static __u32 input_pool_data[INPUT_POOL_WORDS];
38952-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
38953-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
38954+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
38955+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38956+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38957
38958 static struct entropy_store input_pool = {
38959 .poolinfo = &poolinfo_table[0],
38960@@ -635,7 +632,7 @@ retry:
38961 /* The +2 corresponds to the /4 in the denominator */
38962
38963 do {
38964- unsigned int anfrac = min(pnfrac, pool_size/2);
38965+ u64 anfrac = min(pnfrac, pool_size/2);
38966 unsigned int add =
38967 ((pool_size - entropy_count)*anfrac*3) >> s;
38968
38969@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38970
38971 extract_buf(r, tmp);
38972 i = min_t(int, nbytes, EXTRACT_SIZE);
38973- if (copy_to_user(buf, tmp, i)) {
38974+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38975 ret = -EFAULT;
38976 break;
38977 }
38978@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
38979 static int proc_do_uuid(struct ctl_table *table, int write,
38980 void __user *buffer, size_t *lenp, loff_t *ppos)
38981 {
38982- struct ctl_table fake_table;
38983+ ctl_table_no_const fake_table;
38984 unsigned char buf[64], tmp_uuid[16], *uuid;
38985
38986 uuid = table->data;
38987@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38988 static int proc_do_entropy(struct ctl_table *table, int write,
38989 void __user *buffer, size_t *lenp, loff_t *ppos)
38990 {
38991- struct ctl_table fake_table;
38992+ ctl_table_no_const fake_table;
38993 int entropy_count;
38994
38995 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
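
In random.c, widening anfrac to u64 makes the (pool_size - entropy_count) * anfrac * 3 product 64-bit, which is why the removed comment's 31-bit constraint is no longer needed. The effect of widening one operand, with made-up magnitudes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t pool_room = 1u << 18;   /* pool_size - entropy_count */
    uint32_t anfrac    = 1u << 15;

    /* 32-bit product wraps (here to exactly 0)... */
    uint32_t narrow = pool_room * anfrac * 3;
    /* ...one 64-bit operand widens the whole chain. */
    uint64_t wide = (uint64_t)pool_room * anfrac * 3;

    printf("32-bit: %u\n64-bit: %llu\n",
           narrow, (unsigned long long)wide);
    return 0;
}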
38996diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
38997index e496dae..b793e7d 100644
38998--- a/drivers/char/sonypi.c
38999+++ b/drivers/char/sonypi.c
39000@@ -54,6 +54,7 @@
39001
39002 #include <asm/uaccess.h>
39003 #include <asm/io.h>
39004+#include <asm/local.h>
39005
39006 #include <linux/sonypi.h>
39007
39008@@ -490,7 +491,7 @@ static struct sonypi_device {
39009 spinlock_t fifo_lock;
39010 wait_queue_head_t fifo_proc_list;
39011 struct fasync_struct *fifo_async;
39012- int open_count;
39013+ local_t open_count;
39014 int model;
39015 struct input_dev *input_jog_dev;
39016 struct input_dev *input_key_dev;
39017@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39018 static int sonypi_misc_release(struct inode *inode, struct file *file)
39019 {
39020 mutex_lock(&sonypi_device.lock);
39021- sonypi_device.open_count--;
39022+ local_dec(&sonypi_device.open_count);
39023 mutex_unlock(&sonypi_device.lock);
39024 return 0;
39025 }
39026@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39027 {
39028 mutex_lock(&sonypi_device.lock);
39029 /* Flush input queue on first open */
39030- if (!sonypi_device.open_count)
39031+ if (!local_read(&sonypi_device.open_count))
39032 kfifo_reset(&sonypi_device.fifo);
39033- sonypi_device.open_count++;
39034+ local_inc(&sonypi_device.open_count);
39035 mutex_unlock(&sonypi_device.lock);
39036
39037 return 0;
39038diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39039index 565a947..dcdc06e 100644
39040--- a/drivers/char/tpm/tpm_acpi.c
39041+++ b/drivers/char/tpm/tpm_acpi.c
39042@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39043 virt = acpi_os_map_iomem(start, len);
39044 if (!virt) {
39045 kfree(log->bios_event_log);
39046+ log->bios_event_log = NULL;
39047 printk("%s: ERROR - Unable to map memory\n", __func__);
39048 return -EIO;
39049 }
39050
39051- memcpy_fromio(log->bios_event_log, virt, len);
39052+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39053
39054 acpi_os_unmap_iomem(virt, len);
39055 return 0;
39056diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39057index 3a56a13..f8cbd25 100644
39058--- a/drivers/char/tpm/tpm_eventlog.c
39059+++ b/drivers/char/tpm/tpm_eventlog.c
39060@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39061 event = addr;
39062
39063 if ((event->event_type == 0 && event->event_size == 0) ||
39064- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39065+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39066 return NULL;
39067
39068 return addr;
39069@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39070 return NULL;
39071
39072 if ((event->event_type == 0 && event->event_size == 0) ||
39073- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39074+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39075 return NULL;
39076
39077 (*pos)++;
39078@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39079 int i;
39080
39081 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39082- seq_putc(m, data[i]);
39083+ if (!seq_putc(m, data[i]))
39084+ return -EFAULT;
39085
39086 return 0;
39087 }
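
Both tpm_eventlog.c checks are rearranged so the untrusted event_size is compared against the remaining room rather than summed into a pointer, removing the wraparound case where addr plus header plus size overflows past limit. A wrap-safe version of the predicate, with assumed parameter roles (addr points at the event, hdr is the fixed header size):

#include <stdint.h>
#include <stdio.h>

/* Compare the untrusted size against the remaining room instead of
 * summing it into the address, mirroring the rewritten checks. */
static int event_fits(uintptr_t addr, size_t hdr, size_t size,
                      uintptr_t limit)
{
    if (addr > limit || limit - addr < hdr)
        return 0;                        /* no room for the header */
    return size < limit - addr - hdr;    /* room for the payload? */
}

int main(void)
{
    uintptr_t base = 0x1000, limit = 0x2000;

    printf("%d\n", event_fits(base, 32, 64, limit));       /* 1 */
    printf("%d\n", event_fits(base, 32, SIZE_MAX, limit)); /* 0 */
    return 0;
}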
39088diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39089index c3aac4c..88de09f9 100644
39090--- a/drivers/char/virtio_console.c
39091+++ b/drivers/char/virtio_console.c
39092@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39093 if (to_user) {
39094 ssize_t ret;
39095
39096- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39097+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39098 if (ret)
39099 return -EFAULT;
39100 } else {
39101@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39102 if (!port_has_data(port) && !port->host_connected)
39103 return 0;
39104
39105- return fill_readbuf(port, ubuf, count, true);
39106+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39107 }
39108
39109 static int wait_port_writable(struct port *port, bool nonblock)
39110diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39111index 4386697..754ceca 100644
39112--- a/drivers/clk/clk-composite.c
39113+++ b/drivers/clk/clk-composite.c
39114@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39115 struct clk *clk;
39116 struct clk_init_data init;
39117 struct clk_composite *composite;
39118- struct clk_ops *clk_composite_ops;
39119+ clk_ops_no_const *clk_composite_ops;
39120
39121 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39122 if (!composite) {
39123diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39124index dd3a78c..386d49c 100644
39125--- a/drivers/clk/socfpga/clk-gate.c
39126+++ b/drivers/clk/socfpga/clk-gate.c
39127@@ -22,6 +22,7 @@
39128 #include <linux/mfd/syscon.h>
39129 #include <linux/of.h>
39130 #include <linux/regmap.h>
39131+#include <asm/pgtable.h>
39132
39133 #include "clk.h"
39134
39135@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39136 return 0;
39137 }
39138
39139-static struct clk_ops gateclk_ops = {
39140+static clk_ops_no_const gateclk_ops __read_only = {
39141 .prepare = socfpga_clk_prepare,
39142 .recalc_rate = socfpga_clk_recalc_rate,
39143 .get_parent = socfpga_clk_get_parent,
39144@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39145 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39146 socfpga_clk->hw.bit_idx = clk_gate[1];
39147
39148- gateclk_ops.enable = clk_gate_ops.enable;
39149- gateclk_ops.disable = clk_gate_ops.disable;
39150+ pax_open_kernel();
39151+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39152+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39153+ pax_close_kernel();
39154 }
39155
39156 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39157diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39158index de6da95..c98278b 100644
39159--- a/drivers/clk/socfpga/clk-pll.c
39160+++ b/drivers/clk/socfpga/clk-pll.c
39161@@ -21,6 +21,7 @@
39162 #include <linux/io.h>
39163 #include <linux/of.h>
39164 #include <linux/of_address.h>
39165+#include <asm/pgtable.h>
39166
39167 #include "clk.h"
39168
39169@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39170 CLK_MGR_PLL_CLK_SRC_MASK;
39171 }
39172
39173-static struct clk_ops clk_pll_ops = {
39174+static clk_ops_no_const clk_pll_ops __read_only = {
39175 .recalc_rate = clk_pll_recalc_rate,
39176 .get_parent = clk_pll_get_parent,
39177 };
39178@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39179 pll_clk->hw.hw.init = &init;
39180
39181 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39182- clk_pll_ops.enable = clk_gate_ops.enable;
39183- clk_pll_ops.disable = clk_gate_ops.disable;
39184+ pax_open_kernel();
39185+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39186+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39187+ pax_close_kernel();
39188
39189 clk = clk_register(NULL, &pll_clk->hw.hw);
39190 if (WARN_ON(IS_ERR(clk))) {
39191diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39192index b0c18ed..1713a80 100644
39193--- a/drivers/cpufreq/acpi-cpufreq.c
39194+++ b/drivers/cpufreq/acpi-cpufreq.c
39195@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39196 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39197 per_cpu(acfreq_data, cpu) = data;
39198
39199- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39200- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39201+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39202+ pax_open_kernel();
39203+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39204+ pax_close_kernel();
39205+ }
39206
39207 result = acpi_processor_register_performance(data->acpi_data, cpu);
39208 if (result)
39209@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39210 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39211 break;
39212 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39213- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39214+ pax_open_kernel();
39215+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39216+ pax_close_kernel();
39217 break;
39218 default:
39219 break;
39220@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39221 if (!msrs)
39222 return;
39223
39224- acpi_cpufreq_driver.boost_supported = true;
39225- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39226+ pax_open_kernel();
39227+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39228+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39229+ pax_close_kernel();
39230
39231 cpu_notifier_register_begin();
39232
39233diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39234index fde97d6..3631eca 100644
39235--- a/drivers/cpufreq/cpufreq-dt.c
39236+++ b/drivers/cpufreq/cpufreq-dt.c
39237@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39238 if (!IS_ERR(cpu_reg))
39239 regulator_put(cpu_reg);
39240
39241- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39242+ pax_open_kernel();
39243+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39244+ pax_close_kernel();
39245
39246 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39247 if (ret)
39248diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39249index 7030c40..3a97de6 100644
39250--- a/drivers/cpufreq/cpufreq.c
39251+++ b/drivers/cpufreq/cpufreq.c
39252@@ -2135,7 +2135,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39253 }
39254
39255 mutex_lock(&cpufreq_governor_mutex);
39256- list_del(&governor->governor_list);
39257+ pax_list_del(&governor->governor_list);
39258 mutex_unlock(&cpufreq_governor_mutex);
39259 return;
39260 }
39261@@ -2351,7 +2351,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39262 return NOTIFY_OK;
39263 }
39264
39265-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39266+static struct notifier_block cpufreq_cpu_notifier = {
39267 .notifier_call = cpufreq_cpu_callback,
39268 };
39269
39270@@ -2391,13 +2391,17 @@ int cpufreq_boost_trigger_state(int state)
39271 return 0;
39272
39273 write_lock_irqsave(&cpufreq_driver_lock, flags);
39274- cpufreq_driver->boost_enabled = state;
39275+ pax_open_kernel();
39276+ *(bool *)&cpufreq_driver->boost_enabled = state;
39277+ pax_close_kernel();
39278 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39279
39280 ret = cpufreq_driver->set_boost(state);
39281 if (ret) {
39282 write_lock_irqsave(&cpufreq_driver_lock, flags);
39283- cpufreq_driver->boost_enabled = !state;
39284+ pax_open_kernel();
39285+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39286+ pax_close_kernel();
39287 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39288
39289 pr_err("%s: Cannot %s BOOST\n",
39290@@ -2454,8 +2458,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39291
39292 pr_debug("trying to register driver %s\n", driver_data->name);
39293
39294- if (driver_data->setpolicy)
39295- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39296+ if (driver_data->setpolicy) {
39297+ pax_open_kernel();
39298+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39299+ pax_close_kernel();
39300+ }
39301
39302 write_lock_irqsave(&cpufreq_driver_lock, flags);
39303 if (cpufreq_driver) {
39304@@ -2470,8 +2477,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39305 * Check if driver provides function to enable boost -
39306 * if not, use cpufreq_boost_set_sw as default
39307 */
39308- if (!cpufreq_driver->set_boost)
39309- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39310+ if (!cpufreq_driver->set_boost) {
39311+ pax_open_kernel();
39312+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39313+ pax_close_kernel();
39314+ }
39315
39316 ret = cpufreq_sysfs_create_file(&boost.attr);
39317 if (ret) {
39318diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39319index 1b44496..b80ff5e 100644
39320--- a/drivers/cpufreq/cpufreq_governor.c
39321+++ b/drivers/cpufreq/cpufreq_governor.c
39322@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39323 struct dbs_data *dbs_data;
39324 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39325 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39326- struct od_ops *od_ops = NULL;
39327+ const struct od_ops *od_ops = NULL;
39328 struct od_dbs_tuners *od_tuners = NULL;
39329 struct cs_dbs_tuners *cs_tuners = NULL;
39330 struct cpu_dbs_common_info *cpu_cdbs;
39331@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39332
39333 if ((cdata->governor == GOV_CONSERVATIVE) &&
39334 (!policy->governor->initialized)) {
39335- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39336+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39337
39338 cpufreq_register_notifier(cs_ops->notifier_block,
39339 CPUFREQ_TRANSITION_NOTIFIER);
39340@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39341
39342 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39343 (policy->governor->initialized == 1)) {
39344- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39345+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39346
39347 cpufreq_unregister_notifier(cs_ops->notifier_block,
39348 CPUFREQ_TRANSITION_NOTIFIER);
39349diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39350index cc401d1..8197340 100644
39351--- a/drivers/cpufreq/cpufreq_governor.h
39352+++ b/drivers/cpufreq/cpufreq_governor.h
39353@@ -212,7 +212,7 @@ struct common_dbs_data {
39354 void (*exit)(struct dbs_data *dbs_data);
39355
39356 /* Governor specific ops, see below */
39357- void *gov_ops;
39358+ const void *gov_ops;
39359 };
39360
39361 /* Governor Per policy data */
39362@@ -232,7 +232,7 @@ struct od_ops {
39363 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39364 unsigned int freq_next, unsigned int relation);
39365 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39366-};
39367+} __no_const;
39368
39369 struct cs_ops {
39370 struct notifier_block *notifier_block;
39371diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39372index ad3f38f..8f086cd 100644
39373--- a/drivers/cpufreq/cpufreq_ondemand.c
39374+++ b/drivers/cpufreq/cpufreq_ondemand.c
39375@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39376
39377 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39378
39379-static struct od_ops od_ops = {
39380+static struct od_ops od_ops __read_only = {
39381 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39382 .powersave_bias_target = generic_powersave_bias_target,
39383 .freq_increase = dbs_freq_increase,
39384@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39385 (struct cpufreq_policy *, unsigned int, unsigned int),
39386 unsigned int powersave_bias)
39387 {
39388- od_ops.powersave_bias_target = f;
39389+ pax_open_kernel();
39390+ *(void **)&od_ops.powersave_bias_target = f;
39391+ pax_close_kernel();
39392 od_set_powersave_bias(powersave_bias);
39393 }
39394 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39395
39396 void od_unregister_powersave_bias_handler(void)
39397 {
39398- od_ops.powersave_bias_target = generic_powersave_bias_target;
39399+ pax_open_kernel();
39400+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39401+ pax_close_kernel();
39402 od_set_powersave_bias(0);
39403 }
39404 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39405diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39406index 742eefb..e2fcfc8 100644
39407--- a/drivers/cpufreq/intel_pstate.c
39408+++ b/drivers/cpufreq/intel_pstate.c
39409@@ -133,10 +133,10 @@ struct pstate_funcs {
39410 struct cpu_defaults {
39411 struct pstate_adjust_policy pid_policy;
39412 struct pstate_funcs funcs;
39413-};
39414+} __do_const;
39415
39416 static struct pstate_adjust_policy pid_params;
39417-static struct pstate_funcs pstate_funcs;
39418+static struct pstate_funcs *pstate_funcs;
39419 static int hwp_active;
39420
39421 struct perf_limits {
39422@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39423
39424 cpu->pstate.current_pstate = pstate;
39425
39426- pstate_funcs.set(cpu, pstate);
39427+ pstate_funcs->set(cpu, pstate);
39428 }
39429
39430 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39431 {
39432- cpu->pstate.min_pstate = pstate_funcs.get_min();
39433- cpu->pstate.max_pstate = pstate_funcs.get_max();
39434- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39435- cpu->pstate.scaling = pstate_funcs.get_scaling();
39436+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39437+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39438+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39439+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39440
39441- if (pstate_funcs.get_vid)
39442- pstate_funcs.get_vid(cpu);
39443+ if (pstate_funcs->get_vid)
39444+ pstate_funcs->get_vid(cpu);
39445 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39446 }
39447
39448@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
39449 rdmsrl(MSR_IA32_APERF, aperf);
39450 rdmsrl(MSR_IA32_MPERF, mperf);
39451
39452- if (!pstate_funcs.get_max() ||
39453- !pstate_funcs.get_min() ||
39454- !pstate_funcs.get_turbo())
39455+ if (!pstate_funcs->get_max() ||
39456+ !pstate_funcs->get_min() ||
39457+ !pstate_funcs->get_turbo())
39458 return -ENODEV;
39459
39460 rdmsrl(MSR_IA32_APERF, tmp);
39461@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
39462 return 0;
39463 }
39464
39465-static void copy_pid_params(struct pstate_adjust_policy *policy)
39466+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39467 {
39468 pid_params.sample_rate_ms = policy->sample_rate_ms;
39469 pid_params.p_gain_pct = policy->p_gain_pct;
39470@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39471
39472 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39473 {
39474- pstate_funcs.get_max = funcs->get_max;
39475- pstate_funcs.get_min = funcs->get_min;
39476- pstate_funcs.get_turbo = funcs->get_turbo;
39477- pstate_funcs.get_scaling = funcs->get_scaling;
39478- pstate_funcs.set = funcs->set;
39479- pstate_funcs.get_vid = funcs->get_vid;
39480+ pstate_funcs = funcs;
39481 }
39482
39483 #if IS_ENABLED(CONFIG_ACPI)
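
intel_pstate.c stops copying the per-model callbacks into a writable static struct and instead keeps a single pointer to the (now const-qualifiable) table, so no writable memory holds function pointers after init. The shape of that change in miniature, with illustrative names:

#include <stdio.h>

struct pstate_funcs_demo {
    int (*get_min)(void);
    int (*get_max)(void);
};

static int demo_min(void) { return 8; }
static int demo_max(void) { return 35; }

/* One const table per CPU model; the driver keeps only a pointer. */
static const struct pstate_funcs_demo core_funcs = {
    .get_min = demo_min,
    .get_max = demo_max,
};

static const struct pstate_funcs_demo *pstate_funcs;

int main(void)
{
    pstate_funcs = &core_funcs;          /* copy_cpu_funcs() analog */
    printf("min=%d max=%d\n",
           pstate_funcs->get_min(), pstate_funcs->get_max());
    return 0;
}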
39484diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39485index 529cfd9..0e28fff 100644
39486--- a/drivers/cpufreq/p4-clockmod.c
39487+++ b/drivers/cpufreq/p4-clockmod.c
39488@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39489 case 0x0F: /* Core Duo */
39490 case 0x16: /* Celeron Core */
39491 case 0x1C: /* Atom */
39492- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39493+ pax_open_kernel();
39494+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39495+ pax_close_kernel();
39496 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39497 case 0x0D: /* Pentium M (Dothan) */
39498- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39499+ pax_open_kernel();
39500+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39501+ pax_close_kernel();
39502 /* fall through */
39503 case 0x09: /* Pentium M (Banias) */
39504 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39505@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39506
39507 /* on P-4s, the TSC runs with constant frequency independent whether
39508 * throttling is active or not. */
39509- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39510+ pax_open_kernel();
39511+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39512+ pax_close_kernel();
39513
39514 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39515 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39516diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39517index 9bb42ba..b01b4a2 100644
39518--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39519+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39520@@ -18,14 +18,12 @@
39521 #include <asm/head.h>
39522 #include <asm/timer.h>
39523
39524-static struct cpufreq_driver *cpufreq_us3_driver;
39525-
39526 struct us3_freq_percpu_info {
39527 struct cpufreq_frequency_table table[4];
39528 };
39529
39530 /* Indexed by cpu number. */
39531-static struct us3_freq_percpu_info *us3_freq_table;
39532+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39533
39534 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39535 * in the Safari config register.
39536@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39537
39538 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39539 {
39540- if (cpufreq_us3_driver)
39541- us3_freq_target(policy, 0);
39542+ us3_freq_target(policy, 0);
39543
39544 return 0;
39545 }
39546
39547+static int __init us3_freq_init(void);
39548+static void __exit us3_freq_exit(void);
39549+
39550+static struct cpufreq_driver cpufreq_us3_driver = {
39551+ .init = us3_freq_cpu_init,
39552+ .verify = cpufreq_generic_frequency_table_verify,
39553+ .target_index = us3_freq_target,
39554+ .get = us3_freq_get,
39555+ .exit = us3_freq_cpu_exit,
39556+ .name = "UltraSPARC-III",
39557+
39558+};
39559+
39560 static int __init us3_freq_init(void)
39561 {
39562 unsigned long manuf, impl, ver;
39563- int ret;
39564
39565 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39566 return -ENODEV;
39567@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39568 (impl == CHEETAH_IMPL ||
39569 impl == CHEETAH_PLUS_IMPL ||
39570 impl == JAGUAR_IMPL ||
39571- impl == PANTHER_IMPL)) {
39572- struct cpufreq_driver *driver;
39573-
39574- ret = -ENOMEM;
39575- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39576- if (!driver)
39577- goto err_out;
39578-
39579- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39580- GFP_KERNEL);
39581- if (!us3_freq_table)
39582- goto err_out;
39583-
39584- driver->init = us3_freq_cpu_init;
39585- driver->verify = cpufreq_generic_frequency_table_verify;
39586- driver->target_index = us3_freq_target;
39587- driver->get = us3_freq_get;
39588- driver->exit = us3_freq_cpu_exit;
39589- strcpy(driver->name, "UltraSPARC-III");
39590-
39591- cpufreq_us3_driver = driver;
39592- ret = cpufreq_register_driver(driver);
39593- if (ret)
39594- goto err_out;
39595-
39596- return 0;
39597-
39598-err_out:
39599- if (driver) {
39600- kfree(driver);
39601- cpufreq_us3_driver = NULL;
39602- }
39603- kfree(us3_freq_table);
39604- us3_freq_table = NULL;
39605- return ret;
39606- }
39607+ impl == PANTHER_IMPL))
39608+ return cpufreq_register_driver(&cpufreq_us3_driver);
39609
39610 return -ENODEV;
39611 }
39612
39613 static void __exit us3_freq_exit(void)
39614 {
39615- if (cpufreq_us3_driver) {
39616- cpufreq_unregister_driver(cpufreq_us3_driver);
39617- kfree(cpufreq_us3_driver);
39618- cpufreq_us3_driver = NULL;
39619- kfree(us3_freq_table);
39620- us3_freq_table = NULL;
39621- }
39622+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39623 }
39624
39625 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39626diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39627index 7d4a315..21bb886 100644
39628--- a/drivers/cpufreq/speedstep-centrino.c
39629+++ b/drivers/cpufreq/speedstep-centrino.c
39630@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39631 !cpu_has(cpu, X86_FEATURE_EST))
39632 return -ENODEV;
39633
39634- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39635- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39636+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39637+ pax_open_kernel();
39638+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39639+ pax_close_kernel();
39640+ }
39641
39642 if (policy->cpu != 0)
39643 return -ENODEV;
39644diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39645index 2697e87..c32476c 100644
39646--- a/drivers/cpuidle/driver.c
39647+++ b/drivers/cpuidle/driver.c
39648@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39649
39650 static void poll_idle_init(struct cpuidle_driver *drv)
39651 {
39652- struct cpuidle_state *state = &drv->states[0];
39653+ cpuidle_state_no_const *state = &drv->states[0];
39654
39655 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39656 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39657diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39658index fb9f511..213e6cc 100644
39659--- a/drivers/cpuidle/governor.c
39660+++ b/drivers/cpuidle/governor.c
39661@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39662 mutex_lock(&cpuidle_lock);
39663 if (__cpuidle_find_governor(gov->name) == NULL) {
39664 ret = 0;
39665- list_add_tail(&gov->governor_list, &cpuidle_governors);
39666+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39667 if (!cpuidle_curr_governor ||
39668 cpuidle_curr_governor->rating < gov->rating)
39669 cpuidle_switch_governor(gov);
39670diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39671index 97c5903..023ad23 100644
39672--- a/drivers/cpuidle/sysfs.c
39673+++ b/drivers/cpuidle/sysfs.c
39674@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39675 NULL
39676 };
39677
39678-static struct attribute_group cpuidle_attr_group = {
39679+static attribute_group_no_const cpuidle_attr_group = {
39680 .attrs = cpuidle_default_attrs,
39681 .name = "cpuidle",
39682 };
39683diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39684index 8d2a772..33826c9 100644
39685--- a/drivers/crypto/hifn_795x.c
39686+++ b/drivers/crypto/hifn_795x.c
39687@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39688 MODULE_PARM_DESC(hifn_pll_ref,
39689 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39690
39691-static atomic_t hifn_dev_number;
39692+static atomic_unchecked_t hifn_dev_number;
39693
39694 #define ACRYPTO_OP_DECRYPT 0
39695 #define ACRYPTO_OP_ENCRYPT 1
39696@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39697 goto err_out_disable_pci_device;
39698
39699 snprintf(name, sizeof(name), "hifn%d",
39700- atomic_inc_return(&hifn_dev_number)-1);
39701+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39702
39703 err = pci_request_regions(pdev, name);
39704 if (err)
39705diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39706index 30b538d8..1610d75 100644
39707--- a/drivers/devfreq/devfreq.c
39708+++ b/drivers/devfreq/devfreq.c
39709@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39710 goto err_out;
39711 }
39712
39713- list_add(&governor->node, &devfreq_governor_list);
39714+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39715
39716 list_for_each_entry(devfreq, &devfreq_list, node) {
39717 int ret = 0;
39718@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39719 }
39720 }
39721
39722- list_del(&governor->node);
39723+ pax_list_del((struct list_head *)&governor->node);
39724 err_out:
39725 mutex_unlock(&devfreq_list_lock);
39726
39727diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39728index 3a2adb1..b3be9a3 100644
39729--- a/drivers/dma/sh/shdma-base.c
39730+++ b/drivers/dma/sh/shdma-base.c
39731@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39732 schan->slave_id = -EINVAL;
39733 }
39734
39735- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39736- sdev->desc_size, GFP_KERNEL);
39737+ schan->desc = kcalloc(sdev->desc_size,
39738+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39739 if (!schan->desc) {
39740 ret = -ENOMEM;
39741 goto edescalloc;
39742diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39743index aec8a84..7b45a1f 100644
39744--- a/drivers/dma/sh/shdmac.c
39745+++ b/drivers/dma/sh/shdmac.c
39746@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39747 return ret;
39748 }
39749
39750-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39751+static struct notifier_block sh_dmae_nmi_notifier = {
39752 .notifier_call = sh_dmae_nmi_handler,
39753
39754 /* Run before NMI debug handler and KGDB */
39755diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39756index 592af5f..bb1d583 100644
39757--- a/drivers/edac/edac_device.c
39758+++ b/drivers/edac/edac_device.c
39759@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39760 */
39761 int edac_device_alloc_index(void)
39762 {
39763- static atomic_t device_indexes = ATOMIC_INIT(0);
39764+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39765
39766- return atomic_inc_return(&device_indexes) - 1;
39767+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39768 }
39769 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39770
39771diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39772index 670d282..6675f4d 100644
39773--- a/drivers/edac/edac_mc_sysfs.c
39774+++ b/drivers/edac/edac_mc_sysfs.c
39775@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39776 struct dev_ch_attribute {
39777 struct device_attribute attr;
39778 int channel;
39779-};
39780+} __do_const;
39781
39782 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39783 struct dev_ch_attribute dev_attr_legacy_##_name = \
39784@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39785 }
39786
39787 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39788+ pax_open_kernel();
39789 if (mci->get_sdram_scrub_rate) {
39790- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39791- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39792+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39793+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39794 }
39795 if (mci->set_sdram_scrub_rate) {
39796- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39797- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39798+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39799+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39800 }
39801+ pax_close_kernel();
39802 err = device_create_file(&mci->dev,
39803 &dev_attr_sdram_scrub_rate);
39804 if (err) {
39805diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39806index 2cf44b4d..6dd2dc7 100644
39807--- a/drivers/edac/edac_pci.c
39808+++ b/drivers/edac/edac_pci.c
39809@@ -29,7 +29,7 @@
39810
39811 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39812 static LIST_HEAD(edac_pci_list);
39813-static atomic_t pci_indexes = ATOMIC_INIT(0);
39814+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39815
39816 /*
39817 * edac_pci_alloc_ctl_info
39818@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39819 */
39820 int edac_pci_alloc_index(void)
39821 {
39822- return atomic_inc_return(&pci_indexes) - 1;
39823+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39824 }
39825 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39826
39827diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39828index 24d877f..4e30133 100644
39829--- a/drivers/edac/edac_pci_sysfs.c
39830+++ b/drivers/edac/edac_pci_sysfs.c
39831@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39832 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39833 static int edac_pci_poll_msec = 1000; /* one second workq period */
39834
39835-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39836-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39837+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39838+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39839
39840 static struct kobject *edac_pci_top_main_kobj;
39841 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39842@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
39843 void *value;
39844 ssize_t(*show) (void *, char *);
39845 ssize_t(*store) (void *, const char *, size_t);
39846-};
39847+} __do_const;
39848
39849 /* Set of show/store abstract level functions for PCI Parity object */
39850 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39851@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39852 edac_printk(KERN_CRIT, EDAC_PCI,
39853 "Signaled System Error on %s\n",
39854 pci_name(dev));
39855- atomic_inc(&pci_nonparity_count);
39856+ atomic_inc_unchecked(&pci_nonparity_count);
39857 }
39858
39859 if (status & (PCI_STATUS_PARITY)) {
39860@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39861 "Master Data Parity Error on %s\n",
39862 pci_name(dev));
39863
39864- atomic_inc(&pci_parity_count);
39865+ atomic_inc_unchecked(&pci_parity_count);
39866 }
39867
39868 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39869@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39870 "Detected Parity Error on %s\n",
39871 pci_name(dev));
39872
39873- atomic_inc(&pci_parity_count);
39874+ atomic_inc_unchecked(&pci_parity_count);
39875 }
39876 }
39877
39878@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39879 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
39880 "Signaled System Error on %s\n",
39881 pci_name(dev));
39882- atomic_inc(&pci_nonparity_count);
39883+ atomic_inc_unchecked(&pci_nonparity_count);
39884 }
39885
39886 if (status & (PCI_STATUS_PARITY)) {
39887@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39888 "Master Data Parity Error on "
39889 "%s\n", pci_name(dev));
39890
39891- atomic_inc(&pci_parity_count);
39892+ atomic_inc_unchecked(&pci_parity_count);
39893 }
39894
39895 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39896@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39897 "Detected Parity Error on %s\n",
39898 pci_name(dev));
39899
39900- atomic_inc(&pci_parity_count);
39901+ atomic_inc_unchecked(&pci_parity_count);
39902 }
39903 }
39904 }
39905@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
39906 if (!check_pci_errors)
39907 return;
39908
39909- before_count = atomic_read(&pci_parity_count);
39910+ before_count = atomic_read_unchecked(&pci_parity_count);
39911
39912 /* scan all PCI devices looking for a Parity Error on devices and
39913 * bridges.
39914@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
39915 /* Only if operator has selected panic on PCI Error */
39916 if (edac_pci_get_panic_on_pe()) {
39917 /* If the count is different 'after' from 'before' */
39918- if (before_count != atomic_read(&pci_parity_count))
39919+ if (before_count != atomic_read_unchecked(&pci_parity_count))
39920 panic("EDAC: PCI Parity Error");
39921 }
39922 }
39923diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
39924index c2359a1..8bd119d 100644
39925--- a/drivers/edac/mce_amd.h
39926+++ b/drivers/edac/mce_amd.h
39927@@ -74,7 +74,7 @@ struct amd_decoder_ops {
39928 bool (*mc0_mce)(u16, u8);
39929 bool (*mc1_mce)(u16, u8);
39930 bool (*mc2_mce)(u16, u8);
39931-};
39932+} __no_const;
39933
39934 void amd_report_gart_errors(bool);
39935 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
39936diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
39937index 57ea7f4..af06b76 100644
39938--- a/drivers/firewire/core-card.c
39939+++ b/drivers/firewire/core-card.c
39940@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
39941 const struct fw_card_driver *driver,
39942 struct device *device)
39943 {
39944- static atomic_t index = ATOMIC_INIT(-1);
39945+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
39946
39947- card->index = atomic_inc_return(&index);
39948+ card->index = atomic_inc_return_unchecked(&index);
39949 card->driver = driver;
39950 card->device = device;
39951 card->current_tlabel = 0;
39952@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
39953
39954 void fw_core_remove_card(struct fw_card *card)
39955 {
39956- struct fw_card_driver dummy_driver = dummy_driver_template;
39957+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
39958
39959 card->driver->update_phy_reg(card, 4,
39960 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
39961diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
39962index f9e3aee..269dbdb 100644
39963--- a/drivers/firewire/core-device.c
39964+++ b/drivers/firewire/core-device.c
39965@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
39966 struct config_rom_attribute {
39967 struct device_attribute attr;
39968 u32 key;
39969-};
39970+} __do_const;
39971
39972 static ssize_t show_immediate(struct device *dev,
39973 struct device_attribute *dattr, char *buf)
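__do_const is the opposite annotation: config_rom_attribute mixes a device_attribute with a plain data field, which the constify plugin would normally leave writable, and the attribute tells it instances are only ever initialized statically, so the whole array can still move to read-only memory. A compilable sketch with simplified types and made-up key values follows; the attribute spelling is again an assumption.

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))	/* assumed spelling */
#else
#define __do_const
#endif

struct config_rom_attribute_sketch {
	const char *name;	/* stands in for struct device_attribute */
	unsigned int key;
} __do_const;

/* With the plugin, this table lands in .rodata even without an
 * explicit const, closing off a data-pointer overwrite. */
static struct config_rom_attribute_sketch config_rom_attrs[] = {
	{ "vendor", 0x03 },	/* illustrative key values */
	{ "model",  0x17 },
};

int main(void)
{
	return config_rom_attrs[0].key == 0x03 ? 0 : 1;
}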
39974diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
39975index eb6935c..3cc2bfa 100644
39976--- a/drivers/firewire/core-transaction.c
39977+++ b/drivers/firewire/core-transaction.c
39978@@ -38,6 +38,7 @@
39979 #include <linux/timer.h>
39980 #include <linux/types.h>
39981 #include <linux/workqueue.h>
39982+#include <linux/sched.h>
39983
39984 #include <asm/byteorder.h>
39985
39986diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
39987index e1480ff6..1a429bd 100644
39988--- a/drivers/firewire/core.h
39989+++ b/drivers/firewire/core.h
39990@@ -111,6 +111,7 @@ struct fw_card_driver {
39991
39992 int (*stop_iso)(struct fw_iso_context *ctx);
39993 };
39994+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
39995
39996 void fw_card_initialize(struct fw_card *card,
39997 const struct fw_card_driver *driver, struct device *device);
39998diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
39999index aff9018..fc87ded 100644
40000--- a/drivers/firewire/ohci.c
40001+++ b/drivers/firewire/ohci.c
40002@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
40003 be32_to_cpu(ohci->next_header));
40004 }
40005
40006+#ifndef CONFIG_GRKERNSEC
40007 if (param_remote_dma) {
40008 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40009 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40010 }
40011+#endif
40012
40013 spin_unlock_irq(&ohci->lock);
40014
40015@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40016 unsigned long flags;
40017 int n, ret = 0;
40018
40019+#ifndef CONFIG_GRKERNSEC
40020 if (param_remote_dma)
40021 return 0;
40022+#endif
40023
40024 /*
40025 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40026diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40027index 94a58a0..f5eba42 100644
40028--- a/drivers/firmware/dmi-id.c
40029+++ b/drivers/firmware/dmi-id.c
40030@@ -16,7 +16,7 @@
40031 struct dmi_device_attribute{
40032 struct device_attribute dev_attr;
40033 int field;
40034-};
40035+} __do_const;
40036 #define to_dmi_dev_attr(_dev_attr) \
40037 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40038
40039diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40040index 69fac06..820f0c9a 100644
40041--- a/drivers/firmware/dmi_scan.c
40042+++ b/drivers/firmware/dmi_scan.c
40043@@ -901,7 +901,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40044 if (buf == NULL)
40045 return -1;
40046
40047- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40048+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40049
40050 dmi_unmap(buf);
40051 return 0;
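The __force_kernel cast in dmi_walk() is aimed at sparse, not at the compiler proper: under the patch's stricter address-space annotations, the ioremap'd buffer and the kernel pointer dmi_table() expects sit in different sparse address spaces, and the __force variant documents that the conversion is intentional. A sketch of how such annotations are typically wired up; the macro definitions below are assumptions that only take effect when sparse defines __CHECKER__.

#ifdef __CHECKER__
#define __kernel	__attribute__((address_space(0)))
#define __force		__attribute__((force))
#else
#define __kernel
#define __force
#endif
#define __force_kernel	__force __kernel	/* assumed grsec spelling */

static unsigned int checksum(const unsigned char __kernel *buf,
			     unsigned int len)
{
	unsigned int sum = 0;

	while (len--)
		sum += *buf++;
	return sum;
}

int main(void)
{
	/* Stand-in for the ioremap'd DMI buffer: cast with
	 * __force_kernel to silence the address-space warning. */
	static unsigned char mapped[4] = { 1, 2, 3, 4 };

	return checksum((const unsigned char __force_kernel *)mapped, 4) == 10
		? 0 : 1;
}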
40052diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40053index 4fd9961..52d60ce 100644
40054--- a/drivers/firmware/efi/cper.c
40055+++ b/drivers/firmware/efi/cper.c
40056@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40057 */
40058 u64 cper_next_record_id(void)
40059 {
40060- static atomic64_t seq;
40061+ static atomic64_unchecked_t seq;
40062
40063- if (!atomic64_read(&seq))
40064- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40065+ if (!atomic64_read_unchecked(&seq))
40066+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40067
40068- return atomic64_inc_return(&seq);
40069+ return atomic64_inc_return_unchecked(&seq);
40070 }
40071 EXPORT_SYMBOL_GPL(cper_next_record_id);
40072
40073diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40074index 9035c1b..aff45f8 100644
40075--- a/drivers/firmware/efi/efi.c
40076+++ b/drivers/firmware/efi/efi.c
40077@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
40078 };
40079
40080 static struct efivars generic_efivars;
40081-static struct efivar_operations generic_ops;
40082+static efivar_operations_no_const generic_ops __read_only;
40083
40084 static int generic_ops_register(void)
40085 {
40086- generic_ops.get_variable = efi.get_variable;
40087- generic_ops.set_variable = efi.set_variable;
40088- generic_ops.get_next_variable = efi.get_next_variable;
40089- generic_ops.query_variable_store = efi_query_variable_store;
40090+ pax_open_kernel();
40091+ *(void **)&generic_ops.get_variable = efi.get_variable;
40092+ *(void **)&generic_ops.set_variable = efi.set_variable;
40093+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40094+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40095+ pax_close_kernel();
40096
40097 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40098 }
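generic_ops becomes a __read_only object of a non-const typedef, so its one-time setup must run between pax_open_kernel() and pax_close_kernel(), which briefly lift kernel write protection; the *(void **)& casts get the stores past the const qualifiers the constify plugin adds. Below is a userspace analogy using mprotect(); the kernel toggles CR0.WP instead, and every name here is a stand-in.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef long (*get_variable_t)(void);

struct efivar_ops_sketch {
	get_variable_t get_variable;
};

static long real_get_variable(void)
{
	return 42;
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct efivar_ops_sketch *ops;

	/* Page-backed stand-in for a __read_only kernel object. */
	ops = mmap(NULL, page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	mprotect(ops, page, PROT_READ);		/* sealed after init */

	/* pax_open_kernel(): lift write protection... */
	mprotect(ops, page, PROT_READ | PROT_WRITE);
	ops->get_variable = real_get_variable;
	/* pax_close_kernel(): ...and seal it again. */
	mprotect(ops, page, PROT_READ);

	printf("%ld\n", ops->get_variable());
	return 0;
}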
40099diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40100index f256ecd..387dcb1 100644
40101--- a/drivers/firmware/efi/efivars.c
40102+++ b/drivers/firmware/efi/efivars.c
40103@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40104 static int
40105 create_efivars_bin_attributes(void)
40106 {
40107- struct bin_attribute *attr;
40108+ bin_attribute_no_const *attr;
40109 int error;
40110
40111 /* new_var */
40112diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40113index 2f569aa..c95f4fb 100644
40114--- a/drivers/firmware/google/memconsole.c
40115+++ b/drivers/firmware/google/memconsole.c
40116@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40117 if (!found_memconsole())
40118 return -ENODEV;
40119
40120- memconsole_bin_attr.size = memconsole_length;
40121+ pax_open_kernel();
40122+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40123+ pax_close_kernel();
40124+
40125 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40126 }
40127
40128diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40129index 3cfcfc6..09d6f117 100644
40130--- a/drivers/gpio/gpio-em.c
40131+++ b/drivers/gpio/gpio-em.c
40132@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40133 struct em_gio_priv *p;
40134 struct resource *io[2], *irq[2];
40135 struct gpio_chip *gpio_chip;
40136- struct irq_chip *irq_chip;
40137+ irq_chip_no_const *irq_chip;
40138 const char *name = dev_name(&pdev->dev);
40139 int ret;
40140
40141diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40142index 7818cd1..1be40e5 100644
40143--- a/drivers/gpio/gpio-ich.c
40144+++ b/drivers/gpio/gpio-ich.c
40145@@ -94,7 +94,7 @@ struct ichx_desc {
40146 * this option allows driver caching written output values
40147 */
40148 bool use_outlvl_cache;
40149-};
40150+} __do_const;
40151
40152 static struct {
40153 spinlock_t lock;
40154diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40155index f476ae2..05e1bdd 100644
40156--- a/drivers/gpio/gpio-omap.c
40157+++ b/drivers/gpio/gpio-omap.c
40158@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40159 const struct omap_gpio_platform_data *pdata;
40160 struct resource *res;
40161 struct gpio_bank *bank;
40162- struct irq_chip *irqc;
40163+ irq_chip_no_const *irqc;
40164 int ret;
40165
40166 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40167diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40168index 584484e..e26ebd6 100644
40169--- a/drivers/gpio/gpio-rcar.c
40170+++ b/drivers/gpio/gpio-rcar.c
40171@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40172 struct gpio_rcar_priv *p;
40173 struct resource *io, *irq;
40174 struct gpio_chip *gpio_chip;
40175- struct irq_chip *irq_chip;
40176+ irq_chip_no_const *irq_chip;
40177 struct device *dev = &pdev->dev;
40178 const char *name = dev_name(dev);
40179 int ret;
40180diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40181index c1caa45..f0f97d2 100644
40182--- a/drivers/gpio/gpio-vr41xx.c
40183+++ b/drivers/gpio/gpio-vr41xx.c
40184@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40185 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40186 maskl, pendl, maskh, pendh);
40187
40188- atomic_inc(&irq_err_count);
40189+ atomic_inc_unchecked(&irq_err_count);
40190
40191 return -EINVAL;
40192 }
40193diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40194index 568aa2b..d1204d8 100644
40195--- a/drivers/gpio/gpiolib.c
40196+++ b/drivers/gpio/gpiolib.c
40197@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40198 }
40199
40200 if (gpiochip->irqchip) {
40201- gpiochip->irqchip->irq_request_resources = NULL;
40202- gpiochip->irqchip->irq_release_resources = NULL;
40203+ pax_open_kernel();
40204+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40205+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40206+ pax_close_kernel();
40207 gpiochip->irqchip = NULL;
40208 }
40209 }
40210@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40211 gpiochip->irqchip = NULL;
40212 return -EINVAL;
40213 }
40214- irqchip->irq_request_resources = gpiochip_irq_reqres;
40215- irqchip->irq_release_resources = gpiochip_irq_relres;
40216+
40217+ pax_open_kernel();
40218+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40219+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40220+ pax_close_kernel();
40221
40222 /*
40223 * Prepare the mapping since the irqchip shall be orthogonal to
40224diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40225index 29168fa..c9baec6 100644
40226--- a/drivers/gpu/drm/drm_crtc.c
40227+++ b/drivers/gpu/drm/drm_crtc.c
40228@@ -3964,7 +3964,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40229 goto done;
40230 }
40231
40232- if (copy_to_user(&enum_ptr[copied].name,
40233+ if (copy_to_user(enum_ptr[copied].name,
40234 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40235 ret = -EFAULT;
40236 goto done;
40237diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40238index 4f41377..ee33f40 100644
40239--- a/drivers/gpu/drm/drm_drv.c
40240+++ b/drivers/gpu/drm/drm_drv.c
40241@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
40242
40243 drm_device_set_unplugged(dev);
40244
40245- if (dev->open_count == 0) {
40246+ if (local_read(&dev->open_count) == 0) {
40247 drm_put_dev(dev);
40248 }
40249 mutex_unlock(&drm_global_mutex);
40250diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40251index 0b9514b..6acd174 100644
40252--- a/drivers/gpu/drm/drm_fops.c
40253+++ b/drivers/gpu/drm/drm_fops.c
40254@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40255 return PTR_ERR(minor);
40256
40257 dev = minor->dev;
40258- if (!dev->open_count++)
40259+ if (local_inc_return(&dev->open_count) == 1)
40260 need_setup = 1;
40261
40262 /* share address_space across all char-devs of a single device */
40263@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40264 return 0;
40265
40266 err_undo:
40267- dev->open_count--;
40268+ local_dec(&dev->open_count);
40269 drm_minor_release(minor);
40270 return retcode;
40271 }
40272@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40273
40274 mutex_lock(&drm_global_mutex);
40275
40276- DRM_DEBUG("open_count = %d\n", dev->open_count);
40277+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40278
40279 mutex_lock(&dev->struct_mutex);
40280 list_del(&file_priv->lhead);
40281@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40282 * Begin inline drm_release
40283 */
40284
40285- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40286+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40287 task_pid_nr(current),
40288 (long)old_encode_dev(file_priv->minor->kdev->devt),
40289- dev->open_count);
40290+ local_read(&dev->open_count));
40291
40292 /* Release any auth tokens that might point to this file_priv,
40293 (do that under the drm_global_mutex) */
40294@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40295 * End inline drm_release
40296 */
40297
40298- if (!--dev->open_count) {
40299+ if (local_dec_and_test(&dev->open_count)) {
40300 retcode = drm_lastclose(dev);
40301 if (drm_device_is_unplugged(dev))
40302 drm_put_dev(dev);
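In the drm_fops.c hunks, dev->open_count changes from a plain int to a local_t, accessed through local_read()/local_inc_return()/local_dec_and_test(). The first-open and last-close logic survives the conversion: inc-return == 1 still identifies the opener that took the count from 0 to 1, and dec-and-test still identifies the closer that returned it to 0. A sketch of that idiom, with C11 atomics standing in for local_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count;	/* stands in for the local_t field */

static void drm_open_sketch(void)
{
	/* local_inc_return(&count) == 1 in the patched code */
	if (atomic_fetch_add(&open_count, 1) + 1 == 1)
		puts("first opener: run one-time setup");
}

static void drm_release_sketch(void)
{
	/* local_dec_and_test(&count) in the patched code */
	if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
		puts("last closer: run lastclose teardown");
}

int main(void)
{
	drm_open_sketch();
	drm_open_sketch();	/* second opener: no setup */
	drm_release_sketch();	/* not last: no teardown */
	drm_release_sketch();
	return 0;
}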
40303diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40304index 3d2e91c..d31c4c9 100644
40305--- a/drivers/gpu/drm/drm_global.c
40306+++ b/drivers/gpu/drm/drm_global.c
40307@@ -36,7 +36,7 @@
40308 struct drm_global_item {
40309 struct mutex mutex;
40310 void *object;
40311- int refcount;
40312+ atomic_t refcount;
40313 };
40314
40315 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40316@@ -49,7 +49,7 @@ void drm_global_init(void)
40317 struct drm_global_item *item = &glob[i];
40318 mutex_init(&item->mutex);
40319 item->object = NULL;
40320- item->refcount = 0;
40321+ atomic_set(&item->refcount, 0);
40322 }
40323 }
40324
40325@@ -59,7 +59,7 @@ void drm_global_release(void)
40326 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40327 struct drm_global_item *item = &glob[i];
40328 BUG_ON(item->object != NULL);
40329- BUG_ON(item->refcount != 0);
40330+ BUG_ON(atomic_read(&item->refcount) != 0);
40331 }
40332 }
40333
40334@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40335 struct drm_global_item *item = &glob[ref->global_type];
40336
40337 mutex_lock(&item->mutex);
40338- if (item->refcount == 0) {
40339+ if (atomic_read(&item->refcount) == 0) {
40340 item->object = kzalloc(ref->size, GFP_KERNEL);
40341 if (unlikely(item->object == NULL)) {
40342 ret = -ENOMEM;
40343@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40344 goto out_err;
40345
40346 }
40347- ++item->refcount;
40348+ atomic_inc(&item->refcount);
40349 ref->object = item->object;
40350 mutex_unlock(&item->mutex);
40351 return 0;
40352@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40353 struct drm_global_item *item = &glob[ref->global_type];
40354
40355 mutex_lock(&item->mutex);
40356- BUG_ON(item->refcount == 0);
40357+ BUG_ON(atomic_read(&item->refcount) == 0);
40358 BUG_ON(ref->object != item->object);
40359- if (--item->refcount == 0) {
40360+ if (atomic_dec_and_test(&item->refcount)) {
40361 ref->release(ref);
40362 item->object = NULL;
40363 }
40364diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40365index 51efebd..2b70935 100644
40366--- a/drivers/gpu/drm/drm_info.c
40367+++ b/drivers/gpu/drm/drm_info.c
40368@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40369 struct drm_local_map *map;
40370 struct drm_map_list *r_list;
40371
40372- /* Hardcoded from _DRM_FRAME_BUFFER,
40373- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40374- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40375- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40376+ static const char * const types[] = {
40377+ [_DRM_FRAME_BUFFER] = "FB",
40378+ [_DRM_REGISTERS] = "REG",
40379+ [_DRM_SHM] = "SHM",
40380+ [_DRM_AGP] = "AGP",
40381+ [_DRM_SCATTER_GATHER] = "SG",
40382+ [_DRM_CONSISTENT] = "PCI"};
40383 const char *type;
40384 int i;
40385
40386@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40387 map = r_list->map;
40388 if (!map)
40389 continue;
40390- if (map->type < 0 || map->type > 5)
40391+ if (map->type >= ARRAY_SIZE(types))
40392 type = "??";
40393 else
40394 type = types[map->type];
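The drm_vm_info() hunk makes two related moves: the name table is indexed with designated initializers so it cannot drift out of sync with the map-type enum, and the lookup is bounded with ARRAY_SIZE() instead of the magic "> 5". An unsigned comparison also covers negative values in one check. A self-contained sketch of the pattern, with illustrative enum values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI };

static const char * const type_names[] = {
	[MAP_FB]  = "FB",
	[MAP_REG] = "REG",
	[MAP_SHM] = "SHM",
	[MAP_AGP] = "AGP",
	[MAP_SG]  = "SG",
	[MAP_PCI] = "PCI",
};

static const char *type_name(unsigned int type)
{
	/* unsigned compare: one check covers both negative (after
	 * conversion) and too-large indices, plus any enum holes. */
	if (type >= ARRAY_SIZE(type_names) || !type_names[type])
		return "??";
	return type_names[type];
}

int main(void)
{
	printf("%s %s\n", type_name(MAP_AGP), type_name(42));
	return 0;
}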
40395diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40396index 2f4c4343..dd12cd2 100644
40397--- a/drivers/gpu/drm/drm_ioc32.c
40398+++ b/drivers/gpu/drm/drm_ioc32.c
40399@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40400 request = compat_alloc_user_space(nbytes);
40401 if (!access_ok(VERIFY_WRITE, request, nbytes))
40402 return -EFAULT;
40403- list = (struct drm_buf_desc *) (request + 1);
40404+ list = (struct drm_buf_desc __user *) (request + 1);
40405
40406 if (__put_user(count, &request->count)
40407 || __put_user(list, &request->list))
40408@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40409 request = compat_alloc_user_space(nbytes);
40410 if (!access_ok(VERIFY_WRITE, request, nbytes))
40411 return -EFAULT;
40412- list = (struct drm_buf_pub *) (request + 1);
40413+ list = (struct drm_buf_pub __user *) (request + 1);
40414
40415 if (__put_user(count, &request->count)
40416 || __put_user(list, &request->list))
40417@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40418 return 0;
40419 }
40420
40421-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40422+drm_ioctl_compat_t drm_compat_ioctls[] = {
40423 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40424 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40425 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40426@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40427 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40428 {
40429 unsigned int nr = DRM_IOCTL_NR(cmd);
40430- drm_ioctl_compat_t *fn;
40431 int ret;
40432
40433 /* Assume that ioctls without an explicit compat routine will just
40434@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40435 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40436 return drm_ioctl(filp, cmd, arg);
40437
40438- fn = drm_compat_ioctls[nr];
40439-
40440- if (fn != NULL)
40441- ret = (*fn) (filp, cmd, arg);
40442+ if (drm_compat_ioctls[nr] != NULL)
40443+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40444 else
40445 ret = drm_ioctl(filp, cmd, arg);
40446
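The compat-ioctl hunks here (and the i915, mga, r128, and radeon ones that follow) all reshape the dispatch table from an array of "pointer to drm_ioctl_compat_t" into an array of the typedef itself, which only works because the typedef is presumably redefined as a pointer type elsewhere in the patch; together with constify, the whole table can then live in read-only memory, and the intermediate writable 'fn' local disappears. A sketch of the resulting shape, with invented names:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Patched-style typedef: already a pointer type. */
typedef long (*ioctl_compat_t)(unsigned int cmd, unsigned long arg);

static long compat_version(unsigned int cmd, unsigned long arg)
{
	(void)arg;
	return (long)cmd;
}

/* Element type is the pointer typedef itself, so 'const' here makes
 * the whole dispatch table immutable. */
static const ioctl_compat_t compat_ioctls[] = {
	[0] = compat_version,
};

static long dispatch(unsigned int nr, unsigned int cmd, unsigned long arg)
{
	if (nr >= ARRAY_SIZE(compat_ioctls) || !compat_ioctls[nr])
		return -1;	/* fall back to the generic ioctl path */
	return compat_ioctls[nr](cmd, arg);
}

int main(void)
{
	printf("%ld\n", dispatch(0, 7, 0));
	return 0;
}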
40447diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40448index 00587a1..57a65ca 100644
40449--- a/drivers/gpu/drm/drm_ioctl.c
40450+++ b/drivers/gpu/drm/drm_ioctl.c
40451@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
40452 struct drm_file *file_priv = filp->private_data;
40453 struct drm_device *dev;
40454 const struct drm_ioctl_desc *ioctl = NULL;
40455- drm_ioctl_t *func;
40456+ drm_ioctl_no_const_t func;
40457 unsigned int nr = DRM_IOCTL_NR(cmd);
40458 int retcode = -EINVAL;
40459 char stack_kdata[128];
40460diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40461index 93ec5dc..82acbaf 100644
40462--- a/drivers/gpu/drm/i810/i810_drv.h
40463+++ b/drivers/gpu/drm/i810/i810_drv.h
40464@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40465 int page_flipping;
40466
40467 wait_queue_head_t irq_queue;
40468- atomic_t irq_received;
40469- atomic_t irq_emitted;
40470+ atomic_unchecked_t irq_received;
40471+ atomic_unchecked_t irq_emitted;
40472
40473 int front_offset;
40474 } drm_i810_private_t;
40475diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40476index ecee3bc..ad5ae67 100644
40477--- a/drivers/gpu/drm/i915/i915_dma.c
40478+++ b/drivers/gpu/drm/i915/i915_dma.c
40479@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40480 * locking inversion with the driver load path. And the access here is
40481 * completely racy anyway. So don't bother with locking for now.
40482 */
40483- return dev->open_count == 0;
40484+ return local_read(&dev->open_count) == 0;
40485 }
40486
40487 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40488diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40489index 1173831..7dfb389 100644
40490--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40491+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40492@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40493 static int
40494 validate_exec_list(struct drm_device *dev,
40495 struct drm_i915_gem_exec_object2 *exec,
40496- int count)
40497+ unsigned int count)
40498 {
40499 unsigned relocs_total = 0;
40500 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40501 unsigned invalid_flags;
40502- int i;
40503+ unsigned int i;
40504
40505 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40506 if (USES_FULL_PPGTT(dev))
40507diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40508index 176de63..1ef9ac7 100644
40509--- a/drivers/gpu/drm/i915/i915_ioc32.c
40510+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40511@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40512 (unsigned long)request);
40513 }
40514
40515-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40516+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40517 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40518 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40519 [DRM_I915_GETPARAM] = compat_i915_getparam,
40520@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40521 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40522 {
40523 unsigned int nr = DRM_IOCTL_NR(cmd);
40524- drm_ioctl_compat_t *fn = NULL;
40525 int ret;
40526
40527 if (nr < DRM_COMMAND_BASE)
40528 return drm_compat_ioctl(filp, cmd, arg);
40529
40530- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40531- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40532-
40533- if (fn != NULL)
40534+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40535+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40536 ret = (*fn) (filp, cmd, arg);
40537- else
40538+ } else
40539 ret = drm_ioctl(filp, cmd, arg);
40540
40541 return ret;
40542diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40543index c10b52e..e5e27ff 100644
40544--- a/drivers/gpu/drm/i915/intel_display.c
40545+++ b/drivers/gpu/drm/i915/intel_display.c
40546@@ -12935,13 +12935,13 @@ struct intel_quirk {
40547 int subsystem_vendor;
40548 int subsystem_device;
40549 void (*hook)(struct drm_device *dev);
40550-};
40551+} __do_const;
40552
40553 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40554 struct intel_dmi_quirk {
40555 void (*hook)(struct drm_device *dev);
40556 const struct dmi_system_id (*dmi_id_list)[];
40557-};
40558+} __do_const;
40559
40560 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40561 {
40562@@ -12949,18 +12949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40563 return 1;
40564 }
40565
40566-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40567+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40568 {
40569- .dmi_id_list = &(const struct dmi_system_id[]) {
40570- {
40571- .callback = intel_dmi_reverse_brightness,
40572- .ident = "NCR Corporation",
40573- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40574- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40575- },
40576- },
40577- { } /* terminating entry */
40578+ .callback = intel_dmi_reverse_brightness,
40579+ .ident = "NCR Corporation",
40580+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40581+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40582 },
40583+ },
40584+ { } /* terminating entry */
40585+};
40586+
40587+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40588+ {
40589+ .dmi_id_list = &intel_dmi_quirks_table,
40590 .hook = quirk_invert_brightness,
40591 },
40592 };
40593diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40594index b250130..98df2a4 100644
40595--- a/drivers/gpu/drm/imx/imx-drm-core.c
40596+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40597@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40598 if (imxdrm->pipes >= MAX_CRTC)
40599 return -EINVAL;
40600
40601- if (imxdrm->drm->open_count)
40602+ if (local_read(&imxdrm->drm->open_count))
40603 return -EBUSY;
40604
40605 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40606diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40607index b4a2014..219ab78 100644
40608--- a/drivers/gpu/drm/mga/mga_drv.h
40609+++ b/drivers/gpu/drm/mga/mga_drv.h
40610@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40611 u32 clear_cmd;
40612 u32 maccess;
40613
40614- atomic_t vbl_received; /**< Number of vblanks received. */
40615+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40616 wait_queue_head_t fence_queue;
40617- atomic_t last_fence_retired;
40618+ atomic_unchecked_t last_fence_retired;
40619 u32 next_fence_to_post;
40620
40621 unsigned int fb_cpp;
40622diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40623index 729bfd5..ead8823 100644
40624--- a/drivers/gpu/drm/mga/mga_ioc32.c
40625+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40626@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40627 return 0;
40628 }
40629
40630-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40631+drm_ioctl_compat_t mga_compat_ioctls[] = {
40632 [DRM_MGA_INIT] = compat_mga_init,
40633 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40634 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40635@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40636 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40637 {
40638 unsigned int nr = DRM_IOCTL_NR(cmd);
40639- drm_ioctl_compat_t *fn = NULL;
40640 int ret;
40641
40642 if (nr < DRM_COMMAND_BASE)
40643 return drm_compat_ioctl(filp, cmd, arg);
40644
40645- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40646- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40647-
40648- if (fn != NULL)
40649+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40650+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40651 ret = (*fn) (filp, cmd, arg);
40652- else
40653+ } else
40654 ret = drm_ioctl(filp, cmd, arg);
40655
40656 return ret;
40657diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40658index 1b071b8..de8601a 100644
40659--- a/drivers/gpu/drm/mga/mga_irq.c
40660+++ b/drivers/gpu/drm/mga/mga_irq.c
40661@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40662 if (crtc != 0)
40663 return 0;
40664
40665- return atomic_read(&dev_priv->vbl_received);
40666+ return atomic_read_unchecked(&dev_priv->vbl_received);
40667 }
40668
40669
40670@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40671 /* VBLANK interrupt */
40672 if (status & MGA_VLINEPEN) {
40673 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40674- atomic_inc(&dev_priv->vbl_received);
40675+ atomic_inc_unchecked(&dev_priv->vbl_received);
40676 drm_handle_vblank(dev, 0);
40677 handled = 1;
40678 }
40679@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40680 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40681 MGA_WRITE(MGA_PRIMEND, prim_end);
40682
40683- atomic_inc(&dev_priv->last_fence_retired);
40684+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40685 wake_up(&dev_priv->fence_queue);
40686 handled = 1;
40687 }
40688@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40689 * using fences.
40690 */
40691 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40692- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40693+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40694 - *sequence) <= (1 << 23)));
40695
40696 *sequence = cur_fence;
40697diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40698index 7df6acc..84bbe52 100644
40699--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40700+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40701@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40702 struct bit_table {
40703 const char id;
40704 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40705-};
40706+} __no_const;
40707
40708 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40709
40710diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40711index 8ae36f2..1147a30 100644
40712--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40713+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40714@@ -121,7 +121,6 @@ struct nouveau_drm {
40715 struct drm_global_reference mem_global_ref;
40716 struct ttm_bo_global_ref bo_global_ref;
40717 struct ttm_bo_device bdev;
40718- atomic_t validate_sequence;
40719 int (*move)(struct nouveau_channel *,
40720 struct ttm_buffer_object *,
40721 struct ttm_mem_reg *, struct ttm_mem_reg *);
40722diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40723index 462679a..88e32a7 100644
40724--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40725+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40726@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40727 unsigned long arg)
40728 {
40729 unsigned int nr = DRM_IOCTL_NR(cmd);
40730- drm_ioctl_compat_t *fn = NULL;
40731+ drm_ioctl_compat_t fn = NULL;
40732 int ret;
40733
40734 if (nr < DRM_COMMAND_BASE)
40735diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40736index 3d1cfcb..0542700 100644
40737--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40738+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40739@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40740 }
40741
40742 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40743- nouveau_vram_manager_init,
40744- nouveau_vram_manager_fini,
40745- nouveau_vram_manager_new,
40746- nouveau_vram_manager_del,
40747- nouveau_vram_manager_debug
40748+ .init = nouveau_vram_manager_init,
40749+ .takedown = nouveau_vram_manager_fini,
40750+ .get_node = nouveau_vram_manager_new,
40751+ .put_node = nouveau_vram_manager_del,
40752+ .debug = nouveau_vram_manager_debug
40753 };
40754
40755 static int
40756@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40757 }
40758
40759 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40760- nouveau_gart_manager_init,
40761- nouveau_gart_manager_fini,
40762- nouveau_gart_manager_new,
40763- nouveau_gart_manager_del,
40764- nouveau_gart_manager_debug
40765+ .init = nouveau_gart_manager_init,
40766+ .takedown = nouveau_gart_manager_fini,
40767+ .get_node = nouveau_gart_manager_new,
40768+ .put_node = nouveau_gart_manager_del,
40769+ .debug = nouveau_gart_manager_debug
40770 };
40771
40772 /*XXX*/
40773@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40774 }
40775
40776 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40777- nv04_gart_manager_init,
40778- nv04_gart_manager_fini,
40779- nv04_gart_manager_new,
40780- nv04_gart_manager_del,
40781- nv04_gart_manager_debug
40782+ .init = nv04_gart_manager_init,
40783+ .takedown = nv04_gart_manager_fini,
40784+ .get_node = nv04_gart_manager_new,
40785+ .put_node = nv04_gart_manager_del,
40786+ .debug = nv04_gart_manager_debug
40787 };
40788
40789 int
40790diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40791index c7592ec..dd45ebc 100644
40792--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40793+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40794@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40795 * locking inversion with the driver load path. And the access here is
40796 * completely racy anyway. So don't bother with locking for now.
40797 */
40798- return dev->open_count == 0;
40799+ return local_read(&dev->open_count) == 0;
40800 }
40801
40802 static const struct vga_switcheroo_client_ops
40803diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40804index 9782364..89bd954 100644
40805--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40806+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40807@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40808 int ret;
40809
40810 mutex_lock(&qdev->async_io_mutex);
40811- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40812+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40813 if (qdev->last_sent_io_cmd > irq_num) {
40814 if (intr)
40815 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40816- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40817+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40818 else
40819 ret = wait_event_timeout(qdev->io_cmd_event,
40820- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40821+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40822 /* 0 is timeout, just bail the "hw" has gone away */
40823 if (ret <= 0)
40824 goto out;
40825- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40826+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40827 }
40828 outb(val, addr);
40829 qdev->last_sent_io_cmd = irq_num + 1;
40830 if (intr)
40831 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40832- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40833+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40834 else
40835 ret = wait_event_timeout(qdev->io_cmd_event,
40836- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40837+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40838 out:
40839 if (ret > 0)
40840 ret = 0;
40841diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40842index 6911b8c..89d6867 100644
40843--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40844+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40845@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40846 struct drm_info_node *node = (struct drm_info_node *) m->private;
40847 struct qxl_device *qdev = node->minor->dev->dev_private;
40848
40849- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40850- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40851- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40852- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40853+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40854+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40855+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40856+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40857 seq_printf(m, "%d\n", qdev->irq_received_error);
40858 return 0;
40859 }
40860diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40861index 7c6cafe..460f542 100644
40862--- a/drivers/gpu/drm/qxl/qxl_drv.h
40863+++ b/drivers/gpu/drm/qxl/qxl_drv.h
40864@@ -290,10 +290,10 @@ struct qxl_device {
40865 unsigned int last_sent_io_cmd;
40866
40867 /* interrupt handling */
40868- atomic_t irq_received;
40869- atomic_t irq_received_display;
40870- atomic_t irq_received_cursor;
40871- atomic_t irq_received_io_cmd;
40872+ atomic_unchecked_t irq_received;
40873+ atomic_unchecked_t irq_received_display;
40874+ atomic_unchecked_t irq_received_cursor;
40875+ atomic_unchecked_t irq_received_io_cmd;
40876 unsigned irq_received_error;
40877 wait_queue_head_t display_event;
40878 wait_queue_head_t cursor_event;
40879diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
40880index b110883..dd06418 100644
40881--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
40882+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
40883@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40884
40885 /* TODO copy slow path code from i915 */
40886 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
40887- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
40888+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
40889
40890 {
40891 struct qxl_drawable *draw = fb_cmd;
40892@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40893 struct drm_qxl_reloc reloc;
40894
40895 if (copy_from_user(&reloc,
40896- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
40897+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
40898 sizeof(reloc))) {
40899 ret = -EFAULT;
40900 goto out_free_bos;
40901@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40902
40903 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
40904
40905- struct drm_qxl_command *commands =
40906- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40907+ struct drm_qxl_command __user *commands =
40908+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
40909
40910- if (copy_from_user(&user_cmd, &commands[cmd_num],
40911+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40912 sizeof(user_cmd)))
40913 return -EFAULT;
40914
40915diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40916index 0bf1e20..42a7310 100644
40917--- a/drivers/gpu/drm/qxl/qxl_irq.c
40918+++ b/drivers/gpu/drm/qxl/qxl_irq.c
40919@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
40920 if (!pending)
40921 return IRQ_NONE;
40922
40923- atomic_inc(&qdev->irq_received);
40924+ atomic_inc_unchecked(&qdev->irq_received);
40925
40926 if (pending & QXL_INTERRUPT_DISPLAY) {
40927- atomic_inc(&qdev->irq_received_display);
40928+ atomic_inc_unchecked(&qdev->irq_received_display);
40929 wake_up_all(&qdev->display_event);
40930 qxl_queue_garbage_collect(qdev, false);
40931 }
40932 if (pending & QXL_INTERRUPT_CURSOR) {
40933- atomic_inc(&qdev->irq_received_cursor);
40934+ atomic_inc_unchecked(&qdev->irq_received_cursor);
40935 wake_up_all(&qdev->cursor_event);
40936 }
40937 if (pending & QXL_INTERRUPT_IO_CMD) {
40938- atomic_inc(&qdev->irq_received_io_cmd);
40939+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
40940 wake_up_all(&qdev->io_cmd_event);
40941 }
40942 if (pending & QXL_INTERRUPT_ERROR) {
40943@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
40944 init_waitqueue_head(&qdev->io_cmd_event);
40945 INIT_WORK(&qdev->client_monitors_config_work,
40946 qxl_client_monitors_config_work_func);
40947- atomic_set(&qdev->irq_received, 0);
40948- atomic_set(&qdev->irq_received_display, 0);
40949- atomic_set(&qdev->irq_received_cursor, 0);
40950- atomic_set(&qdev->irq_received_io_cmd, 0);
40951+ atomic_set_unchecked(&qdev->irq_received, 0);
40952+ atomic_set_unchecked(&qdev->irq_received_display, 0);
40953+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40954+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40955 qdev->irq_received_error = 0;
40956 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
40957 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40958diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40959index 0cbc4c9..0e46686 100644
40960--- a/drivers/gpu/drm/qxl/qxl_ttm.c
40961+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40962@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40963 }
40964 }
40965
40966-static struct vm_operations_struct qxl_ttm_vm_ops;
40967+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40968 static const struct vm_operations_struct *ttm_vm_ops;
40969
40970 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40971@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40972 return r;
40973 if (unlikely(ttm_vm_ops == NULL)) {
40974 ttm_vm_ops = vma->vm_ops;
40975+ pax_open_kernel();
40976 qxl_ttm_vm_ops = *ttm_vm_ops;
40977 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40978+ pax_close_kernel();
40979 }
40980 vma->vm_ops = &qxl_ttm_vm_ops;
40981 return 0;
40982@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40983 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40984 {
40985 #if defined(CONFIG_DEBUG_FS)
40986- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40987- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40988- unsigned i;
40989+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40990+ {
40991+ .name = "qxl_mem_mm",
40992+ .show = &qxl_mm_dump_table,
40993+ },
40994+ {
40995+ .name = "qxl_surf_mm",
40996+ .show = &qxl_mm_dump_table,
40997+ }
40998+ };
40999
41000- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41001- if (i == 0)
41002- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41003- else
41004- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41005- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41006- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41007- qxl_mem_types_list[i].driver_features = 0;
41008- if (i == 0)
41009- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41010- else
41011- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41012+ pax_open_kernel();
41013+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41014+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41015+ pax_close_kernel();
41016
41017- }
41018- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41019+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41020 #else
41021 return 0;
41022 #endif
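The qxl_ttm_debugfs_init() rewrite above replaces sprintf()-built names and a runtime fill loop with static designated initializers, leaving only the two .data pointers to patch at runtime inside a pax_open_kernel() window. A simplified stand-in for that structure; in the real patch the member is const-qualified by the plugin, which is why the write goes through a cast.

#include <stdio.h>

struct info_list_sketch {
	const char *name;
	int (*show)(void);
	void *data;	/* const under the constify plugin */
};

static int mm_dump_table(void)
{
	return 0;
}

static struct info_list_sketch mem_types_list[] = {
	{ .name = "qxl_mem_mm",  .show = mm_dump_table },
	{ .name = "qxl_surf_mm", .show = mm_dump_table },
};

int main(void)
{
	static int vram_priv, surf_priv;

	/* The only runtime writes left; in the patch these sit between
	 * pax_open_kernel() and pax_close_kernel(). */
	*(void **)&mem_types_list[0].data = &vram_priv;
	*(void **)&mem_types_list[1].data = &surf_priv;

	printf("%s %s\n", mem_types_list[0].name, mem_types_list[1].name);
	return 0;
}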
41023diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41024index 2c45ac9..5d740f8 100644
41025--- a/drivers/gpu/drm/r128/r128_cce.c
41026+++ b/drivers/gpu/drm/r128/r128_cce.c
41027@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41028
41029 /* GH: Simple idle check.
41030 */
41031- atomic_set(&dev_priv->idle_count, 0);
41032+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41033
41034 /* We don't support anything other than bus-mastering ring mode,
41035 * but the ring can be in either AGP or PCI space for the ring
41036diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41037index 723e5d6..102dbaf 100644
41038--- a/drivers/gpu/drm/r128/r128_drv.h
41039+++ b/drivers/gpu/drm/r128/r128_drv.h
41040@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41041 int is_pci;
41042 unsigned long cce_buffers_offset;
41043
41044- atomic_t idle_count;
41045+ atomic_unchecked_t idle_count;
41046
41047 int page_flipping;
41048 int current_page;
41049 u32 crtc_offset;
41050 u32 crtc_offset_cntl;
41051
41052- atomic_t vbl_received;
41053+ atomic_unchecked_t vbl_received;
41054
41055 u32 color_fmt;
41056 unsigned int front_offset;
41057diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41058index 663f38c..c689495 100644
41059--- a/drivers/gpu/drm/r128/r128_ioc32.c
41060+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41061@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41062 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41063 }
41064
41065-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41066+drm_ioctl_compat_t r128_compat_ioctls[] = {
41067 [DRM_R128_INIT] = compat_r128_init,
41068 [DRM_R128_DEPTH] = compat_r128_depth,
41069 [DRM_R128_STIPPLE] = compat_r128_stipple,
41070@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41071 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41072 {
41073 unsigned int nr = DRM_IOCTL_NR(cmd);
41074- drm_ioctl_compat_t *fn = NULL;
41075 int ret;
41076
41077 if (nr < DRM_COMMAND_BASE)
41078 return drm_compat_ioctl(filp, cmd, arg);
41079
41080- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41081- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41082-
41083- if (fn != NULL)
41084+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41085+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41086 ret = (*fn) (filp, cmd, arg);
41087- else
41088+ } else
41089 ret = drm_ioctl(filp, cmd, arg);
41090
41091 return ret;
41092diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41093index c2ae496..30b5993 100644
41094--- a/drivers/gpu/drm/r128/r128_irq.c
41095+++ b/drivers/gpu/drm/r128/r128_irq.c
41096@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41097 if (crtc != 0)
41098 return 0;
41099
41100- return atomic_read(&dev_priv->vbl_received);
41101+ return atomic_read_unchecked(&dev_priv->vbl_received);
41102 }
41103
41104 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41105@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41106 /* VBLANK interrupt */
41107 if (status & R128_CRTC_VBLANK_INT) {
41108 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41109- atomic_inc(&dev_priv->vbl_received);
41110+ atomic_inc_unchecked(&dev_priv->vbl_received);
41111 drm_handle_vblank(dev, 0);
41112 return IRQ_HANDLED;
41113 }
41114diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41115index 8fd2d9f..18c9660 100644
41116--- a/drivers/gpu/drm/r128/r128_state.c
41117+++ b/drivers/gpu/drm/r128/r128_state.c
41118@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41119
41120 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41121 {
41122- if (atomic_read(&dev_priv->idle_count) == 0)
41123+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41124 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41125 else
41126- atomic_set(&dev_priv->idle_count, 0);
41127+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41128 }
41129
41130 #endif
41131diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41132index b928c17..e5d9400 100644
41133--- a/drivers/gpu/drm/radeon/mkregtable.c
41134+++ b/drivers/gpu/drm/radeon/mkregtable.c
41135@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41136 regex_t mask_rex;
41137 regmatch_t match[4];
41138 char buf[1024];
41139- size_t end;
41140+ long end;
41141 int len;
41142 int done = 0;
41143 int r;
41144 unsigned o;
41145 struct offset *offset;
41146 char last_reg_s[10];
41147- int last_reg;
41148+ unsigned long last_reg;
41149
41150 if (regcomp
41151 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41152diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41153index bd7519f..e1c2cd95 100644
41154--- a/drivers/gpu/drm/radeon/radeon_device.c
41155+++ b/drivers/gpu/drm/radeon/radeon_device.c
41156@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41157 * locking inversion with the driver load path. And the access here is
41158 * completely racy anyway. So don't bother with locking for now.
41159 */
41160- return dev->open_count == 0;
41161+ return local_read(&dev->open_count) == 0;
41162 }
41163
41164 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41165diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41166index 46bd393..6ae4719 100644
41167--- a/drivers/gpu/drm/radeon/radeon_drv.h
41168+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41169@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41170
41171 /* SW interrupt */
41172 wait_queue_head_t swi_queue;
41173- atomic_t swi_emitted;
41174+ atomic_unchecked_t swi_emitted;
41175 int vblank_crtc;
41176 uint32_t irq_enable_reg;
41177 uint32_t r500_disp_irq_reg;
41178diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41179index 0b98ea1..0881827 100644
41180--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41181+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41182@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41183 request = compat_alloc_user_space(sizeof(*request));
41184 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41185 || __put_user(req32.param, &request->param)
41186- || __put_user((void __user *)(unsigned long)req32.value,
41187+ || __put_user((unsigned long)req32.value,
41188 &request->value))
41189 return -EFAULT;
41190
41191@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41192 #define compat_radeon_cp_setparam NULL
41193 #endif /* X86_64 || IA64 */
41194
41195-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41196+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41197 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41198 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41199 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41200@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41201 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41202 {
41203 unsigned int nr = DRM_IOCTL_NR(cmd);
41204- drm_ioctl_compat_t *fn = NULL;
41205 int ret;
41206
41207 if (nr < DRM_COMMAND_BASE)
41208 return drm_compat_ioctl(filp, cmd, arg);
41209
41210- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41211- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41212-
41213- if (fn != NULL)
41214+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41215+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41216 ret = (*fn) (filp, cmd, arg);
41217- else
41218+ } else
41219 ret = drm_ioctl(filp, cmd, arg);
41220
41221 return ret;
41222diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41223index 244b19b..c19226d 100644
41224--- a/drivers/gpu/drm/radeon/radeon_irq.c
41225+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41226@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41227 unsigned int ret;
41228 RING_LOCALS;
41229
41230- atomic_inc(&dev_priv->swi_emitted);
41231- ret = atomic_read(&dev_priv->swi_emitted);
41232+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41233+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41234
41235 BEGIN_RING(4);
41236 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41237@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41238 drm_radeon_private_t *dev_priv =
41239 (drm_radeon_private_t *) dev->dev_private;
41240
41241- atomic_set(&dev_priv->swi_emitted, 0);
41242+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41243 init_waitqueue_head(&dev_priv->swi_queue);
41244
41245 dev->max_vblank_count = 0x001fffff;
41246diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41247index 15aee72..cda326e 100644
41248--- a/drivers/gpu/drm/radeon/radeon_state.c
41249+++ b/drivers/gpu/drm/radeon/radeon_state.c
41250@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41251 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41252 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41253
41254- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41255+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41256 sarea_priv->nbox * sizeof(depth_boxes[0])))
41257 return -EFAULT;
41258
41259@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41260 {
41261 drm_radeon_private_t *dev_priv = dev->dev_private;
41262 drm_radeon_getparam_t *param = data;
41263- int value;
41264+ int value = 0;
41265
41266 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41267
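Two genuine bug fixes sit in the radeon_state.c hunks: radeon_cp_clear() re-validates nbox in the same expression as the copy, so a value raced up through the shared SAREA cannot overflow the fixed-size stack array, and radeon_cp_getparam() zero-initializes 'value' so an unknown parameter cannot leak uninitialized stack to userspace. The sketch below demonstrates the underlying rule, validate the same value you use for the copy length, with invented names and constants:

#include <stdio.h>
#include <string.h>

#define NR_CLIPRECTS 12

struct rect { int x1, y1, x2, y2; };

/* Stand-in for copy_from_user(): returns 0 on success. */
static int copy_from_user_sketch(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int clear_sketch(volatile unsigned int *shared_nbox,
			const struct rect *user_boxes)
{
	struct rect depth_boxes[NR_CLIPRECTS];
	unsigned int nbox = *shared_nbox;	/* may change under us */

	/* Check the snapshot, then size the copy from the same
	 * snapshot, so a racing writer cannot widen the copy. */
	if (nbox > NR_CLIPRECTS ||
	    copy_from_user_sketch(depth_boxes, user_boxes,
				  nbox * sizeof(depth_boxes[0])))
		return -1;

	printf("copied %u boxes\n", nbox);
	return 0;
}

int main(void)
{
	struct rect boxes[NR_CLIPRECTS] = { { 0, 0, 1, 1 } };
	unsigned int nbox = 1;

	return clear_sketch(&nbox, boxes);
}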
41268diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41269index d02aa1d..ca19e2c 100644
41270--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41271+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41272@@ -959,7 +959,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41273 man->size = size >> PAGE_SHIFT;
41274 }
41275
41276-static struct vm_operations_struct radeon_ttm_vm_ops;
41277+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41278 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41279
41280 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41281@@ -1000,8 +1000,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41282 }
41283 if (unlikely(ttm_vm_ops == NULL)) {
41284 ttm_vm_ops = vma->vm_ops;
41285+ pax_open_kernel();
41286 radeon_ttm_vm_ops = *ttm_vm_ops;
41287 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41288+ pax_close_kernel();
41289 }
41290 vma->vm_ops = &radeon_ttm_vm_ops;
41291 return 0;
41292diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41293index 978993f..e36e50e 100644
41294--- a/drivers/gpu/drm/tegra/dc.c
41295+++ b/drivers/gpu/drm/tegra/dc.c
41296@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41297 }
41298
41299 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41300- dc->debugfs_files[i].data = dc;
41301+ *(void **)&dc->debugfs_files[i].data = dc;
41302
41303 err = drm_debugfs_create_files(dc->debugfs_files,
41304 ARRAY_SIZE(debugfs_files),
41305diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41306index 33f67fd..55ee9761 100644
41307--- a/drivers/gpu/drm/tegra/dsi.c
41308+++ b/drivers/gpu/drm/tegra/dsi.c
41309@@ -39,7 +39,7 @@ struct tegra_dsi {
41310 struct clk *clk_lp;
41311 struct clk *clk;
41312
41313- struct drm_info_list *debugfs_files;
41314+ drm_info_list_no_const *debugfs_files;
41315 struct drm_minor *minor;
41316 struct dentry *debugfs;
41317
41318diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41319index ffe2654..03c7b1c 100644
41320--- a/drivers/gpu/drm/tegra/hdmi.c
41321+++ b/drivers/gpu/drm/tegra/hdmi.c
41322@@ -60,7 +60,7 @@ struct tegra_hdmi {
41323 bool stereo;
41324 bool dvi;
41325
41326- struct drm_info_list *debugfs_files;
41327+ drm_info_list_no_const *debugfs_files;
41328 struct drm_minor *minor;
41329 struct dentry *debugfs;
41330 };
41331diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41332index aa0bd054..aea6a01 100644
41333--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41334+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41335@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41336 }
41337
41338 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41339- ttm_bo_man_init,
41340- ttm_bo_man_takedown,
41341- ttm_bo_man_get_node,
41342- ttm_bo_man_put_node,
41343- ttm_bo_man_debug
41344+ .init = ttm_bo_man_init,
41345+ .takedown = ttm_bo_man_takedown,
41346+ .get_node = ttm_bo_man_get_node,
41347+ .put_node = ttm_bo_man_put_node,
41348+ .debug = ttm_bo_man_debug
41349 };
41350 EXPORT_SYMBOL(ttm_bo_manager_func);
41351diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41352index a1803fb..c53f6b0 100644
41353--- a/drivers/gpu/drm/ttm/ttm_memory.c
41354+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41355@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41356 zone->glob = glob;
41357 glob->zone_kernel = zone;
41358 ret = kobject_init_and_add(
41359- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41360+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41361 if (unlikely(ret != 0)) {
41362 kobject_put(&zone->kobj);
41363 return ret;
41364@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41365 zone->glob = glob;
41366 glob->zone_dma32 = zone;
41367 ret = kobject_init_and_add(
41368- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41369+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41370 if (unlikely(ret != 0)) {
41371 kobject_put(&zone->kobj);
41372 return ret;
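The ttm_memory.c hunks fix a classic format-string bug: kobject_init_and_add() treats its trailing arguments as a printf-style format, so zone->name must be passed as the argument of "%s", never as the format itself. A small demonstration of the difference, with an invented helper standing in for the kobject call:

#include <stdarg.h>
#include <stdio.h>

static void add_kobject_sketch(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);	/* stands in for kobject name formatting */
	va_end(ap);
	putchar('\n');
}

int main(void)
{
	const char *zone_name = "dma32 %n zone";	/* hostile name */

	/* Wrong: the name would be interpreted as a format string:
	 * add_kobject_sketch(zone_name); */

	/* Right, as in the patch: */
	add_kobject_sketch("%s", zone_name);
	return 0;
}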
41373diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41374index 025c429..314062f 100644
41375--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41376+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41377@@ -54,7 +54,7 @@
41378
41379 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41380 #define SMALL_ALLOCATION 16
41381-#define FREE_ALL_PAGES (~0U)
41382+#define FREE_ALL_PAGES (~0UL)
41383 /* times are in msecs */
41384 #define PAGE_FREE_INTERVAL 1000
41385
41386@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41387 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
41388 * @use_static: Safe to use static buffer
41389 **/
41390-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41391+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41392 bool use_static)
41393 {
41394 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41395 unsigned long irq_flags;
41396 struct page *p;
41397 struct page **pages_to_free;
41398- unsigned freed_pages = 0,
41399- npages_to_free = nr_free;
41400+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41401
41402 if (NUM_PAGES_TO_ALLOC < nr_free)
41403 npages_to_free = NUM_PAGES_TO_ALLOC;
41404@@ -371,7 +370,8 @@ restart:
41405 __list_del(&p->lru, &pool->list);
41406
41407 ttm_pool_update_free_locked(pool, freed_pages);
41408- nr_free -= freed_pages;
41409+ if (likely(nr_free != FREE_ALL_PAGES))
41410+ nr_free -= freed_pages;
41411 }
41412
41413 spin_unlock_irqrestore(&pool->lock, irq_flags);
41414@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41415 unsigned i;
41416 unsigned pool_offset;
41417 struct ttm_page_pool *pool;
41418- int shrink_pages = sc->nr_to_scan;
41419+ unsigned long shrink_pages = sc->nr_to_scan;
41420 unsigned long freed = 0;
41421
41422 if (!mutex_trylock(&lock))
41423@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41424 pool_offset = ++start_pool % NUM_POOLS;
41425 /* select start pool in round robin fashion */
41426 for (i = 0; i < NUM_POOLS; ++i) {
41427- unsigned nr_free = shrink_pages;
41428+ unsigned long nr_free = shrink_pages;
41429 if (shrink_pages == 0)
41430 break;
41431 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41432@@ -673,7 +673,7 @@ out:
41433 }
41434
41435 /* Put all pages in pages list to correct pool to wait for reuse */
41436-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41437+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41438 enum ttm_caching_state cstate)
41439 {
41440 unsigned long irq_flags;
41441@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41442 struct list_head plist;
41443 struct page *p = NULL;
41444 gfp_t gfp_flags = GFP_USER;
41445- unsigned count;
41446+ unsigned long count;
41447 int r;
41448
41449 /* set zero flag for page allocation if required */
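
The ttm_page_alloc.c changes widen the page counters to unsigned long, turn FREE_ALL_PAGES into ~0UL so the sentinel value survives on 64-bit targets, and stop subtracting freed_pages from nr_free when the caller asked for everything; otherwise the sentinel silently decays into a finite count and the free loop stops early. (The ttm_page_alloc_dma.c hunks below make the identical change.) A compilable sketch of both hazards:

/* Why the sentinel must not be decremented, and why ~0U -> ~0UL matters.
 * Compilable sketch for an LP64 target. */
#include <stdio.h>

#define FREE_ALL_PAGES (~0UL)

int main(void)
{
	unsigned long nr_free = FREE_ALL_PAGES;	/* request: free everything */
	unsigned long freed = 16;

	/* The guard added above: without it the sentinel silently decays
	 * into a finite count and the outer free loop terminates early. */
	if (nr_free != FREE_ALL_PAGES)
		nr_free -= freed;
	printf("still freeing all: %d\n", nr_free == FREE_ALL_PAGES);

	/* Truncation hazard avoided by ~0UL: a 32-bit copy of the old
	 * sentinel never compares equal to an unsigned long again. */
	unsigned truncated = (unsigned)FREE_ALL_PAGES;
	printf("sentinel survives a 32-bit round-trip: %d\n",
	       (unsigned long)truncated == FREE_ALL_PAGES);
	return 0;
}
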
41450diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41451index 01e1d27..aaa018a 100644
41452--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41453+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41454@@ -56,7 +56,7 @@
41455
41456 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41457 #define SMALL_ALLOCATION 4
41458-#define FREE_ALL_PAGES (~0U)
41459+#define FREE_ALL_PAGES (~0UL)
41460 /* times are in msecs */
41461 #define IS_UNDEFINED (0)
41462 #define IS_WC (1<<1)
41463@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41464 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
41465 * @use_static: Safe to use static buffer
41466 **/
41467-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41468+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41469 bool use_static)
41470 {
41471 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41472@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41473 struct dma_page *dma_p, *tmp;
41474 struct page **pages_to_free;
41475 struct list_head d_pages;
41476- unsigned freed_pages = 0,
41477- npages_to_free = nr_free;
41478+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41479
41480 if (NUM_PAGES_TO_ALLOC < nr_free)
41481 npages_to_free = NUM_PAGES_TO_ALLOC;
41482@@ -499,7 +498,8 @@ restart:
41483 /* remove range of pages from the pool */
41484 if (freed_pages) {
41485 ttm_pool_update_free_locked(pool, freed_pages);
41486- nr_free -= freed_pages;
41487+ if (likely(nr_free != FREE_ALL_PAGES))
41488+ nr_free -= freed_pages;
41489 }
41490
41491 spin_unlock_irqrestore(&pool->lock, irq_flags);
41492@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41493 struct dma_page *d_page, *next;
41494 enum pool_type type;
41495 bool is_cached = false;
41496- unsigned count = 0, i, npages = 0;
41497+ unsigned long count = 0, i, npages = 0;
41498 unsigned long irq_flags;
41499
41500 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41501@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41502 static unsigned start_pool;
41503 unsigned idx = 0;
41504 unsigned pool_offset;
41505- unsigned shrink_pages = sc->nr_to_scan;
41506+ unsigned long shrink_pages = sc->nr_to_scan;
41507 struct device_pools *p;
41508 unsigned long freed = 0;
41509
41510@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41511 goto out;
41512 pool_offset = ++start_pool % _manager->npools;
41513 list_for_each_entry(p, &_manager->pools, pools) {
41514- unsigned nr_free;
41515+ unsigned long nr_free;
41516
41517 if (!p->dev)
41518 continue;
41519@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41520 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41521 freed += nr_free - shrink_pages;
41522
41523- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41524+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41525 p->pool->dev_name, p->pool->name, current->pid,
41526 nr_free, shrink_pages);
41527 }
41528diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41529index 8cbcb45..a4d9cf7 100644
41530--- a/drivers/gpu/drm/udl/udl_fb.c
41531+++ b/drivers/gpu/drm/udl/udl_fb.c
41532@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41533 fb_deferred_io_cleanup(info);
41534 kfree(info->fbdefio);
41535 info->fbdefio = NULL;
41536- info->fbops->fb_mmap = udl_fb_mmap;
41537 }
41538
41539 pr_warn("released /dev/fb%d user=%d count=%d\n",
41540diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41541index ef8c500..01030c8 100644
41542--- a/drivers/gpu/drm/via/via_drv.h
41543+++ b/drivers/gpu/drm/via/via_drv.h
41544@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41545 typedef uint32_t maskarray_t[5];
41546
41547 typedef struct drm_via_irq {
41548- atomic_t irq_received;
41549+ atomic_unchecked_t irq_received;
41550 uint32_t pending_mask;
41551 uint32_t enable_mask;
41552 wait_queue_head_t irq_queue;
41553@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41554 struct timeval last_vblank;
41555 int last_vblank_valid;
41556 unsigned usec_per_vblank;
41557- atomic_t vbl_received;
41558+ atomic_unchecked_t vbl_received;
41559 drm_via_state_t hc_state;
41560 char pci_buf[VIA_PCI_BUF_SIZE];
41561 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
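
The via hunks introduce the pattern that recurs through the rest of this section (vmwgfx's marker_seq, hyper-v's trans_id, the infiniband counters): under the PaX REFCOUNT feature, ordinary atomic_t arithmetic traps on overflow to defeat reference-count overflow exploits, while statistics such as vbl_received and irq_received may legitimately wrap, so they move to atomic_unchecked_t and the *_unchecked operations, which keep plain modular semantics. A toy model of the distinction; this is an illustration, not the kernel implementation:

/* Checked vs. unchecked counters, user-space model. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX)	/* would overflow: treat as a bug */
		abort();		/* the kernel logs and kills the task */
	v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;			/* wraparound is acceptable here */
}

int main(void)
{
	atomic_t refcount = { 1 };
	atomic_unchecked_t vbl_received = { 0 };

	atomic_inc(&refcount);			/* trapped if it would wrap */
	atomic_inc_unchecked(&vbl_received);	/* free-running statistic */
	printf("refs=%d vblanks=%d\n", refcount.counter, vbl_received.counter);
	return 0;
}
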
41562diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41563index 1319433..a993b0c 100644
41564--- a/drivers/gpu/drm/via/via_irq.c
41565+++ b/drivers/gpu/drm/via/via_irq.c
41566@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41567 if (crtc != 0)
41568 return 0;
41569
41570- return atomic_read(&dev_priv->vbl_received);
41571+ return atomic_read_unchecked(&dev_priv->vbl_received);
41572 }
41573
41574 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41575@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41576
41577 status = VIA_READ(VIA_REG_INTERRUPT);
41578 if (status & VIA_IRQ_VBLANK_PENDING) {
41579- atomic_inc(&dev_priv->vbl_received);
41580- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41581+ atomic_inc_unchecked(&dev_priv->vbl_received);
41582+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41583 do_gettimeofday(&cur_vblank);
41584 if (dev_priv->last_vblank_valid) {
41585 dev_priv->usec_per_vblank =
41586@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41587 dev_priv->last_vblank = cur_vblank;
41588 dev_priv->last_vblank_valid = 1;
41589 }
41590- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41591+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41592 DRM_DEBUG("US per vblank is: %u\n",
41593 dev_priv->usec_per_vblank);
41594 }
41595@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41596
41597 for (i = 0; i < dev_priv->num_irqs; ++i) {
41598 if (status & cur_irq->pending_mask) {
41599- atomic_inc(&cur_irq->irq_received);
41600+ atomic_inc_unchecked(&cur_irq->irq_received);
41601 wake_up(&cur_irq->irq_queue);
41602 handled = 1;
41603 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41604@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41605 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41606 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41607 masks[irq][4]));
41608- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41609+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41610 } else {
41611 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41612 (((cur_irq_sequence =
41613- atomic_read(&cur_irq->irq_received)) -
41614+ atomic_read_unchecked(&cur_irq->irq_received)) -
41615 *sequence) <= (1 << 23)));
41616 }
41617 *sequence = cur_irq_sequence;
41618@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41619 }
41620
41621 for (i = 0; i < dev_priv->num_irqs; ++i) {
41622- atomic_set(&cur_irq->irq_received, 0);
41623+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41624 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41625 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41626 init_waitqueue_head(&cur_irq->irq_queue);
41627@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41628 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41629 case VIA_IRQ_RELATIVE:
41630 irqwait->request.sequence +=
41631- atomic_read(&cur_irq->irq_received);
41632+ atomic_read_unchecked(&cur_irq->irq_received);
41633 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41634 case VIA_IRQ_ABSOLUTE:
41635 break;
41636diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41637index d26a6da..5fa41ed 100644
41638--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41639+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41640@@ -447,7 +447,7 @@ struct vmw_private {
41641 * Fencing and IRQs.
41642 */
41643
41644- atomic_t marker_seq;
41645+ atomic_unchecked_t marker_seq;
41646 wait_queue_head_t fence_queue;
41647 wait_queue_head_t fifo_queue;
41648 spinlock_t waiter_lock;
41649diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41650index 39f2b03..d1b0a64 100644
41651--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41652+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41653@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41654 (unsigned int) min,
41655 (unsigned int) fifo->capabilities);
41656
41657- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41658+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41659 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41660 vmw_marker_queue_init(&fifo->marker_queue);
41661 return vmw_fifo_send_fence(dev_priv, &dummy);
41662@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41663 if (reserveable)
41664 iowrite32(bytes, fifo_mem +
41665 SVGA_FIFO_RESERVED);
41666- return fifo_mem + (next_cmd >> 2);
41667+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41668 } else {
41669 need_bounce = true;
41670 }
41671@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41672
41673 fm = vmw_fifo_reserve(dev_priv, bytes);
41674 if (unlikely(fm == NULL)) {
41675- *seqno = atomic_read(&dev_priv->marker_seq);
41676+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41677 ret = -ENOMEM;
41678 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41679 false, 3*HZ);
41680@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41681 }
41682
41683 do {
41684- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41685+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41686 } while (*seqno == 0);
41687
41688 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41689diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41690index 170b61b..fec7348 100644
41691--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41692+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41693@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41694 }
41695
41696 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41697- vmw_gmrid_man_init,
41698- vmw_gmrid_man_takedown,
41699- vmw_gmrid_man_get_node,
41700- vmw_gmrid_man_put_node,
41701- vmw_gmrid_man_debug
41702+ .init = vmw_gmrid_man_init,
41703+ .takedown = vmw_gmrid_man_takedown,
41704+ .get_node = vmw_gmrid_man_get_node,
41705+ .put_node = vmw_gmrid_man_put_node,
41706+ .debug = vmw_gmrid_man_debug
41707 };
41708diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41709index 69c8ce2..cacb0ab 100644
41710--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41711+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41712@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41713 int ret;
41714
41715 num_clips = arg->num_clips;
41716- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41717+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41718
41719 if (unlikely(num_clips == 0))
41720 return 0;
41721@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41722 int ret;
41723
41724 num_clips = arg->num_clips;
41725- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41726+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41727
41728 if (unlikely(num_clips == 0))
41729 return 0;
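
vmwgfx_ioctl.c tags clips_ptr, reconstructed from a userspace-supplied u64, with the sparse __user address-space qualifier (i2c-dev.c later in the section uses the related __force_user cast). The qualifier costs nothing at compile time, but sparse uses it to reject direct dereferences of userspace pointers. A self-contained sketch with a stubbed copy_from_user(); names mirror the hunk, everything else is illustrative:

/* For the compiler __user is empty; sparse defines it as
 * noderef/address_space(1), making '*clips' a static-analysis error. */
#include <string.h>

#define __user	/* sparse: __attribute__((noderef, address_space(1))) */

struct drm_vmw_rect { int x, y, w, h; };

/* Stand-in for the kernel helper that safely crosses the boundary. */
static unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n)
{
	memcpy(to, from, n);	/* the real helper validates 'from' first */
	return 0;
}

static long present_ioctl(unsigned long long clips_uaddr,
			  struct drm_vmw_rect *out)
{
	/* Tag the integer from userspace as a __user pointer, as above. */
	struct drm_vmw_rect __user *clips =
		(struct drm_vmw_rect __user *)(unsigned long)clips_uaddr;

	/* Dereferencing clips directly would be flagged by sparse. */
	return copy_from_user(out, clips, sizeof(*out)) ? -14 /* EFAULT */ : 0;
}
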
41730diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41731index 9fe9827..0aa2fc0 100644
41732--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41733+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41734@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41735 * emitted. Then the fence is stale and signaled.
41736 */
41737
41738- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41739+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41740 > VMW_FENCE_WRAP);
41741
41742 return ret;
41743@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41744
41745 if (fifo_idle)
41746 down_read(&fifo_state->rwsem);
41747- signal_seq = atomic_read(&dev_priv->marker_seq);
41748+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41749 ret = 0;
41750
41751 for (;;) {
41752diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41753index efd1ffd..0ae13ca 100644
41754--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41755+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41756@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41757 while (!vmw_lag_lt(queue, us)) {
41758 spin_lock(&queue->lock);
41759 if (list_empty(&queue->head))
41760- seqno = atomic_read(&dev_priv->marker_seq);
41761+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41762 else {
41763 marker = list_first_entry(&queue->head,
41764 struct vmw_marker, head);
41765diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41766index 37ac7b5..d52a5c9 100644
41767--- a/drivers/gpu/vga/vga_switcheroo.c
41768+++ b/drivers/gpu/vga/vga_switcheroo.c
41769@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41770
41771 /* this version is for the case where the power switch is separate
41772 from the device being powered down. */
41773-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41774+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41775 {
41776 /* copy over all the bus versions */
41777 if (dev->bus && dev->bus->pm) {
41778@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41779 return ret;
41780 }
41781
41782-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41783+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41784 {
41785 /* copy over all the bus versions */
41786 if (dev->bus && dev->bus->pm) {
41787diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41788index 8b63879..a5a5e72 100644
41789--- a/drivers/hid/hid-core.c
41790+++ b/drivers/hid/hid-core.c
41791@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41792
41793 int hid_add_device(struct hid_device *hdev)
41794 {
41795- static atomic_t id = ATOMIC_INIT(0);
41796+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41797 int ret;
41798
41799 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41800@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
41801 /* XXX hack, any other cleaner solution after the driver core
41802 * is converted to allow more than 20 bytes as the device name? */
41803 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41804- hdev->vendor, hdev->product, atomic_inc_return(&id));
41805+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41806
41807 hid_debug_register(hdev, dev_name(&hdev->dev));
41808 ret = device_add(&hdev->dev);
41809diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
41810index 5bc6d80..e47b55a 100644
41811--- a/drivers/hid/hid-logitech-dj.c
41812+++ b/drivers/hid/hid-logitech-dj.c
41813@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
41814 * case we forward it to the correct hid device (via hid_input_report()
41815 * ) and return 1 so hid-core does not do anything else with it.
41816 */
41817+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41818+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41819+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
41820+ __func__, dj_report->device_index);
41821+ return false;
41822+ }
41823
41824 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41825 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
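
The hid-logitech-dj.c hunk validates device_index, a value taken straight from a USB report, against DJ_DEVICE_INDEX_MIN/MAX before it is ever used to index the per-receiver device array, so a malicious or corrupted report is logged and dropped instead of causing an out-of-bounds access. A sketch; the 1..6 bounds are assumed from the driver, the rest is illustrative:

/* Validate a device-supplied index before using it as a subscript. */
#include <stdbool.h>
#include <stdio.h>

#define DJ_DEVICE_INDEX_MIN 1
#define DJ_DEVICE_INDEX_MAX 6

static void *paired_devices[DJ_DEVICE_INDEX_MAX + 1];

static bool route_report(unsigned int device_index)
{
	if ((device_index < DJ_DEVICE_INDEX_MIN) ||
	    (device_index > DJ_DEVICE_INDEX_MAX)) {
		fprintf(stderr, "invalid device index: %u\n", device_index);
		return false;			/* drop the report */
	}
	return paired_devices[device_index] != NULL;
}
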
41826diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41827index c13fb5b..55a3802 100644
41828--- a/drivers/hid/hid-wiimote-debug.c
41829+++ b/drivers/hid/hid-wiimote-debug.c
41830@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41831 else if (size == 0)
41832 return -EIO;
41833
41834- if (copy_to_user(u, buf, size))
41835+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41836 return -EFAULT;
41837
41838 *off += size;
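
hid-wiimote-debug.c clamps the read size to the on-stack buffer before copy_to_user(); without the size > sizeof(buf) guard, a large read request would leak adjacent kernel stack to userspace. A sketch with copy_to_user() stubbed and an assumed buffer size:

/* Clamp a user-controlled length to the source buffer before copying out. */
#include <errno.h>
#include <string.h>
#include <sys/types.h>

static int copyout(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* the real copy_to_user() also validates dst */
	return 0;
}

static ssize_t read_eeprom(char *user_buf, size_t size)
{
	char buf[16] = { 0 };	/* fixed scratch buffer, as in the driver */

	/* Without the size check, size > sizeof(buf) leaks whatever sits
	 * beyond buf on the stack. */
	if (size > sizeof(buf) || copyout(user_buf, buf, size))
		return -EFAULT;
	return size;
}
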
41839diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41840index 433f72a..2926005 100644
41841--- a/drivers/hv/channel.c
41842+++ b/drivers/hv/channel.c
41843@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41844 unsigned long flags;
41845 int ret = 0;
41846
41847- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41848- atomic_inc(&vmbus_connection.next_gpadl_handle);
41849+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41850+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41851
41852 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41853 if (ret)
41854diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41855index 3e4235c..877d0e5 100644
41856--- a/drivers/hv/hv.c
41857+++ b/drivers/hv/hv.c
41858@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41859 u64 output_address = (output) ? virt_to_phys(output) : 0;
41860 u32 output_address_hi = output_address >> 32;
41861 u32 output_address_lo = output_address & 0xFFFFFFFF;
41862- void *hypercall_page = hv_context.hypercall_page;
41863+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
41864
41865 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
41866 "=a"(hv_status_lo) : "d" (control_hi),
41867@@ -156,7 +156,7 @@ int hv_init(void)
41868 /* See if the hypercall page is already set */
41869 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
41870
41871- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
41872+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
41873
41874 if (!virtaddr)
41875 goto cleanup;
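
hv.c maps the hypercall page PAGE_KERNEL_RX instead of PAGE_KERNEL_EXEC, i.e. executable but no longer writable, so code the kernel will later call through cannot be rewritten at runtime; this is the W^X discipline KERNEXEC enforces. The same discipline in a user-space x86-64 sketch, purely illustrative:

/* W^X: write the code while the page is writable, then drop the write
 * bit before it is ever executed. Assumes x86-64 and a 4096-byte page. */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	static const unsigned char code[] = {
		0xb8, 0x2a, 0x00, 0x00, 0x00,	/* mov eax, 42 */
		0xc3				/* ret */
	};
	unsigned char *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;
	memcpy(page, code, sizeof(code));

	/* RX, never RWX: the analogue of PAGE_KERNEL_RX above. */
	mprotect(page, 4096, PROT_READ | PROT_EXEC);

	int (*fn)(void) = (int (*)(void))page;	/* POSIX-typical cast */
	return fn() == 42 ? 0 : 1;
}
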
41876diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
41877index b958ded..b2452bb 100644
41878--- a/drivers/hv/hv_balloon.c
41879+++ b/drivers/hv/hv_balloon.c
41880@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
41881
41882 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
41883 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
41884-static atomic_t trans_id = ATOMIC_INIT(0);
41885+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
41886
41887 static int dm_ring_size = (5 * PAGE_SIZE);
41888
41889@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
41890 pr_info("Memory hot add failed\n");
41891
41892 dm->state = DM_INITIALIZED;
41893- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41894+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41895 vmbus_sendpacket(dm->dev->channel, &resp,
41896 sizeof(struct dm_hot_add_response),
41897 (unsigned long)NULL,
41898@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
41899 memset(&status, 0, sizeof(struct dm_status));
41900 status.hdr.type = DM_STATUS_REPORT;
41901 status.hdr.size = sizeof(struct dm_status);
41902- status.hdr.trans_id = atomic_inc_return(&trans_id);
41903+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41904
41905 /*
41906 * The host expects the guest to report free memory.
41907@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
41908 * send the status. This can happen if we were interrupted
41909 * after we picked our transaction ID.
41910 */
41911- if (status.hdr.trans_id != atomic_read(&trans_id))
41912+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
41913 return;
41914
41915 /*
41916@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
41917 */
41918
41919 do {
41920- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
41921+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41922 ret = vmbus_sendpacket(dm_device.dev->channel,
41923 bl_resp,
41924 bl_resp->hdr.size,
41925@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
41926
41927 memset(&resp, 0, sizeof(struct dm_unballoon_response));
41928 resp.hdr.type = DM_UNBALLOON_RESPONSE;
41929- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41930+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41931 resp.hdr.size = sizeof(struct dm_unballoon_response);
41932
41933 vmbus_sendpacket(dm_device.dev->channel, &resp,
41934@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
41935 memset(&version_req, 0, sizeof(struct dm_version_request));
41936 version_req.hdr.type = DM_VERSION_REQUEST;
41937 version_req.hdr.size = sizeof(struct dm_version_request);
41938- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41939+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41940 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
41941 version_req.is_last_attempt = 1;
41942
41943@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
41944 memset(&version_req, 0, sizeof(struct dm_version_request));
41945 version_req.hdr.type = DM_VERSION_REQUEST;
41946 version_req.hdr.size = sizeof(struct dm_version_request);
41947- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41948+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41949 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
41950 version_req.is_last_attempt = 0;
41951
41952@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
41953 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
41954 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
41955 cap_msg.hdr.size = sizeof(struct dm_capabilities);
41956- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
41957+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41958
41959 cap_msg.caps.cap_bits.balloon = 1;
41960 cap_msg.caps.cap_bits.hot_add = 1;
41961diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
41962index c386d8d..d6004c4 100644
41963--- a/drivers/hv/hyperv_vmbus.h
41964+++ b/drivers/hv/hyperv_vmbus.h
41965@@ -611,7 +611,7 @@ enum vmbus_connect_state {
41966 struct vmbus_connection {
41967 enum vmbus_connect_state conn_state;
41968
41969- atomic_t next_gpadl_handle;
41970+ atomic_unchecked_t next_gpadl_handle;
41971
41972 /*
41973 * Represents channel interrupts. Each bit position represents a
41974diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
41975index 4d6b269..2e23b86 100644
41976--- a/drivers/hv/vmbus_drv.c
41977+++ b/drivers/hv/vmbus_drv.c
41978@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
41979 {
41980 int ret = 0;
41981
41982- static atomic_t device_num = ATOMIC_INIT(0);
41983+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41984
41985 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
41986- atomic_inc_return(&device_num));
41987+ atomic_inc_return_unchecked(&device_num));
41988
41989 child_device_obj->device.bus = &hv_bus;
41990 child_device_obj->device.parent = &hv_acpi_dev->dev;
41991diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
41992index 579bdf9..75118b5 100644
41993--- a/drivers/hwmon/acpi_power_meter.c
41994+++ b/drivers/hwmon/acpi_power_meter.c
41995@@ -116,7 +116,7 @@ struct sensor_template {
41996 struct device_attribute *devattr,
41997 const char *buf, size_t count);
41998 int index;
41999-};
42000+} __do_const;
42001
42002 /* Averaging interval */
42003 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42004@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42005 struct sensor_template *attrs)
42006 {
42007 struct device *dev = &resource->acpi_dev->dev;
42008- struct sensor_device_attribute *sensors =
42009+ sensor_device_attribute_no_const *sensors =
42010 &resource->sensors[resource->num_sensors];
42011 int res = 0;
42012
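
The hwmon hunks from here down all follow grsecurity's constify GCC plugin: structures consisting purely of function pointers become const everywhere, __do_const forces constification of a mixed struct such as sensor_template, and the *_no_const typedefs (sensor_device_attribute_no_const, attribute_no_const, and friends) provide a deliberately writable variant for attributes assembled at probe time. A minimal sketch of the pattern; both macros below are stand-ins for the plugin's attributes:

/* Without the plugin these macros vanish; with it, instances of the
 * struct become read-only and _no_const is the writable escape hatch. */
#define __do_const	/* plugin attribute: constify this struct */
#define __no_const	/* plugin attribute: keep this typedef writable */

struct sensor_template {
	const char *label;
	int (*show)(char *buf);
	int index;
} __do_const;

typedef struct sensor_template __no_const sensor_template_no_const;

static sensor_template_no_const runtime_sensors[8];

static void init_sensor(int i, const char *label, int (*show)(char *buf))
{
	/* Legal only through the _no_const type; the constified original
	 * would be read-only once the plugin is active. */
	runtime_sensors[i].label = label;
	runtime_sensors[i].show  = show;
	runtime_sensors[i].index = i;
}
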
42013diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42014index 0af63da..05a183a 100644
42015--- a/drivers/hwmon/applesmc.c
42016+++ b/drivers/hwmon/applesmc.c
42017@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42018 {
42019 struct applesmc_node_group *grp;
42020 struct applesmc_dev_attr *node;
42021- struct attribute *attr;
42022+ attribute_no_const *attr;
42023 int ret, i;
42024
42025 for (grp = groups; grp->format; grp++) {
42026diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42027index cccef87..06ce8ec 100644
42028--- a/drivers/hwmon/asus_atk0110.c
42029+++ b/drivers/hwmon/asus_atk0110.c
42030@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42031 struct atk_sensor_data {
42032 struct list_head list;
42033 struct atk_data *data;
42034- struct device_attribute label_attr;
42035- struct device_attribute input_attr;
42036- struct device_attribute limit1_attr;
42037- struct device_attribute limit2_attr;
42038+ device_attribute_no_const label_attr;
42039+ device_attribute_no_const input_attr;
42040+ device_attribute_no_const limit1_attr;
42041+ device_attribute_no_const limit2_attr;
42042 char label_attr_name[ATTR_NAME_SIZE];
42043 char input_attr_name[ATTR_NAME_SIZE];
42044 char limit1_attr_name[ATTR_NAME_SIZE];
42045@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42046 static struct device_attribute atk_name_attr =
42047 __ATTR(name, 0444, atk_name_show, NULL);
42048
42049-static void atk_init_attribute(struct device_attribute *attr, char *name,
42050+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42051 sysfs_show_func show)
42052 {
42053 sysfs_attr_init(&attr->attr);
42054diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42055index 5b7fec8..05c957a 100644
42056--- a/drivers/hwmon/coretemp.c
42057+++ b/drivers/hwmon/coretemp.c
42058@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42059 return NOTIFY_OK;
42060 }
42061
42062-static struct notifier_block coretemp_cpu_notifier __refdata = {
42063+static struct notifier_block coretemp_cpu_notifier = {
42064 .notifier_call = coretemp_cpu_callback,
42065 };
42066
42067diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42068index 7a8a6fb..015c1fd 100644
42069--- a/drivers/hwmon/ibmaem.c
42070+++ b/drivers/hwmon/ibmaem.c
42071@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42072 struct aem_rw_sensor_template *rw)
42073 {
42074 struct device *dev = &data->pdev->dev;
42075- struct sensor_device_attribute *sensors = data->sensors;
42076+ sensor_device_attribute_no_const *sensors = data->sensors;
42077 int err;
42078
42079 /* Set up read-only sensors */
42080diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42081index 17ae2eb..21b71dd 100644
42082--- a/drivers/hwmon/iio_hwmon.c
42083+++ b/drivers/hwmon/iio_hwmon.c
42084@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42085 {
42086 struct device *dev = &pdev->dev;
42087 struct iio_hwmon_state *st;
42088- struct sensor_device_attribute *a;
42089+ sensor_device_attribute_no_const *a;
42090 int ret, i;
42091 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42092 enum iio_chan_type type;
42093diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42094index f3830db..9f4d6d5 100644
42095--- a/drivers/hwmon/nct6683.c
42096+++ b/drivers/hwmon/nct6683.c
42097@@ -397,11 +397,11 @@ static struct attribute_group *
42098 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42099 int repeat)
42100 {
42101- struct sensor_device_attribute_2 *a2;
42102- struct sensor_device_attribute *a;
42103+ sensor_device_attribute_2_no_const *a2;
42104+ sensor_device_attribute_no_const *a;
42105 struct sensor_device_template **t;
42106 struct sensor_device_attr_u *su;
42107- struct attribute_group *group;
42108+ attribute_group_no_const *group;
42109 struct attribute **attrs;
42110 int i, j, count;
42111
42112diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42113index 1be4117..88ae1e1 100644
42114--- a/drivers/hwmon/nct6775.c
42115+++ b/drivers/hwmon/nct6775.c
42116@@ -952,10 +952,10 @@ static struct attribute_group *
42117 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42118 int repeat)
42119 {
42120- struct attribute_group *group;
42121+ attribute_group_no_const *group;
42122 struct sensor_device_attr_u *su;
42123- struct sensor_device_attribute *a;
42124- struct sensor_device_attribute_2 *a2;
42125+ sensor_device_attribute_no_const *a;
42126+ sensor_device_attribute_2_no_const *a2;
42127 struct attribute **attrs;
42128 struct sensor_device_template **t;
42129 int i, count;
42130diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42131index f2e47c7..45d7941 100644
42132--- a/drivers/hwmon/pmbus/pmbus_core.c
42133+++ b/drivers/hwmon/pmbus/pmbus_core.c
42134@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42135 return 0;
42136 }
42137
42138-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42139+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42140 const char *name,
42141 umode_t mode,
42142 ssize_t (*show)(struct device *dev,
42143@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42144 dev_attr->store = store;
42145 }
42146
42147-static void pmbus_attr_init(struct sensor_device_attribute *a,
42148+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42149 const char *name,
42150 umode_t mode,
42151 ssize_t (*show)(struct device *dev,
42152@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42153 u16 reg, u8 mask)
42154 {
42155 struct pmbus_boolean *boolean;
42156- struct sensor_device_attribute *a;
42157+ sensor_device_attribute_no_const *a;
42158
42159 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42160 if (!boolean)
42161@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42162 bool update, bool readonly)
42163 {
42164 struct pmbus_sensor *sensor;
42165- struct device_attribute *a;
42166+ device_attribute_no_const *a;
42167
42168 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42169 if (!sensor)
42170@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42171 const char *lstring, int index)
42172 {
42173 struct pmbus_label *label;
42174- struct device_attribute *a;
42175+ device_attribute_no_const *a;
42176
42177 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42178 if (!label)
42179diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42180index d4f0935..7420593 100644
42181--- a/drivers/hwmon/sht15.c
42182+++ b/drivers/hwmon/sht15.c
42183@@ -169,7 +169,7 @@ struct sht15_data {
42184 int supply_uv;
42185 bool supply_uv_valid;
42186 struct work_struct update_supply_work;
42187- atomic_t interrupt_handled;
42188+ atomic_unchecked_t interrupt_handled;
42189 };
42190
42191 /**
42192@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42193 ret = gpio_direction_input(data->pdata->gpio_data);
42194 if (ret)
42195 return ret;
42196- atomic_set(&data->interrupt_handled, 0);
42197+ atomic_set_unchecked(&data->interrupt_handled, 0);
42198
42199 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42200 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42201 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42202 /* Only relevant if the interrupt hasn't occurred. */
42203- if (!atomic_read(&data->interrupt_handled))
42204+ if (!atomic_read_unchecked(&data->interrupt_handled))
42205 schedule_work(&data->read_work);
42206 }
42207 ret = wait_event_timeout(data->wait_queue,
42208@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42209
42210 /* First disable the interrupt */
42211 disable_irq_nosync(irq);
42212- atomic_inc(&data->interrupt_handled);
42213+ atomic_inc_unchecked(&data->interrupt_handled);
42214 /* Then schedule a reading work struct */
42215 if (data->state != SHT15_READING_NOTHING)
42216 schedule_work(&data->read_work);
42217@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42218 * If not, then start the interrupt again - care here as could
42219 * have gone low in meantime so verify it hasn't!
42220 */
42221- atomic_set(&data->interrupt_handled, 0);
42222+ atomic_set_unchecked(&data->interrupt_handled, 0);
42223 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42224 /* If still not occurred or another handler was scheduled */
42225 if (gpio_get_value(data->pdata->gpio_data)
42226- || atomic_read(&data->interrupt_handled))
42227+ || atomic_read_unchecked(&data->interrupt_handled))
42228 return;
42229 }
42230
42231diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42232index ac91c07..8e69663 100644
42233--- a/drivers/hwmon/via-cputemp.c
42234+++ b/drivers/hwmon/via-cputemp.c
42235@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42236 return NOTIFY_OK;
42237 }
42238
42239-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42240+static struct notifier_block via_cputemp_cpu_notifier = {
42241 .notifier_call = via_cputemp_cpu_callback,
42242 };
42243
42244diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42245index 65e3240..e6c511d 100644
42246--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42247+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42248@@ -39,7 +39,7 @@
42249 extern struct i2c_adapter amd756_smbus;
42250
42251 static struct i2c_adapter *s4882_adapter;
42252-static struct i2c_algorithm *s4882_algo;
42253+static i2c_algorithm_no_const *s4882_algo;
42254
42255 /* Wrapper access functions for multiplexed SMBus */
42256 static DEFINE_MUTEX(amd756_lock);
42257diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42258index b19a310..d6eece0 100644
42259--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42260+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42261@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42262 /* usb layer */
42263
42264 /* Send command to device, and get response. */
42265-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42266+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42267 {
42268 int ret = 0;
42269 int actual;
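
i2c-diolan-u2c.c (and several mthca functions later in this section) gains __intentional_overflow(-1), an annotation consumed by grsecurity's size_overflow GCC plugin: instrumented integer arithmetic normally traps on overflow, and the marker exempts values the author knows may wrap or are bounded elsewhere. A sketch with the macro stubbed out; the parameter convention (-1 covering the function's computed return value) is assumed from the patch's usage:

/* size_overflow opt-out sketch; the macro stands in for the plugin
 * attribute and is a no-op without the plugin. */
#define __intentional_overflow(...)

/* A hash mix that is supposed to wrap modulo 2^32. */
static unsigned int __intentional_overflow(-1) mix(unsigned int x)
{
	return x * 2654435761u;		/* Knuth multiplicative hash */
}
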
42270diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42271index 88eda09..cf40434 100644
42272--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42273+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42274@@ -37,7 +37,7 @@
42275 extern struct i2c_adapter *nforce2_smbus;
42276
42277 static struct i2c_adapter *s4985_adapter;
42278-static struct i2c_algorithm *s4985_algo;
42279+static i2c_algorithm_no_const *s4985_algo;
42280
42281 /* Wrapper access functions for multiplexed SMBus */
42282 static DEFINE_MUTEX(nforce2_lock);
42283diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42284index 71c7a39..71dd3e0 100644
42285--- a/drivers/i2c/i2c-dev.c
42286+++ b/drivers/i2c/i2c-dev.c
42287@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42288 break;
42289 }
42290
42291- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42292+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42293 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42294 if (IS_ERR(rdwr_pa[i].buf)) {
42295 res = PTR_ERR(rdwr_pa[i].buf);
42296diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42297index 0b510ba..4fbb5085 100644
42298--- a/drivers/ide/ide-cd.c
42299+++ b/drivers/ide/ide-cd.c
42300@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42301 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42302 if ((unsigned long)buf & alignment
42303 || blk_rq_bytes(rq) & q->dma_pad_mask
42304- || object_is_on_stack(buf))
42305+ || object_starts_on_stack(buf))
42306 drive->dma = 0;
42307 }
42308 }
42309diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42310index af3e76d..96dfe5e 100644
42311--- a/drivers/iio/industrialio-core.c
42312+++ b/drivers/iio/industrialio-core.c
42313@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42314 }
42315
42316 static
42317-int __iio_device_attr_init(struct device_attribute *dev_attr,
42318+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42319 const char *postfix,
42320 struct iio_chan_spec const *chan,
42321 ssize_t (*readfunc)(struct device *dev,
42322diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42323index e28a494..f7c2671 100644
42324--- a/drivers/infiniband/core/cm.c
42325+++ b/drivers/infiniband/core/cm.c
42326@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42327
42328 struct cm_counter_group {
42329 struct kobject obj;
42330- atomic_long_t counter[CM_ATTR_COUNT];
42331+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42332 };
42333
42334 struct cm_counter_attribute {
42335@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42336 struct ib_mad_send_buf *msg = NULL;
42337 int ret;
42338
42339- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42340+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42341 counter[CM_REQ_COUNTER]);
42342
42343 /* Quick state check to discard duplicate REQs. */
42344@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42345 if (!cm_id_priv)
42346 return;
42347
42348- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42349+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42350 counter[CM_REP_COUNTER]);
42351 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42352 if (ret)
42353@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42354 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42355 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42356 spin_unlock_irq(&cm_id_priv->lock);
42357- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42358+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42359 counter[CM_RTU_COUNTER]);
42360 goto out;
42361 }
42362@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42363 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42364 dreq_msg->local_comm_id);
42365 if (!cm_id_priv) {
42366- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42367+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42368 counter[CM_DREQ_COUNTER]);
42369 cm_issue_drep(work->port, work->mad_recv_wc);
42370 return -EINVAL;
42371@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42372 case IB_CM_MRA_REP_RCVD:
42373 break;
42374 case IB_CM_TIMEWAIT:
42375- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42376+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42377 counter[CM_DREQ_COUNTER]);
42378 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42379 goto unlock;
42380@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42381 cm_free_msg(msg);
42382 goto deref;
42383 case IB_CM_DREQ_RCVD:
42384- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42385+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42386 counter[CM_DREQ_COUNTER]);
42387 goto unlock;
42388 default:
42389@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42390 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42391 cm_id_priv->msg, timeout)) {
42392 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42393- atomic_long_inc(&work->port->
42394+ atomic_long_inc_unchecked(&work->port->
42395 counter_group[CM_RECV_DUPLICATES].
42396 counter[CM_MRA_COUNTER]);
42397 goto out;
42398@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42399 break;
42400 case IB_CM_MRA_REQ_RCVD:
42401 case IB_CM_MRA_REP_RCVD:
42402- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42403+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42404 counter[CM_MRA_COUNTER]);
42405 /* fall through */
42406 default:
42407@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42408 case IB_CM_LAP_IDLE:
42409 break;
42410 case IB_CM_MRA_LAP_SENT:
42411- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42412+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42413 counter[CM_LAP_COUNTER]);
42414 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42415 goto unlock;
42416@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42417 cm_free_msg(msg);
42418 goto deref;
42419 case IB_CM_LAP_RCVD:
42420- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42421+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42422 counter[CM_LAP_COUNTER]);
42423 goto unlock;
42424 default:
42425@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42426 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42427 if (cur_cm_id_priv) {
42428 spin_unlock_irq(&cm.lock);
42429- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42430+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42431 counter[CM_SIDR_REQ_COUNTER]);
42432 goto out; /* Duplicate message. */
42433 }
42434@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42435 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42436 msg->retries = 1;
42437
42438- atomic_long_add(1 + msg->retries,
42439+ atomic_long_add_unchecked(1 + msg->retries,
42440 &port->counter_group[CM_XMIT].counter[attr_index]);
42441 if (msg->retries)
42442- atomic_long_add(msg->retries,
42443+ atomic_long_add_unchecked(msg->retries,
42444 &port->counter_group[CM_XMIT_RETRIES].
42445 counter[attr_index]);
42446
42447@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42448 }
42449
42450 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42451- atomic_long_inc(&port->counter_group[CM_RECV].
42452+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42453 counter[attr_id - CM_ATTR_ID_OFFSET]);
42454
42455 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42456@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42457 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42458
42459 return sprintf(buf, "%ld\n",
42460- atomic_long_read(&group->counter[cm_attr->index]));
42461+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42462 }
42463
42464 static const struct sysfs_ops cm_counter_ops = {
42465diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42466index 9f5ad7c..588cd84 100644
42467--- a/drivers/infiniband/core/fmr_pool.c
42468+++ b/drivers/infiniband/core/fmr_pool.c
42469@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42470
42471 struct task_struct *thread;
42472
42473- atomic_t req_ser;
42474- atomic_t flush_ser;
42475+ atomic_unchecked_t req_ser;
42476+ atomic_unchecked_t flush_ser;
42477
42478 wait_queue_head_t force_wait;
42479 };
42480@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42481 struct ib_fmr_pool *pool = pool_ptr;
42482
42483 do {
42484- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42485+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42486 ib_fmr_batch_release(pool);
42487
42488- atomic_inc(&pool->flush_ser);
42489+ atomic_inc_unchecked(&pool->flush_ser);
42490 wake_up_interruptible(&pool->force_wait);
42491
42492 if (pool->flush_function)
42493@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42494 }
42495
42496 set_current_state(TASK_INTERRUPTIBLE);
42497- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42498+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42499 !kthread_should_stop())
42500 schedule();
42501 __set_current_state(TASK_RUNNING);
42502@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42503 pool->dirty_watermark = params->dirty_watermark;
42504 pool->dirty_len = 0;
42505 spin_lock_init(&pool->pool_lock);
42506- atomic_set(&pool->req_ser, 0);
42507- atomic_set(&pool->flush_ser, 0);
42508+ atomic_set_unchecked(&pool->req_ser, 0);
42509+ atomic_set_unchecked(&pool->flush_ser, 0);
42510 init_waitqueue_head(&pool->force_wait);
42511
42512 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42513@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42514 }
42515 spin_unlock_irq(&pool->pool_lock);
42516
42517- serial = atomic_inc_return(&pool->req_ser);
42518+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42519 wake_up_process(pool->thread);
42520
42521 if (wait_event_interruptible(pool->force_wait,
42522- atomic_read(&pool->flush_ser) - serial >= 0))
42523+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42524 return -EINTR;
42525
42526 return 0;
42527@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42528 } else {
42529 list_add_tail(&fmr->list, &pool->dirty_list);
42530 if (++pool->dirty_len >= pool->dirty_watermark) {
42531- atomic_inc(&pool->req_ser);
42532+ atomic_inc_unchecked(&pool->req_ser);
42533 wake_up_process(pool->thread);
42534 }
42535 }
42536diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
42537index aec7a6a..8c014b5 100644
42538--- a/drivers/infiniband/core/umem.c
42539+++ b/drivers/infiniband/core/umem.c
42540@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
42541 if (dmasync)
42542 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
42543
42544+ /*
42545+ * If the combination of the addr and size requested for this memory
42546+ * region causes an integer overflow, return error.
42547+ */
42548+ if ((PAGE_ALIGN(addr + size) <= size) ||
42549+ (PAGE_ALIGN(addr + size) <= addr))
42550+ return ERR_PTR(-EINVAL);
42551+
42552 if (!can_do_mlock())
42553 return ERR_PTR(-EPERM);
42554
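
The ib_umem_get() hunk rejects any (addr, size) pair whose sum wraps: after the wrap, PAGE_ALIGN(addr + size) is no larger than one of the operands, so a huge region could otherwise masquerade as a small, mlock-able one. The check, extracted into a compilable sketch with an assumed 4 KiB page:

/* Reject an (addr, size) pair whose sum wraps around the address space. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int region_valid(unsigned long addr, unsigned long size)
{
	if (PAGE_ALIGN(addr + size) <= size ||
	    PAGE_ALIGN(addr + size) <= addr)
		return 0;		/* wrapped: reject */
	return 1;
}

int main(void)
{
	/* addr + size wraps past zero; the naive end address looks tiny. */
	printf("%d\n", region_valid(~0UL - 100, 4096));	/* 0: rejected */
	printf("%d\n", region_valid(0x1000, 4096));	/* 1: accepted */
	return 0;
}
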
42555diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42556index cb43c22..2e12dd7 100644
42557--- a/drivers/infiniband/hw/cxgb4/mem.c
42558+++ b/drivers/infiniband/hw/cxgb4/mem.c
42559@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42560 int err;
42561 struct fw_ri_tpte tpt;
42562 u32 stag_idx;
42563- static atomic_t key;
42564+ static atomic_unchecked_t key;
42565
42566 if (c4iw_fatal_error(rdev))
42567 return -EIO;
42568@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42569 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42570 rdev->stats.stag.max = rdev->stats.stag.cur;
42571 mutex_unlock(&rdev->stats.lock);
42572- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42573+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42574 }
42575 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42576 __func__, stag_state, type, pdid, stag_idx);
42577diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42578index 79b3dbc..96e5fcc 100644
42579--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42580+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42581@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42582 struct ib_atomic_eth *ateth;
42583 struct ipath_ack_entry *e;
42584 u64 vaddr;
42585- atomic64_t *maddr;
42586+ atomic64_unchecked_t *maddr;
42587 u64 sdata;
42588 u32 rkey;
42589 u8 next;
42590@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42591 IB_ACCESS_REMOTE_ATOMIC)))
42592 goto nack_acc_unlck;
42593 /* Perform atomic OP and save result. */
42594- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42595+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42596 sdata = be64_to_cpu(ateth->swap_data);
42597 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42598 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42599- (u64) atomic64_add_return(sdata, maddr) - sdata :
42600+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42601 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42602 be64_to_cpu(ateth->compare_data),
42603 sdata);
42604diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42605index 1f95bba..9530f87 100644
42606--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42607+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42608@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42609 unsigned long flags;
42610 struct ib_wc wc;
42611 u64 sdata;
42612- atomic64_t *maddr;
42613+ atomic64_unchecked_t *maddr;
42614 enum ib_wc_status send_status;
42615
42616 /*
42617@@ -382,11 +382,11 @@ again:
42618 IB_ACCESS_REMOTE_ATOMIC)))
42619 goto acc_err;
42620 /* Perform atomic OP and save result. */
42621- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42622+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42623 sdata = wqe->wr.wr.atomic.compare_add;
42624 *(u64 *) sqp->s_sge.sge.vaddr =
42625 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42626- (u64) atomic64_add_return(sdata, maddr) - sdata :
42627+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42628 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42629 sdata, wqe->wr.wr.atomic.swap);
42630 goto send_comp;
42631diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42632index 82a7dd8..8fb6ba6 100644
42633--- a/drivers/infiniband/hw/mlx4/mad.c
42634+++ b/drivers/infiniband/hw/mlx4/mad.c
42635@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42636
42637 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42638 {
42639- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42640+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42641 cpu_to_be64(0xff00000000000000LL);
42642 }
42643
42644diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42645index ed327e6..ca1739e0 100644
42646--- a/drivers/infiniband/hw/mlx4/mcg.c
42647+++ b/drivers/infiniband/hw/mlx4/mcg.c
42648@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42649 {
42650 char name[20];
42651
42652- atomic_set(&ctx->tid, 0);
42653+ atomic_set_unchecked(&ctx->tid, 0);
42654 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42655 ctx->mcg_wq = create_singlethread_workqueue(name);
42656 if (!ctx->mcg_wq)
42657diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42658index 6eb743f..a7b0f6d 100644
42659--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42660+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42661@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
42662 struct list_head mcg_mgid0_list;
42663 struct workqueue_struct *mcg_wq;
42664 struct mlx4_ib_demux_pv_ctx **tun;
42665- atomic_t tid;
42666+ atomic_unchecked_t tid;
42667 int flushing; /* flushing the work queue */
42668 };
42669
42670diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42671index 9d3e5c1..6f166df 100644
42672--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42673+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42674@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42675 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42676 }
42677
42678-int mthca_QUERY_FW(struct mthca_dev *dev)
42679+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42680 {
42681 struct mthca_mailbox *mailbox;
42682 u32 *outbox;
42683@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42684 CMD_TIME_CLASS_B);
42685 }
42686
42687-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42688+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42689 int num_mtt)
42690 {
42691 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42692@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42693 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42694 }
42695
42696-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42697+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42698 int eq_num)
42699 {
42700 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42701@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42702 CMD_TIME_CLASS_B);
42703 }
42704
42705-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42706+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42707 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42708 void *in_mad, void *response_mad)
42709 {
42710diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42711index ded76c1..0cf0a08 100644
42712--- a/drivers/infiniband/hw/mthca/mthca_main.c
42713+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42714@@ -692,7 +692,7 @@ err_close:
42715 return err;
42716 }
42717
42718-static int mthca_setup_hca(struct mthca_dev *dev)
42719+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42720 {
42721 int err;
42722
42723diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42724index ed9a989..6aa5dc2 100644
42725--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42726+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42727@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42728 * through the bitmaps)
42729 */
42730
42731-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42732+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42733 {
42734 int o;
42735 int m;
42736@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42737 return key;
42738 }
42739
42740-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42741+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42742 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42743 {
42744 struct mthca_mailbox *mailbox;
42745@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42746 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42747 }
42748
42749-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42750+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42751 u64 *buffer_list, int buffer_size_shift,
42752 int list_len, u64 iova, u64 total_size,
42753 u32 access, struct mthca_mr *mr)
42754diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42755index 415f8e1..e34214e 100644
42756--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42757+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42758@@ -764,7 +764,7 @@ unlock:
42759 return 0;
42760 }
42761
42762-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42763+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42764 {
42765 struct mthca_dev *dev = to_mdev(ibcq->device);
42766 struct mthca_cq *cq = to_mcq(ibcq);
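
The mthca hunks change no logic at all; they only annotate functions with __intentional_overflow(-1), which is consumed by the size_overflow gcc plugin. The plugin tracks integer expressions that feed size calculations and reports unexpected wraparound; the annotation (with -1 conventionally naming the return value) declares that overflow involving the marked function is deliberate and must not be reported. A hedged sketch of how such a marker is typically defined so that it costs nothing on unpatched compilers (the real macro lives in the patched compiler.h):

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...)	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)	/* expands to nothing without the plugin */
#endif

/* usage: wraparound reachable through this function is expected */
int __intentional_overflow(-1) demo_query_fw(void);

demo_query_fw() here is a stand-in; in the hunks above the marker lands on firmware-command wrappers whose mailbox arithmetic legitimately wraps.
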
42767diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42768index 3b2a6dc..bce26ff 100644
42769--- a/drivers/infiniband/hw/nes/nes.c
42770+++ b/drivers/infiniband/hw/nes/nes.c
42771@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42772 LIST_HEAD(nes_adapter_list);
42773 static LIST_HEAD(nes_dev_list);
42774
42775-atomic_t qps_destroyed;
42776+atomic_unchecked_t qps_destroyed;
42777
42778 static unsigned int ee_flsh_adapter;
42779 static unsigned int sysfs_nonidx_addr;
42780@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42781 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42782 struct nes_adapter *nesadapter = nesdev->nesadapter;
42783
42784- atomic_inc(&qps_destroyed);
42785+ atomic_inc_unchecked(&qps_destroyed);
42786
42787 /* Free the control structures */
42788
42789diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42790index bd9d132..70d84f4 100644
42791--- a/drivers/infiniband/hw/nes/nes.h
42792+++ b/drivers/infiniband/hw/nes/nes.h
42793@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
42794 extern unsigned int wqm_quanta;
42795 extern struct list_head nes_adapter_list;
42796
42797-extern atomic_t cm_connects;
42798-extern atomic_t cm_accepts;
42799-extern atomic_t cm_disconnects;
42800-extern atomic_t cm_closes;
42801-extern atomic_t cm_connecteds;
42802-extern atomic_t cm_connect_reqs;
42803-extern atomic_t cm_rejects;
42804-extern atomic_t mod_qp_timouts;
42805-extern atomic_t qps_created;
42806-extern atomic_t qps_destroyed;
42807-extern atomic_t sw_qps_destroyed;
42808+extern atomic_unchecked_t cm_connects;
42809+extern atomic_unchecked_t cm_accepts;
42810+extern atomic_unchecked_t cm_disconnects;
42811+extern atomic_unchecked_t cm_closes;
42812+extern atomic_unchecked_t cm_connecteds;
42813+extern atomic_unchecked_t cm_connect_reqs;
42814+extern atomic_unchecked_t cm_rejects;
42815+extern atomic_unchecked_t mod_qp_timouts;
42816+extern atomic_unchecked_t qps_created;
42817+extern atomic_unchecked_t qps_destroyed;
42818+extern atomic_unchecked_t sw_qps_destroyed;
42819 extern u32 mh_detected;
42820 extern u32 mh_pauses_sent;
42821 extern u32 cm_packets_sent;
42822@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
42823 extern u32 cm_packets_received;
42824 extern u32 cm_packets_dropped;
42825 extern u32 cm_packets_retrans;
42826-extern atomic_t cm_listens_created;
42827-extern atomic_t cm_listens_destroyed;
42828+extern atomic_unchecked_t cm_listens_created;
42829+extern atomic_unchecked_t cm_listens_destroyed;
42830 extern u32 cm_backlog_drops;
42831-extern atomic_t cm_loopbacks;
42832-extern atomic_t cm_nodes_created;
42833-extern atomic_t cm_nodes_destroyed;
42834-extern atomic_t cm_accel_dropped_pkts;
42835-extern atomic_t cm_resets_recvd;
42836-extern atomic_t pau_qps_created;
42837-extern atomic_t pau_qps_destroyed;
42838+extern atomic_unchecked_t cm_loopbacks;
42839+extern atomic_unchecked_t cm_nodes_created;
42840+extern atomic_unchecked_t cm_nodes_destroyed;
42841+extern atomic_unchecked_t cm_accel_dropped_pkts;
42842+extern atomic_unchecked_t cm_resets_recvd;
42843+extern atomic_unchecked_t pau_qps_created;
42844+extern atomic_unchecked_t pau_qps_destroyed;
42845
42846 extern u32 int_mod_timer_init;
42847 extern u32 int_mod_cq_depth_256;
42848diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42849index 6f09a72..cf4399d 100644
42850--- a/drivers/infiniband/hw/nes/nes_cm.c
42851+++ b/drivers/infiniband/hw/nes/nes_cm.c
42852@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
42853 u32 cm_packets_retrans;
42854 u32 cm_packets_created;
42855 u32 cm_packets_received;
42856-atomic_t cm_listens_created;
42857-atomic_t cm_listens_destroyed;
42858+atomic_unchecked_t cm_listens_created;
42859+atomic_unchecked_t cm_listens_destroyed;
42860 u32 cm_backlog_drops;
42861-atomic_t cm_loopbacks;
42862-atomic_t cm_nodes_created;
42863-atomic_t cm_nodes_destroyed;
42864-atomic_t cm_accel_dropped_pkts;
42865-atomic_t cm_resets_recvd;
42866+atomic_unchecked_t cm_loopbacks;
42867+atomic_unchecked_t cm_nodes_created;
42868+atomic_unchecked_t cm_nodes_destroyed;
42869+atomic_unchecked_t cm_accel_dropped_pkts;
42870+atomic_unchecked_t cm_resets_recvd;
42871
42872 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
42873 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
42874@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
42875 /* instance of function pointers for client API */
42876 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
42877 static struct nes_cm_ops nes_cm_api = {
42878- mini_cm_accelerated,
42879- mini_cm_listen,
42880- mini_cm_del_listen,
42881- mini_cm_connect,
42882- mini_cm_close,
42883- mini_cm_accept,
42884- mini_cm_reject,
42885- mini_cm_recv_pkt,
42886- mini_cm_dealloc_core,
42887- mini_cm_get,
42888- mini_cm_set
42889+ .accelerated = mini_cm_accelerated,
42890+ .listen = mini_cm_listen,
42891+ .stop_listener = mini_cm_del_listen,
42892+ .connect = mini_cm_connect,
42893+ .close = mini_cm_close,
42894+ .accept = mini_cm_accept,
42895+ .reject = mini_cm_reject,
42896+ .recv_pkt = mini_cm_recv_pkt,
42897+ .destroy_cm_core = mini_cm_dealloc_core,
42898+ .get = mini_cm_get,
42899+ .set = mini_cm_set
42900 };
42901
42902 static struct nes_cm_core *g_cm_core;
42903
42904-atomic_t cm_connects;
42905-atomic_t cm_accepts;
42906-atomic_t cm_disconnects;
42907-atomic_t cm_closes;
42908-atomic_t cm_connecteds;
42909-atomic_t cm_connect_reqs;
42910-atomic_t cm_rejects;
42911+atomic_unchecked_t cm_connects;
42912+atomic_unchecked_t cm_accepts;
42913+atomic_unchecked_t cm_disconnects;
42914+atomic_unchecked_t cm_closes;
42915+atomic_unchecked_t cm_connecteds;
42916+atomic_unchecked_t cm_connect_reqs;
42917+atomic_unchecked_t cm_rejects;
42918
42919 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
42920 {
42921@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
42922 kfree(listener);
42923 listener = NULL;
42924 ret = 0;
42925- atomic_inc(&cm_listens_destroyed);
42926+ atomic_inc_unchecked(&cm_listens_destroyed);
42927 } else {
42928 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
42929 }
42930@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
42931 cm_node->rem_mac);
42932
42933 add_hte_node(cm_core, cm_node);
42934- atomic_inc(&cm_nodes_created);
42935+ atomic_inc_unchecked(&cm_nodes_created);
42936
42937 return cm_node;
42938 }
42939@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
42940 }
42941
42942 atomic_dec(&cm_core->node_cnt);
42943- atomic_inc(&cm_nodes_destroyed);
42944+ atomic_inc_unchecked(&cm_nodes_destroyed);
42945 nesqp = cm_node->nesqp;
42946 if (nesqp) {
42947 nesqp->cm_node = NULL;
42948@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
42949
42950 static void drop_packet(struct sk_buff *skb)
42951 {
42952- atomic_inc(&cm_accel_dropped_pkts);
42953+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42954 dev_kfree_skb_any(skb);
42955 }
42956
42957@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
42958 {
42959
42960 int reset = 0; /* whether to send reset in case of err.. */
42961- atomic_inc(&cm_resets_recvd);
42962+ atomic_inc_unchecked(&cm_resets_recvd);
42963 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
42964 " refcnt=%d\n", cm_node, cm_node->state,
42965 atomic_read(&cm_node->ref_count));
42966@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
42967 rem_ref_cm_node(cm_node->cm_core, cm_node);
42968 return NULL;
42969 }
42970- atomic_inc(&cm_loopbacks);
42971+ atomic_inc_unchecked(&cm_loopbacks);
42972 loopbackremotenode->loopbackpartner = cm_node;
42973 loopbackremotenode->tcp_cntxt.rcv_wscale =
42974 NES_CM_DEFAULT_RCV_WND_SCALE;
42975@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
42976 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
42977 else {
42978 rem_ref_cm_node(cm_core, cm_node);
42979- atomic_inc(&cm_accel_dropped_pkts);
42980+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42981 dev_kfree_skb_any(skb);
42982 }
42983 break;
42984@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42985
42986 if ((cm_id) && (cm_id->event_handler)) {
42987 if (issue_disconn) {
42988- atomic_inc(&cm_disconnects);
42989+ atomic_inc_unchecked(&cm_disconnects);
42990 cm_event.event = IW_CM_EVENT_DISCONNECT;
42991 cm_event.status = disconn_status;
42992 cm_event.local_addr = cm_id->local_addr;
42993@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42994 }
42995
42996 if (issue_close) {
42997- atomic_inc(&cm_closes);
42998+ atomic_inc_unchecked(&cm_closes);
42999 nes_disconnect(nesqp, 1);
43000
43001 cm_id->provider_data = nesqp;
43002@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43003
43004 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43005 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43006- atomic_inc(&cm_accepts);
43007+ atomic_inc_unchecked(&cm_accepts);
43008
43009 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43010 netdev_refcnt_read(nesvnic->netdev));
43011@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43012 struct nes_cm_core *cm_core;
43013 u8 *start_buff;
43014
43015- atomic_inc(&cm_rejects);
43016+ atomic_inc_unchecked(&cm_rejects);
43017 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43018 loopback = cm_node->loopbackpartner;
43019 cm_core = cm_node->cm_core;
43020@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43021 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43022 ntohs(laddr->sin_port));
43023
43024- atomic_inc(&cm_connects);
43025+ atomic_inc_unchecked(&cm_connects);
43026 nesqp->active_conn = 1;
43027
43028 /* cache the cm_id in the qp */
43029@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43030 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43031 return err;
43032 }
43033- atomic_inc(&cm_listens_created);
43034+ atomic_inc_unchecked(&cm_listens_created);
43035 }
43036
43037 cm_id->add_ref(cm_id);
43038@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43039
43040 if (nesqp->destroyed)
43041 return;
43042- atomic_inc(&cm_connecteds);
43043+ atomic_inc_unchecked(&cm_connecteds);
43044 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43045 " local port 0x%04X. jiffies = %lu.\n",
43046 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43047@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43048
43049 cm_id->add_ref(cm_id);
43050 ret = cm_id->event_handler(cm_id, &cm_event);
43051- atomic_inc(&cm_closes);
43052+ atomic_inc_unchecked(&cm_closes);
43053 cm_event.event = IW_CM_EVENT_CLOSE;
43054 cm_event.status = 0;
43055 cm_event.provider_data = cm_id->provider_data;
43056@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43057 return;
43058 cm_id = cm_node->cm_id;
43059
43060- atomic_inc(&cm_connect_reqs);
43061+ atomic_inc_unchecked(&cm_connect_reqs);
43062 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43063 cm_node, cm_id, jiffies);
43064
43065@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43066 return;
43067 cm_id = cm_node->cm_id;
43068
43069- atomic_inc(&cm_connect_reqs);
43070+ atomic_inc_unchecked(&cm_connect_reqs);
43071 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43072 cm_node, cm_id, jiffies);
43073
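
Rewriting nes_cm_api from a positional to a C99 designated initializer (the gigaset and isdn ops tables later in this patch get the same treatment) is a prerequisite for GRKERNSEC_RANDSTRUCT: once the structure-layout randomization plugin reorders the fields of function-pointer structs at build time, positional initializers would silently bind callbacks to the wrong slots, while designated initializers bind by name and survive any field order. A small illustration with a hypothetical ops table:

static int  demo_open(void);
static void demo_close(void);

struct demo_ops {
	int  (*open)(void);
	void (*close)(void);
};

/* positional: breaks silently if a plugin reorders open/close */
static const struct demo_ops fragile_ops = { demo_open, demo_close };

/* designated: layout-independent, and self-documenting */
static const struct demo_ops robust_ops = {
	.open	= demo_open,
	.close	= demo_close,
};

The same rewrite also plays well with the constify plugin, which moves such tables into read-only memory.
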
43074diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43075index 4166452..fc952c3 100644
43076--- a/drivers/infiniband/hw/nes/nes_mgt.c
43077+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43078@@ -40,8 +40,8 @@
43079 #include "nes.h"
43080 #include "nes_mgt.h"
43081
43082-atomic_t pau_qps_created;
43083-atomic_t pau_qps_destroyed;
43084+atomic_unchecked_t pau_qps_created;
43085+atomic_unchecked_t pau_qps_destroyed;
43086
43087 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43088 {
43089@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43090 {
43091 struct sk_buff *skb;
43092 unsigned long flags;
43093- atomic_inc(&pau_qps_destroyed);
43094+ atomic_inc_unchecked(&pau_qps_destroyed);
43095
43096 /* Free packets that have not yet been forwarded */
43097 /* Lock is acquired by skb_dequeue when removing the skb */
43098@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43099 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43100 skb_queue_head_init(&nesqp->pau_list);
43101 spin_lock_init(&nesqp->pau_lock);
43102- atomic_inc(&pau_qps_created);
43103+ atomic_inc_unchecked(&pau_qps_created);
43104 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43105 }
43106
43107diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43108index 49eb511..a774366 100644
43109--- a/drivers/infiniband/hw/nes/nes_nic.c
43110+++ b/drivers/infiniband/hw/nes/nes_nic.c
43111@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43112 target_stat_values[++index] = mh_detected;
43113 target_stat_values[++index] = mh_pauses_sent;
43114 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43115- target_stat_values[++index] = atomic_read(&cm_connects);
43116- target_stat_values[++index] = atomic_read(&cm_accepts);
43117- target_stat_values[++index] = atomic_read(&cm_disconnects);
43118- target_stat_values[++index] = atomic_read(&cm_connecteds);
43119- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43120- target_stat_values[++index] = atomic_read(&cm_rejects);
43121- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43122- target_stat_values[++index] = atomic_read(&qps_created);
43123- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43124- target_stat_values[++index] = atomic_read(&qps_destroyed);
43125- target_stat_values[++index] = atomic_read(&cm_closes);
43126+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43127+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43128+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43129+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43130+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43131+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43132+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43133+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43134+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43135+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43136+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43137 target_stat_values[++index] = cm_packets_sent;
43138 target_stat_values[++index] = cm_packets_bounced;
43139 target_stat_values[++index] = cm_packets_created;
43140 target_stat_values[++index] = cm_packets_received;
43141 target_stat_values[++index] = cm_packets_dropped;
43142 target_stat_values[++index] = cm_packets_retrans;
43143- target_stat_values[++index] = atomic_read(&cm_listens_created);
43144- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43145+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43146+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43147 target_stat_values[++index] = cm_backlog_drops;
43148- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43149- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43150- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43151- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43152- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43153+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43154+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43155+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43156+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43157+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43158 target_stat_values[++index] = nesadapter->free_4kpbl;
43159 target_stat_values[++index] = nesadapter->free_256pbl;
43160 target_stat_values[++index] = int_mod_timer_init;
43161 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43162 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43163 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43164- target_stat_values[++index] = atomic_read(&pau_qps_created);
43165- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43166+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43167+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43168 }
43169
43170 /**
43171diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43172index c0d0296..3185f57 100644
43173--- a/drivers/infiniband/hw/nes/nes_verbs.c
43174+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43175@@ -46,9 +46,9 @@
43176
43177 #include <rdma/ib_umem.h>
43178
43179-atomic_t mod_qp_timouts;
43180-atomic_t qps_created;
43181-atomic_t sw_qps_destroyed;
43182+atomic_unchecked_t mod_qp_timouts;
43183+atomic_unchecked_t qps_created;
43184+atomic_unchecked_t sw_qps_destroyed;
43185
43186 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43187
43188@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43189 if (init_attr->create_flags)
43190 return ERR_PTR(-EINVAL);
43191
43192- atomic_inc(&qps_created);
43193+ atomic_inc_unchecked(&qps_created);
43194 switch (init_attr->qp_type) {
43195 case IB_QPT_RC:
43196 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43197@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43198 struct iw_cm_event cm_event;
43199 int ret = 0;
43200
43201- atomic_inc(&sw_qps_destroyed);
43202+ atomic_inc_unchecked(&sw_qps_destroyed);
43203 nesqp->destroyed = 1;
43204
43205 /* Blow away the connection if it exists. */
43206diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43207index b218254..1d1aa3c 100644
43208--- a/drivers/infiniband/hw/qib/qib.h
43209+++ b/drivers/infiniband/hw/qib/qib.h
43210@@ -52,6 +52,7 @@
43211 #include <linux/kref.h>
43212 #include <linux/sched.h>
43213 #include <linux/kthread.h>
43214+#include <linux/slab.h>
43215
43216 #include "qib_common.h"
43217 #include "qib_verbs.h"
43218diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43219index cdc7df4..a2fdfdb 100644
43220--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43221+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43222@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43223 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43224 }
43225
43226-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43227+static struct rtnl_link_ops ipoib_link_ops = {
43228 .kind = "ipoib",
43229 .maxtype = IFLA_IPOIB_MAX,
43230 .policy = ipoib_policy,
43231diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43232index e853a21..56fc5a8 100644
43233--- a/drivers/input/gameport/gameport.c
43234+++ b/drivers/input/gameport/gameport.c
43235@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43236 */
43237 static void gameport_init_port(struct gameport *gameport)
43238 {
43239- static atomic_t gameport_no = ATOMIC_INIT(-1);
43240+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43241
43242 __module_get(THIS_MODULE);
43243
43244 mutex_init(&gameport->drv_mutex);
43245 device_initialize(&gameport->dev);
43246 dev_set_name(&gameport->dev, "gameport%lu",
43247- (unsigned long)atomic_inc_return(&gameport_no));
43248+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43249 gameport->dev.bus = &gameport_bus;
43250 gameport->dev.release = gameport_release_port;
43251 if (gameport->parent)
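
gameport_no is a pure naming sequence: the worst a wraparound could cause is a duplicate device name, not a corrupted reference count, so it becomes atomic_unchecked_t (input_no, led_seq, device_no, serio_no and serio_raw_no below follow the same pattern). The ATOMIC_INIT(-1) idiom matters: atomic_inc_return_unchecked() increments before returning, so the first caller gets 0. Sketched with a hypothetical driver, reusing the helpers assumed earlier:

static atomic_unchecked_t demo_seq = ATOMIC_INIT(-1);	/* first id is 0 */

static void demo_init_port(struct device *dev)
{
	/* inc-then-read hands out 0, 1, 2, ... even under concurrent probes */
	dev_set_name(dev, "demo%lu",
		     (unsigned long)atomic_inc_return_unchecked(&demo_seq));
}
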
43252diff --git a/drivers/input/input.c b/drivers/input/input.c
43253index 213e3a1..4fea837 100644
43254--- a/drivers/input/input.c
43255+++ b/drivers/input/input.c
43256@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
43257 */
43258 struct input_dev *input_allocate_device(void)
43259 {
43260- static atomic_t input_no = ATOMIC_INIT(-1);
43261+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43262 struct input_dev *dev;
43263
43264 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43265@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
43266 INIT_LIST_HEAD(&dev->node);
43267
43268 dev_set_name(&dev->dev, "input%lu",
43269- (unsigned long)atomic_inc_return(&input_no));
43270+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43271
43272 __module_get(THIS_MODULE);
43273 }
43274diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43275index 4a95b22..874c182 100644
43276--- a/drivers/input/joystick/sidewinder.c
43277+++ b/drivers/input/joystick/sidewinder.c
43278@@ -30,6 +30,7 @@
43279 #include <linux/kernel.h>
43280 #include <linux/module.h>
43281 #include <linux/slab.h>
43282+#include <linux/sched.h>
43283 #include <linux/input.h>
43284 #include <linux/gameport.h>
43285 #include <linux/jiffies.h>
43286diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43287index 3aa2f3f..53c00ea 100644
43288--- a/drivers/input/joystick/xpad.c
43289+++ b/drivers/input/joystick/xpad.c
43290@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43291
43292 static int xpad_led_probe(struct usb_xpad *xpad)
43293 {
43294- static atomic_t led_seq = ATOMIC_INIT(-1);
43295+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43296 unsigned long led_no;
43297 struct xpad_led *led;
43298 struct led_classdev *led_cdev;
43299@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43300 if (!led)
43301 return -ENOMEM;
43302
43303- led_no = atomic_inc_return(&led_seq);
43304+ led_no = atomic_inc_return_unchecked(&led_seq);
43305
43306 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43307 led->xpad = xpad;
43308diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43309index ac1fa5f..5f7502c 100644
43310--- a/drivers/input/misc/ims-pcu.c
43311+++ b/drivers/input/misc/ims-pcu.c
43312@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43313
43314 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43315 {
43316- static atomic_t device_no = ATOMIC_INIT(-1);
43317+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43318
43319 const struct ims_pcu_device_info *info;
43320 int error;
43321@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43322 }
43323
43324 /* Device appears to be operable, complete initialization */
43325- pcu->device_no = atomic_inc_return(&device_no);
43326+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43327
43328 /*
43329 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43330diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43331index f4cf664..3204fda 100644
43332--- a/drivers/input/mouse/psmouse.h
43333+++ b/drivers/input/mouse/psmouse.h
43334@@ -117,7 +117,7 @@ struct psmouse_attribute {
43335 ssize_t (*set)(struct psmouse *psmouse, void *data,
43336 const char *buf, size_t count);
43337 bool protect;
43338-};
43339+} __do_const;
43340 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43341
43342 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43343diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43344index b604564..3f14ae4 100644
43345--- a/drivers/input/mousedev.c
43346+++ b/drivers/input/mousedev.c
43347@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43348
43349 spin_unlock_irq(&client->packet_lock);
43350
43351- if (copy_to_user(buffer, data, count))
43352+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43353 return -EFAULT;
43354
43355 return count;
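
mousedev_read() copies out of a fixed on-stack buffer; count is normally clamped before this point, but the added count > sizeof(data) test makes the copy safe even if some other path ever hands in an oversized count, closing off a potential kernel stack disclosure. The hardened shape of such a read, sketched with hypothetical names:

static ssize_t demo_read(struct file *file, char __user *buffer,
			 size_t count, loff_t *ppos)
{
	u8 data[64];			/* fixed scratch buffer */

	memset(data, 0, sizeof(data));	/* stands in for the real fill */

	/* refuse rather than read past the end of the stack buffer */
	if (count > sizeof(data) || copy_to_user(buffer, data, count))
		return -EFAULT;

	return count;
}

The avm b1 and icn ISDN drivers below get the mirror-image bound on their copy_from_user() paths.
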
43356diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43357index a05a517..323a2fd 100644
43358--- a/drivers/input/serio/serio.c
43359+++ b/drivers/input/serio/serio.c
43360@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43361 */
43362 static void serio_init_port(struct serio *serio)
43363 {
43364- static atomic_t serio_no = ATOMIC_INIT(-1);
43365+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43366
43367 __module_get(THIS_MODULE);
43368
43369@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43370 mutex_init(&serio->drv_mutex);
43371 device_initialize(&serio->dev);
43372 dev_set_name(&serio->dev, "serio%lu",
43373- (unsigned long)atomic_inc_return(&serio_no));
43374+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43375 serio->dev.bus = &serio_bus;
43376 serio->dev.release = serio_release_port;
43377 serio->dev.groups = serio_device_attr_groups;
43378diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43379index 71ef5d6..93380a9 100644
43380--- a/drivers/input/serio/serio_raw.c
43381+++ b/drivers/input/serio/serio_raw.c
43382@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43383
43384 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43385 {
43386- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43387+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43388 struct serio_raw *serio_raw;
43389 int err;
43390
43391@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43392 }
43393
43394 snprintf(serio_raw->name, sizeof(serio_raw->name),
43395- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43396+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43397 kref_init(&serio_raw->kref);
43398 INIT_LIST_HEAD(&serio_raw->client_list);
43399 init_waitqueue_head(&serio_raw->wait);
43400diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43401index 9802485..2e9941d 100644
43402--- a/drivers/iommu/amd_iommu.c
43403+++ b/drivers/iommu/amd_iommu.c
43404@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43405
43406 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43407 {
43408+ phys_addr_t physaddr;
43409 WARN_ON(address & 0x7ULL);
43410
43411 memset(cmd, 0, sizeof(*cmd));
43412- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43413- cmd->data[1] = upper_32_bits(__pa(address));
43414+
43415+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43416+ if (object_starts_on_stack((void *)address)) {
43417+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43418+ physaddr = __pa((u64)adjbuf);
43419+ } else
43420+#endif
43421+ physaddr = __pa(address);
43422+
43423+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43424+ cmd->data[1] = upper_32_bits(physaddr);
43425 cmd->data[2] = 1;
43426 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43427 }
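
build_completion_wait() feeds the physical address of a stack object to the IOMMU, and with GRKERNSEC_KSTACKOVERFLOW enabled the process stack lives in vmalloc space, where __pa() is invalid. The hunk therefore rebases on-stack addresses onto current->lowmem_stack, the physically contiguous alias the feature keeps around for exactly such DMA-flavoured uses. Restated as a hedged helper (object_starts_on_stack() and lowmem_stack come from the grsecurity patch itself):

static phys_addr_t stack_safe_pa(void *addr)
{
#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
	if (object_starts_on_stack(addr)) {
		/* keep the offset, swap the vmapped stack for its lowmem twin */
		void *adj = addr - current->stack + current->lowmem_stack;
		return __pa(adj);
	}
#endif
	return __pa(addr);	/* already a lowmem address */
}
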
43428diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
43429index 6cd47b7..264d14a 100644
43430--- a/drivers/iommu/arm-smmu.c
43431+++ b/drivers/iommu/arm-smmu.c
43432@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
43433 cfg->irptndx = cfg->cbndx;
43434 }
43435
43436- ACCESS_ONCE(smmu_domain->smmu) = smmu;
43437+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
43438 arm_smmu_init_context_bank(smmu_domain);
43439 spin_unlock_irqrestore(&smmu_domain->lock, flags);
43440
43441diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43442index f7718d7..3ef740b 100644
43443--- a/drivers/iommu/iommu.c
43444+++ b/drivers/iommu/iommu.c
43445@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43446 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43447 {
43448 int err;
43449- struct notifier_block *nb;
43450+ notifier_block_no_const *nb;
43451 struct iommu_callback_data cb = {
43452 .ops = ops,
43453 };
43454diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43455index 89c4846..1de796f 100644
43456--- a/drivers/iommu/irq_remapping.c
43457+++ b/drivers/iommu/irq_remapping.c
43458@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43459 void panic_if_irq_remap(const char *msg)
43460 {
43461 if (irq_remapping_enabled)
43462- panic(msg);
43463+ panic("%s", msg);
43464 }
43465
43466 static void ir_ack_apic_edge(struct irq_data *data)
43467@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43468
43469 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43470 {
43471- chip->irq_print_chip = ir_print_prefix;
43472- chip->irq_ack = ir_ack_apic_edge;
43473- chip->irq_eoi = ir_ack_apic_level;
43474- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43475+ pax_open_kernel();
43476+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43477+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43478+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43479+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43480+ pax_close_kernel();
43481 }
43482
43483 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
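
irq_remap_modify_chip_defaults() pokes function pointers into an irq_chip that the constify plugin has moved into read-only memory, so the stores are bracketed with pax_open_kernel()/pax_close_kernel() and go through *(void **)& casts to defeat the added const qualifier. On x86 the pair works, roughly, by toggling the write-protect bit in CR0 with preemption disabled; a deliberately simplified sketch of that idea:

static inline unsigned long pax_open_kernel(void)
{
	preempt_disable();
	barrier();
	/* clear CR0.WP: supervisor writes to RO pages now succeed */
	write_cr0(read_cr0() & ~X86_CR0_WP);
	return 0;
}

static inline unsigned long pax_close_kernel(void)
{
	/* restore CR0.WP before anything else can run here */
	write_cr0(read_cr0() | X86_CR0_WP);
	barrier();
	preempt_enable();
	return 0;
}

The real implementation is more defensive (it verifies WP was in the expected state and cooperates with KERNEXEC's per-CPU accounting), but the brief CR0.WP window is the core mechanism.
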
43484diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43485index d617ee5..df8be8b 100644
43486--- a/drivers/irqchip/irq-gic.c
43487+++ b/drivers/irqchip/irq-gic.c
43488@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43489 * Supported arch specific GIC irq extension.
43490 * Default make them NULL.
43491 */
43492-struct irq_chip gic_arch_extn = {
43493+irq_chip_no_const gic_arch_extn = {
43494 .irq_eoi = NULL,
43495 .irq_mask = NULL,
43496 .irq_unmask = NULL,
43497@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43498 chained_irq_exit(chip, desc);
43499 }
43500
43501-static struct irq_chip gic_chip = {
43502+static irq_chip_no_const gic_chip __read_only = {
43503 .name = "GIC",
43504 .irq_mask = gic_mask_irq,
43505 .irq_unmask = gic_unmask_irq,
43506diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43507index 078cac5..fb0f846 100644
43508--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43509+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43510@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43511 struct intc_irqpin_iomem *i;
43512 struct resource *io[INTC_IRQPIN_REG_NR];
43513 struct resource *irq;
43514- struct irq_chip *irq_chip;
43515+ irq_chip_no_const *irq_chip;
43516 void (*enable_fn)(struct irq_data *d);
43517 void (*disable_fn)(struct irq_data *d);
43518 const char *name = dev_name(dev);
43519diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43520index 384e6ed..7a771b2 100644
43521--- a/drivers/irqchip/irq-renesas-irqc.c
43522+++ b/drivers/irqchip/irq-renesas-irqc.c
43523@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43524 struct irqc_priv *p;
43525 struct resource *io;
43526 struct resource *irq;
43527- struct irq_chip *irq_chip;
43528+ irq_chip_no_const *irq_chip;
43529 const char *name = dev_name(&pdev->dev);
43530 int ret;
43531 int k;
43532diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43533index 6a2df32..dc962f1 100644
43534--- a/drivers/isdn/capi/capi.c
43535+++ b/drivers/isdn/capi/capi.c
43536@@ -81,8 +81,8 @@ struct capiminor {
43537
43538 struct capi20_appl *ap;
43539 u32 ncci;
43540- atomic_t datahandle;
43541- atomic_t msgid;
43542+ atomic_unchecked_t datahandle;
43543+ atomic_unchecked_t msgid;
43544
43545 struct tty_port port;
43546 int ttyinstop;
43547@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43548 capimsg_setu16(s, 2, mp->ap->applid);
43549 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43550 capimsg_setu8 (s, 5, CAPI_RESP);
43551- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43552+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43553 capimsg_setu32(s, 8, mp->ncci);
43554 capimsg_setu16(s, 12, datahandle);
43555 }
43556@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43557 mp->outbytes -= len;
43558 spin_unlock_bh(&mp->outlock);
43559
43560- datahandle = atomic_inc_return(&mp->datahandle);
43561+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43562 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43563 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43564 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43565 capimsg_setu16(skb->data, 2, mp->ap->applid);
43566 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43567 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43568- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43569+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43570 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43571 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43572 capimsg_setu16(skb->data, 16, len); /* Data length */
43573diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43574index aecec6d..11e13c5 100644
43575--- a/drivers/isdn/gigaset/bas-gigaset.c
43576+++ b/drivers/isdn/gigaset/bas-gigaset.c
43577@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43578
43579
43580 static const struct gigaset_ops gigops = {
43581- gigaset_write_cmd,
43582- gigaset_write_room,
43583- gigaset_chars_in_buffer,
43584- gigaset_brkchars,
43585- gigaset_init_bchannel,
43586- gigaset_close_bchannel,
43587- gigaset_initbcshw,
43588- gigaset_freebcshw,
43589- gigaset_reinitbcshw,
43590- gigaset_initcshw,
43591- gigaset_freecshw,
43592- gigaset_set_modem_ctrl,
43593- gigaset_baud_rate,
43594- gigaset_set_line_ctrl,
43595- gigaset_isoc_send_skb,
43596- gigaset_isoc_input,
43597+ .write_cmd = gigaset_write_cmd,
43598+ .write_room = gigaset_write_room,
43599+ .chars_in_buffer = gigaset_chars_in_buffer,
43600+ .brkchars = gigaset_brkchars,
43601+ .init_bchannel = gigaset_init_bchannel,
43602+ .close_bchannel = gigaset_close_bchannel,
43603+ .initbcshw = gigaset_initbcshw,
43604+ .freebcshw = gigaset_freebcshw,
43605+ .reinitbcshw = gigaset_reinitbcshw,
43606+ .initcshw = gigaset_initcshw,
43607+ .freecshw = gigaset_freecshw,
43608+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43609+ .baud_rate = gigaset_baud_rate,
43610+ .set_line_ctrl = gigaset_set_line_ctrl,
43611+ .send_skb = gigaset_isoc_send_skb,
43612+ .handle_input = gigaset_isoc_input,
43613 };
43614
43615 /* bas_gigaset_init
43616diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43617index 600c79b..3752bab 100644
43618--- a/drivers/isdn/gigaset/interface.c
43619+++ b/drivers/isdn/gigaset/interface.c
43620@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43621 }
43622 tty->driver_data = cs;
43623
43624- ++cs->port.count;
43625+ atomic_inc(&cs->port.count);
43626
43627- if (cs->port.count == 1) {
43628+ if (atomic_read(&cs->port.count) == 1) {
43629 tty_port_tty_set(&cs->port, tty);
43630 cs->port.low_latency = 1;
43631 }
43632@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43633
43634 if (!cs->connected)
43635 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43636- else if (!cs->port.count)
43637+ else if (!atomic_read(&cs->port.count))
43638 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43639- else if (!--cs->port.count)
43640+ else if (!atomic_dec_return(&cs->port.count))
43641 tty_port_tty_set(&cs->port, NULL);
43642
43643 mutex_unlock(&cs->mutex);
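
The gigaset TTY glue keeps an open count in struct tty_port; the patch converts that field tree-wide from int to atomic_t (here and again in isdn_tty.c below), so the increment in open and the decrement-and-test in close compile to single atomic read-modify-write operations instead of unlocked int arithmetic. The resulting shape, sketched for a hypothetical port:

/* open: the first opener attaches the tty to the port */
atomic_inc(&port->count);
if (atomic_read(&port->count) == 1) {
	tty_port_tty_set(port, tty);
	port->low_latency = 1;
}

/* close: the last closer detaches it */
if (!atomic_dec_return(&port->count))
	tty_port_tty_set(port, NULL);

The inc and the follow-up read are still two separate operations, so the surrounding cs->mutex remains what serializes open against close; the atomic type chiefly protects the counter against readers outside that lock.
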
43644diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43645index 8c91fd5..14f13ce 100644
43646--- a/drivers/isdn/gigaset/ser-gigaset.c
43647+++ b/drivers/isdn/gigaset/ser-gigaset.c
43648@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43649 }
43650
43651 static const struct gigaset_ops ops = {
43652- gigaset_write_cmd,
43653- gigaset_write_room,
43654- gigaset_chars_in_buffer,
43655- gigaset_brkchars,
43656- gigaset_init_bchannel,
43657- gigaset_close_bchannel,
43658- gigaset_initbcshw,
43659- gigaset_freebcshw,
43660- gigaset_reinitbcshw,
43661- gigaset_initcshw,
43662- gigaset_freecshw,
43663- gigaset_set_modem_ctrl,
43664- gigaset_baud_rate,
43665- gigaset_set_line_ctrl,
43666- gigaset_m10x_send_skb, /* asyncdata.c */
43667- gigaset_m10x_input, /* asyncdata.c */
43668+ .write_cmd = gigaset_write_cmd,
43669+ .write_room = gigaset_write_room,
43670+ .chars_in_buffer = gigaset_chars_in_buffer,
43671+ .brkchars = gigaset_brkchars,
43672+ .init_bchannel = gigaset_init_bchannel,
43673+ .close_bchannel = gigaset_close_bchannel,
43674+ .initbcshw = gigaset_initbcshw,
43675+ .freebcshw = gigaset_freebcshw,
43676+ .reinitbcshw = gigaset_reinitbcshw,
43677+ .initcshw = gigaset_initcshw,
43678+ .freecshw = gigaset_freecshw,
43679+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43680+ .baud_rate = gigaset_baud_rate,
43681+ .set_line_ctrl = gigaset_set_line_ctrl,
43682+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43683+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43684 };
43685
43686
43687diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43688index 5f306e2..5342f88 100644
43689--- a/drivers/isdn/gigaset/usb-gigaset.c
43690+++ b/drivers/isdn/gigaset/usb-gigaset.c
43691@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43692 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43693 memcpy(cs->hw.usb->bchars, buf, 6);
43694 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43695- 0, 0, &buf, 6, 2000);
43696+ 0, 0, buf, 6, 2000);
43697 }
43698
43699 static void gigaset_freebcshw(struct bc_state *bcs)
43700@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43701 }
43702
43703 static const struct gigaset_ops ops = {
43704- gigaset_write_cmd,
43705- gigaset_write_room,
43706- gigaset_chars_in_buffer,
43707- gigaset_brkchars,
43708- gigaset_init_bchannel,
43709- gigaset_close_bchannel,
43710- gigaset_initbcshw,
43711- gigaset_freebcshw,
43712- gigaset_reinitbcshw,
43713- gigaset_initcshw,
43714- gigaset_freecshw,
43715- gigaset_set_modem_ctrl,
43716- gigaset_baud_rate,
43717- gigaset_set_line_ctrl,
43718- gigaset_m10x_send_skb,
43719- gigaset_m10x_input,
43720+ .write_cmd = gigaset_write_cmd,
43721+ .write_room = gigaset_write_room,
43722+ .chars_in_buffer = gigaset_chars_in_buffer,
43723+ .brkchars = gigaset_brkchars,
43724+ .init_bchannel = gigaset_init_bchannel,
43725+ .close_bchannel = gigaset_close_bchannel,
43726+ .initbcshw = gigaset_initbcshw,
43727+ .freebcshw = gigaset_freebcshw,
43728+ .reinitbcshw = gigaset_reinitbcshw,
43729+ .initcshw = gigaset_initcshw,
43730+ .freecshw = gigaset_freecshw,
43731+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43732+ .baud_rate = gigaset_baud_rate,
43733+ .set_line_ctrl = gigaset_set_line_ctrl,
43734+ .send_skb = gigaset_m10x_send_skb,
43735+ .handle_input = gigaset_m10x_input,
43736 };
43737
43738 /*
43739diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43740index 4d9b195..455075c 100644
43741--- a/drivers/isdn/hardware/avm/b1.c
43742+++ b/drivers/isdn/hardware/avm/b1.c
43743@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43744 }
43745 if (left) {
43746 if (t4file->user) {
43747- if (copy_from_user(buf, dp, left))
43748+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43749 return -EFAULT;
43750 } else {
43751 memcpy(buf, dp, left);
43752@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43753 }
43754 if (left) {
43755 if (config->user) {
43756- if (copy_from_user(buf, dp, left))
43757+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43758 return -EFAULT;
43759 } else {
43760 memcpy(buf, dp, left);
43761diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43762index 9b856e1..fa03c92 100644
43763--- a/drivers/isdn/i4l/isdn_common.c
43764+++ b/drivers/isdn/i4l/isdn_common.c
43765@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43766 } else
43767 return -EINVAL;
43768 case IIOCDBGVAR:
43769+ if (!capable(CAP_SYS_RAWIO))
43770+ return -EPERM;
43771 if (arg) {
43772 if (copy_to_user(argp, &dev, sizeof(ulong)))
43773 return -EFAULT;
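
IIOCDBGVAR hands userspace the kernel address of the global dev structure, a straightforward kernel-pointer infoleak that undermines address-space randomization, so the patch gates it behind CAP_SYS_RAWIO rather than deleting the debug hook. Condensed, the guarded shape of the case is:

case IIOCDBGVAR:
	/* exposes a raw kernel pointer: privileged callers only */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (arg) {
		if (copy_to_user(argp, &dev, sizeof(ulong)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;

CAP_SYS_RAWIO is the conventional gate for interfaces that expose raw addresses, since a holder of that capability can generally read kernel memory through other means anyway.
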
43774diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43775index 91d5730..336523e 100644
43776--- a/drivers/isdn/i4l/isdn_concap.c
43777+++ b/drivers/isdn/i4l/isdn_concap.c
43778@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43779 }
43780
43781 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43782- &isdn_concap_dl_data_req,
43783- &isdn_concap_dl_connect_req,
43784- &isdn_concap_dl_disconn_req
43785+ .data_req = &isdn_concap_dl_data_req,
43786+ .connect_req = &isdn_concap_dl_connect_req,
43787+ .disconn_req = &isdn_concap_dl_disconn_req
43788 };
43789
43790 /* The following should better go into a dedicated source file such that
43791diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43792index bc91261..2ef7e36 100644
43793--- a/drivers/isdn/i4l/isdn_tty.c
43794+++ b/drivers/isdn/i4l/isdn_tty.c
43795@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43796
43797 #ifdef ISDN_DEBUG_MODEM_OPEN
43798 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43799- port->count);
43800+ atomic_read(&port->count));
43801 #endif
43802- port->count++;
43803+ atomic_inc(&port->count);
43804 port->tty = tty;
43805 /*
43806 * Start up serial port
43807@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43808 #endif
43809 return;
43810 }
43811- if ((tty->count == 1) && (port->count != 1)) {
43812+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43813 /*
43814 * Uh, oh. tty->count is 1, which means that the tty
43815 * structure will be freed. Info->count should always
43816@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43817 * serial port won't be shutdown.
43818 */
43819 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43820- "info->count is %d\n", port->count);
43821- port->count = 1;
43822+ "info->count is %d\n", atomic_read(&port->count));
43823+ atomic_set(&port->count, 1);
43824 }
43825- if (--port->count < 0) {
43826+ if (atomic_dec_return(&port->count) < 0) {
43827 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43828- info->line, port->count);
43829- port->count = 0;
43830+ info->line, atomic_read(&port->count));
43831+ atomic_set(&port->count, 0);
43832 }
43833- if (port->count) {
43834+ if (atomic_read(&port->count)) {
43835 #ifdef ISDN_DEBUG_MODEM_OPEN
43836 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43837 #endif
43838@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43839 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43840 return;
43841 isdn_tty_shutdown(info);
43842- port->count = 0;
43843+ atomic_set(&port->count, 0);
43844 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43845 port->tty = NULL;
43846 wake_up_interruptible(&port->open_wait);
43847@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43848 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43849 modem_info *info = &dev->mdm.info[i];
43850
43851- if (info->port.count == 0)
43852+ if (atomic_read(&info->port.count) == 0)
43853 continue;
43854 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43855 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
43856diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43857index e2d4e58..40cd045 100644
43858--- a/drivers/isdn/i4l/isdn_x25iface.c
43859+++ b/drivers/isdn/i4l/isdn_x25iface.c
43860@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43861
43862
43863 static struct concap_proto_ops ix25_pops = {
43864- &isdn_x25iface_proto_new,
43865- &isdn_x25iface_proto_del,
43866- &isdn_x25iface_proto_restart,
43867- &isdn_x25iface_proto_close,
43868- &isdn_x25iface_xmit,
43869- &isdn_x25iface_receive,
43870- &isdn_x25iface_connect_ind,
43871- &isdn_x25iface_disconn_ind
43872+ .proto_new = &isdn_x25iface_proto_new,
43873+ .proto_del = &isdn_x25iface_proto_del,
43874+ .restart = &isdn_x25iface_proto_restart,
43875+ .close = &isdn_x25iface_proto_close,
43876+ .encap_and_xmit = &isdn_x25iface_xmit,
43877+ .data_ind = &isdn_x25iface_receive,
43878+ .connect_ind = &isdn_x25iface_connect_ind,
43879+ .disconn_ind = &isdn_x25iface_disconn_ind
43880 };
43881
43882 /* error message helper function */
43883diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
43884index 6a7447c..b4987ea 100644
43885--- a/drivers/isdn/icn/icn.c
43886+++ b/drivers/isdn/icn/icn.c
43887@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
43888 if (count > len)
43889 count = len;
43890 if (user) {
43891- if (copy_from_user(msg, buf, count))
43892+ if (count > sizeof msg || copy_from_user(msg, buf, count))
43893 return -EFAULT;
43894 } else
43895 memcpy(msg, buf, count);
43896@@ -1609,7 +1609,7 @@ icn_setup(char *line)
43897 if (ints[0] > 1)
43898 membase = (unsigned long)ints[2];
43899 if (str && *str) {
43900- strcpy(sid, str);
43901+ strlcpy(sid, str, sizeof(sid));
43902 icn_id = sid;
43903 if ((p = strchr(sid, ','))) {
43904 *p++ = 0;
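
icn_setup() copies part of the icn= boot parameter into the fixed-size sid[] array; strcpy() would write past the array for an oversized command-line string, so the patch substitutes strlcpy(), which never writes more than the stated size and always NUL-terminates. The general substitution:

char sid[20];	/* fixed-size destination; size shown here for illustration */

/* before: unbounded, trusts the kernel command line */
/* strcpy(sid, str); */

/* after: truncates safely, sid is always NUL-terminated */
strlcpy(sid, str, sizeof(sid));

When silent truncation is unacceptable, strlcpy()'s return value (the full source length) can additionally be compared against the buffer size.
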
43905diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
43906index 87f7dff..7300125 100644
43907--- a/drivers/isdn/mISDN/dsp_cmx.c
43908+++ b/drivers/isdn/mISDN/dsp_cmx.c
43909@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
43910 static u16 dsp_count; /* last sample count */
43911 static int dsp_count_valid; /* if we have last sample count */
43912
43913-void
43914+void __intentional_overflow(-1)
43915 dsp_cmx_send(void *arg)
43916 {
43917 struct dsp_conf *conf;
43918diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
43919index 0f9ed1e..2715d6f 100644
43920--- a/drivers/leds/leds-clevo-mail.c
43921+++ b/drivers/leds/leds-clevo-mail.c
43922@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
43923 * detected as working, but in reality it is not) as low as
43924 * possible.
43925 */
43926-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
43927+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
43928 {
43929 .callback = clevo_mail_led_dmi_callback,
43930 .ident = "Clevo D410J",
43931diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
43932index 046cb70..6b20d39 100644
43933--- a/drivers/leds/leds-ss4200.c
43934+++ b/drivers/leds/leds-ss4200.c
43935@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
43936 * detected as working, but in reality it is not) as low as
43937 * possible.
43938 */
43939-static struct dmi_system_id nas_led_whitelist[] __initdata = {
43940+static struct dmi_system_id nas_led_whitelist[] __initconst = {
43941 {
43942 .callback = ss4200_led_dmi_callback,
43943 .ident = "Intel SS4200-E",
43944diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
43945index 6590558..a74c5dd 100644
43946--- a/drivers/lguest/core.c
43947+++ b/drivers/lguest/core.c
43948@@ -96,9 +96,17 @@ static __init int map_switcher(void)
43949 * The end address needs +1 because __get_vm_area allocates an
43950 * extra guard page, so we need space for that.
43951 */
43952+
43953+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
43954+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43955+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
43956+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43957+#else
43958 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43959 VM_ALLOC, switcher_addr, switcher_addr
43960 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43961+#endif
43962+
43963 if (!switcher_vma) {
43964 err = -ENOMEM;
43965 printk("lguest: could not map switcher pages high\n");
43966@@ -121,7 +129,7 @@ static __init int map_switcher(void)
43967 * Now the Switcher is mapped at the right address, we can't fail!
43968 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
43969 */
43970- memcpy(switcher_vma->addr, start_switcher_text,
43971+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
43972 end_switcher_text - start_switcher_text);
43973
43974 printk(KERN_INFO "lguest: mapped switcher at %p\n",
43975diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
43976index e8b55c3..3514c37 100644
43977--- a/drivers/lguest/page_tables.c
43978+++ b/drivers/lguest/page_tables.c
43979@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
43980 /*:*/
43981
43982 #ifdef CONFIG_X86_PAE
43983-static void release_pmd(pmd_t *spmd)
43984+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
43985 {
43986 /* If the entry's not present, there's nothing to release. */
43987 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
43988diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
43989index 922a1ac..9dd0c2a 100644
43990--- a/drivers/lguest/x86/core.c
43991+++ b/drivers/lguest/x86/core.c
43992@@ -59,7 +59,7 @@ static struct {
43993 /* Offset from where switcher.S was compiled to where we've copied it */
43994 static unsigned long switcher_offset(void)
43995 {
43996- return switcher_addr - (unsigned long)start_switcher_text;
43997+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
43998 }
43999
44000 /* This cpu's struct lguest_pages (after the Switcher text page) */
44001@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44002 * These copies are pretty cheap, so we do them unconditionally: */
44003 /* Save the current Host top-level page directory.
44004 */
44005+
44006+#ifdef CONFIG_PAX_PER_CPU_PGD
44007+ pages->state.host_cr3 = read_cr3();
44008+#else
44009 pages->state.host_cr3 = __pa(current->mm->pgd);
44010+#endif
44011+
44012 /*
44013 * Set up the Guest's page tables to see this CPU's pages (and no
44014 * other CPU's pages).
44015@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
44016 * compiled-in switcher code and the high-mapped copy we just made.
44017 */
44018 for (i = 0; i < IDT_ENTRIES; i++)
44019- default_idt_entries[i] += switcher_offset();
44020+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44021
44022 /*
44023 * Set up the Switcher's per-cpu areas.
44024@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44025 * it will be undisturbed when we switch. To change %cs and jump we
44026 * need this structure to feed to Intel's "lcall" instruction.
44027 */
44028- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44029+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44030 lguest_entry.segment = LGUEST_CS;
44031
44032 /*
44033diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44034index 40634b0..4f5855e 100644
44035--- a/drivers/lguest/x86/switcher_32.S
44036+++ b/drivers/lguest/x86/switcher_32.S
44037@@ -87,6 +87,7 @@
44038 #include <asm/page.h>
44039 #include <asm/segment.h>
44040 #include <asm/lguest.h>
44041+#include <asm/processor-flags.h>
44042
44043 // We mark the start of the code to copy
44044 // It's placed in .text tho it's never run here
44045@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44046 // Changes type when we load it: damn Intel!
44047 // For after we switch over our page tables
44048 // That entry will be read-only: we'd crash.
44049+
44050+#ifdef CONFIG_PAX_KERNEXEC
44051+ mov %cr0, %edx
44052+ xor $X86_CR0_WP, %edx
44053+ mov %edx, %cr0
44054+#endif
44055+
44056 movl $(GDT_ENTRY_TSS*8), %edx
44057 ltr %dx
44058
44059@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44060 // Let's clear it again for our return.
44061 // The GDT descriptor of the Host
44062 // Points to the table after two "size" bytes
44063- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44064+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44065 // Clear "used" from type field (byte 5, bit 2)
44066- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44067+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44068+
44069+#ifdef CONFIG_PAX_KERNEXEC
44070+ mov %cr0, %eax
44071+ xor $X86_CR0_WP, %eax
44072+ mov %eax, %cr0
44073+#endif
44074
44075 // Once our page table's switched, the Guest is live!
44076 // The Host fades as we run this final step.
44077@@ -295,13 +309,12 @@ deliver_to_host:
44078 // I consulted gcc, and it gave
44079 // These instructions, which I gladly credit:
44080 leal (%edx,%ebx,8), %eax
44081- movzwl (%eax),%edx
44082- movl 4(%eax), %eax
44083- xorw %ax, %ax
44084- orl %eax, %edx
44085+ movl 4(%eax), %edx
44086+ movw (%eax), %dx
44087 // Now the address of the handler's in %edx
44088 // We call it now: its "iret" drops us home.
44089- jmp *%edx
44090+ ljmp $__KERNEL_CS, $1f
44091+1: jmp *%edx
44092
44093 // Every interrupt can come to us here
44094 // But we must truly tell each apart.
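
The lguest changes are KERNEXEC fallout: with PAX_KERNEXEC on i386 the kernel text runs at a different virtual address than the one symbols are linked at, so code that treats text symbols such as start_switcher_text or switch_to_guest as plain data pointers must translate them with ktla_ktva() ("kernel text linear address to kernel text virtual address"). Assuming the i386 definition used by PaX, the translation is a constant offset:

#ifdef CONFIG_PAX_KERNEXEC
/* text symbols are linked __KERNEL_TEXT_OFFSET away from where they live */
#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)
#define ktva_ktla(addr)	((addr) - __KERNEL_TEXT_OFFSET)
#else
#define ktla_ktva(addr)	(addr)
#define ktva_ktla(addr)	(addr)
#endif

The switcher_32.S hunks are the assembly half of the same story: CR0.WP is cleared around the write that marks the (now read-only) TSS descriptor unused, and the final indirect jump is preceded by an ljmp through $__KERNEL_CS so the handler runs on the kernel's code segment rather than lguest's.
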
44095diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44096index a08e3ee..df8ade2 100644
44097--- a/drivers/md/bcache/closure.h
44098+++ b/drivers/md/bcache/closure.h
44099@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44100 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44101 struct workqueue_struct *wq)
44102 {
44103- BUG_ON(object_is_on_stack(cl));
44104+ BUG_ON(object_starts_on_stack(cl));
44105 closure_set_ip(cl);
44106 cl->fn = fn;
44107 cl->wq = wq;
44108diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44109index 1695ee5..89f18ab 100644
44110--- a/drivers/md/bitmap.c
44111+++ b/drivers/md/bitmap.c
44112@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44113 chunk_kb ? "KB" : "B");
44114 if (bitmap->storage.file) {
44115 seq_printf(seq, ", file: ");
44116- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44117+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44118 }
44119
44120 seq_printf(seq, "\n");
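
The bitmap.c change widens seq_path()'s escape set to include the backslash: seq_path() replaces any byte found in that set with its \ooo octal form, so without '\\' in the set a crafted bitmap filename containing literal backslashes could imitate escaped whitespace and confuse parsers of the seq output. A userspace illustration of the escaping rule (a hypothetical helper, not the kernel's mangle_path()):

#include <stdio.h>
#include <string.h>

static void emit_escaped(const char *name, const char *esc)
{
	for (; *name; name++) {
		if (strchr(esc, *name))
			printf("\\%03o", (unsigned char)*name);	/* e.g. '\\' -> \134 */
		else
			putchar(*name);
	}
}
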
44121diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44122index 73f791b..8c5d3ac 100644
44123--- a/drivers/md/dm-ioctl.c
44124+++ b/drivers/md/dm-ioctl.c
44125@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44126 cmd == DM_LIST_VERSIONS_CMD)
44127 return 0;
44128
44129- if ((cmd == DM_DEV_CREATE_CMD)) {
44130+ if (cmd == DM_DEV_CREATE_CMD) {
44131 if (!*param->name) {
44132 DMWARN("name not supplied when creating device");
44133 return -EINVAL;
44134diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44135index 089d627..ef7352e 100644
44136--- a/drivers/md/dm-raid1.c
44137+++ b/drivers/md/dm-raid1.c
44138@@ -40,7 +40,7 @@ enum dm_raid1_error {
44139
44140 struct mirror {
44141 struct mirror_set *ms;
44142- atomic_t error_count;
44143+ atomic_unchecked_t error_count;
44144 unsigned long error_type;
44145 struct dm_dev *dev;
44146 sector_t offset;
44147@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44148 struct mirror *m;
44149
44150 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44151- if (!atomic_read(&m->error_count))
44152+ if (!atomic_read_unchecked(&m->error_count))
44153 return m;
44154
44155 return NULL;
44156@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44157 * simple way to tell if a device has encountered
44158 * errors.
44159 */
44160- atomic_inc(&m->error_count);
44161+ atomic_inc_unchecked(&m->error_count);
44162
44163 if (test_and_set_bit(error_type, &m->error_type))
44164 return;
44165@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44166 struct mirror *m = get_default_mirror(ms);
44167
44168 do {
44169- if (likely(!atomic_read(&m->error_count)))
44170+ if (likely(!atomic_read_unchecked(&m->error_count)))
44171 return m;
44172
44173 if (m-- == ms->mirror)
44174@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44175 {
44176 struct mirror *default_mirror = get_default_mirror(m->ms);
44177
44178- return !atomic_read(&default_mirror->error_count);
44179+ return !atomic_read_unchecked(&default_mirror->error_count);
44180 }
44181
44182 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44183@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44184 */
44185 if (likely(region_in_sync(ms, region, 1)))
44186 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44187- else if (m && atomic_read(&m->error_count))
44188+ else if (m && atomic_read_unchecked(&m->error_count))
44189 m = NULL;
44190
44191 if (likely(m))
44192@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44193 }
44194
44195 ms->mirror[mirror].ms = ms;
44196- atomic_set(&(ms->mirror[mirror].error_count), 0);
44197+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44198 ms->mirror[mirror].error_type = 0;
44199 ms->mirror[mirror].offset = offset;
44200
44201@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
44202 */
44203 static char device_status_char(struct mirror *m)
44204 {
44205- if (!atomic_read(&(m->error_count)))
44206+ if (!atomic_read_unchecked(&(m->error_count)))
44207 return 'A';
44208
44209 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
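
The dm-raid1 hunks are one instance of a conversion applied across drivers/md below (dm-stripe, dm.c, md.c, raid1/10/5): under PaX's REFCOUNT feature the ordinary atomic_t operations detect overflow and treat it as a reference-count attack, so counters that are pure statistics and may legitimately wrap, like these per-mirror error tallies, are moved to the *_unchecked variants. A plausible fallback shape for the unchecked API when the feature is disabled (an assumption for illustration, not the patch's literal header):

typedef struct { int counter; } atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain wrapping add: overflow detection deliberately absent */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}
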
44210diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44211index f478a4c..4b8e5ef 100644
44212--- a/drivers/md/dm-stats.c
44213+++ b/drivers/md/dm-stats.c
44214@@ -382,7 +382,7 @@ do_sync_free:
44215 synchronize_rcu_expedited();
44216 dm_stat_free(&s->rcu_head);
44217 } else {
44218- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44219+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44220 call_rcu(&s->rcu_head, dm_stat_free);
44221 }
44222 return 0;
44223@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44224 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44225 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44226 ));
44227- ACCESS_ONCE(last->last_sector) = end_sector;
44228- ACCESS_ONCE(last->last_rw) = bi_rw;
44229+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44230+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44231 }
44232
44233 rcu_read_lock();
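
ACCESS_ONCE_RW exists because this patch const-qualifies ACCESS_ONCE so that stray lock-free writes fail to compile; sites that intentionally publish a value, as dm-stats does here, switch to the writable variant. The pair plausibly looks like this (a hedged reconstruction from the usage, not quoted from the patch):

#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* read-only */
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* may be written */
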
44234diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44235index f8b37d4..5c5cafd 100644
44236--- a/drivers/md/dm-stripe.c
44237+++ b/drivers/md/dm-stripe.c
44238@@ -21,7 +21,7 @@ struct stripe {
44239 struct dm_dev *dev;
44240 sector_t physical_start;
44241
44242- atomic_t error_count;
44243+ atomic_unchecked_t error_count;
44244 };
44245
44246 struct stripe_c {
44247@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44248 kfree(sc);
44249 return r;
44250 }
44251- atomic_set(&(sc->stripe[i].error_count), 0);
44252+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44253 }
44254
44255 ti->private = sc;
44256@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44257 DMEMIT("%d ", sc->stripes);
44258 for (i = 0; i < sc->stripes; i++) {
44259 DMEMIT("%s ", sc->stripe[i].dev->name);
44260- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44261+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44262 'D' : 'A';
44263 }
44264 buffer[i] = '\0';
44265@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44266 */
44267 for (i = 0; i < sc->stripes; i++)
44268 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44269- atomic_inc(&(sc->stripe[i].error_count));
44270- if (atomic_read(&(sc->stripe[i].error_count)) <
44271+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44272+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44273 DM_IO_ERROR_THRESHOLD)
44274 schedule_work(&sc->trigger_event);
44275 }
44276diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44277index 3afae9e..4e1c954 100644
44278--- a/drivers/md/dm-table.c
44279+++ b/drivers/md/dm-table.c
44280@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44281 if (!dev_size)
44282 return 0;
44283
44284- if ((start >= dev_size) || (start + len > dev_size)) {
44285+ if ((start >= dev_size) || (len > dev_size - start)) {
44286 DMWARN("%s: %s too small for target: "
44287 "start=%llu, len=%llu, dev_size=%llu",
44288 dm_device_name(ti->table->md), bdevname(bdev, b),
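
The dm-table change is an integer-overflow fix rather than a hardening annotation: start + len can wrap around the 64-bit sector_t and slip past the old comparison, whereas len > dev_size - start cannot underflow once start >= dev_size has been ruled out. A self-contained restatement of the corrected predicate:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;	/* stand-in for the kernel's sector_t */

static bool area_is_invalid(sector_t start, sector_t len, sector_t dev_size)
{
	/* checked in this order, dev_size - start cannot underflow */
	return start >= dev_size || len > dev_size - start;
}
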
44289diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44290index 43adbb8..7b34305 100644
44291--- a/drivers/md/dm-thin-metadata.c
44292+++ b/drivers/md/dm-thin-metadata.c
44293@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44294 {
44295 pmd->info.tm = pmd->tm;
44296 pmd->info.levels = 2;
44297- pmd->info.value_type.context = pmd->data_sm;
44298+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44299 pmd->info.value_type.size = sizeof(__le64);
44300 pmd->info.value_type.inc = data_block_inc;
44301 pmd->info.value_type.dec = data_block_dec;
44302@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44303
44304 pmd->bl_info.tm = pmd->tm;
44305 pmd->bl_info.levels = 1;
44306- pmd->bl_info.value_type.context = pmd->data_sm;
44307+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44308 pmd->bl_info.value_type.size = sizeof(__le64);
44309 pmd->bl_info.value_type.inc = data_block_inc;
44310 pmd->bl_info.value_type.dec = data_block_dec;
44311diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44312index 64b10e0..07db8f4 100644
44313--- a/drivers/md/dm.c
44314+++ b/drivers/md/dm.c
44315@@ -185,9 +185,9 @@ struct mapped_device {
44316 /*
44317 * Event handling.
44318 */
44319- atomic_t event_nr;
44320+ atomic_unchecked_t event_nr;
44321 wait_queue_head_t eventq;
44322- atomic_t uevent_seq;
44323+ atomic_unchecked_t uevent_seq;
44324 struct list_head uevent_list;
44325 spinlock_t uevent_lock; /* Protect access to uevent_list */
44326
44327@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
44328 spin_lock_init(&md->deferred_lock);
44329 atomic_set(&md->holders, 1);
44330 atomic_set(&md->open_count, 0);
44331- atomic_set(&md->event_nr, 0);
44332- atomic_set(&md->uevent_seq, 0);
44333+ atomic_set_unchecked(&md->event_nr, 0);
44334+ atomic_set_unchecked(&md->uevent_seq, 0);
44335 INIT_LIST_HEAD(&md->uevent_list);
44336 INIT_LIST_HEAD(&md->table_devices);
44337 spin_lock_init(&md->uevent_lock);
44338@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
44339
44340 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44341
44342- atomic_inc(&md->event_nr);
44343+ atomic_inc_unchecked(&md->event_nr);
44344 wake_up(&md->eventq);
44345 }
44346
44347@@ -3034,18 +3034,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44348
44349 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44350 {
44351- return atomic_add_return(1, &md->uevent_seq);
44352+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44353 }
44354
44355 uint32_t dm_get_event_nr(struct mapped_device *md)
44356 {
44357- return atomic_read(&md->event_nr);
44358+ return atomic_read_unchecked(&md->event_nr);
44359 }
44360
44361 int dm_wait_event(struct mapped_device *md, int event_nr)
44362 {
44363 return wait_event_interruptible(md->eventq,
44364- (event_nr != atomic_read(&md->event_nr)));
44365+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44366 }
44367
44368 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44369diff --git a/drivers/md/md.c b/drivers/md/md.c
44370index 709755f..5bc3fa4 100644
44371--- a/drivers/md/md.c
44372+++ b/drivers/md/md.c
44373@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44374 * start build, activate spare
44375 */
44376 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44377-static atomic_t md_event_count;
44378+static atomic_unchecked_t md_event_count;
44379 void md_new_event(struct mddev *mddev)
44380 {
44381- atomic_inc(&md_event_count);
44382+ atomic_inc_unchecked(&md_event_count);
44383 wake_up(&md_event_waiters);
44384 }
44385 EXPORT_SYMBOL_GPL(md_new_event);
44386@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44387 */
44388 static void md_new_event_inintr(struct mddev *mddev)
44389 {
44390- atomic_inc(&md_event_count);
44391+ atomic_inc_unchecked(&md_event_count);
44392 wake_up(&md_event_waiters);
44393 }
44394
44395@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44396 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44397 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44398 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44399- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44400+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44401
44402 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44403 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44404@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44405 else
44406 sb->resync_offset = cpu_to_le64(0);
44407
44408- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44409+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44410
44411 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44412 sb->size = cpu_to_le64(mddev->dev_sectors);
44413@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44414 static ssize_t
44415 errors_show(struct md_rdev *rdev, char *page)
44416 {
44417- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44418+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44419 }
44420
44421 static ssize_t
44422@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44423 char *e;
44424 unsigned long n = simple_strtoul(buf, &e, 10);
44425 if (*buf && (*e == 0 || *e == '\n')) {
44426- atomic_set(&rdev->corrected_errors, n);
44427+ atomic_set_unchecked(&rdev->corrected_errors, n);
44428 return len;
44429 }
44430 return -EINVAL;
44431@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
44432 rdev->sb_loaded = 0;
44433 rdev->bb_page = NULL;
44434 atomic_set(&rdev->nr_pending, 0);
44435- atomic_set(&rdev->read_errors, 0);
44436- atomic_set(&rdev->corrected_errors, 0);
44437+ atomic_set_unchecked(&rdev->read_errors, 0);
44438+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44439
44440 INIT_LIST_HEAD(&rdev->same_set);
44441 init_waitqueue_head(&rdev->blocked_wait);
44442@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44443
44444 spin_unlock(&pers_lock);
44445 seq_printf(seq, "\n");
44446- seq->poll_event = atomic_read(&md_event_count);
44447+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44448 return 0;
44449 }
44450 if (v == (void*)2) {
44451@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44452 return error;
44453
44454 seq = file->private_data;
44455- seq->poll_event = atomic_read(&md_event_count);
44456+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44457 return error;
44458 }
44459
44460@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44461 /* always allow read */
44462 mask = POLLIN | POLLRDNORM;
44463
44464- if (seq->poll_event != atomic_read(&md_event_count))
44465+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44466 mask |= POLLERR | POLLPRI;
44467 return mask;
44468 }
44469@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44470 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44471 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44472 (int)part_stat_read(&disk->part0, sectors[1]) -
44473- atomic_read(&disk->sync_io);
44474+ atomic_read_unchecked(&disk->sync_io);
44475 /* sync IO will cause sync_io to increase before the disk_stats
44476 * as sync_io is counted when a request starts, and
44477 * disk_stats is counted when it completes.
44478diff --git a/drivers/md/md.h b/drivers/md/md.h
44479index 03cec5b..0a658c1 100644
44480--- a/drivers/md/md.h
44481+++ b/drivers/md/md.h
44482@@ -94,13 +94,13 @@ struct md_rdev {
44483 * only maintained for arrays that
44484 * support hot removal
44485 */
44486- atomic_t read_errors; /* number of consecutive read errors that
44487+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44488 * we have tried to ignore.
44489 */
44490 struct timespec last_read_error; /* monotonic time since our
44491 * last read error
44492 */
44493- atomic_t corrected_errors; /* number of corrected read errors,
44494+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44495 * for reporting to userspace and storing
44496 * in superblock.
44497 */
44498@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44499
44500 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44501 {
44502- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44503+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44504 }
44505
44506 struct md_personality
44507diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44508index e8a9042..35bd145 100644
44509--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44510+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44511@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44512 * Flick into a mode where all blocks get allocated in the new area.
44513 */
44514 smm->begin = old_len;
44515- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44516+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44517
44518 /*
44519 * Extend.
44520@@ -714,7 +714,7 @@ out:
44521 /*
44522 * Switch back to normal behaviour.
44523 */
44524- memcpy(sm, &ops, sizeof(*sm));
44525+ memcpy((void *)sm, &ops, sizeof(*sm));
44526 return r;
44527 }
44528
44529diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44530index 3e6d115..ffecdeb 100644
44531--- a/drivers/md/persistent-data/dm-space-map.h
44532+++ b/drivers/md/persistent-data/dm-space-map.h
44533@@ -71,6 +71,7 @@ struct dm_space_map {
44534 dm_sm_threshold_fn fn,
44535 void *context);
44536 };
44537+typedef struct dm_space_map __no_const dm_space_map_no_const;
44538
44539 /*----------------------------------------------------------------*/
44540
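
The (void *) casts in dm-space-map-metadata.c and the dm_space_map_no_const typedef above belong together: grsecurity's constify gcc plugin moves ops-like structures into read-only memory, but sm_metadata_extend() really does memcpy() over a live dm_space_map, so that one use gets a writable alias. The general shape, assuming a plugin-less fallback where the marker expands to nothing:

#ifndef __no_const
#define __no_const	/* defined by the constify plugin; empty otherwise */
#endif

struct example_ops { void (*fn)(void); };
typedef struct example_ops __no_const example_ops_no_const;	/* writable alias */

The same idea explains the __no_const annotations on the dvb frontend ops structs further down.
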
44541diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44542index 2f2f38f..f6a8ebe 100644
44543--- a/drivers/md/raid1.c
44544+++ b/drivers/md/raid1.c
44545@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44546 if (r1_sync_page_io(rdev, sect, s,
44547 bio->bi_io_vec[idx].bv_page,
44548 READ) != 0)
44549- atomic_add(s, &rdev->corrected_errors);
44550+ atomic_add_unchecked(s, &rdev->corrected_errors);
44551 }
44552 sectors -= s;
44553 sect += s;
44554@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44555 !test_bit(Faulty, &rdev->flags)) {
44556 if (r1_sync_page_io(rdev, sect, s,
44557 conf->tmppage, READ)) {
44558- atomic_add(s, &rdev->corrected_errors);
44559+ atomic_add_unchecked(s, &rdev->corrected_errors);
44560 printk(KERN_INFO
44561 "md/raid1:%s: read error corrected "
44562 "(%d sectors at %llu on %s)\n",
44563diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44564index 32e282f..5cec803 100644
44565--- a/drivers/md/raid10.c
44566+++ b/drivers/md/raid10.c
44567@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
44568 /* The write handler will notice the lack of
44569 * R10BIO_Uptodate and record any errors etc
44570 */
44571- atomic_add(r10_bio->sectors,
44572+ atomic_add_unchecked(r10_bio->sectors,
44573 &conf->mirrors[d].rdev->corrected_errors);
44574
44575 /* for reconstruct, we always reschedule after a read.
44576@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44577 {
44578 struct timespec cur_time_mon;
44579 unsigned long hours_since_last;
44580- unsigned int read_errors = atomic_read(&rdev->read_errors);
44581+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44582
44583 ktime_get_ts(&cur_time_mon);
44584
44585@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44586 * overflowing the shift of read_errors by hours_since_last.
44587 */
44588 if (hours_since_last >= 8 * sizeof(read_errors))
44589- atomic_set(&rdev->read_errors, 0);
44590+ atomic_set_unchecked(&rdev->read_errors, 0);
44591 else
44592- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44593+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44594 }
44595
44596 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44597@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44598 return;
44599
44600 check_decay_read_errors(mddev, rdev);
44601- atomic_inc(&rdev->read_errors);
44602- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44603+ atomic_inc_unchecked(&rdev->read_errors);
44604+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44605 char b[BDEVNAME_SIZE];
44606 bdevname(rdev->bdev, b);
44607
44608@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44609 "md/raid10:%s: %s: Raid device exceeded "
44610 "read_error threshold [cur %d:max %d]\n",
44611 mdname(mddev), b,
44612- atomic_read(&rdev->read_errors), max_read_errors);
44613+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44614 printk(KERN_NOTICE
44615 "md/raid10:%s: %s: Failing raid device\n",
44616 mdname(mddev), b);
44617@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44618 sect +
44619 choose_data_offset(r10_bio, rdev)),
44620 bdevname(rdev->bdev, b));
44621- atomic_add(s, &rdev->corrected_errors);
44622+ atomic_add_unchecked(s, &rdev->corrected_errors);
44623 }
44624
44625 rdev_dec_pending(rdev, mddev);
44626diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44627index 8577cc7..e80e05d 100644
44628--- a/drivers/md/raid5.c
44629+++ b/drivers/md/raid5.c
44630@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44631 return 1;
44632 }
44633
44634+#ifdef CONFIG_GRKERNSEC_HIDESYM
44635+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44636+#endif
44637+
44638 static int grow_stripes(struct r5conf *conf, int num)
44639 {
44640 struct kmem_cache *sc;
44641@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44642 "raid%d-%s", conf->level, mdname(conf->mddev));
44643 else
44644 sprintf(conf->cache_name[0],
44645+#ifdef CONFIG_GRKERNSEC_HIDESYM
44646+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44647+#else
44648 "raid%d-%p", conf->level, conf->mddev);
44649+#endif
44650 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44651
44652 conf->active_name = 0;
44653@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44654 mdname(conf->mddev), STRIPE_SECTORS,
44655 (unsigned long long)s,
44656 bdevname(rdev->bdev, b));
44657- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44658+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44659 clear_bit(R5_ReadError, &sh->dev[i].flags);
44660 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44661 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44662 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44663
44664- if (atomic_read(&rdev->read_errors))
44665- atomic_set(&rdev->read_errors, 0);
44666+ if (atomic_read_unchecked(&rdev->read_errors))
44667+ atomic_set_unchecked(&rdev->read_errors, 0);
44668 } else {
44669 const char *bdn = bdevname(rdev->bdev, b);
44670 int retry = 0;
44671 int set_bad = 0;
44672
44673 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44674- atomic_inc(&rdev->read_errors);
44675+ atomic_inc_unchecked(&rdev->read_errors);
44676 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44677 printk_ratelimited(
44678 KERN_WARNING
44679@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44680 mdname(conf->mddev),
44681 (unsigned long long)s,
44682 bdn);
44683- } else if (atomic_read(&rdev->read_errors)
44684+ } else if (atomic_read_unchecked(&rdev->read_errors)
44685 > conf->max_nr_stripes)
44686 printk(KERN_WARNING
44687 "md/raid:%s: Too many read errors, failing device %s.\n",
44688diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44689index 983db75..ef9248c 100644
44690--- a/drivers/media/dvb-core/dvbdev.c
44691+++ b/drivers/media/dvb-core/dvbdev.c
44692@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44693 const struct dvb_device *template, void *priv, int type)
44694 {
44695 struct dvb_device *dvbdev;
44696- struct file_operations *dvbdevfops;
44697+ file_operations_no_const *dvbdevfops;
44698 struct device *clsdev;
44699 int minor;
44700 int id;
44701diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44702index 6ad22b6..6e90e2a 100644
44703--- a/drivers/media/dvb-frontends/af9033.h
44704+++ b/drivers/media/dvb-frontends/af9033.h
44705@@ -96,6 +96,6 @@ struct af9033_ops {
44706 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44707 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44708 int onoff);
44709-};
44710+} __no_const;
44711
44712 #endif /* AF9033_H */
44713diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44714index 9b6c3bb..baeb5c7 100644
44715--- a/drivers/media/dvb-frontends/dib3000.h
44716+++ b/drivers/media/dvb-frontends/dib3000.h
44717@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44718 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44719 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44720 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44721-};
44722+} __no_const;
44723
44724 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44725 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44726diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44727index 1fea0e9..321ce8f 100644
44728--- a/drivers/media/dvb-frontends/dib7000p.h
44729+++ b/drivers/media/dvb-frontends/dib7000p.h
44730@@ -64,7 +64,7 @@ struct dib7000p_ops {
44731 int (*get_adc_power)(struct dvb_frontend *fe);
44732 int (*slave_reset)(struct dvb_frontend *fe);
44733 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44734-};
44735+} __no_const;
44736
44737 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
44738 void *dib7000p_attach(struct dib7000p_ops *ops);
44739diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
44740index 84cc103..5780c54 100644
44741--- a/drivers/media/dvb-frontends/dib8000.h
44742+++ b/drivers/media/dvb-frontends/dib8000.h
44743@@ -61,7 +61,7 @@ struct dib8000_ops {
44744 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
44745 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
44746 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
44747-};
44748+} __no_const;
44749
44750 #if IS_ENABLED(CONFIG_DVB_DIB8000)
44751 void *dib8000_attach(struct dib8000_ops *ops);
44752diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44753index 860c98fc..497fa25 100644
44754--- a/drivers/media/pci/cx88/cx88-video.c
44755+++ b/drivers/media/pci/cx88/cx88-video.c
44756@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44757
44758 /* ------------------------------------------------------------------ */
44759
44760-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44761-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44762-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44763+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44764+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44765+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44766
44767 module_param_array(video_nr, int, NULL, 0444);
44768 module_param_array(vbi_nr, int, NULL, 0444);
44769diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44770index 802642d..5534900 100644
44771--- a/drivers/media/pci/ivtv/ivtv-driver.c
44772+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44773@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44774 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44775
44776 /* ivtv instance counter */
44777-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44778+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44779
44780 /* Parameter declarations */
44781 static int cardtype[IVTV_MAX_CARDS];
44782diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
44783index 8cbe6b4..ea3601c 100644
44784--- a/drivers/media/pci/solo6x10/solo6x10-core.c
44785+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
44786@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
44787
44788 static int solo_sysfs_init(struct solo_dev *solo_dev)
44789 {
44790- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
44791+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
44792 struct device *dev = &solo_dev->dev;
44793 const char *driver;
44794 int i;
44795diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
44796index c7141f2..5301fec 100644
44797--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
44798+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
44799@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
44800
44801 int solo_g723_init(struct solo_dev *solo_dev)
44802 {
44803- static struct snd_device_ops ops = { NULL };
44804+ static struct snd_device_ops ops = { };
44805 struct snd_card *card;
44806 struct snd_kcontrol_new kctl;
44807 char name[32];
44808diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44809index 8c84846..27b4f83 100644
44810--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
44811+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44812@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
44813
44814 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
44815 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
44816- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
44817+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
44818 if (p2m_id < 0)
44819 p2m_id = -p2m_id;
44820 }
44821diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
44822index bd8edfa..e82ed85 100644
44823--- a/drivers/media/pci/solo6x10/solo6x10.h
44824+++ b/drivers/media/pci/solo6x10/solo6x10.h
44825@@ -220,7 +220,7 @@ struct solo_dev {
44826
44827 /* P2M DMA Engine */
44828 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
44829- atomic_t p2m_count;
44830+ atomic_unchecked_t p2m_count;
44831 int p2m_jiffies;
44832 unsigned int p2m_timeouts;
44833
44834diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
44835index c135165..dc69499 100644
44836--- a/drivers/media/pci/tw68/tw68-core.c
44837+++ b/drivers/media/pci/tw68/tw68-core.c
44838@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
44839 module_param_array(card, int, NULL, 0444);
44840 MODULE_PARM_DESC(card, "card type");
44841
44842-static atomic_t tw68_instance = ATOMIC_INIT(0);
44843+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
44844
44845 /* ------------------------------------------------------------------ */
44846
44847diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44848index ba2d8f9..1566684 100644
44849--- a/drivers/media/platform/omap/omap_vout.c
44850+++ b/drivers/media/platform/omap/omap_vout.c
44851@@ -63,7 +63,6 @@ enum omap_vout_channels {
44852 OMAP_VIDEO2,
44853 };
44854
44855-static struct videobuf_queue_ops video_vbq_ops;
44856 /* Variables configurable through module params*/
44857 static u32 video1_numbuffers = 3;
44858 static u32 video2_numbuffers = 3;
44859@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
44860 {
44861 struct videobuf_queue *q;
44862 struct omap_vout_device *vout = NULL;
44863+ static struct videobuf_queue_ops video_vbq_ops = {
44864+ .buf_setup = omap_vout_buffer_setup,
44865+ .buf_prepare = omap_vout_buffer_prepare,
44866+ .buf_release = omap_vout_buffer_release,
44867+ .buf_queue = omap_vout_buffer_queue,
44868+ };
44869
44870 vout = video_drvdata(file);
44871 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44872@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
44873 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44874
44875 q = &vout->vbq;
44876- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44877- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44878- video_vbq_ops.buf_release = omap_vout_buffer_release;
44879- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44880 spin_lock_init(&vout->vbq_lock);
44881
44882 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
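
The omap_vout change serves the same constification goal by a different route: instead of a file-scope writable ops struct whose fields are patched at open() time, the table is built once with a designated initializer, so it is fully formed at compile time and can be kept read-only. Reduced to its essentials (hypothetical types):

struct vbq_ops { int (*buf_setup)(void); };

static int my_setup(void) { return 0; }

static const struct vbq_ops *get_ops(void)
{
	/* complete at compile time: eligible for .rodata placement */
	static const struct vbq_ops ops = { .buf_setup = my_setup };
	return &ops;
}
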
44883diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44884index fb2acc5..a2fcbdc4 100644
44885--- a/drivers/media/platform/s5p-tv/mixer.h
44886+++ b/drivers/media/platform/s5p-tv/mixer.h
44887@@ -156,7 +156,7 @@ struct mxr_layer {
44888 /** layer index (unique identifier) */
44889 int idx;
44890 /** callbacks for layer methods */
44891- struct mxr_layer_ops ops;
44892+ struct mxr_layer_ops *ops;
44893 /** format array */
44894 const struct mxr_format **fmt_array;
44895 /** size of format array */
44896diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44897index 74344c7..a39e70e 100644
44898--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44899+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44900@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44901 {
44902 struct mxr_layer *layer;
44903 int ret;
44904- struct mxr_layer_ops ops = {
44905+ static struct mxr_layer_ops ops = {
44906 .release = mxr_graph_layer_release,
44907 .buffer_set = mxr_graph_buffer_set,
44908 .stream_set = mxr_graph_stream_set,
44909diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44910index b713403..53cb5ad 100644
44911--- a/drivers/media/platform/s5p-tv/mixer_reg.c
44912+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44913@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44914 layer->update_buf = next;
44915 }
44916
44917- layer->ops.buffer_set(layer, layer->update_buf);
44918+ layer->ops->buffer_set(layer, layer->update_buf);
44919
44920 if (done && done != layer->shadow_buf)
44921 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
44922diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
44923index b4d2696..91df48e 100644
44924--- a/drivers/media/platform/s5p-tv/mixer_video.c
44925+++ b/drivers/media/platform/s5p-tv/mixer_video.c
44926@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
44927 layer->geo.src.height = layer->geo.src.full_height;
44928
44929 mxr_geometry_dump(mdev, &layer->geo);
44930- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44931+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44932 mxr_geometry_dump(mdev, &layer->geo);
44933 }
44934
44935@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
44936 layer->geo.dst.full_width = mbus_fmt.width;
44937 layer->geo.dst.full_height = mbus_fmt.height;
44938 layer->geo.dst.field = mbus_fmt.field;
44939- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44940+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44941
44942 mxr_geometry_dump(mdev, &layer->geo);
44943 }
44944@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
44945 /* set source size to highest accepted value */
44946 geo->src.full_width = max(geo->dst.full_width, pix->width);
44947 geo->src.full_height = max(geo->dst.full_height, pix->height);
44948- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44949+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44950 mxr_geometry_dump(mdev, &layer->geo);
44951 /* set cropping to total visible screen */
44952 geo->src.width = pix->width;
44953@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
44954 geo->src.x_offset = 0;
44955 geo->src.y_offset = 0;
44956 /* assure consistency of geometry */
44957- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44958+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44959 mxr_geometry_dump(mdev, &layer->geo);
44960 /* set full size to lowest possible value */
44961 geo->src.full_width = 0;
44962 geo->src.full_height = 0;
44963- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44964+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44965 mxr_geometry_dump(mdev, &layer->geo);
44966
44967 /* returning results */
44968@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
44969 target->width = s->r.width;
44970 target->height = s->r.height;
44971
44972- layer->ops.fix_geometry(layer, stage, s->flags);
44973+ layer->ops->fix_geometry(layer, stage, s->flags);
44974
44975 /* retrieve update selection rectangle */
44976 res.left = target->x_offset;
44977@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
44978 mxr_output_get(mdev);
44979
44980 mxr_layer_update_output(layer);
44981- layer->ops.format_set(layer);
44982+ layer->ops->format_set(layer);
44983 /* enabling layer in hardware */
44984 spin_lock_irqsave(&layer->enq_slock, flags);
44985 layer->state = MXR_LAYER_STREAMING;
44986 spin_unlock_irqrestore(&layer->enq_slock, flags);
44987
44988- layer->ops.stream_set(layer, MXR_ENABLE);
44989+ layer->ops->stream_set(layer, MXR_ENABLE);
44990 mxr_streamer_get(mdev);
44991
44992 return 0;
44993@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
44994 spin_unlock_irqrestore(&layer->enq_slock, flags);
44995
44996 /* disabling layer in hardware */
44997- layer->ops.stream_set(layer, MXR_DISABLE);
44998+ layer->ops->stream_set(layer, MXR_DISABLE);
44999 /* remove one streamer */
45000 mxr_streamer_put(mdev);
45001 /* allow changes in output configuration */
45002@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45003
45004 void mxr_layer_release(struct mxr_layer *layer)
45005 {
45006- if (layer->ops.release)
45007- layer->ops.release(layer);
45008+ if (layer->ops->release)
45009+ layer->ops->release(layer);
45010 }
45011
45012 void mxr_base_layer_release(struct mxr_layer *layer)
45013@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45014
45015 layer->mdev = mdev;
45016 layer->idx = idx;
45017- layer->ops = *ops;
45018+ layer->ops = ops;
45019
45020 spin_lock_init(&layer->enq_slock);
45021 INIT_LIST_HEAD(&layer->enq_list);
45022diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45023index c9388c4..ce71ece 100644
45024--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45025+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45026@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45027 {
45028 struct mxr_layer *layer;
45029 int ret;
45030- struct mxr_layer_ops ops = {
45031+ static struct mxr_layer_ops ops = {
45032 .release = mxr_vp_layer_release,
45033 .buffer_set = mxr_vp_buffer_set,
45034 .stream_set = mxr_vp_stream_set,
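
The s5p-tv mixer diffs (mixer.h through mixer_vp_layer.c) convert an embedded ops copy into a pointer: mxr_base_layer_create() previously did layer->ops = *ops, forcing a writable per-layer copy of the table, whereas storing the address of one static table lets the table itself stay read-only and turns every call site into ops->fn(). The shape of the conversion, stripped down:

struct mxr_ops { void (*stream_set)(int on); };

struct mxr_layer_min {
	const struct mxr_ops *ops;	/* was: struct mxr_ops ops (by value) */
};

static void layer_start(struct mxr_layer_min *layer)
{
	layer->ops->stream_set(1);	/* was: layer->ops.stream_set(1) */
}
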
45035diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45036index 82affae..42833ec 100644
45037--- a/drivers/media/radio/radio-cadet.c
45038+++ b/drivers/media/radio/radio-cadet.c
45039@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45040 unsigned char readbuf[RDS_BUFFER];
45041 int i = 0;
45042
45043+ if (count > RDS_BUFFER)
45044+ return -EINVAL;
45045 mutex_lock(&dev->lock);
45046 if (dev->rdsstat == 0)
45047 cadet_start_rds(dev);
45048@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45049 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45050 mutex_unlock(&dev->lock);
45051
45052- if (i && copy_to_user(data, readbuf, i))
45053- return -EFAULT;
45054+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45055+ i = -EFAULT;
45056+
45057 return i;
45058 }
45059
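
radio-cadet's read path gains two bounds checks: the request size is rejected up front if it exceeds the RDS ring, and the byte count is re-verified against the stack buffer immediately before copy_to_user(). A reduced userspace model of the pattern (hypothetical names; the real function also handles locking and RDS startup):

#include <stddef.h>
#include <string.h>

#define RDS_BUFFER 256

static long bounded_read(char *dst, size_t count, const char *ring, size_t avail)
{
	char tmp[RDS_BUFFER];
	size_t n;

	if (count > sizeof(tmp))
		return -1;		/* refuse oversize requests up front */
	n = count < avail ? count : avail;
	memcpy(tmp, ring, n);		/* stands in for draining rdsbuf */
	memcpy(dst, tmp, n);		/* stands in for copy_to_user() */
	return (long)n;
}
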
45060diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45061index 5236035..c622c74 100644
45062--- a/drivers/media/radio/radio-maxiradio.c
45063+++ b/drivers/media/radio/radio-maxiradio.c
45064@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45065 /* TEA5757 pin mappings */
45066 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45067
45068-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45069+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45070
45071 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45072 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45073diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45074index 050b3bb..79f62b9 100644
45075--- a/drivers/media/radio/radio-shark.c
45076+++ b/drivers/media/radio/radio-shark.c
45077@@ -79,7 +79,7 @@ struct shark_device {
45078 u32 last_val;
45079 };
45080
45081-static atomic_t shark_instance = ATOMIC_INIT(0);
45082+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45083
45084 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45085 {
45086diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45087index 8654e0d..0608a64 100644
45088--- a/drivers/media/radio/radio-shark2.c
45089+++ b/drivers/media/radio/radio-shark2.c
45090@@ -74,7 +74,7 @@ struct shark_device {
45091 u8 *transfer_buffer;
45092 };
45093
45094-static atomic_t shark_instance = ATOMIC_INIT(0);
45095+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45096
45097 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45098 {
45099diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45100index dccf586..d5db411 100644
45101--- a/drivers/media/radio/radio-si476x.c
45102+++ b/drivers/media/radio/radio-si476x.c
45103@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45104 struct si476x_radio *radio;
45105 struct v4l2_ctrl *ctrl;
45106
45107- static atomic_t instance = ATOMIC_INIT(0);
45108+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45109
45110 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45111 if (!radio)
45112diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45113index 704397f..4d05977 100644
45114--- a/drivers/media/radio/wl128x/fmdrv_common.c
45115+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45116@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45117 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45118
45119 /* Radio Nr */
45120-static u32 radio_nr = -1;
45121+static int radio_nr = -1;
45122 module_param(radio_nr, int, 0444);
45123 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45124
45125diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45126index 9fd1527..8927230 100644
45127--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45128+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45129@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45130
45131 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45132 {
45133- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45134- char result[64];
45135- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45136- sizeof(result), 0);
45137+ char *buf;
45138+ char *result;
45139+ int retval;
45140+
45141+ buf = kmalloc(2, GFP_KERNEL);
45142+ if (buf == NULL)
45143+ return -ENOMEM;
45144+ result = kmalloc(64, GFP_KERNEL);
45145+ if (result == NULL) {
45146+ kfree(buf);
45147+ return -ENOMEM;
45148+ }
45149+
45150+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45151+ buf[1] = enable ? 1 : 0;
45152+
45153+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45154+
45155+ kfree(buf);
45156+ kfree(result);
45157+ return retval;
45158 }
45159
45160 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45161 {
45162- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45163- char state[3];
45164- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45165+ char *buf;
45166+ char *state;
45167+ int retval;
45168+
45169+ buf = kmalloc(2, GFP_KERNEL);
45170+ if (buf == NULL)
45171+ return -ENOMEM;
45172+ state = kmalloc(3, GFP_KERNEL);
45173+ if (state == NULL) {
45174+ kfree(buf);
45175+ return -ENOMEM;
45176+ }
45177+
45178+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45179+ buf[1] = enable ? 0 : 1;
45180+
45181+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45182+
45183+ kfree(buf);
45184+ kfree(state);
45185+ return retval;
45186 }
45187
45188 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45189 {
45190- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45191- char state[3];
45192+ char *query;
45193+ char *state;
45194 int ret;
45195+ query = kmalloc(1, GFP_KERNEL);
45196+ if (query == NULL)
45197+ return -ENOMEM;
45198+ state = kmalloc(3, GFP_KERNEL);
45199+ if (state == NULL) {
45200+ kfree(query);
45201+ return -ENOMEM;
45202+ }
45203+
45204+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45205
45206 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45207
45208- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45209- sizeof(state), 0);
45210+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45211 if (ret < 0) {
45212 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45213 "state info\n");
45214@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45215
45216 /* Copy this pointer as we are gonna need it in the release phase */
45217 cinergyt2_usb_device = adap->dev;
45218-
45219+ kfree(query);
45220+ kfree(state);
45221 return 0;
45222 }
45223
45224@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45225 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45226 {
45227 struct cinergyt2_state *st = d->priv;
45228- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45229+ u8 *key, *cmd;
45230 int i;
45231
45232+ cmd = kmalloc(1, GFP_KERNEL);
45233+ if (cmd == NULL)
45234+ return -ENOMEM;
45235+ key = kzalloc(5, GFP_KERNEL);
45236+ if (key == NULL) {
45237+ kfree(cmd);
45238+ return -ENOMEM;
45239+ }
45240+
45241+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45242+
45243 *state = REMOTE_NO_KEY_PRESSED;
45244
45245- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45246+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45247 if (key[4] == 0xff) {
45248 /* key repeat */
45249 st->rc_counter++;
45250@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45251 *event = d->last_event;
45252 deb_rc("repeat key, event %x\n",
45253 *event);
45254- return 0;
45255+ goto out;
45256 }
45257 }
45258 deb_rc("repeated key (non repeatable)\n");
45259 }
45260- return 0;
45261+ goto out;
45262 }
45263
45264 /* hack to pass checksum on the custom field */
45265@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45266
45267 deb_rc("key: %*ph\n", 5, key);
45268 }
45269+out:
45270+ kfree(cmd);
45271+ kfree(key);
45272 return 0;
45273 }
45274
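
The cinergyT2 rewrites, like the cinergyT2-fe.c, dvb-usb-firmware.c and technisat changes below, all replace on-stack arrays with kmalloc()ed buffers because dvb_usb_generic_rw() ultimately passes them to usb_bulk_msg(), and USB transfer buffers must be DMA-capable: heap memory qualifies, stack memory does not (and cannot be translated for DMA at all once stacks are vmalloc-backed under GRKERNSEC_KSTACKOVERFLOW). The recurring shape, as a sketch (a hypothetical wrapper; assumes the dvb-usb headers of the file above):

#include <linux/slab.h>

static int send_one_cmd(struct dvb_usb_device *d, u8 cmd_byte)
{
	u8 *cmd, *resp;
	int ret;

	cmd = kmalloc(1, GFP_KERNEL);	/* DMA-safe, unlike u8 cmd[1] on stack */
	if (cmd == NULL)
		return -ENOMEM;
	resp = kmalloc(3, GFP_KERNEL);
	if (resp == NULL) {
		kfree(cmd);
		return -ENOMEM;
	}

	cmd[0] = cmd_byte;
	ret = dvb_usb_generic_rw(d, cmd, 1, resp, 3, 0);

	kfree(resp);
	kfree(cmd);
	return ret;
}
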
45275diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45276index c890fe4..f9b2ae6 100644
45277--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45278+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45279@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45280 fe_status_t *status)
45281 {
45282 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45283- struct dvbt_get_status_msg result;
45284- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45285+ struct dvbt_get_status_msg *result;
45286+ u8 *cmd;
45287 int ret;
45288
45289- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45290- sizeof(result), 0);
45291+ cmd = kmalloc(1, GFP_KERNEL);
45292+ if (cmd == NULL)
45293+ return -ENOMEM;
45294+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45295+ if (result == NULL) {
45296+ kfree(cmd);
45297+ return -ENOMEM;
45298+ }
45299+
45300+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45301+
45302+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45303+ sizeof(*result), 0);
45304 if (ret < 0)
45305- return ret;
45306+ goto out;
45307
45308 *status = 0;
45309
45310- if (0xffff - le16_to_cpu(result.gain) > 30)
45311+ if (0xffff - le16_to_cpu(result->gain) > 30)
45312 *status |= FE_HAS_SIGNAL;
45313- if (result.lock_bits & (1 << 6))
45314+ if (result->lock_bits & (1 << 6))
45315 *status |= FE_HAS_LOCK;
45316- if (result.lock_bits & (1 << 5))
45317+ if (result->lock_bits & (1 << 5))
45318 *status |= FE_HAS_SYNC;
45319- if (result.lock_bits & (1 << 4))
45320+ if (result->lock_bits & (1 << 4))
45321 *status |= FE_HAS_CARRIER;
45322- if (result.lock_bits & (1 << 1))
45323+ if (result->lock_bits & (1 << 1))
45324 *status |= FE_HAS_VITERBI;
45325
45326 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45327 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45328 *status &= ~FE_HAS_LOCK;
45329
45330- return 0;
45331+out:
45332+ kfree(cmd);
45333+ kfree(result);
45334+ return ret;
45335 }
45336
45337 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45338 {
45339 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45340- struct dvbt_get_status_msg status;
45341- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45342+ struct dvbt_get_status_msg *status;
45343+ char *cmd;
45344 int ret;
45345
45346- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45347- sizeof(status), 0);
45348+ cmd = kmalloc(1, GFP_KERNEL);
45349+ if (cmd == NULL)
45350+ return -ENOMEM;
45351+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45352+ if (status == NULL) {
45353+ kfree(cmd);
45354+ return -ENOMEM;
45355+ }
45356+
45357+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45358+
45359+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45360+ sizeof(*status), 0);
45361 if (ret < 0)
45362- return ret;
45363+ goto out;
45364
45365- *ber = le32_to_cpu(status.viterbi_error_rate);
45366+ *ber = le32_to_cpu(status->viterbi_error_rate);
45367+out:
45368+ kfree(cmd);
45369+ kfree(status);
45370 return 0;
45371 }
45372
45373 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45374 {
45375 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45376- struct dvbt_get_status_msg status;
45377- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45378+ struct dvbt_get_status_msg *status;
45379+ u8 *cmd;
45380 int ret;
45381
45382- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45383- sizeof(status), 0);
45384+ cmd = kmalloc(1, GFP_KERNEL);
45385+ if (cmd == NULL)
45386+ return -ENOMEM;
45387+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45388+ if (status == NULL) {
45389+ kfree(cmd);
45390+ return -ENOMEM;
45391+ }
45392+
45393+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45394+
45395+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45396+ sizeof(*status), 0);
45397 if (ret < 0) {
45398 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45399 ret);
45400- return ret;
45401+ goto out;
45402 }
45403- *unc = le32_to_cpu(status.uncorrected_block_count);
45404- return 0;
45405+ *unc = le32_to_cpu(status->uncorrected_block_count);
45406+
45407+out:
45408+ kfree(cmd);
45409+ kfree(status);
45410+ return ret;
45411 }
45412
45413 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45414 u16 *strength)
45415 {
45416 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45417- struct dvbt_get_status_msg status;
45418- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45419+ struct dvbt_get_status_msg *status;
45420+ char *cmd;
45421 int ret;
45422
45423- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45424- sizeof(status), 0);
45425+ cmd = kmalloc(1, GFP_KERNEL);
45426+ if (cmd == NULL)
45427+ return -ENOMEM;
45428+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45429+ if (status == NULL) {
45430+ kfree(cmd);
45431+ return -ENOMEM;
45432+ }
45433+
45434+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45435+
45436+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45437+ sizeof(*status), 0);
45438 if (ret < 0) {
45439 err("cinergyt2_fe_read_signal_strength() Failed!"
45440 " (Error=%d)\n", ret);
45441- return ret;
45442+ goto out;
45443 }
45444- *strength = (0xffff - le16_to_cpu(status.gain));
45445+ *strength = (0xffff - le16_to_cpu(status->gain));
45446+
45447+out:
45448+ kfree(cmd);
45449+ kfree(status);
45450 return 0;
45451 }
45452
45453 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45454 {
45455 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45456- struct dvbt_get_status_msg status;
45457- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45458+ struct dvbt_get_status_msg *status;
45459+ char *cmd;
45460 int ret;
45461
45462- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45463- sizeof(status), 0);
45464+ cmd = kmalloc(1, GFP_KERNEL);
45465+ if (cmd == NULL)
45466+ return -ENOMEM;
45467+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45468+ if (status == NULL) {
45469+ kfree(cmd);
45470+ return -ENOMEM;
45471+ }
45472+
45473+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45474+
45475+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45476+ sizeof(*status), 0);
45477 if (ret < 0) {
45478 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45479- return ret;
45480+ goto out;
45481 }
45482- *snr = (status.snr << 8) | status.snr;
45483- return 0;
45484+ *snr = (status->snr << 8) | status->snr;
45485+
45486+out:
45487+ kfree(cmd);
45488+ kfree(status);
45489+ return ret;
45490 }
45491
45492 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45493@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45494 {
45495 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45496 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45497- struct dvbt_set_parameters_msg param;
45498- char result[2];
45499+ struct dvbt_set_parameters_msg *param;
45500+ char *result;
45501 int err;
45502
45503- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45504- param.tps = cpu_to_le16(compute_tps(fep));
45505- param.freq = cpu_to_le32(fep->frequency / 1000);
45506- param.flags = 0;
45507+ result = kmalloc(2, GFP_KERNEL);
45508+ if (result == NULL)
45509+ return -ENOMEM;
45510+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45511+ if (param == NULL) {
45512+ kfree(result);
45513+ return -ENOMEM;
45514+ }
45515+
45516+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45517+ param->tps = cpu_to_le16(compute_tps(fep));
45518+ param->freq = cpu_to_le32(fep->frequency / 1000);
45519+ param->flags = 0;
45520
45521 switch (fep->bandwidth_hz) {
45522 default:
45523 case 8000000:
45524- param.bandwidth = 8;
45525+ param->bandwidth = 8;
45526 break;
45527 case 7000000:
45528- param.bandwidth = 7;
45529+ param->bandwidth = 7;
45530 break;
45531 case 6000000:
45532- param.bandwidth = 6;
45533+ param->bandwidth = 6;
45534 break;
45535 }
45536
45537 err = dvb_usb_generic_rw(state->d,
45538- (char *)&param, sizeof(param),
45539- result, sizeof(result), 0);
45540+ (char *)param, sizeof(*param),
45541+ result, 2, 0);
45542 if (err < 0)
45543 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45544
45545- return (err < 0) ? err : 0;
45546+ kfree(result);
45547+ kfree(param);
45548+ return err;
45549 }
45550
45551 static void cinergyt2_fe_release(struct dvb_frontend *fe)
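
All five read_* callbacks above now repeat the same allocate/transfer/free sequence for CINERGYT2_EP1_GET_TUNER_STATUS, and their error handling is uneven: read_ber and read_signal_strength still return 0 after a failed transfer, while read_status and read_snr propagate ret. A single helper would make the paths uniform; a sketch under the same assumptions as the code above (kfree(NULL) is a no-op, so one exit label suffices):

static int cinergyt2_get_status_msg(struct cinergyt2_fe_state *state,
				    struct dvbt_get_status_msg *out)
{
	struct dvbt_get_status_msg *msg;
	u8 *cmd;
	int ret;

	cmd = kmalloc(1, GFP_KERNEL);
	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (cmd == NULL || msg == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
	ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)msg, sizeof(*msg), 0);
	if (ret >= 0)
		*out = *msg;
out:
	kfree(msg);
	kfree(cmd);
	return ret;
}
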
45552diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45553index 733a7ff..f8b52e3 100644
45554--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45555+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45556@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45557
45558 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45559 {
45560- struct hexline hx;
45561- u8 reset;
45562+ struct hexline *hx;
45563+ u8 *reset;
45564 int ret,pos=0;
45565
45566+ reset = kmalloc(1, GFP_KERNEL);
45567+ if (reset == NULL)
45568+ return -ENOMEM;
45569+
45570+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45571+ if (hx == NULL) {
45572+ kfree(reset);
45573+ return -ENOMEM;
45574+ }
45575+
45576 /* stop the CPU */
45577- reset = 1;
45578- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45579+ reset[0] = 1;
45580+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45581 err("could not stop the USB controller CPU.");
45582
45583- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45584- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45585- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45586+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45587+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45588+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45589
45590- if (ret != hx.len) {
45591+ if (ret != hx->len) {
45592 err("error while transferring firmware "
45593 "(transferred size: %d, block size: %d)",
45594- ret,hx.len);
45595+ ret,hx->len);
45596 ret = -EINVAL;
45597 break;
45598 }
45599 }
45600 if (ret < 0) {
45601 err("firmware download failed at %d with %d",pos,ret);
45602+ kfree(reset);
45603+ kfree(hx);
45604 return ret;
45605 }
45606
45607 if (ret == 0) {
45608 /* restart the CPU */
45609- reset = 0;
45610- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45611+ reset[0] = 0;
45612+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45613 err("could not restart the USB controller CPU.");
45614 ret = -EINVAL;
45615 }
45616 } else
45617 ret = -EIO;
45618
45619+ kfree(reset);
45620+ kfree(hx);
45621+
45622 return ret;
45623 }
45624 EXPORT_SYMBOL(usb_cypress_load_firmware);
45625diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45626index 1a3df10..57997a5 100644
45627--- a/drivers/media/usb/dvb-usb/dw2102.c
45628+++ b/drivers/media/usb/dvb-usb/dw2102.c
45629@@ -118,7 +118,7 @@ struct su3000_state {
45630
45631 struct s6x0_state {
45632 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45633-};
45634+} __no_const;
45635
45636 /* debug */
45637 static int dvb_usb_dw2102_debug;
45638diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45639index 5801ae7..83f71fa 100644
45640--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45641+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45642@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45643 static int technisat_usb2_i2c_access(struct usb_device *udev,
45644 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45645 {
45646- u8 b[64];
45647- int ret, actual_length;
45648+ u8 *b = kmalloc(64, GFP_KERNEL);
45649+ int ret, actual_length, error = 0;
45650+
45651+ if (b == NULL)
45652+ return -ENOMEM;
45653
45654 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45655 debug_dump(tx, txlen, deb_i2c);
45656@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45657
45658 if (ret < 0) {
45659 err("i2c-error: out failed %02x = %d", device_addr, ret);
45660- return -ENODEV;
45661+ error = -ENODEV;
45662+ goto out;
45663 }
45664
45665 ret = usb_bulk_msg(udev,
45666@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45667 b, 64, &actual_length, 1000);
45668 if (ret < 0) {
45669 err("i2c-error: in failed %02x = %d", device_addr, ret);
45670- return -ENODEV;
45671+ error = -ENODEV;
45672+ goto out;
45673 }
45674
45675 if (b[0] != I2C_STATUS_OK) {
45676@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45677 /* handle tuner-i2c-nak */
45678 if (!(b[0] == I2C_STATUS_NAK &&
45679 device_addr == 0x60
45680- /* && device_is_technisat_usb2 */))
45681- return -ENODEV;
45682+ /* && device_is_technisat_usb2 */)) {
45683+ error = -ENODEV;
45684+ goto out;
45685+ }
45686 }
45687
45688 deb_i2c("status: %d, ", b[0]);
45689@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45690
45691 deb_i2c("\n");
45692
45693- return 0;
45694+out:
45695+ kfree(b);
45696+ return error;
45697 }
45698
45699 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45700@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45701 {
45702 int ret;
45703
45704- u8 led[8] = {
45705- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45706- 0
45707- };
45708+	u8 *led;
45709 
45710 	if (disable_led_control && state != TECH_LED_OFF)
45711 		return 0;
45712 
45713+	led = kzalloc(8, GFP_KERNEL);
45714+	if (led == NULL)
45715+		return -ENOMEM;
45716+
45717+	led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45718 switch (state) {
45719 case TECH_LED_ON:
45720 led[1] = 0x82;
45721@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45722 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45723 USB_TYPE_VENDOR | USB_DIR_OUT,
45724 0, 0,
45725- led, sizeof(led), 500);
45726+ led, 8, 500);
45727
45728 mutex_unlock(&d->i2c_mutex);
45729+
45730+ kfree(led);
45731+
45732 return ret;
45733 }
45734
45735 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
45736 {
45737 int ret;
45738- u8 b = 0;
45739+ u8 *b = kzalloc(1, GFP_KERNEL);
45740+
45741+ if (b == NULL)
45742+ return -ENOMEM;
45743
45744 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
45745 return -EAGAIN;
45746@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
45747 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
45748 USB_TYPE_VENDOR | USB_DIR_OUT,
45749 (red << 8) | green, 0,
45750- &b, 1, 500);
45751+ b, 1, 500);
45752
45753 mutex_unlock(&d->i2c_mutex);
45754
45755+ kfree(b);
45756+
45757 return ret;
45758 }
45759
45760@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45761 struct dvb_usb_device_description **desc, int *cold)
45762 {
45763 int ret;
45764- u8 version[3];
45765+	u8 *version;
45766
45767 /* first select the interface */
45768 if (usb_set_interface(udev, 0, 1) != 0)
45769@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45770
45771 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
45772
45773+	version = kmalloc(3, GFP_KERNEL);
45774+	if (version == NULL)
45775+		return -ENOMEM;
45776 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
45777 GET_VERSION_INFO_VENDOR_REQUEST,
45778 USB_TYPE_VENDOR | USB_DIR_IN,
45779 0, 0,
45780- version, sizeof(version), 500);
45781+ version, 3, 500);
45782
45783 if (ret < 0)
45784 *cold = 1;
45785@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45786 *cold = 0;
45787 }
45788
45789+ kfree(version);
45790+
45791 return 0;
45792 }
45793
45794@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
45795
45796 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45797 {
45798- u8 buf[62], *b;
45799+ u8 *buf, *b;
45800 int ret;
45801 struct ir_raw_event ev;
45802
45803+ buf = kmalloc(62, GFP_KERNEL);
45804+
45805+ if (buf == NULL)
45806+ return -ENOMEM;
45807+
45808 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
45809 buf[1] = 0x08;
45810 buf[2] = 0x8f;
45811@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45812 GET_IR_DATA_VENDOR_REQUEST,
45813 USB_TYPE_VENDOR | USB_DIR_IN,
45814 0x8080, 0,
45815- buf, sizeof(buf), 500);
45816+ buf, 62, 500);
45817
45818 unlock:
45819 mutex_unlock(&d->i2c_mutex);
45820
45821- if (ret < 0)
45822+ if (ret < 0) {
45823+ kfree(buf);
45824 return ret;
45825+ }
45826
45827- if (ret == 1)
45828+ if (ret == 1) {
45829+ kfree(buf);
45830 return 0; /* no key pressed */
45831+ }
45832
45833 /* decoding */
45834 b = buf+1;
45835@@ -656,6 +689,8 @@ unlock:
45836
45837 ir_raw_event_handle(d->rc_dev);
45838
45839+ kfree(buf);
45840+
45841 return 1;
45842 }
45843
45844diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45845index af63543..0436f20 100644
45846--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45847+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45848@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45849 * by passing a very big num_planes value */
45850 uplane = compat_alloc_user_space(num_planes *
45851 sizeof(struct v4l2_plane));
45852- kp->m.planes = (__force struct v4l2_plane *)uplane;
45853+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
45854
45855 while (--num_planes >= 0) {
45856 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
45857@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45858 if (num_planes == 0)
45859 return 0;
45860
45861- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
45862+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
45863 if (get_user(p, &up->m.planes))
45864 return -EFAULT;
45865 uplane32 = compat_ptr(p);
45866@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
45867 get_user(kp->flags, &up->flags) ||
45868 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
45869 return -EFAULT;
45870- kp->base = (__force void *)compat_ptr(tmp);
45871+ kp->base = (__force_kernel void *)compat_ptr(tmp);
45872 return 0;
45873 }
45874
45875@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45876 n * sizeof(struct v4l2_ext_control32)))
45877 return -EFAULT;
45878 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
45879- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
45880+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
45881 while (--n >= 0) {
45882 u32 id;
45883
45884@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45885 {
45886 struct v4l2_ext_control32 __user *ucontrols;
45887 struct v4l2_ext_control __user *kcontrols =
45888- (__force struct v4l2_ext_control __user *)kp->controls;
45889+ (struct v4l2_ext_control __force_user *)kp->controls;
45890 int n = kp->count;
45891 compat_caddr_t p;
45892
45893@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
45894 get_user(tmp, &up->edid) ||
45895 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
45896 return -EFAULT;
45897- kp->edid = (__force u8 *)compat_ptr(tmp);
45898+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
45899 return 0;
45900 }
45901
45902diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
45903index 015f92a..59e311e 100644
45904--- a/drivers/media/v4l2-core/v4l2-device.c
45905+++ b/drivers/media/v4l2-core/v4l2-device.c
45906@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
45907 EXPORT_SYMBOL_GPL(v4l2_device_put);
45908
45909 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
45910- atomic_t *instance)
45911+ atomic_unchecked_t *instance)
45912 {
45913- int num = atomic_inc_return(instance) - 1;
45914+ int num = atomic_inc_return_unchecked(instance) - 1;
45915 int len = strlen(basename);
45916
45917 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
45918diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
45919index faac2f4..e39dcd9 100644
45920--- a/drivers/media/v4l2-core/v4l2-ioctl.c
45921+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
45922@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
45923 struct file *file, void *fh, void *p);
45924 } u;
45925 void (*debug)(const void *arg, bool write_only);
45926-};
45927+} __do_const;
45928+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
45929
45930 /* This control needs a priority check */
45931 #define INFO_FL_PRIO (1 << 0)
45932@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
45933 struct video_device *vfd = video_devdata(file);
45934 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
45935 bool write_only = false;
45936- struct v4l2_ioctl_info default_info;
45937+ v4l2_ioctl_info_no_const default_info;
45938 const struct v4l2_ioctl_info *info;
45939 void *fh = file->private_data;
45940 struct v4l2_fh *vfh = NULL;
45941@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45942 ret = -EINVAL;
45943 break;
45944 }
45945- *user_ptr = (void __user *)buf->m.planes;
45946+ *user_ptr = (void __force_user *)buf->m.planes;
45947 *kernel_ptr = (void **)&buf->m.planes;
45948 *array_size = sizeof(struct v4l2_plane) * buf->length;
45949 ret = 1;
45950@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45951 ret = -EINVAL;
45952 break;
45953 }
45954- *user_ptr = (void __user *)edid->edid;
45955+ *user_ptr = (void __force_user *)edid->edid;
45956 *kernel_ptr = (void **)&edid->edid;
45957 *array_size = edid->blocks * 128;
45958 ret = 1;
45959@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45960 ret = -EINVAL;
45961 break;
45962 }
45963- *user_ptr = (void __user *)ctrls->controls;
45964+ *user_ptr = (void __force_user *)ctrls->controls;
45965 *kernel_ptr = (void **)&ctrls->controls;
45966 *array_size = sizeof(struct v4l2_ext_control)
45967 * ctrls->count;
45968@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
45969 }
45970
45971 if (has_array_args) {
45972- *kernel_ptr = (void __force *)user_ptr;
45973+ *kernel_ptr = (void __force_kernel *)user_ptr;
45974 if (copy_to_user(user_ptr, mbuf, array_size))
45975 err = -EFAULT;
45976 goto out_array_args;
45977diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
45978index 24696f5..3637780 100644
45979--- a/drivers/memory/omap-gpmc.c
45980+++ b/drivers/memory/omap-gpmc.c
45981@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
45982 };
45983
45984 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
45985-static struct irq_chip gpmc_irq_chip;
45986 static int gpmc_irq_start;
45987
45988 static struct resource gpmc_mem_root;
45989@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
45990
45991 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
45992
45993+static struct irq_chip gpmc_irq_chip = {
45994+ .name = "gpmc",
45995+ .irq_startup = gpmc_irq_noop_ret,
45996+ .irq_enable = gpmc_irq_enable,
45997+ .irq_disable = gpmc_irq_disable,
45998+ .irq_shutdown = gpmc_irq_noop,
45999+ .irq_ack = gpmc_irq_noop,
46000+ .irq_mask = gpmc_irq_noop,
46001+ .irq_unmask = gpmc_irq_noop,
46002+};
46003+
46004 static int gpmc_setup_irq(void)
46005 {
46006 int i;
46007@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
46008 return gpmc_irq_start;
46009 }
46010
46011- gpmc_irq_chip.name = "gpmc";
46012- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
46013- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
46014- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
46015- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
46016- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
46017- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
46018- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
46019-
46020 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
46021 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
46022
46023diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46024index 187f836..679544b 100644
46025--- a/drivers/message/fusion/mptbase.c
46026+++ b/drivers/message/fusion/mptbase.c
46027@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46028 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46029 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46030
46031+#ifdef CONFIG_GRKERNSEC_HIDESYM
46032+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46033+#else
46034 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46035 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46036+#endif
46037+
46038 /*
46039 * Rounding UP to nearest 4-kB boundary here...
46040 */
46041@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46042 ioc->facts.GlobalCredits);
46043
46044 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46045+#ifdef CONFIG_GRKERNSEC_HIDESYM
46046+ NULL, NULL);
46047+#else
46048 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46049+#endif
46050 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46051 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46052 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
46053diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46054index 5bdaae1..eced16f 100644
46055--- a/drivers/message/fusion/mptsas.c
46056+++ b/drivers/message/fusion/mptsas.c
46057@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46058 return 0;
46059 }
46060
46061+static inline void
46062+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46063+{
46064+ if (phy_info->port_details) {
46065+ phy_info->port_details->rphy = rphy;
46066+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46067+ ioc->name, rphy));
46068+ }
46069+
46070+ if (rphy) {
46071+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46072+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46073+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46074+ ioc->name, rphy, rphy->dev.release));
46075+ }
46076+}
46077+
46078 /* no mutex */
46079 static void
46080 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46081@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46082 return NULL;
46083 }
46084
46085-static inline void
46086-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46087-{
46088- if (phy_info->port_details) {
46089- phy_info->port_details->rphy = rphy;
46090- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46091- ioc->name, rphy));
46092- }
46093-
46094- if (rphy) {
46095- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46096- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46097- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46098- ioc->name, rphy, rphy->dev.release));
46099- }
46100-}
46101-
46102 static inline struct sas_port *
46103 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46104 {
46105diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46106index b7d87cd..3fb36da 100644
46107--- a/drivers/message/i2o/i2o_proc.c
46108+++ b/drivers/message/i2o/i2o_proc.c
46109@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46110 "Array Controller Device"
46111 };
46112
46113-static char *chtostr(char *tmp, u8 *chars, int n)
46114-{
46115- tmp[0] = 0;
46116- return strncat(tmp, (char *)chars, n);
46117-}
46118-
46119 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46120 char *group)
46121 {
46122@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46123 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46124 {
46125 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46126- static u32 work32[5];
46127- static u8 *work8 = (u8 *) work32;
46128- static u16 *work16 = (u16 *) work32;
46129+ u32 work32[5];
46130+ u8 *work8 = (u8 *) work32;
46131+ u16 *work16 = (u16 *) work32;
46132 int token;
46133 u32 hwcap;
46134
46135@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46136 } *result;
46137
46138 i2o_exec_execute_ddm_table ddm_table;
46139- char tmp[28 + 1];
46140
46141 result = kmalloc(sizeof(*result), GFP_KERNEL);
46142 if (!result)
46143@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46144
46145 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46146 seq_printf(seq, "%-#8x", ddm_table.module_id);
46147- seq_printf(seq, "%-29s",
46148- chtostr(tmp, ddm_table.module_name_version, 28));
46149+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46150 seq_printf(seq, "%9d ", ddm_table.data_size);
46151 seq_printf(seq, "%8d", ddm_table.code_size);
46152
46153@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46154
46155 i2o_driver_result_table *result;
46156 i2o_driver_store_table *dst;
46157- char tmp[28 + 1];
46158
46159 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46160 if (result == NULL)
46161@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46162
46163 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46164 seq_printf(seq, "%-#8x", dst->module_id);
46165- seq_printf(seq, "%-29s",
46166- chtostr(tmp, dst->module_name_version, 28));
46167- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46168+ seq_printf(seq, "%-.28s", dst->module_name_version);
46169+ seq_printf(seq, "%-.8s", dst->date);
46170 seq_printf(seq, "%8d ", dst->module_size);
46171 seq_printf(seq, "%8d ", dst->mpb_size);
46172 seq_printf(seq, "0x%04x", dst->module_flags);
46173@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46174 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46175 {
46176 struct i2o_device *d = (struct i2o_device *)seq->private;
46177- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46178+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46179 // == (allow) 512d bytes (max)
46180- static u16 *work16 = (u16 *) work32;
46181+ u16 *work16 = (u16 *) work32;
46182 int token;
46183- char tmp[16 + 1];
46184
46185 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46186
46187@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46188 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46189 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46190 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46191- seq_printf(seq, "Vendor info : %s\n",
46192- chtostr(tmp, (u8 *) (work32 + 2), 16));
46193- seq_printf(seq, "Product info : %s\n",
46194- chtostr(tmp, (u8 *) (work32 + 6), 16));
46195- seq_printf(seq, "Description : %s\n",
46196- chtostr(tmp, (u8 *) (work32 + 10), 16));
46197- seq_printf(seq, "Product rev. : %s\n",
46198- chtostr(tmp, (u8 *) (work32 + 14), 8));
46199+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46200+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46201+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46202+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46203
46204 seq_printf(seq, "Serial number : ");
46205 print_serial_number(seq, (u8 *) (work32 + 16),
46206@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46207 u8 pad[256]; // allow up to 256 byte (max) serial number
46208 } result;
46209
46210- char tmp[24 + 1];
46211-
46212 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46213
46214 if (token < 0) {
46215@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46216 }
46217
46218 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46219- seq_printf(seq, "Module name : %s\n",
46220- chtostr(tmp, result.module_name, 24));
46221- seq_printf(seq, "Module revision : %s\n",
46222- chtostr(tmp, result.module_rev, 8));
46223+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46224+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46225
46226 seq_printf(seq, "Serial number : ");
46227 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46228@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46229 u8 instance_number[4];
46230 } result;
46231
46232- char tmp[64 + 1];
46233-
46234 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46235
46236 if (token < 0) {
46237@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46238 return 0;
46239 }
46240
46241- seq_printf(seq, "Device name : %s\n",
46242- chtostr(tmp, result.device_name, 64));
46243- seq_printf(seq, "Service name : %s\n",
46244- chtostr(tmp, result.service_name, 64));
46245- seq_printf(seq, "Physical name : %s\n",
46246- chtostr(tmp, result.physical_location, 64));
46247- seq_printf(seq, "Instance number : %s\n",
46248- chtostr(tmp, result.instance_number, 4));
46249+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46250+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46251+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46252+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46253
46254 return 0;
46255 }
46256@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46257 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46258 {
46259 struct i2o_device *d = (struct i2o_device *)seq->private;
46260- static u32 work32[12];
46261- static u16 *work16 = (u16 *) work32;
46262- static u8 *work8 = (u8 *) work32;
46263+ u32 work32[12];
46264+ u16 *work16 = (u16 *) work32;
46265+ u8 *work8 = (u8 *) work32;
46266 int token;
46267
46268 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
46269diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46270index 92752fb..a7494f6 100644
46271--- a/drivers/message/i2o/iop.c
46272+++ b/drivers/message/i2o/iop.c
46273@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46274
46275 spin_lock_irqsave(&c->context_list_lock, flags);
46276
46277- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46278- atomic_inc(&c->context_list_counter);
46279+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46280+ atomic_inc_unchecked(&c->context_list_counter);
46281
46282- entry->context = atomic_read(&c->context_list_counter);
46283+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46284
46285 list_add(&entry->list, &c->context_list);
46286
46287@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46288
46289 #if BITS_PER_LONG == 64
46290 spin_lock_init(&c->context_list_lock);
46291- atomic_set(&c->context_list_counter, 0);
46292+ atomic_set_unchecked(&c->context_list_counter, 0);
46293 INIT_LIST_HEAD(&c->context_list);
46294 #endif
46295
46296diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46297index 9a8e185..27ff17d 100644
46298--- a/drivers/mfd/ab8500-debugfs.c
46299+++ b/drivers/mfd/ab8500-debugfs.c
46300@@ -100,7 +100,7 @@ static int irq_last;
46301 static u32 *irq_count;
46302 static int num_irqs;
46303
46304-static struct device_attribute **dev_attr;
46305+static device_attribute_no_const **dev_attr;
46306 static char **event_name;
46307
46308 static u8 avg_sample = SAMPLE_16;
46309diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46310index c880c89..45a7c68 100644
46311--- a/drivers/mfd/max8925-i2c.c
46312+++ b/drivers/mfd/max8925-i2c.c
46313@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46314 const struct i2c_device_id *id)
46315 {
46316 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46317- static struct max8925_chip *chip;
46318+ struct max8925_chip *chip;
46319 struct device_node *node = client->dev.of_node;
46320
46321 if (node && !pdata) {
46322diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46323index 7612d89..70549c2 100644
46324--- a/drivers/mfd/tps65910.c
46325+++ b/drivers/mfd/tps65910.c
46326@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46327 struct tps65910_platform_data *pdata)
46328 {
46329 int ret = 0;
46330- static struct regmap_irq_chip *tps6591x_irqs_chip;
46331+ struct regmap_irq_chip *tps6591x_irqs_chip;
46332
46333 if (!irq) {
46334 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46335diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46336index 1b772ef..01e77d33 100644
46337--- a/drivers/mfd/twl4030-irq.c
46338+++ b/drivers/mfd/twl4030-irq.c
46339@@ -34,6 +34,7 @@
46340 #include <linux/of.h>
46341 #include <linux/irqdomain.h>
46342 #include <linux/i2c/twl.h>
46343+#include <asm/pgtable.h>
46344
46345 #include "twl-core.h"
46346
46347@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46348 * Install an irq handler for each of the SIH modules;
46349 * clone dummy irq_chip since PIH can't *do* anything
46350 */
46351- twl4030_irq_chip = dummy_irq_chip;
46352- twl4030_irq_chip.name = "twl4030";
46353+ pax_open_kernel();
46354+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46355+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46356
46357- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46358+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46359+ pax_close_kernel();
46360
46361 for (i = irq_base; i < irq_end; i++) {
46362 irq_set_chip_and_handler(i, &twl4030_irq_chip,
46363diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46364index 464419b..64bae8d 100644
46365--- a/drivers/misc/c2port/core.c
46366+++ b/drivers/misc/c2port/core.c
46367@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46368 goto error_idr_alloc;
46369 c2dev->id = ret;
46370
46371- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46372+ pax_open_kernel();
46373+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46374+ pax_close_kernel();
46375
46376 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46377 "c2port%d", c2dev->id);
46378diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46379index 8385177..2f54635 100644
46380--- a/drivers/misc/eeprom/sunxi_sid.c
46381+++ b/drivers/misc/eeprom/sunxi_sid.c
46382@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46383
46384 platform_set_drvdata(pdev, sid_data);
46385
46386- sid_bin_attr.size = sid_data->keysize;
46387+ pax_open_kernel();
46388+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46389+ pax_close_kernel();
46390 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46391 return -ENODEV;
46392
46393diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46394index 36f5d52..32311c3 100644
46395--- a/drivers/misc/kgdbts.c
46396+++ b/drivers/misc/kgdbts.c
46397@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46398 char before[BREAK_INSTR_SIZE];
46399 char after[BREAK_INSTR_SIZE];
46400
46401- probe_kernel_read(before, (char *)kgdbts_break_test,
46402+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46403 BREAK_INSTR_SIZE);
46404 init_simple_test();
46405 ts.tst = plant_and_detach_test;
46406@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46407 /* Activate test with initial breakpoint */
46408 if (!is_early)
46409 kgdb_breakpoint();
46410- probe_kernel_read(after, (char *)kgdbts_break_test,
46411+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46412 BREAK_INSTR_SIZE);
46413 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46414 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
46415diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46416index 3ef4627..8d00486 100644
46417--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46418+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46419@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46420 * the lid is closed. This leads to interrupts as soon as a little move
46421 * is done.
46422 */
46423- atomic_inc(&lis3->count);
46424+ atomic_inc_unchecked(&lis3->count);
46425
46426 wake_up_interruptible(&lis3->misc_wait);
46427 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46428@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46429 if (lis3->pm_dev)
46430 pm_runtime_get_sync(lis3->pm_dev);
46431
46432- atomic_set(&lis3->count, 0);
46433+ atomic_set_unchecked(&lis3->count, 0);
46434 return 0;
46435 }
46436
46437@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46438 add_wait_queue(&lis3->misc_wait, &wait);
46439 while (true) {
46440 set_current_state(TASK_INTERRUPTIBLE);
46441- data = atomic_xchg(&lis3->count, 0);
46442+ data = atomic_xchg_unchecked(&lis3->count, 0);
46443 if (data)
46444 break;
46445
46446@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46447 struct lis3lv02d, miscdev);
46448
46449 poll_wait(file, &lis3->misc_wait, wait);
46450- if (atomic_read(&lis3->count))
46451+ if (atomic_read_unchecked(&lis3->count))
46452 return POLLIN | POLLRDNORM;
46453 return 0;
46454 }
46455diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46456index c439c82..1f20f57 100644
46457--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46458+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46459@@ -297,7 +297,7 @@ struct lis3lv02d {
46460 struct input_polled_dev *idev; /* input device */
46461 struct platform_device *pdev; /* platform device */
46462 struct regulator_bulk_data regulators[2];
46463- atomic_t count; /* interrupt count after last read */
46464+ atomic_unchecked_t count; /* interrupt count after last read */
46465 union axis_conversion ac; /* hw -> logical axis */
46466 int mapped_btns[3];
46467
46468diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46469index 2f30bad..c4c13d0 100644
46470--- a/drivers/misc/sgi-gru/gruhandles.c
46471+++ b/drivers/misc/sgi-gru/gruhandles.c
46472@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46473 unsigned long nsec;
46474
46475 nsec = CLKS2NSEC(clks);
46476- atomic_long_inc(&mcs_op_statistics[op].count);
46477- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46478+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46479+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46480 if (mcs_op_statistics[op].max < nsec)
46481 mcs_op_statistics[op].max = nsec;
46482 }
46483diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46484index 4f76359..cdfcb2e 100644
46485--- a/drivers/misc/sgi-gru/gruprocfs.c
46486+++ b/drivers/misc/sgi-gru/gruprocfs.c
46487@@ -32,9 +32,9 @@
46488
46489 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46490
46491-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46492+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46493 {
46494- unsigned long val = atomic_long_read(v);
46495+ unsigned long val = atomic_long_read_unchecked(v);
46496
46497 seq_printf(s, "%16lu %s\n", val, id);
46498 }
46499@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46500
46501 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46502 for (op = 0; op < mcsop_last; op++) {
46503- count = atomic_long_read(&mcs_op_statistics[op].count);
46504- total = atomic_long_read(&mcs_op_statistics[op].total);
46505+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46506+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46507 max = mcs_op_statistics[op].max;
46508 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46509 count ? total / count : 0, max);
46510diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46511index 5c3ce24..4915ccb 100644
46512--- a/drivers/misc/sgi-gru/grutables.h
46513+++ b/drivers/misc/sgi-gru/grutables.h
46514@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46515 * GRU statistics.
46516 */
46517 struct gru_stats_s {
46518- atomic_long_t vdata_alloc;
46519- atomic_long_t vdata_free;
46520- atomic_long_t gts_alloc;
46521- atomic_long_t gts_free;
46522- atomic_long_t gms_alloc;
46523- atomic_long_t gms_free;
46524- atomic_long_t gts_double_allocate;
46525- atomic_long_t assign_context;
46526- atomic_long_t assign_context_failed;
46527- atomic_long_t free_context;
46528- atomic_long_t load_user_context;
46529- atomic_long_t load_kernel_context;
46530- atomic_long_t lock_kernel_context;
46531- atomic_long_t unlock_kernel_context;
46532- atomic_long_t steal_user_context;
46533- atomic_long_t steal_kernel_context;
46534- atomic_long_t steal_context_failed;
46535- atomic_long_t nopfn;
46536- atomic_long_t asid_new;
46537- atomic_long_t asid_next;
46538- atomic_long_t asid_wrap;
46539- atomic_long_t asid_reuse;
46540- atomic_long_t intr;
46541- atomic_long_t intr_cbr;
46542- atomic_long_t intr_tfh;
46543- atomic_long_t intr_spurious;
46544- atomic_long_t intr_mm_lock_failed;
46545- atomic_long_t call_os;
46546- atomic_long_t call_os_wait_queue;
46547- atomic_long_t user_flush_tlb;
46548- atomic_long_t user_unload_context;
46549- atomic_long_t user_exception;
46550- atomic_long_t set_context_option;
46551- atomic_long_t check_context_retarget_intr;
46552- atomic_long_t check_context_unload;
46553- atomic_long_t tlb_dropin;
46554- atomic_long_t tlb_preload_page;
46555- atomic_long_t tlb_dropin_fail_no_asid;
46556- atomic_long_t tlb_dropin_fail_upm;
46557- atomic_long_t tlb_dropin_fail_invalid;
46558- atomic_long_t tlb_dropin_fail_range_active;
46559- atomic_long_t tlb_dropin_fail_idle;
46560- atomic_long_t tlb_dropin_fail_fmm;
46561- atomic_long_t tlb_dropin_fail_no_exception;
46562- atomic_long_t tfh_stale_on_fault;
46563- atomic_long_t mmu_invalidate_range;
46564- atomic_long_t mmu_invalidate_page;
46565- atomic_long_t flush_tlb;
46566- atomic_long_t flush_tlb_gru;
46567- atomic_long_t flush_tlb_gru_tgh;
46568- atomic_long_t flush_tlb_gru_zero_asid;
46569+ atomic_long_unchecked_t vdata_alloc;
46570+ atomic_long_unchecked_t vdata_free;
46571+ atomic_long_unchecked_t gts_alloc;
46572+ atomic_long_unchecked_t gts_free;
46573+ atomic_long_unchecked_t gms_alloc;
46574+ atomic_long_unchecked_t gms_free;
46575+ atomic_long_unchecked_t gts_double_allocate;
46576+ atomic_long_unchecked_t assign_context;
46577+ atomic_long_unchecked_t assign_context_failed;
46578+ atomic_long_unchecked_t free_context;
46579+ atomic_long_unchecked_t load_user_context;
46580+ atomic_long_unchecked_t load_kernel_context;
46581+ atomic_long_unchecked_t lock_kernel_context;
46582+ atomic_long_unchecked_t unlock_kernel_context;
46583+ atomic_long_unchecked_t steal_user_context;
46584+ atomic_long_unchecked_t steal_kernel_context;
46585+ atomic_long_unchecked_t steal_context_failed;
46586+ atomic_long_unchecked_t nopfn;
46587+ atomic_long_unchecked_t asid_new;
46588+ atomic_long_unchecked_t asid_next;
46589+ atomic_long_unchecked_t asid_wrap;
46590+ atomic_long_unchecked_t asid_reuse;
46591+ atomic_long_unchecked_t intr;
46592+ atomic_long_unchecked_t intr_cbr;
46593+ atomic_long_unchecked_t intr_tfh;
46594+ atomic_long_unchecked_t intr_spurious;
46595+ atomic_long_unchecked_t intr_mm_lock_failed;
46596+ atomic_long_unchecked_t call_os;
46597+ atomic_long_unchecked_t call_os_wait_queue;
46598+ atomic_long_unchecked_t user_flush_tlb;
46599+ atomic_long_unchecked_t user_unload_context;
46600+ atomic_long_unchecked_t user_exception;
46601+ atomic_long_unchecked_t set_context_option;
46602+ atomic_long_unchecked_t check_context_retarget_intr;
46603+ atomic_long_unchecked_t check_context_unload;
46604+ atomic_long_unchecked_t tlb_dropin;
46605+ atomic_long_unchecked_t tlb_preload_page;
46606+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46607+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46608+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46609+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46610+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46611+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46612+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46613+ atomic_long_unchecked_t tfh_stale_on_fault;
46614+ atomic_long_unchecked_t mmu_invalidate_range;
46615+ atomic_long_unchecked_t mmu_invalidate_page;
46616+ atomic_long_unchecked_t flush_tlb;
46617+ atomic_long_unchecked_t flush_tlb_gru;
46618+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46619+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46620
46621- atomic_long_t copy_gpa;
46622- atomic_long_t read_gpa;
46623+ atomic_long_unchecked_t copy_gpa;
46624+ atomic_long_unchecked_t read_gpa;
46625
46626- atomic_long_t mesq_receive;
46627- atomic_long_t mesq_receive_none;
46628- atomic_long_t mesq_send;
46629- atomic_long_t mesq_send_failed;
46630- atomic_long_t mesq_noop;
46631- atomic_long_t mesq_send_unexpected_error;
46632- atomic_long_t mesq_send_lb_overflow;
46633- atomic_long_t mesq_send_qlimit_reached;
46634- atomic_long_t mesq_send_amo_nacked;
46635- atomic_long_t mesq_send_put_nacked;
46636- atomic_long_t mesq_page_overflow;
46637- atomic_long_t mesq_qf_locked;
46638- atomic_long_t mesq_qf_noop_not_full;
46639- atomic_long_t mesq_qf_switch_head_failed;
46640- atomic_long_t mesq_qf_unexpected_error;
46641- atomic_long_t mesq_noop_unexpected_error;
46642- atomic_long_t mesq_noop_lb_overflow;
46643- atomic_long_t mesq_noop_qlimit_reached;
46644- atomic_long_t mesq_noop_amo_nacked;
46645- atomic_long_t mesq_noop_put_nacked;
46646- atomic_long_t mesq_noop_page_overflow;
46647+ atomic_long_unchecked_t mesq_receive;
46648+ atomic_long_unchecked_t mesq_receive_none;
46649+ atomic_long_unchecked_t mesq_send;
46650+ atomic_long_unchecked_t mesq_send_failed;
46651+ atomic_long_unchecked_t mesq_noop;
46652+ atomic_long_unchecked_t mesq_send_unexpected_error;
46653+ atomic_long_unchecked_t mesq_send_lb_overflow;
46654+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46655+ atomic_long_unchecked_t mesq_send_amo_nacked;
46656+ atomic_long_unchecked_t mesq_send_put_nacked;
46657+ atomic_long_unchecked_t mesq_page_overflow;
46658+ atomic_long_unchecked_t mesq_qf_locked;
46659+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46660+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46661+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46662+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46663+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46664+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46665+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46666+ atomic_long_unchecked_t mesq_noop_put_nacked;
46667+ atomic_long_unchecked_t mesq_noop_page_overflow;
46668
46669 };
46670
46671@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46672 tghop_invalidate, mcsop_last};
46673
46674 struct mcs_op_statistic {
46675- atomic_long_t count;
46676- atomic_long_t total;
46677+ atomic_long_unchecked_t count;
46678+ atomic_long_unchecked_t total;
46679 unsigned long max;
46680 };
46681
46682@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46683
46684 #define STAT(id) do { \
46685 if (gru_options & OPT_STATS) \
46686- atomic_long_inc(&gru_stats.id); \
46687+ atomic_long_inc_unchecked(&gru_stats.id); \
46688 } while (0)
46689
46690 #ifdef CONFIG_SGI_GRU_DEBUG
46691diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46692index c862cd4..0d176fe 100644
46693--- a/drivers/misc/sgi-xp/xp.h
46694+++ b/drivers/misc/sgi-xp/xp.h
46695@@ -288,7 +288,7 @@ struct xpc_interface {
46696 xpc_notify_func, void *);
46697 void (*received) (short, int, void *);
46698 enum xp_retval (*partid_to_nasids) (short, void *);
46699-};
46700+} __no_const;
46701
46702 extern struct xpc_interface xpc_interface;
46703
46704diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46705index 01be66d..e3a0c7e 100644
46706--- a/drivers/misc/sgi-xp/xp_main.c
46707+++ b/drivers/misc/sgi-xp/xp_main.c
46708@@ -78,13 +78,13 @@ xpc_notloaded(void)
46709 }
46710
46711 struct xpc_interface xpc_interface = {
46712- (void (*)(int))xpc_notloaded,
46713- (void (*)(int))xpc_notloaded,
46714- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46715- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46716+ .connect = (void (*)(int))xpc_notloaded,
46717+ .disconnect = (void (*)(int))xpc_notloaded,
46718+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46719+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46720 void *))xpc_notloaded,
46721- (void (*)(short, int, void *))xpc_notloaded,
46722- (enum xp_retval(*)(short, void *))xpc_notloaded
46723+ .received = (void (*)(short, int, void *))xpc_notloaded,
46724+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46725 };
46726 EXPORT_SYMBOL_GPL(xpc_interface);
46727
46728diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46729index b94d5f7..7f494c5 100644
46730--- a/drivers/misc/sgi-xp/xpc.h
46731+++ b/drivers/misc/sgi-xp/xpc.h
46732@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46733 void (*received_payload) (struct xpc_channel *, void *);
46734 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46735 };
46736+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46737
46738 /* struct xpc_partition act_state values (for XPC HB) */
46739
46740@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46741 /* found in xpc_main.c */
46742 extern struct device *xpc_part;
46743 extern struct device *xpc_chan;
46744-extern struct xpc_arch_operations xpc_arch_ops;
46745+extern xpc_arch_operations_no_const xpc_arch_ops;
46746 extern int xpc_disengage_timelimit;
46747 extern int xpc_disengage_timedout;
46748 extern int xpc_activate_IRQ_rcvd;
46749diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46750index 82dc574..8539ab2 100644
46751--- a/drivers/misc/sgi-xp/xpc_main.c
46752+++ b/drivers/misc/sgi-xp/xpc_main.c
46753@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46754 .notifier_call = xpc_system_die,
46755 };
46756
46757-struct xpc_arch_operations xpc_arch_ops;
46758+xpc_arch_operations_no_const xpc_arch_ops;
46759
46760 /*
46761 * Timer function to enforce the timelimit on the partition disengage.
46762@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46763
46764 if (((die_args->trapnr == X86_TRAP_MF) ||
46765 (die_args->trapnr == X86_TRAP_XF)) &&
46766- !user_mode_vm(die_args->regs))
46767+ !user_mode(die_args->regs))
46768 xpc_die_deactivate();
46769
46770 break;
46771diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46772index 4409d79..d7766d0 100644
46773--- a/drivers/mmc/card/block.c
46774+++ b/drivers/mmc/card/block.c
46775@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46776 if (idata->ic.postsleep_min_us)
46777 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46778
46779- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46780+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46781 err = -EFAULT;
46782 goto cmd_rel_host;
46783 }
46784diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46785index 0d0f7a2..45b8d60 100644
46786--- a/drivers/mmc/host/dw_mmc.h
46787+++ b/drivers/mmc/host/dw_mmc.h
46788@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
46789 int (*parse_dt)(struct dw_mci *host);
46790 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46791 struct dw_mci_tuning_data *tuning_data);
46792-};
46793+} __do_const;
46794 #endif /* _DW_MMC_H_ */
46795diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46796index 8232e9a..7776006 100644
46797--- a/drivers/mmc/host/mmci.c
46798+++ b/drivers/mmc/host/mmci.c
46799@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
46800 mmc->caps |= MMC_CAP_CMD23;
46801
46802 if (variant->busy_detect) {
46803- mmci_ops.card_busy = mmci_card_busy;
46804+ pax_open_kernel();
46805+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46806+ pax_close_kernel();
46807 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46808 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46809 mmc->max_busy_timeout = 0;
46810diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46811index 7c71dcd..74cb746 100644
46812--- a/drivers/mmc/host/omap_hsmmc.c
46813+++ b/drivers/mmc/host/omap_hsmmc.c
46814@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46815
46816 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46817 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46818- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46819+ pax_open_kernel();
46820+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46821+ pax_close_kernel();
46822 }
46823
46824 pm_runtime_enable(host->dev);
46825diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46826index af1f7c0..00d368a 100644
46827--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46828+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46829@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46830 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46831 }
46832
46833- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46834- sdhci_esdhc_ops.platform_execute_tuning =
46835+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46836+ pax_open_kernel();
46837+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
46838 esdhc_executing_tuning;
46839+ pax_close_kernel();
46840+ }
46841
46842 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
46843 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
46844diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
46845index c45b893..fba0144 100644
46846--- a/drivers/mmc/host/sdhci-s3c.c
46847+++ b/drivers/mmc/host/sdhci-s3c.c
46848@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
46849 * we can use overriding functions instead of default.
46850 */
46851 if (sc->no_divider) {
46852- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46853- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46854- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46855+ pax_open_kernel();
46856+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46857+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46858+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46859+ pax_close_kernel();
46860 }
46861
46862 /* It supports additional host capabilities if needed */
46863diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
46864index 423666b..81ff5eb 100644
46865--- a/drivers/mtd/chips/cfi_cmdset_0020.c
46866+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
46867@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
46868 size_t totlen = 0, thislen;
46869 int ret = 0;
46870 size_t buflen = 0;
46871- static char *buffer;
46872+ char *buffer;
46873
46874 if (!ECCBUF_SIZE) {
46875 /* We should fall back to a general writev implementation.
46876diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
46877index b3b7ca1..5dd4634 100644
46878--- a/drivers/mtd/nand/denali.c
46879+++ b/drivers/mtd/nand/denali.c
46880@@ -24,6 +24,7 @@
46881 #include <linux/slab.h>
46882 #include <linux/mtd/mtd.h>
46883 #include <linux/module.h>
46884+#include <linux/slab.h>
46885
46886 #include "denali.h"
46887
46888diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46889index 4f3851a..f477a23 100644
46890--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46891+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46892@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
46893
46894 /* first try to map the upper buffer directly */
46895 if (virt_addr_valid(this->upper_buf) &&
46896- !object_is_on_stack(this->upper_buf)) {
46897+ !object_starts_on_stack(this->upper_buf)) {
46898 sg_init_one(sgl, this->upper_buf, this->upper_len);
46899 ret = dma_map_sg(this->dev, sgl, 1, dr);
46900 if (ret == 0)
46901diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
46902index 51b9d6a..52af9a7 100644
46903--- a/drivers/mtd/nftlmount.c
46904+++ b/drivers/mtd/nftlmount.c
46905@@ -24,6 +24,7 @@
46906 #include <asm/errno.h>
46907 #include <linux/delay.h>
46908 #include <linux/slab.h>
46909+#include <linux/sched.h>
46910 #include <linux/mtd/mtd.h>
46911 #include <linux/mtd/nand.h>
46912 #include <linux/mtd/nftl.h>
46913diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
46914index c23184a..4115c41 100644
46915--- a/drivers/mtd/sm_ftl.c
46916+++ b/drivers/mtd/sm_ftl.c
46917@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
46918 #define SM_CIS_VENDOR_OFFSET 0x59
46919 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
46920 {
46921- struct attribute_group *attr_group;
46922+ attribute_group_no_const *attr_group;
46923 struct attribute **attributes;
46924 struct sm_sysfs_attribute *vendor_attribute;
46925 char *vendor;
46926diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
46927index 7b11243..b3278a3 100644
46928--- a/drivers/net/bonding/bond_netlink.c
46929+++ b/drivers/net/bonding/bond_netlink.c
46930@@ -585,7 +585,7 @@ nla_put_failure:
46931 return -EMSGSIZE;
46932 }
46933
46934-struct rtnl_link_ops bond_link_ops __read_mostly = {
46935+struct rtnl_link_ops bond_link_ops = {
46936 .kind = "bond",
46937 .priv_size = sizeof(struct bonding),
46938 .setup = bond_setup,
46939diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
46940index b3b922a..80bba38 100644
46941--- a/drivers/net/caif/caif_hsi.c
46942+++ b/drivers/net/caif/caif_hsi.c
46943@@ -1444,7 +1444,7 @@ err:
46944 return -ENODEV;
46945 }
46946
46947-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
46948+static struct rtnl_link_ops caif_hsi_link_ops = {
46949 .kind = "cfhsi",
46950 .priv_size = sizeof(struct cfhsi),
46951 .setup = cfhsi_setup,
46952diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
46953index 98d73aa..63ef9da 100644
46954--- a/drivers/net/can/Kconfig
46955+++ b/drivers/net/can/Kconfig
46956@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
46957
46958 config CAN_FLEXCAN
46959 tristate "Support for Freescale FLEXCAN based chips"
46960- depends on ARM || PPC
46961+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
46962 ---help---
46963 Say Y here if you want to support for Freescale FlexCAN.
46964
46965diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
46966index 62ca0e8..3bed607 100644
46967--- a/drivers/net/can/dev.c
46968+++ b/drivers/net/can/dev.c
46969@@ -958,7 +958,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
46970 return -EOPNOTSUPP;
46971 }
46972
46973-static struct rtnl_link_ops can_link_ops __read_mostly = {
46974+static struct rtnl_link_ops can_link_ops = {
46975 .kind = "can",
46976 .maxtype = IFLA_CAN_MAX,
46977 .policy = can_policy,
46978diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
46979index 674f367..ec3a31f 100644
46980--- a/drivers/net/can/vcan.c
46981+++ b/drivers/net/can/vcan.c
46982@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
46983 dev->destructor = free_netdev;
46984 }
46985
46986-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
46987+static struct rtnl_link_ops vcan_link_ops = {
46988 .kind = "vcan",
46989 .setup = vcan_setup,
46990 };
46991diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
46992index 49adbf1..fff7ff8 100644
46993--- a/drivers/net/dummy.c
46994+++ b/drivers/net/dummy.c
46995@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
46996 return 0;
46997 }
46998
46999-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47000+static struct rtnl_link_ops dummy_link_ops = {
47001 .kind = DRV_NAME,
47002 .setup = dummy_setup,
47003 .validate = dummy_validate,
47004diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47005index 0443654..4f0aa18 100644
47006--- a/drivers/net/ethernet/8390/ax88796.c
47007+++ b/drivers/net/ethernet/8390/ax88796.c
47008@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47009 if (ax->plat->reg_offsets)
47010 ei_local->reg_offset = ax->plat->reg_offsets;
47011 else {
47012+ resource_size_t _mem_size = mem_size;
47013+ do_div(_mem_size, 0x18);
47014 ei_local->reg_offset = ax->reg_offsets;
47015 for (ret = 0; ret < 0x18; ret++)
47016- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47017+ ax->reg_offsets[ret] = _mem_size * ret;
47018 }
47019
47020 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
47021diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47022index 760c72c..a99728c 100644
47023--- a/drivers/net/ethernet/altera/altera_tse_main.c
47024+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47025@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
47026 return 0;
47027 }
47028
47029-static struct net_device_ops altera_tse_netdev_ops = {
47030+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47031 .ndo_open = tse_open,
47032 .ndo_stop = tse_shutdown,
47033 .ndo_start_xmit = tse_start_xmit,
47034@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47035 ndev->netdev_ops = &altera_tse_netdev_ops;
47036 altera_tse_set_ethtool_ops(ndev);
47037
47038+ pax_open_kernel();
47039 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47040
47041 if (priv->hash_filter)
47042 altera_tse_netdev_ops.ndo_set_rx_mode =
47043 tse_set_rx_mode_hashfilter;
47044+ pax_close_kernel();
47045
47046 /* Scatter/gather IO is not supported,
47047 * so it is turned off
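
net_device_ops_no_const and __read_only in the altera_tse hunk are grsecurity-specific: the ops table sits in memory that is normally write-protected, and the probe-time fixup of ndo_set_rx_mode is bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily permit kernel writes to read-only data (on x86 essentially by toggling CR0.WP). A sketch of the pattern with hypothetical names (my_ops, my_probe and friends are illustrative, not from the patch):

	static net_device_ops_no_const my_ops __read_only = {
		.ndo_open = my_open,		/* fixed at build time */
	};

	static int my_probe(struct platform_device *pdev)
	{
		pax_open_kernel();		/* lift write protection */
		my_ops.ndo_set_rx_mode = my_set_rx_mode; /* one-time fixup */
		pax_close_kernel();		/* restore protection */
		return 0;
	}
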
47048diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47049index 29a0927..5a348e24 100644
47050--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47051+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47052@@ -1122,14 +1122,14 @@ do { \
47053 * operations, everything works on mask values.
47054 */
47055 #define XMDIO_READ(_pdata, _mmd, _reg) \
47056- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47057+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47058 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47059
47060 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47061 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47062
47063 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47064- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47065+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47066 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47067
47068 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47069diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47070index 8a50b01..39c1ad0 100644
47071--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47072+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47073@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47074
47075 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47076
47077- pdata->hw_if.config_dcb_tc(pdata);
47078+ pdata->hw_if->config_dcb_tc(pdata);
47079
47080 return 0;
47081 }
47082@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47083
47084 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47085
47086- pdata->hw_if.config_dcb_pfc(pdata);
47087+ pdata->hw_if->config_dcb_pfc(pdata);
47088
47089 return 0;
47090 }
47091diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47092index a50891f..b26fe24 100644
47093--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47094+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47095@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
47096
47097 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47098 {
47099- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47100+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47101 struct xgbe_channel *channel;
47102 struct xgbe_ring *ring;
47103 struct xgbe_ring_data *rdata;
47104@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47105
47106 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47107 {
47108- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47109+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47110 struct xgbe_channel *channel;
47111 struct xgbe_ring *ring;
47112 struct xgbe_ring_desc *rdesc;
47113@@ -624,7 +624,7 @@ err_out:
47114 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47115 {
47116 struct xgbe_prv_data *pdata = channel->pdata;
47117- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47118+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47119 struct xgbe_ring *ring = channel->rx_ring;
47120 struct xgbe_ring_data *rdata;
47121 int i;
47122@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47123 DBGPR("<--xgbe_realloc_rx_buffer\n");
47124 }
47125
47126-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47127-{
47128- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47129-
47130- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47131- desc_if->free_ring_resources = xgbe_free_ring_resources;
47132- desc_if->map_tx_skb = xgbe_map_tx_skb;
47133- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
47134- desc_if->unmap_rdata = xgbe_unmap_rdata;
47135- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47136- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47137-
47138- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47139-}
47140+const struct xgbe_desc_if default_xgbe_desc_if = {
47141+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47142+ .free_ring_resources = xgbe_free_ring_resources,
47143+ .map_tx_skb = xgbe_map_tx_skb,
47144+ .realloc_rx_buffer = xgbe_realloc_rx_buffer,
47145+ .unmap_rdata = xgbe_unmap_rdata,
47146+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47147+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47148+};
47149diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47150index 4c66cd1..1a20aab 100644
47151--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47152+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47153@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47154
47155 static int xgbe_init(struct xgbe_prv_data *pdata)
47156 {
47157- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47158+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47159 int ret;
47160
47161 DBGPR("-->xgbe_init\n");
47162@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47163 return 0;
47164 }
47165
47166-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47167-{
47168- DBGPR("-->xgbe_init_function_ptrs\n");
47169-
47170- hw_if->tx_complete = xgbe_tx_complete;
47171-
47172- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47173- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47174- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47175- hw_if->set_mac_address = xgbe_set_mac_address;
47176-
47177- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47178- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47179-
47180- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47181- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47182- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47183- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47184- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47185-
47186- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47187- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47188-
47189- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47190- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47191- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47192-
47193- hw_if->enable_tx = xgbe_enable_tx;
47194- hw_if->disable_tx = xgbe_disable_tx;
47195- hw_if->enable_rx = xgbe_enable_rx;
47196- hw_if->disable_rx = xgbe_disable_rx;
47197-
47198- hw_if->powerup_tx = xgbe_powerup_tx;
47199- hw_if->powerdown_tx = xgbe_powerdown_tx;
47200- hw_if->powerup_rx = xgbe_powerup_rx;
47201- hw_if->powerdown_rx = xgbe_powerdown_rx;
47202-
47203- hw_if->dev_xmit = xgbe_dev_xmit;
47204- hw_if->dev_read = xgbe_dev_read;
47205- hw_if->enable_int = xgbe_enable_int;
47206- hw_if->disable_int = xgbe_disable_int;
47207- hw_if->init = xgbe_init;
47208- hw_if->exit = xgbe_exit;
47209+const struct xgbe_hw_if default_xgbe_hw_if = {
47210+ .tx_complete = xgbe_tx_complete,
47211+
47212+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47213+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47214+ .add_mac_addresses = xgbe_add_mac_addresses,
47215+ .set_mac_address = xgbe_set_mac_address,
47216+
47217+ .enable_rx_csum = xgbe_enable_rx_csum,
47218+ .disable_rx_csum = xgbe_disable_rx_csum,
47219+
47220+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47221+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47222+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47223+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47224+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47225+
47226+ .read_mmd_regs = xgbe_read_mmd_regs,
47227+ .write_mmd_regs = xgbe_write_mmd_regs,
47228+
47229+ .set_gmii_speed = xgbe_set_gmii_speed,
47230+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47231+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47232+
47233+ .enable_tx = xgbe_enable_tx,
47234+ .disable_tx = xgbe_disable_tx,
47235+ .enable_rx = xgbe_enable_rx,
47236+ .disable_rx = xgbe_disable_rx,
47237+
47238+ .powerup_tx = xgbe_powerup_tx,
47239+ .powerdown_tx = xgbe_powerdown_tx,
47240+ .powerup_rx = xgbe_powerup_rx,
47241+ .powerdown_rx = xgbe_powerdown_rx,
47242+
47243+ .dev_xmit = xgbe_dev_xmit,
47244+ .dev_read = xgbe_dev_read,
47245+ .enable_int = xgbe_enable_int,
47246+ .disable_int = xgbe_disable_int,
47247+ .init = xgbe_init,
47248+ .exit = xgbe_exit,
47249
47250 /* Descriptor related Sequences have to be initialized here */
47251- hw_if->tx_desc_init = xgbe_tx_desc_init;
47252- hw_if->rx_desc_init = xgbe_rx_desc_init;
47253- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47254- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47255- hw_if->is_last_desc = xgbe_is_last_desc;
47256- hw_if->is_context_desc = xgbe_is_context_desc;
47257- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47258+ .tx_desc_init = xgbe_tx_desc_init,
47259+ .rx_desc_init = xgbe_rx_desc_init,
47260+ .tx_desc_reset = xgbe_tx_desc_reset,
47261+ .rx_desc_reset = xgbe_rx_desc_reset,
47262+ .is_last_desc = xgbe_is_last_desc,
47263+ .is_context_desc = xgbe_is_context_desc,
47264+ .tx_start_xmit = xgbe_tx_start_xmit,
47265
47266 /* For FLOW ctrl */
47267- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47268- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47269+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47270+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47271
47272 /* For RX coalescing */
47273- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47274- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47275- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47276- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47277+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47278+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47279+ .usec_to_riwt = xgbe_usec_to_riwt,
47280+ .riwt_to_usec = xgbe_riwt_to_usec,
47281
47282 /* For RX and TX threshold config */
47283- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47284- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47285+ .config_rx_threshold = xgbe_config_rx_threshold,
47286+ .config_tx_threshold = xgbe_config_tx_threshold,
47287
47288 /* For RX and TX Store and Forward Mode config */
47289- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47290- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47291+ .config_rsf_mode = xgbe_config_rsf_mode,
47292+ .config_tsf_mode = xgbe_config_tsf_mode,
47293
47294 /* For TX DMA Operating on Second Frame config */
47295- hw_if->config_osp_mode = xgbe_config_osp_mode;
47296+ .config_osp_mode = xgbe_config_osp_mode,
47297
47298 /* For RX and TX PBL config */
47299- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47300- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47301- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47302- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47303- hw_if->config_pblx8 = xgbe_config_pblx8;
47304+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47305+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47306+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47307+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47308+ .config_pblx8 = xgbe_config_pblx8,
47309
47310 /* For MMC statistics support */
47311- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47312- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47313- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47314+ .tx_mmc_int = xgbe_tx_mmc_int,
47315+ .rx_mmc_int = xgbe_rx_mmc_int,
47316+ .read_mmc_stats = xgbe_read_mmc_stats,
47317
47318 /* For PTP config */
47319- hw_if->config_tstamp = xgbe_config_tstamp;
47320- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47321- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47322- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47323- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47324+ .config_tstamp = xgbe_config_tstamp,
47325+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47326+ .set_tstamp_time = xgbe_set_tstamp_time,
47327+ .get_tstamp_time = xgbe_get_tstamp_time,
47328+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47329
47330 /* For Data Center Bridging config */
47331- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47332- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47333+ .config_dcb_tc = xgbe_config_dcb_tc,
47334+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47335
47336 /* For Receive Side Scaling */
47337- hw_if->enable_rss = xgbe_enable_rss;
47338- hw_if->disable_rss = xgbe_disable_rss;
47339- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47340- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47341-
47342- DBGPR("<--xgbe_init_function_ptrs\n");
47343-}
47344+ .enable_rss = xgbe_enable_rss,
47345+ .disable_rss = xgbe_disable_rss,
47346+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47347+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47348+};
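
The xgbe conversion above is the recurring constification pattern in this patch: a function that filled in a function-pointer table at probe time is replaced by a const table built with designated initializers, so the compiler can place it in .rodata and the pointers can never be retargeted at runtime; pdata then only carries a const pointer to the table (see the xgbe.h hunk further down). A generic sketch of the before/after shape, with illustrative names:

	struct ops { int (*start)(void); void (*stop)(void); };

	static int  drv_start(void) { return 0; }
	static void drv_stop(void)  { }

	/* Before: filled in at runtime, so the object must stay writable:
	 *     ops->start = drv_start; ops->stop = drv_stop;
	 * After: one link-time instance the hardening plugins can keep
	 * read-only for the lifetime of the kernel. */
	static const struct ops default_ops = {
		.start = drv_start,
		.stop  = drv_stop,
	};
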
47349diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47350index e5ffb2c..e56d30b 100644
47351--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47352+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47353@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47354 * support, tell it now
47355 */
47356 if (ring->tx.xmit_more)
47357- pdata->hw_if.tx_start_xmit(channel, ring);
47358+ pdata->hw_if->tx_start_xmit(channel, ring);
47359
47360 return NETDEV_TX_BUSY;
47361 }
47362@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47363
47364 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47365 {
47366- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47367+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47368 struct xgbe_channel *channel;
47369 enum xgbe_int int_id;
47370 unsigned int i;
47371@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47372
47373 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47374 {
47375- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47376+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47377 struct xgbe_channel *channel;
47378 enum xgbe_int int_id;
47379 unsigned int i;
47380@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47381 static irqreturn_t xgbe_isr(int irq, void *data)
47382 {
47383 struct xgbe_prv_data *pdata = data;
47384- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47385+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47386 struct xgbe_channel *channel;
47387 unsigned int dma_isr, dma_ch_isr;
47388 unsigned int mac_isr, mac_tssr;
47389@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47390
47391 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47392 {
47393- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47394+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47395
47396 DBGPR("-->xgbe_init_tx_coalesce\n");
47397
47398@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47399
47400 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47401 {
47402- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47403+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47404
47405 DBGPR("-->xgbe_init_rx_coalesce\n");
47406
47407@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47408
47409 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47410 {
47411- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47412+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47413 struct xgbe_channel *channel;
47414 struct xgbe_ring *ring;
47415 struct xgbe_ring_data *rdata;
47416@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47417
47418 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47419 {
47420- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47421+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47422 struct xgbe_channel *channel;
47423 struct xgbe_ring *ring;
47424 struct xgbe_ring_data *rdata;
47425@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47426 static void xgbe_adjust_link(struct net_device *netdev)
47427 {
47428 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47429- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47430+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47431 struct phy_device *phydev = pdata->phydev;
47432 int new_state = 0;
47433
47434@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47435 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47436 {
47437 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47438- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47439+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47440 unsigned long flags;
47441
47442 DBGPR("-->xgbe_powerdown\n");
47443@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47444 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47445 {
47446 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47447- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47448+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47449 unsigned long flags;
47450
47451 DBGPR("-->xgbe_powerup\n");
47452@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47453
47454 static int xgbe_start(struct xgbe_prv_data *pdata)
47455 {
47456- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47457+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47458 struct net_device *netdev = pdata->netdev;
47459
47460 DBGPR("-->xgbe_start\n");
47461@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47462
47463 static void xgbe_stop(struct xgbe_prv_data *pdata)
47464 {
47465- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47466+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47467 struct xgbe_channel *channel;
47468 struct net_device *netdev = pdata->netdev;
47469 struct netdev_queue *txq;
47470@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47471 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47472 {
47473 struct xgbe_channel *channel;
47474- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47475+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47476 unsigned int i;
47477
47478 DBGPR("-->xgbe_restart_dev\n");
47479@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47480 return -ERANGE;
47481 }
47482
47483- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47484+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47485
47486 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47487
47488@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47489 static int xgbe_open(struct net_device *netdev)
47490 {
47491 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47492- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47493- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47494+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47495+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47496 struct xgbe_channel *channel = NULL;
47497 unsigned int i = 0;
47498 int ret;
47499@@ -1400,8 +1400,8 @@ err_phy_init:
47500 static int xgbe_close(struct net_device *netdev)
47501 {
47502 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47503- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47504- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47505+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47506+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47507 struct xgbe_channel *channel;
47508 unsigned int i;
47509
47510@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
47511 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47512 {
47513 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47514- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47515- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47516+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47517+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47518 struct xgbe_channel *channel;
47519 struct xgbe_ring *ring;
47520 struct xgbe_packet_data *packet;
47521@@ -1518,7 +1518,7 @@ tx_netdev_return:
47522 static void xgbe_set_rx_mode(struct net_device *netdev)
47523 {
47524 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47525- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47526+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47527 unsigned int pr_mode, am_mode;
47528
47529 DBGPR("-->xgbe_set_rx_mode\n");
47530@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47531 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47532 {
47533 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47534- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47535+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47536 struct sockaddr *saddr = addr;
47537
47538 DBGPR("-->xgbe_set_mac_address\n");
47539@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47540
47541 DBGPR("-->%s\n", __func__);
47542
47543- pdata->hw_if.read_mmc_stats(pdata);
47544+ pdata->hw_if->read_mmc_stats(pdata);
47545
47546 s->rx_packets = pstats->rxframecount_gb;
47547 s->rx_bytes = pstats->rxoctetcount_gb;
47548@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47549 u16 vid)
47550 {
47551 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47552- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47553+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47554
47555 DBGPR("-->%s\n", __func__);
47556
47557@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47558 u16 vid)
47559 {
47560 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47561- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47562+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47563
47564 DBGPR("-->%s\n", __func__);
47565
47566@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
47567 netdev_features_t features)
47568 {
47569 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47570- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47571+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47572 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47573 int ret = 0;
47574
47575@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47576 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47577 {
47578 struct xgbe_prv_data *pdata = channel->pdata;
47579- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47580+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47581 struct xgbe_ring *ring = channel->rx_ring;
47582 struct xgbe_ring_data *rdata;
47583
47584@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47585 static int xgbe_tx_poll(struct xgbe_channel *channel)
47586 {
47587 struct xgbe_prv_data *pdata = channel->pdata;
47588- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47589- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47590+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47591+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47592 struct xgbe_ring *ring = channel->tx_ring;
47593 struct xgbe_ring_data *rdata;
47594 struct xgbe_ring_desc *rdesc;
47595@@ -1891,7 +1891,7 @@ unlock:
47596 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47597 {
47598 struct xgbe_prv_data *pdata = channel->pdata;
47599- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47600+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47601 struct xgbe_ring *ring = channel->rx_ring;
47602 struct xgbe_ring_data *rdata;
47603 struct xgbe_packet_data *packet;
47604diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47605index ebf4893..28108c7 100644
47606--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47607+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47608@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47609
47610 DBGPR("-->%s\n", __func__);
47611
47612- pdata->hw_if.read_mmc_stats(pdata);
47613+ pdata->hw_if->read_mmc_stats(pdata);
47614 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47615 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47616 *data++ = *(u64 *)stat;
47617@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47618 struct ethtool_coalesce *ec)
47619 {
47620 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47621- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47622+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47623 unsigned int riwt;
47624
47625 DBGPR("-->xgbe_get_coalesce\n");
47626@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47627 struct ethtool_coalesce *ec)
47628 {
47629 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47630- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47631+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47632 unsigned int rx_frames, rx_riwt, rx_usecs;
47633 unsigned int tx_frames, tx_usecs;
47634
47635diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47636index dbd3850..4e31b38 100644
47637--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47638+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47639@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47640 DBGPR("<--xgbe_default_config\n");
47641 }
47642
47643-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47644-{
47645- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47646- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47647-}
47648-
47649 static int xgbe_probe(struct platform_device *pdev)
47650 {
47651 struct xgbe_prv_data *pdata;
47652@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
47653 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
47654
47655 /* Set all the function pointers */
47656- xgbe_init_all_fptrs(pdata);
47657- hw_if = &pdata->hw_if;
47658- desc_if = &pdata->desc_if;
47659+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47660+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47661
47662 /* Issue software reset to device */
47663 hw_if->exit(pdata);
47664diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47665index 363b210..b241389 100644
47666--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47667+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47668@@ -126,7 +126,7 @@
47669 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47670 {
47671 struct xgbe_prv_data *pdata = mii->priv;
47672- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47673+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47674 int mmd_data;
47675
47676 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47677@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47678 u16 mmd_val)
47679 {
47680 struct xgbe_prv_data *pdata = mii->priv;
47681- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47682+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47683 int mmd_data = mmd_val;
47684
47685 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47686diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47687index a1bf9d1c..84adcab 100644
47688--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47689+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47690@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47691 tstamp_cc);
47692 u64 nsec;
47693
47694- nsec = pdata->hw_if.get_tstamp_time(pdata);
47695+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47696
47697 return nsec;
47698 }
47699@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47700
47701 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47702
47703- pdata->hw_if.update_tstamp_addend(pdata, addend);
47704+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47705
47706 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47707
47708diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47709index f9ec762..988c969 100644
47710--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47711+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47712@@ -668,8 +668,8 @@ struct xgbe_prv_data {
47713 int dev_irq;
47714 unsigned int per_channel_irq;
47715
47716- struct xgbe_hw_if hw_if;
47717- struct xgbe_desc_if desc_if;
47718+ const struct xgbe_hw_if *hw_if;
47719+ const struct xgbe_desc_if *desc_if;
47720
47721 /* AXI DMA settings */
47722 unsigned int axdomain;
47723@@ -787,6 +787,9 @@ struct xgbe_prv_data {
47724 #endif
47725 };
47726
47727+extern const struct xgbe_hw_if default_xgbe_hw_if;
47728+extern const struct xgbe_desc_if default_xgbe_desc_if;
47729+
47730 /* Function prototypes*/
47731
47732 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
47733diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47734index adcacda..fa6e0ae 100644
47735--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47736+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47737@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47738 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47739 {
47740 /* RX_MODE controlling object */
47741- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47742+ bnx2x_init_rx_mode_obj(bp);
47743
47744 /* multicast configuration controlling object */
47745 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47746diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47747index 07cdf9b..b08ecc7 100644
47748--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47749+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47750@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47751 return rc;
47752 }
47753
47754-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47755- struct bnx2x_rx_mode_obj *o)
47756+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47757 {
47758 if (CHIP_IS_E1x(bp)) {
47759- o->wait_comp = bnx2x_empty_rx_mode_wait;
47760- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47761+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47762+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47763 } else {
47764- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47765- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47766+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47767+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47768 }
47769 }
47770
47771diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47772index 86baecb..ff3bb46 100644
47773--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47774+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47775@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47776
47777 /********************* RX MODE ****************/
47778
47779-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47780- struct bnx2x_rx_mode_obj *o);
47781+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47782
47783 /**
47784 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
47785diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47786index 31c9f82..e65e986 100644
47787--- a/drivers/net/ethernet/broadcom/tg3.h
47788+++ b/drivers/net/ethernet/broadcom/tg3.h
47789@@ -150,6 +150,7 @@
47790 #define CHIPREV_ID_5750_A0 0x4000
47791 #define CHIPREV_ID_5750_A1 0x4001
47792 #define CHIPREV_ID_5750_A3 0x4003
47793+#define CHIPREV_ID_5750_C1 0x4201
47794 #define CHIPREV_ID_5750_C2 0x4202
47795 #define CHIPREV_ID_5752_A0_HW 0x5000
47796 #define CHIPREV_ID_5752_A0 0x6000
47797diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47798index 903466e..b285864 100644
47799--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47800+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47801@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47802 }
47803
47804 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47805- bna_cb_ioceth_enable,
47806- bna_cb_ioceth_disable,
47807- bna_cb_ioceth_hbfail,
47808- bna_cb_ioceth_reset
47809+ .enable_cbfn = bna_cb_ioceth_enable,
47810+ .disable_cbfn = bna_cb_ioceth_disable,
47811+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47812+ .reset_cbfn = bna_cb_ioceth_reset
47813 };
47814
47815 static void bna_attr_init(struct bna_ioceth *ioceth)
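
The bna hunk swaps positional initializers for designated ones. Functionally identical here, but the designated form stays correct if struct bfa_ioc_cbfn ever reorders or grows fields, and it documents which callback lands in which slot. In miniature (cbfn and the handlers are illustrative):

	struct cbfn { void (*enable)(void); void (*disable)(void); };

	static void on_enable(void)  { }
	static void on_disable(void) { }

	static const struct cbfn positional = { on_enable, on_disable }; /* order-dependent */
	static const struct cbfn designated = {
		.enable  = on_enable,	/* robust against reordering */
		.disable = on_disable,
	};
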
47816diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47817index 8cffcdf..aadf043 100644
47818--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47819+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47820@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47821 */
47822 struct l2t_skb_cb {
47823 arp_failure_handler_func arp_failure_handler;
47824-};
47825+} __no_const;
47826
47827 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47828
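
__no_const is the opt-out side of the PaX constify plugin: the plugin treats a struct consisting only of function pointers as implicitly const, but l2t_skb_cb lives in the per-packet skb->cb scratch area and is rewritten for every skb, so it must stay writable. The r8169 and r8152 hunks later in this section apply the same marker to ops structs embedded in writable private state. Roughly (handler_slot is illustrative):

	/* Constify plugin default: all-function-pointer structs become
	 * const.  __no_const keeps instances writable where per-object
	 * mutation is the whole point: */
	struct handler_slot {
		void (*fn)(void *ctx);
	} __no_const;		/* rewritten per object at runtime */
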
47829diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47830index ccf3436..b720d77 100644
47831--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47832+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47833@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47834
47835 int i;
47836 struct adapter *ap = netdev2adap(dev);
47837- static const unsigned int *reg_ranges;
47838+ const unsigned int *reg_ranges;
47839 int arr_size = 0, buf_size = 0;
47840
47841 if (is_t4(ap->params.chip)) {
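
Dropping static here fixes a subtle wart: a function-local static pointer is a single writable global reassigned on every call, which is both pointless (the value is recomputed each time) and a data race if two threads run get_regs() concurrently. An ordinary automatic variable is what was meant; the qlcnic_minidump hunk below makes the identical change to fw_dump_ops. In miniature:

	static const int t4_ranges[] = { 1, 2 };
	static const int t5_ranges[] = { 3, 4 };

	void get_regs_like(int is_t4)
	{
		/* was: static const int *reg_ranges; -- one shared slot */
		const int *reg_ranges;		/* per-call, race-free */

		reg_ranges = is_t4 ? t4_ranges : t5_ranges;
		(void)reg_ranges;
	}
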
47842diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
47843index badff18..e15c4ec 100644
47844--- a/drivers/net/ethernet/dec/tulip/de4x5.c
47845+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
47846@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47847 for (i=0; i<ETH_ALEN; i++) {
47848 tmp.addr[i] = dev->dev_addr[i];
47849 }
47850- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47851+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47852 break;
47853
47854 case DE4X5_SET_HWADDR: /* Set the hardware address */
47855@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47856 spin_lock_irqsave(&lp->lock, flags);
47857 memcpy(&statbuf, &lp->pktStats, ioc->len);
47858 spin_unlock_irqrestore(&lp->lock, flags);
47859- if (copy_to_user(ioc->data, &statbuf, ioc->len))
47860+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
47861 return -EFAULT;
47862 break;
47863 }
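
Both de4x5 ioctl paths copy out with a user-supplied ioc->len, so an oversized length would leak adjacent kernel memory past the object; the added comparisons cap the copy-out at the size of the kernel buffer. The guard generalizes to this kernel-style sketch (copy_bounded is illustrative, not an existing helper):

	static int copy_bounded(void __user *dst, const void *src,
				size_t user_len, size_t obj_size)
	{
		if (user_len > obj_size)	/* user controls the length */
			return -EFAULT;
		return copy_to_user(dst, src, user_len) ? -EFAULT : 0;
	}
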
47864diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
47865index d48806b..41cd80f 100644
47866--- a/drivers/net/ethernet/emulex/benet/be_main.c
47867+++ b/drivers/net/ethernet/emulex/benet/be_main.c
47868@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
47869
47870 if (wrapped)
47871 newacc += 65536;
47872- ACCESS_ONCE(*acc) = newacc;
47873+ ACCESS_ONCE_RW(*acc) = newacc;
47874 }
47875
47876 static void populate_erx_stats(struct be_adapter *adapter,
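
ACCESS_ONCE_RW is grsecurity's writable variant of ACCESS_ONCE. Elsewhere the patch redefines plain ACCESS_ONCE() with a const-qualified cast so accidental stores through it fail to compile, and call sites that really mean to store — this statistics update, plus the i40e, ixgbe, mlx4 and sfc hunks below — switch to the _RW form. Approximately:

	/* Approximate definitions under grsecurity: */
	#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* loads only */
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* stores ok  */

	/* ACCESS_ONCE(*acc) = newacc;     -> compile error: lvalue is const */
	/* ACCESS_ONCE_RW(*acc) = newacc;  -> ok: a deliberate store         */
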
47877diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
47878index 6d0c5d5..55be363 100644
47879--- a/drivers/net/ethernet/faraday/ftgmac100.c
47880+++ b/drivers/net/ethernet/faraday/ftgmac100.c
47881@@ -30,6 +30,8 @@
47882 #include <linux/netdevice.h>
47883 #include <linux/phy.h>
47884 #include <linux/platform_device.h>
47885+#include <linux/interrupt.h>
47886+#include <linux/irqreturn.h>
47887 #include <net/ip.h>
47888
47889 #include "ftgmac100.h"
47890diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
47891index dce5f7b..2433466 100644
47892--- a/drivers/net/ethernet/faraday/ftmac100.c
47893+++ b/drivers/net/ethernet/faraday/ftmac100.c
47894@@ -31,6 +31,8 @@
47895 #include <linux/module.h>
47896 #include <linux/netdevice.h>
47897 #include <linux/platform_device.h>
47898+#include <linux/interrupt.h>
47899+#include <linux/irqreturn.h>
47900
47901 #include "ftmac100.h"
47902
47903diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47904index 6d1ec92..4d5d97d 100644
47905--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47906+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47907@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
47908 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
47909
47910 /* Update the base adjustement value. */
47911- ACCESS_ONCE(pf->ptp_base_adj) = incval;
47912+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
47913 smp_mb(); /* Force the above update. */
47914 }
47915
47916diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47917index 5fd4b52..87aa34b 100644
47918--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47919+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47920@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
47921 }
47922
47923 /* update the base incval used to calculate frequency adjustment */
47924- ACCESS_ONCE(adapter->base_incval) = incval;
47925+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
47926 smp_mb();
47927
47928 /* need lock to prevent incorrect read while modifying cyclecounter */
47929diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47930index e3357bf..d4d5348 100644
47931--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47932+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47933@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
47934 wmb();
47935
47936 /* we want to dirty this cache line once */
47937- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
47938- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
47939+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
47940+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
47941
47942 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
47943
47944diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47945index 2bbd01f..e8baa64 100644
47946--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
47947+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47948@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47949 struct __vxge_hw_fifo *fifo;
47950 struct vxge_hw_fifo_config *config;
47951 u32 txdl_size, txdl_per_memblock;
47952- struct vxge_hw_mempool_cbs fifo_mp_callback;
47953+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
47954+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
47955+ };
47956+
47957 struct __vxge_hw_virtualpath *vpath;
47958
47959 if ((vp == NULL) || (attr == NULL)) {
47960@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47961 goto exit;
47962 }
47963
47964- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
47965-
47966 fifo->mempool =
47967 __vxge_hw_mempool_create(vpath->hldev,
47968 fifo->config->memblock_size,
47969diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47970index 2bb48d5..d1a865d 100644
47971--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47972+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47973@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
47974 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
47975 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
47976 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
47977- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47978+ pax_open_kernel();
47979+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47980+ pax_close_kernel();
47981 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47982 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
47983 max_tx_rings = QLCNIC_MAX_TX_RINGS;
47984diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47985index be7d7a6..a8983f8 100644
47986--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47987+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47988@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
47989 case QLCNIC_NON_PRIV_FUNC:
47990 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
47991 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47992- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47993+ pax_open_kernel();
47994+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47995+ pax_close_kernel();
47996 break;
47997 case QLCNIC_PRIV_FUNC:
47998 ahw->op_mode = QLCNIC_PRIV_FUNC;
47999 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48000- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48001+ pax_open_kernel();
48002+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48003+ pax_close_kernel();
48004 break;
48005 case QLCNIC_MGMT_FUNC:
48006 ahw->op_mode = QLCNIC_MGMT_FUNC;
48007 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48008- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48009+ pax_open_kernel();
48010+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48011+ pax_close_kernel();
48012 break;
48013 default:
48014 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
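
Here the nic_ops type has been constified, so a plain member store no longer compiles; each write takes the member's address, casts away the const qualification, and performs the store inside a pax_open_kernel() window. This is the companion idiom to the altera_tse hunk earlier, and the macvlan_link_register hunk below applies the same cast-away-const trick to rtnl_link_ops fields. Shape of the cast (names as in the hunk):

	pax_open_kernel();
	/* &nic_ops->init_driver is a pointer to a const-qualified function
	 * pointer; the (void **) cast strips the qualifier so the store
	 * compiles, and the open/close window makes it land in RO memory. */
	*(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
	pax_close_kernel();
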
48015diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48016index c9f57fb..208bdc1 100644
48017--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48018+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48019@@ -1285,7 +1285,7 @@ flash_temp:
48020 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48021 {
48022 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48023- static const struct qlcnic_dump_operations *fw_dump_ops;
48024+ const struct qlcnic_dump_operations *fw_dump_ops;
48025 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48026 u32 entry_offset, dump, no_entries, buf_offset = 0;
48027 int i, k, ops_cnt, ops_index, dump_size = 0;
48028diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48029index 2e2cf80..ebc796d 100644
48030--- a/drivers/net/ethernet/realtek/r8169.c
48031+++ b/drivers/net/ethernet/realtek/r8169.c
48032@@ -788,22 +788,22 @@ struct rtl8169_private {
48033 struct mdio_ops {
48034 void (*write)(struct rtl8169_private *, int, int);
48035 int (*read)(struct rtl8169_private *, int);
48036- } mdio_ops;
48037+ } __no_const mdio_ops;
48038
48039 struct pll_power_ops {
48040 void (*down)(struct rtl8169_private *);
48041 void (*up)(struct rtl8169_private *);
48042- } pll_power_ops;
48043+ } __no_const pll_power_ops;
48044
48045 struct jumbo_ops {
48046 void (*enable)(struct rtl8169_private *);
48047 void (*disable)(struct rtl8169_private *);
48048- } jumbo_ops;
48049+ } __no_const jumbo_ops;
48050
48051 struct csi_ops {
48052 void (*write)(struct rtl8169_private *, int, int);
48053 u32 (*read)(struct rtl8169_private *, int);
48054- } csi_ops;
48055+ } __no_const csi_ops;
48056
48057 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48058 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
48059diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48060index 6b861e3..204ac86 100644
48061--- a/drivers/net/ethernet/sfc/ptp.c
48062+++ b/drivers/net/ethernet/sfc/ptp.c
48063@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48064 ptp->start.dma_addr);
48065
48066 /* Clear flag that signals MC ready */
48067- ACCESS_ONCE(*start) = 0;
48068+ ACCESS_ONCE_RW(*start) = 0;
48069 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48070 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48071 EFX_BUG_ON_PARANOID(rc);
48072diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48073index 08c483b..2c4a553 100644
48074--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48075+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48076@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48077
48078 writel(value, ioaddr + MMC_CNTRL);
48079
48080- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48081- MMC_CNTRL, value);
48082+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48083+// MMC_CNTRL, value);
48084 }
48085
48086 /* To mask all all interrupts.*/
48087diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48088index 384ca4f..dd7d4f9 100644
48089--- a/drivers/net/hyperv/hyperv_net.h
48090+++ b/drivers/net/hyperv/hyperv_net.h
48091@@ -171,7 +171,7 @@ struct rndis_device {
48092 enum rndis_device_state state;
48093 bool link_state;
48094 bool link_change;
48095- atomic_t new_req_id;
48096+ atomic_unchecked_t new_req_id;
48097
48098 spinlock_t request_lock;
48099 struct list_head req_list;
48100diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48101index ec0c40a..c9e42eb 100644
48102--- a/drivers/net/hyperv/rndis_filter.c
48103+++ b/drivers/net/hyperv/rndis_filter.c
48104@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48105 * template
48106 */
48107 set = &rndis_msg->msg.set_req;
48108- set->req_id = atomic_inc_return(&dev->new_req_id);
48109+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48110
48111 /* Add to the request list */
48112 spin_lock_irqsave(&dev->request_lock, flags);
48113@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48114
48115 /* Setup the rndis set */
48116 halt = &request->request_msg.msg.halt_req;
48117- halt->req_id = atomic_inc_return(&dev->new_req_id);
48118+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48119
48120 /* Ignore return since this msg is optional. */
48121 rndis_filter_send_request(dev, request);
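
PaX's REFCOUNT feature makes atomic_t arithmetic trap on overflow to catch reference-count bugs; counters that are allowed to wrap — this RNDIS request ID, and sierra_net's interface counter later in the section — move to atomic_unchecked_t with the matching _unchecked operations so legitimate wraparound does not fire the detector. Pattern (next_req_id is illustrative):

	static atomic_unchecked_t new_req_id = ATOMIC_INIT(0);

	static u32 next_req_id(void)
	{
		/* wraps silently at 2^32, which is fine for an ID; the
		 * _unchecked op keeps REFCOUNT from treating it as a bug */
		return atomic_inc_return_unchecked(&new_req_id);
	}
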
48122diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48123index 34f846b..4a0d5b1 100644
48124--- a/drivers/net/ifb.c
48125+++ b/drivers/net/ifb.c
48126@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48127 return 0;
48128 }
48129
48130-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48131+static struct rtnl_link_ops ifb_link_ops = {
48132 .kind = "ifb",
48133 .priv_size = sizeof(struct ifb_private),
48134 .setup = ifb_setup,
48135diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48136index 612e073..a9f5eda 100644
48137--- a/drivers/net/macvlan.c
48138+++ b/drivers/net/macvlan.c
48139@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48140 free_nskb:
48141 kfree_skb(nskb);
48142 err:
48143- atomic_long_inc(&skb->dev->rx_dropped);
48144+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48145 }
48146
48147 static void macvlan_flush_sources(struct macvlan_port *port,
48148@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48149 int macvlan_link_register(struct rtnl_link_ops *ops)
48150 {
48151 /* common fields */
48152- ops->priv_size = sizeof(struct macvlan_dev);
48153- ops->validate = macvlan_validate;
48154- ops->maxtype = IFLA_MACVLAN_MAX;
48155- ops->policy = macvlan_policy;
48156- ops->changelink = macvlan_changelink;
48157- ops->get_size = macvlan_get_size;
48158- ops->fill_info = macvlan_fill_info;
48159+ pax_open_kernel();
48160+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48161+ *(void **)&ops->validate = macvlan_validate;
48162+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48163+ *(const void **)&ops->policy = macvlan_policy;
48164+ *(void **)&ops->changelink = macvlan_changelink;
48165+ *(void **)&ops->get_size = macvlan_get_size;
48166+ *(void **)&ops->fill_info = macvlan_fill_info;
48167+ pax_close_kernel();
48168
48169 return rtnl_link_register(ops);
48170 };
48171@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48172 return NOTIFY_DONE;
48173 }
48174
48175-static struct notifier_block macvlan_notifier_block __read_mostly = {
48176+static struct notifier_block macvlan_notifier_block = {
48177 .notifier_call = macvlan_device_event,
48178 };
48179
48180diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48181index 4d050ee..012f6dd 100644
48182--- a/drivers/net/macvtap.c
48183+++ b/drivers/net/macvtap.c
48184@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48185 dev->tx_queue_len = TUN_READQ_SIZE;
48186 }
48187
48188-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48189+static struct rtnl_link_ops macvtap_link_ops = {
48190 .kind = "macvtap",
48191 .setup = macvtap_setup,
48192 .newlink = macvtap_newlink,
48193@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48194
48195 ret = 0;
48196 u = q->flags;
48197- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48198+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48199 put_user(u, &ifr->ifr_flags))
48200 ret = -EFAULT;
48201 macvtap_put_vlan(vlan);
48202@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48203 return NOTIFY_DONE;
48204 }
48205
48206-static struct notifier_block macvtap_notifier_block __read_mostly = {
48207+static struct notifier_block macvtap_notifier_block = {
48208 .notifier_call = macvtap_device_event,
48209 };
48210
48211diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48212index 34924df..a747360 100644
48213--- a/drivers/net/nlmon.c
48214+++ b/drivers/net/nlmon.c
48215@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48216 return 0;
48217 }
48218
48219-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48220+static struct rtnl_link_ops nlmon_link_ops = {
48221 .kind = "nlmon",
48222 .priv_size = sizeof(struct nlmon),
48223 .setup = nlmon_setup,
48224diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48225index 3fc91e8..6c36337 100644
48226--- a/drivers/net/phy/phy_device.c
48227+++ b/drivers/net/phy/phy_device.c
48228@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48229 * zero on success.
48230 *
48231 */
48232-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48233+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48234 struct phy_c45_device_ids *c45_ids) {
48235 int phy_reg;
48236 int i, reg_addr;
48237@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48238 * its return value is in turn returned.
48239 *
48240 */
48241-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48242+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48243 bool is_c45, struct phy_c45_device_ids *c45_ids)
48244 {
48245 int phy_reg;
48246@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48247 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48248 {
48249 struct phy_c45_device_ids c45_ids = {0};
48250- u32 phy_id = 0;
48251+ int phy_id = 0;
48252 int r;
48253
48254 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48255diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48256index af034db..1611c0b2 100644
48257--- a/drivers/net/ppp/ppp_generic.c
48258+++ b/drivers/net/ppp/ppp_generic.c
48259@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48260 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48261 struct ppp_stats stats;
48262 struct ppp_comp_stats cstats;
48263- char *vers;
48264
48265 switch (cmd) {
48266 case SIOCGPPPSTATS:
48267@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48268 break;
48269
48270 case SIOCGPPPVER:
48271- vers = PPP_VERSION;
48272- if (copy_to_user(addr, vers, strlen(vers) + 1))
48273+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48274 break;
48275 err = 0;
48276 break;
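
PPP_VERSION is a string-literal macro, so sizeof(PPP_VERSION) is the length including the terminating NUL and is computed at compile time; the temporary vers pointer and the runtime strlen()+1 were dead weight. A compilable check of the equivalence:

	#include <stdio.h>
	#include <string.h>

	#define PPP_VERSION "2.4.2"	/* as in linux/ppp_defs.h */

	int main(void)
	{
		printf("%zu %zu\n", sizeof(PPP_VERSION),
		       strlen(PPP_VERSION) + 1);
		return 0;		/* prints: 6 6 */
	}
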
48277diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48278index 079f7ad..b2a2bfa7 100644
48279--- a/drivers/net/slip/slhc.c
48280+++ b/drivers/net/slip/slhc.c
48281@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48282 register struct tcphdr *thp;
48283 register struct iphdr *ip;
48284 register struct cstate *cs;
48285- int len, hdrlen;
48286+ long len, hdrlen;
48287 unsigned char *cp = icp;
48288
48289 /* We've got a compressed packet; read the change byte */
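
Widening len and hdrlen from int to long matches the size_overflow instrumentation used throughout the patch: on 64-bit kernels the pointer arithmetic in slhc_uncompress produces 64-bit intermediates, and funnelling them through an int can silently truncate. The failure mode in miniature:

	#include <stdio.h>

	int main(void)
	{
		long big = 0x100000004L;	/* a 33-bit intermediate */
		int  narrow = (int)big;		/* keeps low 32 bits only */
		printf("%ld -> %d\n", big, narrow); /* 4294967300 -> 4 */
		return 0;
	}
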
48290diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48291index 2c087ef..4859007 100644
48292--- a/drivers/net/team/team.c
48293+++ b/drivers/net/team/team.c
48294@@ -2103,7 +2103,7 @@ static unsigned int team_get_num_rx_queues(void)
48295 return TEAM_DEFAULT_NUM_RX_QUEUES;
48296 }
48297
48298-static struct rtnl_link_ops team_link_ops __read_mostly = {
48299+static struct rtnl_link_ops team_link_ops = {
48300 .kind = DRV_NAME,
48301 .priv_size = sizeof(struct team),
48302 .setup = team_setup,
48303@@ -2893,7 +2893,7 @@ static int team_device_event(struct notifier_block *unused,
48304 return NOTIFY_DONE;
48305 }
48306
48307-static struct notifier_block team_notifier_block __read_mostly = {
48308+static struct notifier_block team_notifier_block = {
48309 .notifier_call = team_device_event,
48310 };
48311
48312diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48313index 10f9e40..3515e7e 100644
48314--- a/drivers/net/tun.c
48315+++ b/drivers/net/tun.c
48316@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48317 return -EINVAL;
48318 }
48319
48320-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48321+static struct rtnl_link_ops tun_link_ops = {
48322 .kind = DRV_NAME,
48323 .priv_size = sizeof(struct tun_struct),
48324 .setup = tun_setup,
48325@@ -1827,7 +1827,7 @@ unlock:
48326 }
48327
48328 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48329- unsigned long arg, int ifreq_len)
48330+ unsigned long arg, size_t ifreq_len)
48331 {
48332 struct tun_file *tfile = file->private_data;
48333 struct tun_struct *tun;
48334@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48335 int le;
48336 int ret;
48337
48338+ if (ifreq_len > sizeof ifr)
48339+ return -EFAULT;
48340+
48341 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48342 if (copy_from_user(&ifr, argp, ifreq_len))
48343 return -EFAULT;
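
Same hardening as the de4x5 ioctl fix earlier, on the copy-in side this time: ifreq_len arrives from the native and compat ioctl entry points (the compat path passes a smaller structure size), so the parameter becomes a size_t and is checked against sizeof ifr before it is ever used as a copy_from_user()/copy_to_user() length. Condensed:

	if (ifreq_len > sizeof(ifr))	/* caller-supplied size */
		return -EFAULT;
	if (copy_from_user(&ifr, argp, ifreq_len))
		return -EFAULT;
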
48344diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48345index 9c5aa92..8cd0405 100644
48346--- a/drivers/net/usb/hso.c
48347+++ b/drivers/net/usb/hso.c
48348@@ -71,7 +71,7 @@
48349 #include <asm/byteorder.h>
48350 #include <linux/serial_core.h>
48351 #include <linux/serial.h>
48352-
48353+#include <asm/local.h>
48354
48355 #define MOD_AUTHOR "Option Wireless"
48356 #define MOD_DESCRIPTION "USB High Speed Option driver"
48357@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48358 struct urb *urb;
48359
48360 urb = serial->rx_urb[0];
48361- if (serial->port.count > 0) {
48362+ if (atomic_read(&serial->port.count) > 0) {
48363 count = put_rxbuf_data(urb, serial);
48364 if (count == -1)
48365 return;
48366@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48367 DUMP1(urb->transfer_buffer, urb->actual_length);
48368
48369 /* Anyone listening? */
48370- if (serial->port.count == 0)
48371+ if (atomic_read(&serial->port.count) == 0)
48372 return;
48373
48374 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48375@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48376 tty_port_tty_set(&serial->port, tty);
48377
48378 /* check for port already opened, if not set the termios */
48379- serial->port.count++;
48380- if (serial->port.count == 1) {
48381+ if (atomic_inc_return(&serial->port.count) == 1) {
48382 serial->rx_state = RX_IDLE;
48383 /* Force default termio settings */
48384 _hso_serial_set_termios(tty, NULL);
48385@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48386 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48387 if (result) {
48388 hso_stop_serial_device(serial->parent);
48389- serial->port.count--;
48390+ atomic_dec(&serial->port.count);
48391 kref_put(&serial->parent->ref, hso_serial_ref_free);
48392 }
48393 } else {
48394@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48395
48396 /* reset the rts and dtr */
48397 /* do the actual close */
48398- serial->port.count--;
48399+ atomic_dec(&serial->port.count);
48400
48401- if (serial->port.count <= 0) {
48402- serial->port.count = 0;
48403+ if (atomic_read(&serial->port.count) <= 0) {
48404+ atomic_set(&serial->port.count, 0);
48405 tty_port_tty_set(&serial->port, NULL);
48406 if (!usb_gone)
48407 hso_stop_serial_device(serial->parent);
48408@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48409
48410 /* the actual setup */
48411 spin_lock_irqsave(&serial->serial_lock, flags);
48412- if (serial->port.count)
48413+ if (atomic_read(&serial->port.count))
48414 _hso_serial_set_termios(tty, old);
48415 else
48416 tty->termios = *old;
48417@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48418 D1("Pending read interrupt on port %d\n", i);
48419 spin_lock(&serial->serial_lock);
48420 if (serial->rx_state == RX_IDLE &&
48421- serial->port.count > 0) {
48422+ atomic_read(&serial->port.count) > 0) {
48423 /* Setup and send a ctrl req read on
48424 * port i */
48425 if (!serial->rx_urb_filled[0]) {
48426@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
48427 /* Start all serial ports */
48428 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48429 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48430- if (dev2ser(serial_table[i])->port.count) {
48431+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48432 result =
48433 hso_start_serial_device(serial_table[i], GFP_NOIO);
48434 hso_kick_transmit(dev2ser(serial_table[i]));
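
All of the hso reads and read-modify-writes of serial->port.count above become atomic operations, so concurrent open, close and URB-callback paths can no longer lose an update between the load and the store. A rough C11 analogue of the two patterns the hunks use:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int port_count;

/* atomic_inc_return(&port.count) == 1 means "first opener wins" -
 * the non-atomic ++/== pair could interleave with another open(). */
static bool open_is_first(void)
{
	return atomic_fetch_add(&port_count, 1) + 1 == 1;
}

static bool close_is_last(void)
{
	return atomic_fetch_sub(&port_count, 1) - 1 <= 0;
}
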
48435diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48436index bf405f1..fd847ee 100644
48437--- a/drivers/net/usb/r8152.c
48438+++ b/drivers/net/usb/r8152.c
48439@@ -571,7 +571,7 @@ struct r8152 {
48440 void (*unload)(struct r8152 *);
48441 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48442 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48443- } rtl_ops;
48444+ } __no_const rtl_ops;
48445
48446 int intr_interval;
48447 u32 saved_wolopts;
48448diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48449index a2515887..6d13233 100644
48450--- a/drivers/net/usb/sierra_net.c
48451+++ b/drivers/net/usb/sierra_net.c
48452@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48453 /* atomic counter partially included in MAC address to make sure 2 devices
48454 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48455 */
48456-static atomic_t iface_counter = ATOMIC_INIT(0);
48457+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48458
48459 /*
48460 * SYNC Timer Delay definition used to set the expiry time
48461@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48462 dev->net->netdev_ops = &sierra_net_device_ops;
48463
48464 /* change MAC addr to include, ifacenum, and to be unique */
48465- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48466+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48467 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48468
48469 /* we will have to manufacture ethernet headers, prepare template */
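
atomic_unchecked_t comes from PaX's REFCOUNT hardening: ordinary atomic_t increments trap on overflow to block reference-count wraparound exploits, and counters where wrapping is harmless - here one byte of a manufactured MAC address - are switched to the unchecked type to opt out. A portable sketch of what the checked increment guards against (PaX implements this in per-architecture asm; the builtin is only a stand-in):

#include <limits.h>
#include <stdio.h>

/* REFCOUNT-style increment: report wraparound instead of silently
 * wrapping to INT_MIN the way a plain ++ would. */
static int checked_inc(int *v)
{
	int next;
	if (__builtin_add_overflow(*v, 1, &next))
		return -1;              /* the kernel would trap here */
	*v = next;
	return 0;
}

int main(void)
{
	int refs = INT_MAX;
	if (checked_inc(&refs))
		puts("overflow caught before the refcount could wrap");
	return 0;
}
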
48470diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48471index 0ad6c0c..4013638 100644
48472--- a/drivers/net/virtio_net.c
48473+++ b/drivers/net/virtio_net.c
48474@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48475 #define RECEIVE_AVG_WEIGHT 64
48476
48477 /* Minimum alignment for mergeable packet buffers. */
48478-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48479+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48480
48481 #define VIRTNET_DRIVER_VERSION "1.0.0"
48482
48483diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48484index a8c755d..a988b71 100644
48485--- a/drivers/net/vxlan.c
48486+++ b/drivers/net/vxlan.c
48487@@ -2702,7 +2702,7 @@ nla_put_failure:
48488 return -EMSGSIZE;
48489 }
48490
48491-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48492+static struct rtnl_link_ops vxlan_link_ops = {
48493 .kind = "vxlan",
48494 .maxtype = IFLA_VXLAN_MAX,
48495 .policy = vxlan_policy,
48496@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48497 return NOTIFY_DONE;
48498 }
48499
48500-static struct notifier_block vxlan_notifier_block __read_mostly = {
48501+static struct notifier_block vxlan_notifier_block = {
48502 .notifier_call = vxlan_lowerdev_event,
48503 };
48504
48505diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48506index 5920c99..ff2e4a5 100644
48507--- a/drivers/net/wan/lmc/lmc_media.c
48508+++ b/drivers/net/wan/lmc/lmc_media.c
48509@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48510 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48511
48512 lmc_media_t lmc_ds3_media = {
48513- lmc_ds3_init, /* special media init stuff */
48514- lmc_ds3_default, /* reset to default state */
48515- lmc_ds3_set_status, /* reset status to state provided */
48516- lmc_dummy_set_1, /* set clock source */
48517- lmc_dummy_set2_1, /* set line speed */
48518- lmc_ds3_set_100ft, /* set cable length */
48519- lmc_ds3_set_scram, /* set scrambler */
48520- lmc_ds3_get_link_status, /* get link status */
48521- lmc_dummy_set_1, /* set link status */
48522- lmc_ds3_set_crc_length, /* set CRC length */
48523- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48524- lmc_ds3_watchdog
48525+ .init = lmc_ds3_init, /* special media init stuff */
48526+ .defaults = lmc_ds3_default, /* reset to default state */
48527+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48528+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48529+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48530+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48531+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48532+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48533+ .set_link_status = lmc_dummy_set_1, /* set link status */
48534+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48535+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48536+ .watchdog = lmc_ds3_watchdog
48537 };
48538
48539 lmc_media_t lmc_hssi_media = {
48540- lmc_hssi_init, /* special media init stuff */
48541- lmc_hssi_default, /* reset to default state */
48542- lmc_hssi_set_status, /* reset status to state provided */
48543- lmc_hssi_set_clock, /* set clock source */
48544- lmc_dummy_set2_1, /* set line speed */
48545- lmc_dummy_set_1, /* set cable length */
48546- lmc_dummy_set_1, /* set scrambler */
48547- lmc_hssi_get_link_status, /* get link status */
48548- lmc_hssi_set_link_status, /* set link status */
48549- lmc_hssi_set_crc_length, /* set CRC length */
48550- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48551- lmc_hssi_watchdog
48552+ .init = lmc_hssi_init, /* special media init stuff */
48553+ .defaults = lmc_hssi_default, /* reset to default state */
48554+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48555+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48556+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48557+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48558+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48559+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48560+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48561+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48562+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48563+ .watchdog = lmc_hssi_watchdog
48564 };
48565
48566-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48567- lmc_ssi_default, /* reset to default state */
48568- lmc_ssi_set_status, /* reset status to state provided */
48569- lmc_ssi_set_clock, /* set clock source */
48570- lmc_ssi_set_speed, /* set line speed */
48571- lmc_dummy_set_1, /* set cable length */
48572- lmc_dummy_set_1, /* set scrambler */
48573- lmc_ssi_get_link_status, /* get link status */
48574- lmc_ssi_set_link_status, /* set link status */
48575- lmc_ssi_set_crc_length, /* set CRC length */
48576- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48577- lmc_ssi_watchdog
48578+lmc_media_t lmc_ssi_media = {
48579+ .init = lmc_ssi_init, /* special media init stuff */
48580+ .defaults = lmc_ssi_default, /* reset to default state */
48581+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48582+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48583+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48584+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48585+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48586+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48587+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48588+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48589+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48590+ .watchdog = lmc_ssi_watchdog
48591 };
48592
48593 lmc_media_t lmc_t1_media = {
48594- lmc_t1_init, /* special media init stuff */
48595- lmc_t1_default, /* reset to default state */
48596- lmc_t1_set_status, /* reset status to state provided */
48597- lmc_t1_set_clock, /* set clock source */
48598- lmc_dummy_set2_1, /* set line speed */
48599- lmc_dummy_set_1, /* set cable length */
48600- lmc_dummy_set_1, /* set scrambler */
48601- lmc_t1_get_link_status, /* get link status */
48602- lmc_dummy_set_1, /* set link status */
48603- lmc_t1_set_crc_length, /* set CRC length */
48604- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48605- lmc_t1_watchdog
48606+ .init = lmc_t1_init, /* special media init stuff */
48607+ .defaults = lmc_t1_default, /* reset to default state */
48608+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48609+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48610+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48611+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48612+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48613+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48614+ .set_link_status = lmc_dummy_set_1, /* set link status */
48615+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48616+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48617+ .watchdog = lmc_t1_watchdog
48618 };
48619
48620 static void
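
The four lmc_media_t tables above (and the z8530_irqhandler tables in the next file) move from positional to designated initializers. The designated form binds each function to a named slot, so it survives field reordering in the struct definition and leaves no doubt about which callback fills which hook - which is what lets such ops tables be declared fully initialized and ultimately constified. In miniature, with a made-up ops struct:

struct media_ops {
	void (*init)(void);
	int  (*get_link_status)(void);
	void (*watchdog)(void);
};

static void ds3_init(void)     { }
static int  ds3_link(void)     { return 1; }
static void ds3_watchdog(void) { }

/* Positional { ds3_init, ds3_link, ds3_watchdog } breaks silently if
 * the struct's fields are ever reordered; this form cannot. */
static const struct media_ops ds3_media = {
	.init            = ds3_init,
	.get_link_status = ds3_link,
	.watchdog        = ds3_watchdog,
};
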
48621diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48622index feacc3b..5bac0de 100644
48623--- a/drivers/net/wan/z85230.c
48624+++ b/drivers/net/wan/z85230.c
48625@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48626
48627 struct z8530_irqhandler z8530_sync =
48628 {
48629- z8530_rx,
48630- z8530_tx,
48631- z8530_status
48632+ .rx = z8530_rx,
48633+ .tx = z8530_tx,
48634+ .status = z8530_status
48635 };
48636
48637 EXPORT_SYMBOL(z8530_sync);
48638@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48639 }
48640
48641 static struct z8530_irqhandler z8530_dma_sync = {
48642- z8530_dma_rx,
48643- z8530_dma_tx,
48644- z8530_dma_status
48645+ .rx = z8530_dma_rx,
48646+ .tx = z8530_dma_tx,
48647+ .status = z8530_dma_status
48648 };
48649
48650 static struct z8530_irqhandler z8530_txdma_sync = {
48651- z8530_rx,
48652- z8530_dma_tx,
48653- z8530_dma_status
48654+ .rx = z8530_rx,
48655+ .tx = z8530_dma_tx,
48656+ .status = z8530_dma_status
48657 };
48658
48659 /**
48660@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48661
48662 struct z8530_irqhandler z8530_nop=
48663 {
48664- z8530_rx_clear,
48665- z8530_tx_clear,
48666- z8530_status_clear
48667+ .rx = z8530_rx_clear,
48668+ .tx = z8530_tx_clear,
48669+ .status = z8530_status_clear
48670 };
48671
48672
48673diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48674index 0b60295..b8bfa5b 100644
48675--- a/drivers/net/wimax/i2400m/rx.c
48676+++ b/drivers/net/wimax/i2400m/rx.c
48677@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48678 if (i2400m->rx_roq == NULL)
48679 goto error_roq_alloc;
48680
48681- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48682+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48683 GFP_KERNEL);
48684 if (rd == NULL) {
48685 result = -ENOMEM;
48686diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48687index e71a2ce..2268d61 100644
48688--- a/drivers/net/wireless/airo.c
48689+++ b/drivers/net/wireless/airo.c
48690@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48691 struct airo_info *ai = dev->ml_priv;
48692 int ridcode;
48693 int enabled;
48694- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48695+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48696 unsigned char *iobuf;
48697
48698 /* Only super-user can write RIDs */
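
Dropping `static` from the local `writer` pointer above is a concurrency fix, not a style one: a function-local static is a single slot shared by every caller, so two racing ioctls could each observe the other's choice of write function. As an automatic variable, each call gets its own copy. Schematically (all names hypothetical):

static int do_read(int cmd)  { return cmd; }
static int do_write(int cmd) { return -cmd; }

static int (*pick_handler(int cmd))(int)
{
	return cmd ? do_write : do_read;
}

/* Racy: one shared slot - another thread may reassign handler
 * between the store and the call. */
int dispatch_racy(int cmd)
{
	static int (*handler)(int);
	handler = pick_handler(cmd);
	return handler(cmd);
}

/* Safe: per-call storage on the stack. */
int dispatch_safe(int cmd)
{
	int (*handler)(int) = pick_handler(cmd);
	return handler(cmd);
}
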
48699diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48700index da92bfa..5a9001a 100644
48701--- a/drivers/net/wireless/at76c50x-usb.c
48702+++ b/drivers/net/wireless/at76c50x-usb.c
48703@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48704 }
48705
48706 /* Convert timeout from the DFU status to jiffies */
48707-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48708+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48709 {
48710 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48711 | (s->poll_timeout[1] << 8)
48712diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48713index f1946a6..cd367fb 100644
48714--- a/drivers/net/wireless/ath/ath10k/htc.c
48715+++ b/drivers/net/wireless/ath/ath10k/htc.c
48716@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48717 /* registered target arrival callback from the HIF layer */
48718 int ath10k_htc_init(struct ath10k *ar)
48719 {
48720- struct ath10k_hif_cb htc_callbacks;
48721+ static struct ath10k_hif_cb htc_callbacks = {
48722+ .rx_completion = ath10k_htc_rx_completion_handler,
48723+ .tx_completion = ath10k_htc_tx_completion_handler,
48724+ };
48725 struct ath10k_htc_ep *ep = NULL;
48726 struct ath10k_htc *htc = &ar->htc;
48727
48728@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
48729 ath10k_htc_reset_endpoint_states(htc);
48730
48731 /* setup HIF layer callbacks */
48732- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48733- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48734 htc->ar = ar;
48735
48736 /* Get HIF default pipe for HTC message exchange */
48737diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48738index 527179c..a890150 100644
48739--- a/drivers/net/wireless/ath/ath10k/htc.h
48740+++ b/drivers/net/wireless/ath/ath10k/htc.h
48741@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48742
48743 struct ath10k_htc_ops {
48744 void (*target_send_suspend_complete)(struct ath10k *ar);
48745-};
48746+} __no_const;
48747
48748 struct ath10k_htc_ep_ops {
48749 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48750 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48751 void (*ep_tx_credits)(struct ath10k *);
48752-};
48753+} __no_const;
48754
48755 /* service connection information */
48756 struct ath10k_htc_svc_conn_req {
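
__no_const belongs to the constify gcc plugin, which makes structures composed purely of function pointers implicitly const. Ops types whose members are legitimately assigned at runtime - as ath10k fills its endpoint callbacks per connection - are tagged __no_const to stay writable. Roughly how the annotation is wired up (spelling follows the patch's compiler.h changes; treat it as illustrative):

/* With the plugin active the attribute exempts a type from
 * automatic constification; without it, it expands to nothing. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct ep_ops {
	void (*tx_complete)(void *ar, void *skb);
	void (*rx_complete)(void *ar, void *skb);
} __no_const;                   /* callbacks filled in at runtime */
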
48757diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48758index f816909..e56cd8b 100644
48759--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48760+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48761@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48762 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48763 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48764
48765- ACCESS_ONCE(ads->ds_link) = i->link;
48766- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48767+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48768+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48769
48770 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48771 ctl6 = SM(i->keytype, AR_EncrType);
48772@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48773
48774 if ((i->is_first || i->is_last) &&
48775 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48776- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48777+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48778 | set11nTries(i->rates, 1)
48779 | set11nTries(i->rates, 2)
48780 | set11nTries(i->rates, 3)
48781 | (i->dur_update ? AR_DurUpdateEna : 0)
48782 | SM(0, AR_BurstDur);
48783
48784- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48785+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48786 | set11nRate(i->rates, 1)
48787 | set11nRate(i->rates, 2)
48788 | set11nRate(i->rates, 3);
48789 } else {
48790- ACCESS_ONCE(ads->ds_ctl2) = 0;
48791- ACCESS_ONCE(ads->ds_ctl3) = 0;
48792+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48793+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48794 }
48795
48796 if (!i->is_first) {
48797- ACCESS_ONCE(ads->ds_ctl0) = 0;
48798- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48799- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48800+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48801+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48802+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48803 return;
48804 }
48805
48806@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48807 break;
48808 }
48809
48810- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48811+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48812 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48813 | SM(i->txpower[0], AR_XmitPower0)
48814 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48815@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48816 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48817 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48818
48819- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48820- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48821+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48822+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48823
48824 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48825 return;
48826
48827- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48828+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48829 | set11nPktDurRTSCTS(i->rates, 1);
48830
48831- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48832+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48833 | set11nPktDurRTSCTS(i->rates, 3);
48834
48835- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48836+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48837 | set11nRateFlags(i->rates, 1)
48838 | set11nRateFlags(i->rates, 2)
48839 | set11nRateFlags(i->rates, 3)
48840 | SM(i->rtscts_rate, AR_RTSCTSRate);
48841
48842- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48843- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48844- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48845+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48846+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48847+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48848 }
48849
48850 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
48851diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48852index da84b70..83e4978 100644
48853--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48854+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48855@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48856 (i->qcu << AR_TxQcuNum_S) | desc_len;
48857
48858 checksum += val;
48859- ACCESS_ONCE(ads->info) = val;
48860+ ACCESS_ONCE_RW(ads->info) = val;
48861
48862 checksum += i->link;
48863- ACCESS_ONCE(ads->link) = i->link;
48864+ ACCESS_ONCE_RW(ads->link) = i->link;
48865
48866 checksum += i->buf_addr[0];
48867- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
48868+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
48869 checksum += i->buf_addr[1];
48870- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
48871+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
48872 checksum += i->buf_addr[2];
48873- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
48874+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
48875 checksum += i->buf_addr[3];
48876- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
48877+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
48878
48879 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
48880- ACCESS_ONCE(ads->ctl3) = val;
48881+ ACCESS_ONCE_RW(ads->ctl3) = val;
48882 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
48883- ACCESS_ONCE(ads->ctl5) = val;
48884+ ACCESS_ONCE_RW(ads->ctl5) = val;
48885 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
48886- ACCESS_ONCE(ads->ctl7) = val;
48887+ ACCESS_ONCE_RW(ads->ctl7) = val;
48888 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
48889- ACCESS_ONCE(ads->ctl9) = val;
48890+ ACCESS_ONCE_RW(ads->ctl9) = val;
48891
48892 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
48893- ACCESS_ONCE(ads->ctl10) = checksum;
48894+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
48895
48896 if (i->is_first || i->is_last) {
48897- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
48898+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
48899 | set11nTries(i->rates, 1)
48900 | set11nTries(i->rates, 2)
48901 | set11nTries(i->rates, 3)
48902 | (i->dur_update ? AR_DurUpdateEna : 0)
48903 | SM(0, AR_BurstDur);
48904
48905- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
48906+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
48907 | set11nRate(i->rates, 1)
48908 | set11nRate(i->rates, 2)
48909 | set11nRate(i->rates, 3);
48910 } else {
48911- ACCESS_ONCE(ads->ctl13) = 0;
48912- ACCESS_ONCE(ads->ctl14) = 0;
48913+ ACCESS_ONCE_RW(ads->ctl13) = 0;
48914+ ACCESS_ONCE_RW(ads->ctl14) = 0;
48915 }
48916
48917 ads->ctl20 = 0;
48918@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48919
48920 ctl17 = SM(i->keytype, AR_EncrType);
48921 if (!i->is_first) {
48922- ACCESS_ONCE(ads->ctl11) = 0;
48923- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48924- ACCESS_ONCE(ads->ctl15) = 0;
48925- ACCESS_ONCE(ads->ctl16) = 0;
48926- ACCESS_ONCE(ads->ctl17) = ctl17;
48927- ACCESS_ONCE(ads->ctl18) = 0;
48928- ACCESS_ONCE(ads->ctl19) = 0;
48929+ ACCESS_ONCE_RW(ads->ctl11) = 0;
48930+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48931+ ACCESS_ONCE_RW(ads->ctl15) = 0;
48932+ ACCESS_ONCE_RW(ads->ctl16) = 0;
48933+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48934+ ACCESS_ONCE_RW(ads->ctl18) = 0;
48935+ ACCESS_ONCE_RW(ads->ctl19) = 0;
48936 return;
48937 }
48938
48939- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48940+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48941 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48942 | SM(i->txpower[0], AR_XmitPower0)
48943 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48944@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48945 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
48946 ctl12 |= SM(val, AR_PAPRDChainMask);
48947
48948- ACCESS_ONCE(ads->ctl12) = ctl12;
48949- ACCESS_ONCE(ads->ctl17) = ctl17;
48950+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
48951+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48952
48953- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48954+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48955 | set11nPktDurRTSCTS(i->rates, 1);
48956
48957- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48958+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48959 | set11nPktDurRTSCTS(i->rates, 3);
48960
48961- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
48962+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
48963 | set11nRateFlags(i->rates, 1)
48964 | set11nRateFlags(i->rates, 2)
48965 | set11nRateFlags(i->rates, 3)
48966 | SM(i->rtscts_rate, AR_RTSCTSRate);
48967
48968- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
48969+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
48970
48971- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48972- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48973- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48974+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48975+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48976+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48977 }
48978
48979 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
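
ACCESS_ONCE_RW is the writable counterpart grsecurity adds once it re-qualifies ACCESS_ONCE() as const, so a stray store through the plain macro becomes a compile error while deliberate descriptor writes, like the ath9k ones above, remain possible. The two macros look roughly like this (paraphrased from the patch's compiler.h changes):

/* Read-only by default: assigning through ACCESS_ONCE() now fails
 * to compile, catching accidental writes to shared state. */
#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))

/* Explicit opt-in for code that really must write, as above. */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
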
48980diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
48981index 1cbd335..27dfb40 100644
48982--- a/drivers/net/wireless/ath/ath9k/hw.h
48983+++ b/drivers/net/wireless/ath/ath9k/hw.h
48984@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
48985
48986 /* ANI */
48987 void (*ani_cache_ini_regs)(struct ath_hw *ah);
48988-};
48989+} __no_const;
48990
48991 /**
48992 * struct ath_spec_scan - parameters for Atheros spectral scan
48993@@ -716,7 +716,7 @@ struct ath_hw_ops {
48994 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
48995 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
48996 #endif
48997-};
48998+} __no_const;
48999
49000 struct ath_nf_limits {
49001 s16 max;
49002diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49003index 62b0bf4..4ae094c 100644
49004--- a/drivers/net/wireless/ath/ath9k/main.c
49005+++ b/drivers/net/wireless/ath/ath9k/main.c
49006@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
49007 if (!ath9k_is_chanctx_enabled())
49008 return;
49009
49010- ath9k_ops.hw_scan = ath9k_hw_scan;
49011- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49012- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49013- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49014- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49015- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49016- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49017- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49018- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49019- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49020+ pax_open_kernel();
49021+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49022+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49023+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49024+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49025+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49026+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49027+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49028+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49029+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49030+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49031+ pax_close_kernel();
49032 }
49033
49034 #endif
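
pax_open_kernel()/pax_close_kernel() bracket stores into memory that KERNEXEC/constify keep read-only, which is why the ath9k_ops assignments above become explicit pointer writes inside such a window. The closest userspace analogue is toggling a page's protection with mprotect(); a sketch of that analogy (not the kernel mechanism itself, which on x86 toggles the CR0 write-protect bit):

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Patch one slot in an otherwise read-only ops page: make the page
 * writable, store, restore - the shape of open/close_kernel(). */
static int patch_ro_slot(void **slot, void *new_fn)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *page = (void *)((uintptr_t)slot & ~((uintptr_t)psz - 1));

	if (mprotect(page, psz, PROT_READ | PROT_WRITE))
		return -1;                      /* pax_open_kernel() */
	*slot = new_fn;                         /* *(void **)&ops.x = y; */
	return mprotect(page, psz, PROT_READ);  /* pax_close_kernel() */
}
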
49035diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49036index 058a9f2..d5cb1ba 100644
49037--- a/drivers/net/wireless/b43/phy_lp.c
49038+++ b/drivers/net/wireless/b43/phy_lp.c
49039@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49040 {
49041 struct ssb_bus *bus = dev->dev->sdev->bus;
49042
49043- static const struct b206x_channel *chandata = NULL;
49044+ const struct b206x_channel *chandata = NULL;
49045 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49046 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49047 u16 old_comm15, scale;
49048diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49049index dc1d20c..f7a4f06 100644
49050--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49051+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49052@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49053 */
49054 if (il3945_mod_params.disable_hw_scan) {
49055 D_INFO("Disabling hw_scan\n");
49056- il3945_mac_ops.hw_scan = NULL;
49057+ pax_open_kernel();
49058+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49059+ pax_close_kernel();
49060 }
49061
49062 D_INFO("*** LOAD DRIVER ***\n");
49063diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49064index 0ffb6ff..c0b7f0e 100644
49065--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49066+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49067@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49068 {
49069 struct iwl_priv *priv = file->private_data;
49070 char buf[64];
49071- int buf_size;
49072+ size_t buf_size;
49073 u32 offset, len;
49074
49075 memset(buf, 0, sizeof(buf));
49076@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49077 struct iwl_priv *priv = file->private_data;
49078
49079 char buf[8];
49080- int buf_size;
49081+ size_t buf_size;
49082 u32 reset_flag;
49083
49084 memset(buf, 0, sizeof(buf));
49085@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49086 {
49087 struct iwl_priv *priv = file->private_data;
49088 char buf[8];
49089- int buf_size;
49090+ size_t buf_size;
49091 int ht40;
49092
49093 memset(buf, 0, sizeof(buf));
49094@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49095 {
49096 struct iwl_priv *priv = file->private_data;
49097 char buf[8];
49098- int buf_size;
49099+ size_t buf_size;
49100 int value;
49101
49102 memset(buf, 0, sizeof(buf));
49103@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49104 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49105 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49106
49107-static const char *fmt_value = " %-30s %10u\n";
49108-static const char *fmt_hex = " %-30s 0x%02X\n";
49109-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49110-static const char *fmt_header =
49111+static const char fmt_value[] = " %-30s %10u\n";
49112+static const char fmt_hex[] = " %-30s 0x%02X\n";
49113+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49114+static const char fmt_header[] =
49115 "%-32s current cumulative delta max\n";
49116
49117 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49118@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49119 {
49120 struct iwl_priv *priv = file->private_data;
49121 char buf[8];
49122- int buf_size;
49123+ size_t buf_size;
49124 int clear;
49125
49126 memset(buf, 0, sizeof(buf));
49127@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49128 {
49129 struct iwl_priv *priv = file->private_data;
49130 char buf[8];
49131- int buf_size;
49132+ size_t buf_size;
49133 int trace;
49134
49135 memset(buf, 0, sizeof(buf));
49136@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49137 {
49138 struct iwl_priv *priv = file->private_data;
49139 char buf[8];
49140- int buf_size;
49141+ size_t buf_size;
49142 int missed;
49143
49144 memset(buf, 0, sizeof(buf));
49145@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49146
49147 struct iwl_priv *priv = file->private_data;
49148 char buf[8];
49149- int buf_size;
49150+ size_t buf_size;
49151 int plcp;
49152
49153 memset(buf, 0, sizeof(buf));
49154@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49155
49156 struct iwl_priv *priv = file->private_data;
49157 char buf[8];
49158- int buf_size;
49159+ size_t buf_size;
49160 int flush;
49161
49162 memset(buf, 0, sizeof(buf));
49163@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49164
49165 struct iwl_priv *priv = file->private_data;
49166 char buf[8];
49167- int buf_size;
49168+ size_t buf_size;
49169 int rts;
49170
49171 if (!priv->cfg->ht_params)
49172@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49173 {
49174 struct iwl_priv *priv = file->private_data;
49175 char buf[8];
49176- int buf_size;
49177+ size_t buf_size;
49178
49179 memset(buf, 0, sizeof(buf));
49180 buf_size = min(count, sizeof(buf) - 1);
49181@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49182 struct iwl_priv *priv = file->private_data;
49183 u32 event_log_flag;
49184 char buf[8];
49185- int buf_size;
49186+ size_t buf_size;
49187
49188 /* check that the interface is up */
49189 if (!iwl_is_ready(priv))
49190@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49191 struct iwl_priv *priv = file->private_data;
49192 char buf[8];
49193 u32 calib_disabled;
49194- int buf_size;
49195+ size_t buf_size;
49196
49197 memset(buf, 0, sizeof(buf));
49198 buf_size = min(count, sizeof(buf) - 1);
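
Every one of these debugfs write handlers (and the pcie/trans.c ones right after) computes buf_size = min(count, sizeof(buf) - 1) from a size_t count, so storing the result in an int invites signed/unsigned mixing and, on 64-bit, truncation. The recurring pitfall, reduced to a few lines (illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	char buf[8] = "";
	int n = -1;     /* an int that went negative, e.g. by wrap */

	/* In "n < sizeof(buf)" the int converts to size_t, so -1
	 * becomes SIZE_MAX and the bounds check silently passes
	 * over to the wrong branch. */
	if (n < sizeof(buf))
		puts("never taken: -1 does not compare as small");
	else
		printf("-1 compares as %zu\n", (size_t)n);
	return 0;
}
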
49199diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49200index 523fe0c..0d9473b 100644
49201--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49202+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49203@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49204 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49205
49206 char buf[8];
49207- int buf_size;
49208+ size_t buf_size;
49209 u32 reset_flag;
49210
49211 memset(buf, 0, sizeof(buf));
49212@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49213 {
49214 struct iwl_trans *trans = file->private_data;
49215 char buf[8];
49216- int buf_size;
49217+ size_t buf_size;
49218 int csr;
49219
49220 memset(buf, 0, sizeof(buf));
49221diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49222index ef58a88..fafa731 100644
49223--- a/drivers/net/wireless/mac80211_hwsim.c
49224+++ b/drivers/net/wireless/mac80211_hwsim.c
49225@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
49226 if (channels < 1)
49227 return -EINVAL;
49228
49229- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49230- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49231- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49232- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49233- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49234- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49235- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49236- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49237- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49238- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49239- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49240- mac80211_hwsim_assign_vif_chanctx;
49241- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49242- mac80211_hwsim_unassign_vif_chanctx;
49243+ pax_open_kernel();
49244+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49245+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49246+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49247+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49248+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49249+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49250+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49251+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49252+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49253+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49254+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49255+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49256+ pax_close_kernel();
49257
49258 spin_lock_init(&hwsim_radio_lock);
49259 INIT_LIST_HEAD(&hwsim_radios);
49260diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49261index 1a4facd..a2ecbbd 100644
49262--- a/drivers/net/wireless/rndis_wlan.c
49263+++ b/drivers/net/wireless/rndis_wlan.c
49264@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49265
49266 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49267
49268- if (rts_threshold < 0 || rts_threshold > 2347)
49269+ if (rts_threshold > 2347)
49270 rts_threshold = 2347;
49271
49272 tmp = cpu_to_le32(rts_threshold);
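
rts_threshold is a u32, so the dropped `rts_threshold < 0` test could never fire; an unsigned value compared below zero is always false, which gcc flags with -Wtype-limits. The clamp reduces to its live half:

static unsigned int clamp_rts(unsigned int rts_threshold)
{
	/* gcc -Wtype-limits on the old form: "comparison of
	 * unsigned expression in '< 0' is always false". */
	if (rts_threshold > 2347)
		rts_threshold = 2347;
	return rts_threshold;
}
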
49273diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49274index 9bb398b..b0cc047 100644
49275--- a/drivers/net/wireless/rt2x00/rt2x00.h
49276+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49277@@ -375,7 +375,7 @@ struct rt2x00_intf {
49278 * for hardware which doesn't support hardware
49279 * sequence counting.
49280 */
49281- atomic_t seqno;
49282+ atomic_unchecked_t seqno;
49283 };
49284
49285 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49286diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49287index 66ff364..3ce34f7 100644
49288--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49289+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49290@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49291 * sequence counter given by mac80211.
49292 */
49293 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49294- seqno = atomic_add_return(0x10, &intf->seqno);
49295+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49296 else
49297- seqno = atomic_read(&intf->seqno);
49298+ seqno = atomic_read_unchecked(&intf->seqno);
49299
49300 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49301 hdr->seq_ctrl |= cpu_to_le16(seqno);
49302diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49303index b661f896..ddf7d2b 100644
49304--- a/drivers/net/wireless/ti/wl1251/sdio.c
49305+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49306@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49307
49308 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49309
49310- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49311- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49312+ pax_open_kernel();
49313+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49314+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49315+ pax_close_kernel();
49316
49317 wl1251_info("using dedicated interrupt line");
49318 } else {
49319- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49320- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49321+ pax_open_kernel();
49322+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49323+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49324+ pax_close_kernel();
49325
49326 wl1251_info("using SDIO interrupt");
49327 }
49328diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49329index d6d0d6d..60c23a0 100644
49330--- a/drivers/net/wireless/ti/wl12xx/main.c
49331+++ b/drivers/net/wireless/ti/wl12xx/main.c
49332@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49333 sizeof(wl->conf.mem));
49334
49335 /* read data preparation is only needed by wl127x */
49336- wl->ops->prepare_read = wl127x_prepare_read;
49337+ pax_open_kernel();
49338+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49339+ pax_close_kernel();
49340
49341 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49342 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49343@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49344 sizeof(wl->conf.mem));
49345
49346 /* read data preparation is only needed by wl127x */
49347- wl->ops->prepare_read = wl127x_prepare_read;
49348+ pax_open_kernel();
49349+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49350+ pax_close_kernel();
49351
49352 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49353 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49354diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49355index 8e56261..9140678 100644
49356--- a/drivers/net/wireless/ti/wl18xx/main.c
49357+++ b/drivers/net/wireless/ti/wl18xx/main.c
49358@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49359 }
49360
49361 if (!checksum_param) {
49362- wl18xx_ops.set_rx_csum = NULL;
49363- wl18xx_ops.init_vif = NULL;
49364+ pax_open_kernel();
49365+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49366+ *(void **)&wl18xx_ops.init_vif = NULL;
49367+ pax_close_kernel();
49368 }
49369
49370 /* Enable 11a Band only if we have 5G antennas */
49371diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49372index a912dc0..a8225ba 100644
49373--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49374+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49375@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49376 {
49377 struct zd_usb *usb = urb->context;
49378 struct zd_usb_interrupt *intr = &usb->intr;
49379- int len;
49380+ unsigned int len;
49381 u16 int_num;
49382
49383 ZD_ASSERT(in_interrupt());
49384diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49385index ce2e2cf..f81e500 100644
49386--- a/drivers/nfc/nfcwilink.c
49387+++ b/drivers/nfc/nfcwilink.c
49388@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49389
49390 static int nfcwilink_probe(struct platform_device *pdev)
49391 {
49392- static struct nfcwilink *drv;
49393+ struct nfcwilink *drv;
49394 int rc;
49395 __u32 protocols;
49396
49397diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49398index f2596c8..50d53af 100644
49399--- a/drivers/nfc/st21nfca/st21nfca.c
49400+++ b/drivers/nfc/st21nfca/st21nfca.c
49401@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49402 goto exit;
49403 }
49404
49405- gate = uid_skb->data;
49406+ memcpy(gate, uid_skb->data, uid_skb->len);
49407 *len = uid_skb->len;
49408 exit:
49409 kfree_skb(uid_skb);
49410diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49411index 5100742..6ad4e6d 100644
49412--- a/drivers/of/fdt.c
49413+++ b/drivers/of/fdt.c
49414@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49415 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49416 return 0;
49417 }
49418- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49419+ pax_open_kernel();
49420+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49421+ pax_close_kernel();
49422 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49423 }
49424 late_initcall(of_fdt_raw_init);
49425diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49426index d93b2b6..ae50401 100644
49427--- a/drivers/oprofile/buffer_sync.c
49428+++ b/drivers/oprofile/buffer_sync.c
49429@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49430 if (cookie == NO_COOKIE)
49431 offset = pc;
49432 if (cookie == INVALID_COOKIE) {
49433- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49434+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49435 offset = pc;
49436 }
49437 if (cookie != last_cookie) {
49438@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49439 /* add userspace sample */
49440
49441 if (!mm) {
49442- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49443+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49444 return 0;
49445 }
49446
49447 cookie = lookup_dcookie(mm, s->eip, &offset);
49448
49449 if (cookie == INVALID_COOKIE) {
49450- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49451+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49452 return 0;
49453 }
49454
49455@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49456 /* ignore backtraces if failed to add a sample */
49457 if (state == sb_bt_start) {
49458 state = sb_bt_ignore;
49459- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49460+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49461 }
49462 }
49463 release_mm(mm);
49464diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49465index c0cc4e7..44d4e54 100644
49466--- a/drivers/oprofile/event_buffer.c
49467+++ b/drivers/oprofile/event_buffer.c
49468@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49469 }
49470
49471 if (buffer_pos == buffer_size) {
49472- atomic_inc(&oprofile_stats.event_lost_overflow);
49473+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49474 return;
49475 }
49476
49477diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49478index ed2c3ec..deda85a 100644
49479--- a/drivers/oprofile/oprof.c
49480+++ b/drivers/oprofile/oprof.c
49481@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49482 if (oprofile_ops.switch_events())
49483 return;
49484
49485- atomic_inc(&oprofile_stats.multiplex_counter);
49486+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49487 start_switch_worker();
49488 }
49489
49490diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49491index ee2cfce..7f8f699 100644
49492--- a/drivers/oprofile/oprofile_files.c
49493+++ b/drivers/oprofile/oprofile_files.c
49494@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49495
49496 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49497
49498-static ssize_t timeout_read(struct file *file, char __user *buf,
49499+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49500 size_t count, loff_t *offset)
49501 {
49502 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49503diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49504index 59659ce..6c860a0 100644
49505--- a/drivers/oprofile/oprofile_stats.c
49506+++ b/drivers/oprofile/oprofile_stats.c
49507@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49508 cpu_buf->sample_invalid_eip = 0;
49509 }
49510
49511- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49512- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49513- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49514- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49515- atomic_set(&oprofile_stats.multiplex_counter, 0);
49516+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49517+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49518+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49519+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49520+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49521 }
49522
49523
49524diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49525index 1fc622b..8c48fc3 100644
49526--- a/drivers/oprofile/oprofile_stats.h
49527+++ b/drivers/oprofile/oprofile_stats.h
49528@@ -13,11 +13,11 @@
49529 #include <linux/atomic.h>
49530
49531 struct oprofile_stat_struct {
49532- atomic_t sample_lost_no_mm;
49533- atomic_t sample_lost_no_mapping;
49534- atomic_t bt_lost_no_mapping;
49535- atomic_t event_lost_overflow;
49536- atomic_t multiplex_counter;
49537+ atomic_unchecked_t sample_lost_no_mm;
49538+ atomic_unchecked_t sample_lost_no_mapping;
49539+ atomic_unchecked_t bt_lost_no_mapping;
49540+ atomic_unchecked_t event_lost_overflow;
49541+ atomic_unchecked_t multiplex_counter;
49542 };
49543
49544 extern struct oprofile_stat_struct oprofile_stats;
49545diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49546index 3f49345..c750d0b 100644
49547--- a/drivers/oprofile/oprofilefs.c
49548+++ b/drivers/oprofile/oprofilefs.c
49549@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49550
49551 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49552 {
49553- atomic_t *val = file->private_data;
49554- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49555+ atomic_unchecked_t *val = file->private_data;
49556+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49557 }
49558
49559
49560@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49561
49562
49563 int oprofilefs_create_ro_atomic(struct dentry *root,
49564- char const *name, atomic_t *val)
49565+ char const *name, atomic_unchecked_t *val)
49566 {
49567 return __oprofilefs_create_file(root, name,
49568 &atomic_ro_fops, 0444, val);
49569diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49570index bdef916..88c7dee 100644
49571--- a/drivers/oprofile/timer_int.c
49572+++ b/drivers/oprofile/timer_int.c
49573@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49574 return NOTIFY_OK;
49575 }
49576
49577-static struct notifier_block __refdata oprofile_cpu_notifier = {
49578+static struct notifier_block oprofile_cpu_notifier = {
49579 .notifier_call = oprofile_cpu_notify,
49580 };
49581
49582diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49583index 3b47080..6cd05dd 100644
49584--- a/drivers/parport/procfs.c
49585+++ b/drivers/parport/procfs.c
49586@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49587
49588 *ppos += len;
49589
49590- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49591+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49592 }
49593
49594 #ifdef CONFIG_PARPORT_1284
49595@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49596
49597 *ppos += len;
49598
49599- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49600+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49601 }
49602 #endif /* IEEE1284.3 support. */
49603
49604diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49605index 6ca2399..68d866b 100644
49606--- a/drivers/pci/hotplug/acpiphp_ibm.c
49607+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49608@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49609 goto init_cleanup;
49610 }
49611
49612- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49613+ pax_open_kernel();
49614+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49615+ pax_close_kernel();
49616 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49617
49618 return retval;
49619diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49620index 66b7bbe..26bee78 100644
49621--- a/drivers/pci/hotplug/cpcihp_generic.c
49622+++ b/drivers/pci/hotplug/cpcihp_generic.c
49623@@ -73,7 +73,6 @@ static u16 port;
49624 static unsigned int enum_bit;
49625 static u8 enum_mask;
49626
49627-static struct cpci_hp_controller_ops generic_hpc_ops;
49628 static struct cpci_hp_controller generic_hpc;
49629
49630 static int __init validate_parameters(void)
49631@@ -139,6 +138,10 @@ static int query_enum(void)
49632 return ((value & enum_mask) == enum_mask);
49633 }
49634
49635+static struct cpci_hp_controller_ops generic_hpc_ops = {
49636+ .query_enum = query_enum,
49637+};
49638+
49639 static int __init cpcihp_generic_init(void)
49640 {
49641 int status;
49642@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49643 pci_dev_put(dev);
49644
49645 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49646- generic_hpc_ops.query_enum = query_enum;
49647 generic_hpc.ops = &generic_hpc_ops;
49648
49649 status = cpci_hp_register_controller(&generic_hpc);
49650diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49651index 7ecf34e..effed62 100644
49652--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49653+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49654@@ -59,7 +59,6 @@
49655 /* local variables */
49656 static bool debug;
49657 static bool poll;
49658-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49659 static struct cpci_hp_controller zt5550_hpc;
49660
49661 /* Primary cPCI bus bridge device */
49662@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49663 return 0;
49664 }
49665
49666+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49667+ .query_enum = zt5550_hc_query_enum,
49668+};
49669+
49670 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49671 {
49672 int status;
49673@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49674 dbg("returned from zt5550_hc_config");
49675
49676 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49677- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49678 zt5550_hpc.ops = &zt5550_hpc_ops;
49679 if (!poll) {
49680 zt5550_hpc.irq = hc_dev->irq;
49681 zt5550_hpc.irq_flags = IRQF_SHARED;
49682 zt5550_hpc.dev_id = hc_dev;
49683
49684- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49685- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49686- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49687+ pax_open_kernel();
49688+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49689+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49690+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49691+		pax_close_kernel();
49692 } else {
49693 info("using ENUM# polling mode");
49694 }
49695diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49696index 1e08ff8c..3cd145f 100644
49697--- a/drivers/pci/hotplug/cpqphp_nvram.c
49698+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49699@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49700
49701 void compaq_nvram_init (void __iomem *rom_start)
49702 {
49703+#ifndef CONFIG_PAX_KERNEXEC
49704 if (rom_start)
49705 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49706+#endif
49707
49708 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49709
49710diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49711index 56d8486..f26113f 100644
49712--- a/drivers/pci/hotplug/pci_hotplug_core.c
49713+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49714@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49715 return -EINVAL;
49716 }
49717
49718- slot->ops->owner = owner;
49719- slot->ops->mod_name = mod_name;
49720+ pax_open_kernel();
49721+ *(struct module **)&slot->ops->owner = owner;
49722+ *(const char **)&slot->ops->mod_name = mod_name;
49723+ pax_close_kernel();
49724
49725 mutex_lock(&pci_hp_mutex);
49726 /*
49727diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49728index 07aa722..84514b4 100644
49729--- a/drivers/pci/hotplug/pciehp_core.c
49730+++ b/drivers/pci/hotplug/pciehp_core.c
49731@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49732 struct slot *slot = ctrl->slot;
49733 struct hotplug_slot *hotplug = NULL;
49734 struct hotplug_slot_info *info = NULL;
49735- struct hotplug_slot_ops *ops = NULL;
49736+ hotplug_slot_ops_no_const *ops = NULL;
49737 char name[SLOT_NAME_SIZE];
49738 int retval = -ENOMEM;
49739
49740diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49741index fd60806..ab6c565 100644
49742--- a/drivers/pci/msi.c
49743+++ b/drivers/pci/msi.c
49744@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49745 {
49746 struct attribute **msi_attrs;
49747 struct attribute *msi_attr;
49748- struct device_attribute *msi_dev_attr;
49749- struct attribute_group *msi_irq_group;
49750+ device_attribute_no_const *msi_dev_attr;
49751+ attribute_group_no_const *msi_irq_group;
49752 const struct attribute_group **msi_irq_groups;
49753 struct msi_desc *entry;
49754 int ret = -ENOMEM;
49755@@ -573,7 +573,7 @@ error_attrs:
49756 count = 0;
49757 msi_attr = msi_attrs[count];
49758 while (msi_attr) {
49759- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49760+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49761 kfree(msi_attr->name);
49762 kfree(msi_dev_attr);
49763 ++count;
49764diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49765index 312f23a..d21181c 100644
49766--- a/drivers/pci/pci-sysfs.c
49767+++ b/drivers/pci/pci-sysfs.c
49768@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49769 {
49770 /* allocate attribute structure, piggyback attribute name */
49771 int name_len = write_combine ? 13 : 10;
49772- struct bin_attribute *res_attr;
49773+ bin_attribute_no_const *res_attr;
49774 int retval;
49775
49776 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49777@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49778 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49779 {
49780 int retval;
49781- struct bin_attribute *attr;
49782+ bin_attribute_no_const *attr;
49783
49784 /* If the device has VPD, try to expose it in sysfs. */
49785 if (dev->vpd) {
49786@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49787 {
49788 int retval;
49789 int rom_size = 0;
49790- struct bin_attribute *attr;
49791+ bin_attribute_no_const *attr;
49792
49793 if (!sysfs_initialized)
49794 return -EACCES;
49795diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49796index d54632a..198c84d 100644
49797--- a/drivers/pci/pci.h
49798+++ b/drivers/pci/pci.h
49799@@ -93,7 +93,7 @@ struct pci_vpd_ops {
49800 struct pci_vpd {
49801 unsigned int len;
49802 const struct pci_vpd_ops *ops;
49803- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49804+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49805 };
49806
49807 int pci_vpd_pci22_init(struct pci_dev *dev);
49808diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49809index e1e7026..d28dd33 100644
49810--- a/drivers/pci/pcie/aspm.c
49811+++ b/drivers/pci/pcie/aspm.c
49812@@ -27,9 +27,9 @@
49813 #define MODULE_PARAM_PREFIX "pcie_aspm."
49814
49815 /* Note: those are not register definitions */
49816-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49817-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49818-#define ASPM_STATE_L1 (4) /* L1 state */
49819+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49820+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49821+#define ASPM_STATE_L1 (4U) /* L1 state */
49822 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49823 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
49824
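
The 1U/2U/4U suffixes above look cosmetic, but they keep the flag macros unsigned, matching the u32 fields they are tested against and avoiding sign extension if a flag is ever defined in bit 31. A small standalone demonstration of the failure mode being headed off (hypothetical flag values, not ASPM's):

#include <stdio.h>

#define FLAG_SIGNED	(1 << 31)	/* int: negative (shift is UB in strict C) */
#define FLAG_UNSIGNED	(1U << 31)	/* unsigned: well-defined bit 31 */

int main(void)
{
	unsigned long long s = FLAG_SIGNED;	/* sign-extends when widened */
	unsigned long long u = FLAG_UNSIGNED;	/* zero-extends              */
	printf("signed:   %016llx\n", s);	/* ffffffff80000000          */
	printf("unsigned: %016llx\n", u);	/* 0000000080000000          */
	return 0;
}
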
49825diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
49826index 23212f8..65e945b 100644
49827--- a/drivers/pci/probe.c
49828+++ b/drivers/pci/probe.c
49829@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
49830 u16 orig_cmd;
49831 struct pci_bus_region region, inverted_region;
49832
49833- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
49834+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
49835
49836 /* No printks while decoding is disabled! */
49837 if (!dev->mmio_always_on) {
49838diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
49839index 3f155e7..0f4b1f0 100644
49840--- a/drivers/pci/proc.c
49841+++ b/drivers/pci/proc.c
49842@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
49843 static int __init pci_proc_init(void)
49844 {
49845 struct pci_dev *dev = NULL;
49846+
49847+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49848+#ifdef CONFIG_GRKERNSEC_PROC_USER
49849+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
49850+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49851+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49852+#endif
49853+#else
49854 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
49855+#endif
49856 proc_create("devices", 0, proc_bus_pci_dir,
49857 &proc_bus_pci_dev_operations);
49858 proc_initialized = 1;
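
Under CONFIG_GRKERNSEC_PROC the hunk above creates /proc/bus/pci with a restricted directory mode instead of the default world-readable one, so unprivileged users (GRKERNSEC_PROC_USER) or everyone outside a configured group (GRKERNSEC_PROC_USERGROUP) cannot enumerate PCI devices. The two mode expressions are simply 0500 and 0550 in octal; printing them makes that concrete:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	printf("S_IRUSR|S_IXUSR                 = %04o\n",
	       (unsigned)(S_IRUSR | S_IXUSR));			   /* 0500 */
	printf("S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP = %04o\n",
	       (unsigned)(S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP)); /* 0550 */
	return 0;
}
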
49859diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
49860index b84fdd6..b89d829 100644
49861--- a/drivers/platform/chrome/chromeos_laptop.c
49862+++ b/drivers/platform/chrome/chromeos_laptop.c
49863@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
49864 .callback = chromeos_laptop_dmi_matched, \
49865 .driver_data = (void *)&board_
49866
49867-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
49868+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
49869 {
49870 .ident = "Samsung Series 5 550",
49871 .matches = {
49872diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
49873index 1e1e594..8fe59c5 100644
49874--- a/drivers/platform/x86/alienware-wmi.c
49875+++ b/drivers/platform/x86/alienware-wmi.c
49876@@ -150,7 +150,7 @@ struct wmax_led_args {
49877 } __packed;
49878
49879 static struct platform_device *platform_device;
49880-static struct device_attribute *zone_dev_attrs;
49881+static device_attribute_no_const *zone_dev_attrs;
49882 static struct attribute **zone_attrs;
49883 static struct platform_zone *zone_data;
49884
49885@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
49886 }
49887 };
49888
49889-static struct attribute_group zone_attribute_group = {
49890+static attribute_group_no_const zone_attribute_group = {
49891 .name = "rgb_zones",
49892 };
49893
49894diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
49895index 7543a56..367ca8ed 100644
49896--- a/drivers/platform/x86/asus-wmi.c
49897+++ b/drivers/platform/x86/asus-wmi.c
49898@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
49899 int err;
49900 u32 retval = -1;
49901
49902+#ifdef CONFIG_GRKERNSEC_KMEM
49903+ return -EPERM;
49904+#endif
49905+
49906 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
49907
49908 if (err < 0)
49909@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
49910 int err;
49911 u32 retval = -1;
49912
49913+#ifdef CONFIG_GRKERNSEC_KMEM
49914+ return -EPERM;
49915+#endif
49916+
49917 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
49918 &retval);
49919
49920@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
49921 union acpi_object *obj;
49922 acpi_status status;
49923
49924+#ifdef CONFIG_GRKERNSEC_KMEM
49925+ return -EPERM;
49926+#endif
49927+
49928 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
49929 1, asus->debug.method_id,
49930 &input, &output);
49931diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
49932index 0859877..1cf7d08 100644
49933--- a/drivers/platform/x86/msi-laptop.c
49934+++ b/drivers/platform/x86/msi-laptop.c
49935@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
49936
49937 if (!quirks->ec_read_only) {
49938 /* allow userland write sysfs file */
49939- dev_attr_bluetooth.store = store_bluetooth;
49940- dev_attr_wlan.store = store_wlan;
49941- dev_attr_threeg.store = store_threeg;
49942- dev_attr_bluetooth.attr.mode |= S_IWUSR;
49943- dev_attr_wlan.attr.mode |= S_IWUSR;
49944- dev_attr_threeg.attr.mode |= S_IWUSR;
49945+ pax_open_kernel();
49946+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
49947+ *(void **)&dev_attr_wlan.store = store_wlan;
49948+ *(void **)&dev_attr_threeg.store = store_threeg;
49949+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
49950+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
49951+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
49952+ pax_close_kernel();
49953 }
49954
49955 /* disable hardware control by fn key */
49956diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
49957index 6d2bac0..ec2b029 100644
49958--- a/drivers/platform/x86/msi-wmi.c
49959+++ b/drivers/platform/x86/msi-wmi.c
49960@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
49961 static void msi_wmi_notify(u32 value, void *context)
49962 {
49963 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
49964- static struct key_entry *key;
49965+ struct key_entry *key;
49966 union acpi_object *obj;
49967 acpi_status status;
49968
49969diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
49970index 6dd1c0e..5d602c7 100644
49971--- a/drivers/platform/x86/sony-laptop.c
49972+++ b/drivers/platform/x86/sony-laptop.c
49973@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
49974 }
49975
49976 /* High speed charging function */
49977-static struct device_attribute *hsc_handle;
49978+static device_attribute_no_const *hsc_handle;
49979
49980 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
49981 struct device_attribute *attr,
49982@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
49983 }
49984
49985 /* low battery function */
49986-static struct device_attribute *lowbatt_handle;
49987+static device_attribute_no_const *lowbatt_handle;
49988
49989 static ssize_t sony_nc_lowbatt_store(struct device *dev,
49990 struct device_attribute *attr,
49991@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
49992 }
49993
49994 /* fan speed function */
49995-static struct device_attribute *fan_handle, *hsf_handle;
49996+static device_attribute_no_const *fan_handle, *hsf_handle;
49997
49998 static ssize_t sony_nc_hsfan_store(struct device *dev,
49999 struct device_attribute *attr,
50000@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50001 }
50002
50003 /* USB charge function */
50004-static struct device_attribute *uc_handle;
50005+static device_attribute_no_const *uc_handle;
50006
50007 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50008 struct device_attribute *attr,
50009@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50010 }
50011
50012 /* Panel ID function */
50013-static struct device_attribute *panel_handle;
50014+static device_attribute_no_const *panel_handle;
50015
50016 static ssize_t sony_nc_panelid_show(struct device *dev,
50017 struct device_attribute *attr, char *buffer)
50018@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50019 }
50020
50021 /* smart connect function */
50022-static struct device_attribute *sc_handle;
50023+static device_attribute_no_const *sc_handle;
50024
50025 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50026 struct device_attribute *attr,
50027diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50028index c3d11fa..f83cded 100644
50029--- a/drivers/platform/x86/thinkpad_acpi.c
50030+++ b/drivers/platform/x86/thinkpad_acpi.c
50031@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
50032 return 0;
50033 }
50034
50035-void static hotkey_mask_warn_incomplete_mask(void)
50036+static void hotkey_mask_warn_incomplete_mask(void)
50037 {
50038 /* log only what the user can fix... */
50039 const u32 wantedmask = hotkey_driver_mask &
50040@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50041 && !tp_features.bright_unkfw)
50042 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50043 }
50044+}
50045
50046 #undef TPACPI_COMPARE_KEY
50047 #undef TPACPI_MAY_SEND_KEY
50048-}
50049
50050 /*
50051 * Polling driver
50052diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50053index 438d4c7..ca8a2fb 100644
50054--- a/drivers/pnp/pnpbios/bioscalls.c
50055+++ b/drivers/pnp/pnpbios/bioscalls.c
50056@@ -59,7 +59,7 @@ do { \
50057 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50058 } while(0)
50059
50060-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50061+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50062 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50063
50064 /*
50065@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50066
50067 cpu = get_cpu();
50068 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50069+
50070+ pax_open_kernel();
50071 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50072+ pax_close_kernel();
50073
50074 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50075 spin_lock_irqsave(&pnp_bios_lock, flags);
50076@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50077 :"memory");
50078 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50079
50080+ pax_open_kernel();
50081 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50082+ pax_close_kernel();
50083+
50084 put_cpu();
50085
50086 /* If we get here and this is set then the PnP BIOS faulted on us. */
50087@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50088 return status;
50089 }
50090
50091-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50092+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50093 {
50094 int i;
50095
50096@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50097 pnp_bios_callpoint.offset = header->fields.pm16offset;
50098 pnp_bios_callpoint.segment = PNP_CS16;
50099
50100+ pax_open_kernel();
50101+
50102 for_each_possible_cpu(i) {
50103 struct desc_struct *gdt = get_cpu_gdt_table(i);
50104 if (!gdt)
50105@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50106 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50107 (unsigned long)__va(header->fields.pm16dseg));
50108 }
50109+
50110+ pax_close_kernel();
50111 }
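
Two things changed in bioscalls.c: the temporary GDT writes are now bracketed by pax_open_kernel()/pax_close_kernel(), and bad_bios_desc became const with its flags bumped from 0x4092 to 0x4093. The low bit of a descriptor's access byte is the "accessed" flag; if it is not preset, the CPU sets it on first segment load, i.e. it writes to the descriptor, which would fault now that the descriptor is read-only. A sketch decoding the relevant bits (field layout per the x86 segment-descriptor format):

#include <stdio.h>

int main(void)
{
	unsigned flags = 0x4093;   /* GDT_ENTRY_INIT flags: hi nibble pair 0x40,
				      low byte = access byte */
	unsigned access = flags & 0xff;

	printf("present  : %u\n", (access >> 7) & 1);	/* 1: segment present     */
	printf("type     : 0x%x\n", access & 0xf);	/* 3: data, r/w, accessed */
	printf("accessed : %u\n", access & 1);		/* preset, so the CPU
							   never writes it        */
	return 0;
}
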
50112diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50113index 0c52e2a..3421ab7 100644
50114--- a/drivers/power/pda_power.c
50115+++ b/drivers/power/pda_power.c
50116@@ -37,7 +37,11 @@ static int polling;
50117
50118 #if IS_ENABLED(CONFIG_USB_PHY)
50119 static struct usb_phy *transceiver;
50120-static struct notifier_block otg_nb;
50121+static int otg_handle_notification(struct notifier_block *nb,
50122+ unsigned long event, void *unused);
50123+static struct notifier_block otg_nb = {
50124+ .notifier_call = otg_handle_notification
50125+};
50126 #endif
50127
50128 static struct regulator *ac_draw;
50129@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50130
50131 #if IS_ENABLED(CONFIG_USB_PHY)
50132 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50133- otg_nb.notifier_call = otg_handle_notification;
50134 ret = usb_register_notifier(transceiver, &otg_nb);
50135 if (ret) {
50136 dev_err(dev, "failure to register otg notifier\n");
50137diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50138index cc439fd..8fa30df 100644
50139--- a/drivers/power/power_supply.h
50140+++ b/drivers/power/power_supply.h
50141@@ -16,12 +16,12 @@ struct power_supply;
50142
50143 #ifdef CONFIG_SYSFS
50144
50145-extern void power_supply_init_attrs(struct device_type *dev_type);
50146+extern void power_supply_init_attrs(void);
50147 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50148
50149 #else
50150
50151-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50152+static inline void power_supply_init_attrs(void) {}
50153 #define power_supply_uevent NULL
50154
50155 #endif /* CONFIG_SYSFS */
50156diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50157index 694e8cd..9f03483 100644
50158--- a/drivers/power/power_supply_core.c
50159+++ b/drivers/power/power_supply_core.c
50160@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50161 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50162 EXPORT_SYMBOL_GPL(power_supply_notifier);
50163
50164-static struct device_type power_supply_dev_type;
50165+extern const struct attribute_group *power_supply_attr_groups[];
50166+static struct device_type power_supply_dev_type = {
50167+ .groups = power_supply_attr_groups,
50168+};
50169
50170 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50171 struct power_supply *supply)
50172@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50173 return PTR_ERR(power_supply_class);
50174
50175 power_supply_class->dev_uevent = power_supply_uevent;
50176- power_supply_init_attrs(&power_supply_dev_type);
50177+ power_supply_init_attrs();
50178
50179 return 0;
50180 }
50181diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50182index 62653f5..d0bb485 100644
50183--- a/drivers/power/power_supply_sysfs.c
50184+++ b/drivers/power/power_supply_sysfs.c
50185@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50186 .is_visible = power_supply_attr_is_visible,
50187 };
50188
50189-static const struct attribute_group *power_supply_attr_groups[] = {
50190+const struct attribute_group *power_supply_attr_groups[] = {
50191 &power_supply_attr_group,
50192 NULL,
50193 };
50194
50195-void power_supply_init_attrs(struct device_type *dev_type)
50196+void power_supply_init_attrs(void)
50197 {
50198 int i;
50199
50200- dev_type->groups = power_supply_attr_groups;
50201-
50202 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50203 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50204 }
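
The pda_power.c and power_supply hunks above are the same transformation: instead of leaving a structure blank and poking a function pointer (or .groups array) into it during probe/init, the field is bound in a static initializer, so the object is never written at runtime and can live in read-only data. Sketched with the pda_power names from the hunk:

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long event, void *data);
};

static int otg_handle_notification(struct notifier_block *nb,
				   unsigned long event, void *unused)
{
	(void)nb; (void)event; (void)unused;
	return 0;
}

/* Before: a blank otg_nb plus "otg_nb.notifier_call = ..." in probe().
 * After:  the pointer is fixed at build time and never stored to again. */
static struct notifier_block otg_nb = {
	.notifier_call = otg_handle_notification,
};

int main(void)
{
	return otg_nb.notifier_call(&otg_nb, 0, 0);
}
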
50205diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50206index 84419af..268ede8 100644
50207--- a/drivers/powercap/powercap_sys.c
50208+++ b/drivers/powercap/powercap_sys.c
50209@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50210 struct device_attribute name_attr;
50211 };
50212
50213+static ssize_t show_constraint_name(struct device *dev,
50214+ struct device_attribute *dev_attr,
50215+ char *buf);
50216+
50217 static struct powercap_constraint_attr
50218- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50219+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50220+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50221+ .power_limit_attr = {
50222+ .attr = {
50223+ .name = NULL,
50224+ .mode = S_IWUSR | S_IRUGO
50225+ },
50226+ .show = show_constraint_power_limit_uw,
50227+ .store = store_constraint_power_limit_uw
50228+ },
50229+
50230+ .time_window_attr = {
50231+ .attr = {
50232+ .name = NULL,
50233+ .mode = S_IWUSR | S_IRUGO
50234+ },
50235+ .show = show_constraint_time_window_us,
50236+ .store = store_constraint_time_window_us
50237+ },
50238+
50239+ .max_power_attr = {
50240+ .attr = {
50241+ .name = NULL,
50242+ .mode = S_IRUGO
50243+ },
50244+ .show = show_constraint_max_power_uw,
50245+ .store = NULL
50246+ },
50247+
50248+ .min_power_attr = {
50249+ .attr = {
50250+ .name = NULL,
50251+ .mode = S_IRUGO
50252+ },
50253+ .show = show_constraint_min_power_uw,
50254+ .store = NULL
50255+ },
50256+
50257+ .max_time_window_attr = {
50258+ .attr = {
50259+ .name = NULL,
50260+ .mode = S_IRUGO
50261+ },
50262+ .show = show_constraint_max_time_window_us,
50263+ .store = NULL
50264+ },
50265+
50266+ .min_time_window_attr = {
50267+ .attr = {
50268+ .name = NULL,
50269+ .mode = S_IRUGO
50270+ },
50271+ .show = show_constraint_min_time_window_us,
50272+ .store = NULL
50273+ },
50274+
50275+ .name_attr = {
50276+ .attr = {
50277+ .name = NULL,
50278+ .mode = S_IRUGO
50279+ },
50280+ .show = show_constraint_name,
50281+ .store = NULL
50282+ }
50283+ }
50284+};
50285
50286 /* A list of powercap control_types */
50287 static LIST_HEAD(powercap_cntrl_list);
50288@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50289 }
50290
50291 static int create_constraint_attribute(int id, const char *name,
50292- int mode,
50293- struct device_attribute *dev_attr,
50294- ssize_t (*show)(struct device *,
50295- struct device_attribute *, char *),
50296- ssize_t (*store)(struct device *,
50297- struct device_attribute *,
50298- const char *, size_t)
50299- )
50300+ struct device_attribute *dev_attr)
50301 {
50302+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50303
50304- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50305- id, name);
50306- if (!dev_attr->attr.name)
50307+ if (!name)
50308 return -ENOMEM;
50309- dev_attr->attr.mode = mode;
50310- dev_attr->show = show;
50311- dev_attr->store = store;
50312+
50313+ pax_open_kernel();
50314+ *(const char **)&dev_attr->attr.name = name;
50315+ pax_close_kernel();
50316
50317 return 0;
50318 }
50319@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50320
50321 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50322 ret = create_constraint_attribute(i, "power_limit_uw",
50323- S_IWUSR | S_IRUGO,
50324- &constraint_attrs[i].power_limit_attr,
50325- show_constraint_power_limit_uw,
50326- store_constraint_power_limit_uw);
50327+ &constraint_attrs[i].power_limit_attr);
50328 if (ret)
50329 goto err_alloc;
50330 ret = create_constraint_attribute(i, "time_window_us",
50331- S_IWUSR | S_IRUGO,
50332- &constraint_attrs[i].time_window_attr,
50333- show_constraint_time_window_us,
50334- store_constraint_time_window_us);
50335+ &constraint_attrs[i].time_window_attr);
50336 if (ret)
50337 goto err_alloc;
50338- ret = create_constraint_attribute(i, "name", S_IRUGO,
50339- &constraint_attrs[i].name_attr,
50340- show_constraint_name,
50341- NULL);
50342+ ret = create_constraint_attribute(i, "name",
50343+ &constraint_attrs[i].name_attr);
50344 if (ret)
50345 goto err_alloc;
50346- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50347- &constraint_attrs[i].max_power_attr,
50348- show_constraint_max_power_uw,
50349- NULL);
50350+ ret = create_constraint_attribute(i, "max_power_uw",
50351+ &constraint_attrs[i].max_power_attr);
50352 if (ret)
50353 goto err_alloc;
50354- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50355- &constraint_attrs[i].min_power_attr,
50356- show_constraint_min_power_uw,
50357- NULL);
50358+ ret = create_constraint_attribute(i, "min_power_uw",
50359+ &constraint_attrs[i].min_power_attr);
50360 if (ret)
50361 goto err_alloc;
50362 ret = create_constraint_attribute(i, "max_time_window_us",
50363- S_IRUGO,
50364- &constraint_attrs[i].max_time_window_attr,
50365- show_constraint_max_time_window_us,
50366- NULL);
50367+ &constraint_attrs[i].max_time_window_attr);
50368 if (ret)
50369 goto err_alloc;
50370 ret = create_constraint_attribute(i, "min_time_window_us",
50371- S_IRUGO,
50372- &constraint_attrs[i].min_time_window_attr,
50373- show_constraint_min_time_window_us,
50374- NULL);
50375+ &constraint_attrs[i].min_time_window_attr);
50376 if (ret)
50377 goto err_alloc;
50378
50379@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50380 power_zone->zone_dev_attrs[count++] =
50381 &dev_attr_max_energy_range_uj.attr;
50382 if (power_zone->ops->get_energy_uj) {
50383+ pax_open_kernel();
50384 if (power_zone->ops->reset_energy_uj)
50385- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50386+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50387 else
50388- dev_attr_energy_uj.attr.mode = S_IRUGO;
50389+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50390+ pax_close_kernel();
50391 power_zone->zone_dev_attrs[count++] =
50392 &dev_attr_energy_uj.attr;
50393 }
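
The large constraint_attrs initializer above uses the GNU C range designator [first ... last] to stamp one initializer across every array element at compile time, leaving only the kasprintf()ed attribute name to be patched in later under pax_open_kernel(). A minimal standalone example of the range form (a GCC/Clang extension):

#include <stdio.h>

#define N 4

struct attr {
	unsigned mode;
	const char *name;
};

/* Every element receives the same initializer. */
static struct attr attrs[N] = {
	[0 ... N - 1] = { .mode = 0644, .name = NULL },
};

int main(void)
{
	for (int i = 0; i < N; i++)
		printf("attrs[%d].mode = %o\n", i, attrs[i].mode);
	return 0;
}
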
50394diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50395index 9c5d414..c7900ce 100644
50396--- a/drivers/ptp/ptp_private.h
50397+++ b/drivers/ptp/ptp_private.h
50398@@ -51,7 +51,7 @@ struct ptp_clock {
50399 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50400 wait_queue_head_t tsev_wq;
50401 int defunct; /* tells readers to go away when clock is being removed */
50402- struct device_attribute *pin_dev_attr;
50403+ device_attribute_no_const *pin_dev_attr;
50404 struct attribute **pin_attr;
50405 struct attribute_group pin_attr_group;
50406 };
50407diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50408index 302e626..12579af 100644
50409--- a/drivers/ptp/ptp_sysfs.c
50410+++ b/drivers/ptp/ptp_sysfs.c
50411@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50412 goto no_pin_attr;
50413
50414 for (i = 0; i < n_pins; i++) {
50415- struct device_attribute *da = &ptp->pin_dev_attr[i];
50416+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50417 sysfs_attr_init(&da->attr);
50418 da->attr.name = info->pin_config[i].name;
50419 da->attr.mode = 0644;
50420diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50421index a5761d0..a2a4540 100644
50422--- a/drivers/regulator/core.c
50423+++ b/drivers/regulator/core.c
50424@@ -3591,7 +3591,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50425 {
50426 const struct regulation_constraints *constraints = NULL;
50427 const struct regulator_init_data *init_data;
50428- static atomic_t regulator_no = ATOMIC_INIT(0);
50429+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50430 struct regulator_dev *rdev;
50431 struct device *dev;
50432 int ret, i;
50433@@ -3665,7 +3665,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50434 rdev->dev.class = &regulator_class;
50435 rdev->dev.parent = dev;
50436 dev_set_name(&rdev->dev, "regulator.%d",
50437- atomic_inc_return(&regulator_no) - 1);
50438+ atomic_inc_return_unchecked(&regulator_no) - 1);
50439 ret = device_register(&rdev->dev);
50440 if (ret != 0) {
50441 put_device(&rdev->dev);
50442diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50443index 7eee2ca..4024513 100644
50444--- a/drivers/regulator/max8660.c
50445+++ b/drivers/regulator/max8660.c
50446@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50447 max8660->shadow_regs[MAX8660_OVER1] = 5;
50448 } else {
50449 /* Otherwise devices can be toggled via software */
50450- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50451- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50452+ pax_open_kernel();
50453+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50454+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50455+ pax_close_kernel();
50456 }
50457
50458 /*
50459diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50460index c3d55c2..0dddfe6 100644
50461--- a/drivers/regulator/max8973-regulator.c
50462+++ b/drivers/regulator/max8973-regulator.c
50463@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50464 if (!pdata || !pdata->enable_ext_control) {
50465 max->desc.enable_reg = MAX8973_VOUT;
50466 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50467- max->ops.enable = regulator_enable_regmap;
50468- max->ops.disable = regulator_disable_regmap;
50469- max->ops.is_enabled = regulator_is_enabled_regmap;
50470+ pax_open_kernel();
50471+ *(void **)&max->ops.enable = regulator_enable_regmap;
50472+ *(void **)&max->ops.disable = regulator_disable_regmap;
50473+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50474+ pax_close_kernel();
50475 }
50476
50477 if (pdata) {
50478diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50479index 0d17c92..a29f627 100644
50480--- a/drivers/regulator/mc13892-regulator.c
50481+++ b/drivers/regulator/mc13892-regulator.c
50482@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50483 mc13xxx_unlock(mc13892);
50484
50485 /* update mc13892_vcam ops */
50486- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50487+ pax_open_kernel();
50488+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50489 sizeof(struct regulator_ops));
50490- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50491- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50492+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50493+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50494+ pax_close_kernel();
50495 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50496
50497 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50498diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50499index 5b2e761..c8c8a4a 100644
50500--- a/drivers/rtc/rtc-cmos.c
50501+++ b/drivers/rtc/rtc-cmos.c
50502@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50503 hpet_rtc_timer_init();
50504
50505 /* export at least the first block of NVRAM */
50506- nvram.size = address_space - NVRAM_OFFSET;
50507+ pax_open_kernel();
50508+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50509+ pax_close_kernel();
50510 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50511 if (retval < 0) {
50512 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50513diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50514index d049393..bb20be0 100644
50515--- a/drivers/rtc/rtc-dev.c
50516+++ b/drivers/rtc/rtc-dev.c
50517@@ -16,6 +16,7 @@
50518 #include <linux/module.h>
50519 #include <linux/rtc.h>
50520 #include <linux/sched.h>
50521+#include <linux/grsecurity.h>
50522 #include "rtc-core.h"
50523
50524 static dev_t rtc_devt;
50525@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50526 if (copy_from_user(&tm, uarg, sizeof(tm)))
50527 return -EFAULT;
50528
50529+ gr_log_timechange();
50530+
50531 return rtc_set_time(rtc, &tm);
50532
50533 case RTC_PIE_ON:
50534diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50535index 4ffabb3..1f87fca 100644
50536--- a/drivers/rtc/rtc-ds1307.c
50537+++ b/drivers/rtc/rtc-ds1307.c
50538@@ -107,7 +107,7 @@ struct ds1307 {
50539 u8 offset; /* register's offset */
50540 u8 regs[11];
50541 u16 nvram_offset;
50542- struct bin_attribute *nvram;
50543+ bin_attribute_no_const *nvram;
50544 enum ds_type type;
50545 unsigned long flags;
50546 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50547diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50548index 90abb5b..e0bf6dd 100644
50549--- a/drivers/rtc/rtc-m48t59.c
50550+++ b/drivers/rtc/rtc-m48t59.c
50551@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50552 if (IS_ERR(m48t59->rtc))
50553 return PTR_ERR(m48t59->rtc);
50554
50555- m48t59_nvram_attr.size = pdata->offset;
50556+ pax_open_kernel();
50557+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50558+ pax_close_kernel();
50559
50560 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50561 if (ret)
50562diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50563index e693af6..2e525b6 100644
50564--- a/drivers/scsi/bfa/bfa_fcpim.h
50565+++ b/drivers/scsi/bfa/bfa_fcpim.h
50566@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50567
50568 struct bfa_itn_s {
50569 bfa_isr_func_t isr;
50570-};
50571+} __no_const;
50572
50573 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50574 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50575diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50576index 0f19455..ef7adb5 100644
50577--- a/drivers/scsi/bfa/bfa_fcs.c
50578+++ b/drivers/scsi/bfa/bfa_fcs.c
50579@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50580 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50581
50582 static struct bfa_fcs_mod_s fcs_modules[] = {
50583- { bfa_fcs_port_attach, NULL, NULL },
50584- { bfa_fcs_uf_attach, NULL, NULL },
50585- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50586- bfa_fcs_fabric_modexit },
50587+ {
50588+ .attach = bfa_fcs_port_attach,
50589+ .modinit = NULL,
50590+ .modexit = NULL
50591+ },
50592+ {
50593+ .attach = bfa_fcs_uf_attach,
50594+ .modinit = NULL,
50595+ .modexit = NULL
50596+ },
50597+ {
50598+ .attach = bfa_fcs_fabric_attach,
50599+ .modinit = bfa_fcs_fabric_modinit,
50600+ .modexit = bfa_fcs_fabric_modexit
50601+ },
50602 };
50603
50604 /*
50605diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50606index ff75ef8..2dfe00a 100644
50607--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50608+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50609@@ -89,15 +89,26 @@ static struct {
50610 void (*offline) (struct bfa_fcs_lport_s *port);
50611 } __port_action[] = {
50612 {
50613- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50614- bfa_fcs_lport_unknown_offline}, {
50615- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50616- bfa_fcs_lport_fab_offline}, {
50617- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50618- bfa_fcs_lport_n2n_offline}, {
50619- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50620- bfa_fcs_lport_loop_offline},
50621- };
50622+ .init = bfa_fcs_lport_unknown_init,
50623+ .online = bfa_fcs_lport_unknown_online,
50624+ .offline = bfa_fcs_lport_unknown_offline
50625+ },
50626+ {
50627+ .init = bfa_fcs_lport_fab_init,
50628+ .online = bfa_fcs_lport_fab_online,
50629+ .offline = bfa_fcs_lport_fab_offline
50630+ },
50631+ {
50632+ .init = bfa_fcs_lport_n2n_init,
50633+ .online = bfa_fcs_lport_n2n_online,
50634+ .offline = bfa_fcs_lport_n2n_offline
50635+ },
50636+ {
50637+ .init = bfa_fcs_lport_loop_init,
50638+ .online = bfa_fcs_lport_loop_online,
50639+ .offline = bfa_fcs_lport_loop_offline
50640+ },
50641+};
50642
50643 /*
50644 * fcs_port_sm FCS logical port state machine
50645diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50646index a38aafa0..fe8f03b 100644
50647--- a/drivers/scsi/bfa/bfa_ioc.h
50648+++ b/drivers/scsi/bfa/bfa_ioc.h
50649@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50650 bfa_ioc_disable_cbfn_t disable_cbfn;
50651 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50652 bfa_ioc_reset_cbfn_t reset_cbfn;
50653-};
50654+} __no_const;
50655
50656 /*
50657 * IOC event notification mechanism.
50658@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50659 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50660 enum bfi_ioc_state fwstate);
50661 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50662-};
50663+} __no_const;
50664
50665 /*
50666 * Queue element to wait for room in request queue. FIFO order is
50667diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50668index a14c784..6de6790 100644
50669--- a/drivers/scsi/bfa/bfa_modules.h
50670+++ b/drivers/scsi/bfa/bfa_modules.h
50671@@ -78,12 +78,12 @@ enum {
50672 \
50673 extern struct bfa_module_s hal_mod_ ## __mod; \
50674 struct bfa_module_s hal_mod_ ## __mod = { \
50675- bfa_ ## __mod ## _meminfo, \
50676- bfa_ ## __mod ## _attach, \
50677- bfa_ ## __mod ## _detach, \
50678- bfa_ ## __mod ## _start, \
50679- bfa_ ## __mod ## _stop, \
50680- bfa_ ## __mod ## _iocdisable, \
50681+ .meminfo = bfa_ ## __mod ## _meminfo, \
50682+ .attach = bfa_ ## __mod ## _attach, \
50683+ .detach = bfa_ ## __mod ## _detach, \
50684+ .start = bfa_ ## __mod ## _start, \
50685+ .stop = bfa_ ## __mod ## _stop, \
50686+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50687 }
50688
50689 #define BFA_CACHELINE_SZ (256)
50690diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50691index 045c4e1..13de803 100644
50692--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50693+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50694@@ -33,8 +33,8 @@
50695 */
50696 #include "libfcoe.h"
50697
50698-static atomic_t ctlr_num;
50699-static atomic_t fcf_num;
50700+static atomic_unchecked_t ctlr_num;
50701+static atomic_unchecked_t fcf_num;
50702
50703 /*
50704 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
50705@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
50706 if (!ctlr)
50707 goto out;
50708
50709- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
50710+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
50711 ctlr->f = f;
50712 ctlr->mode = FIP_CONN_TYPE_FABRIC;
50713 INIT_LIST_HEAD(&ctlr->fcfs);
50714@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
50715 fcf->dev.parent = &ctlr->dev;
50716 fcf->dev.bus = &fcoe_bus_type;
50717 fcf->dev.type = &fcoe_fcf_device_type;
50718- fcf->id = atomic_inc_return(&fcf_num) - 1;
50719+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
50720 fcf->state = FCOE_FCF_STATE_UNKNOWN;
50721
50722 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
50723@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
50724 {
50725 int error;
50726
50727- atomic_set(&ctlr_num, 0);
50728- atomic_set(&fcf_num, 0);
50729+ atomic_set_unchecked(&ctlr_num, 0);
50730+ atomic_set_unchecked(&fcf_num, 0);
50731
50732 error = bus_register(&fcoe_bus_type);
50733 if (error)
50734diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
50735index 8bb173e..20236b4 100644
50736--- a/drivers/scsi/hosts.c
50737+++ b/drivers/scsi/hosts.c
50738@@ -42,7 +42,7 @@
50739 #include "scsi_logging.h"
50740
50741
50742-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50743+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50744
50745
50746 static void scsi_host_cls_release(struct device *dev)
50747@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
50748 * subtract one because we increment first then return, but we need to
50749 * know what the next host number was before increment
50750 */
50751- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
50752+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
50753 shost->dma_channel = 0xff;
50754
50755 /* These three are default values which can be overridden */
50756diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
50757index 6bb4611..0203251 100644
50758--- a/drivers/scsi/hpsa.c
50759+++ b/drivers/scsi/hpsa.c
50760@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
50761 struct reply_queue_buffer *rq = &h->reply_queue[q];
50762
50763 if (h->transMethod & CFGTBL_Trans_io_accel1)
50764- return h->access.command_completed(h, q);
50765+ return h->access->command_completed(h, q);
50766
50767 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
50768- return h->access.command_completed(h, q);
50769+ return h->access->command_completed(h, q);
50770
50771 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
50772 a = rq->head[rq->current_entry];
50773@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50774 while (!list_empty(&h->reqQ)) {
50775 c = list_entry(h->reqQ.next, struct CommandList, list);
50776 /* can't do anything if fifo is full */
50777- if ((h->access.fifo_full(h))) {
50778+ if ((h->access->fifo_full(h))) {
50779 h->fifo_recently_full = 1;
50780 dev_warn(&h->pdev->dev, "fifo full\n");
50781 break;
50782@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50783 atomic_inc(&h->commands_outstanding);
50784 spin_unlock_irqrestore(&h->lock, *flags);
50785 /* Tell the controller execute command */
50786- h->access.submit_command(h, c);
50787+ h->access->submit_command(h, c);
50788 spin_lock_irqsave(&h->lock, *flags);
50789 }
50790 }
50791@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
50792
50793 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
50794 {
50795- return h->access.command_completed(h, q);
50796+ return h->access->command_completed(h, q);
50797 }
50798
50799 static inline bool interrupt_pending(struct ctlr_info *h)
50800 {
50801- return h->access.intr_pending(h);
50802+ return h->access->intr_pending(h);
50803 }
50804
50805 static inline long interrupt_not_for_us(struct ctlr_info *h)
50806 {
50807- return (h->access.intr_pending(h) == 0) ||
50808+ return (h->access->intr_pending(h) == 0) ||
50809 (h->interrupts_enabled == 0);
50810 }
50811
50812@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
50813 if (prod_index < 0)
50814 return -ENODEV;
50815 h->product_name = products[prod_index].product_name;
50816- h->access = *(products[prod_index].access);
50817+ h->access = products[prod_index].access;
50818
50819 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
50820 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
50821@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
50822 unsigned long flags;
50823 u32 lockup_detected;
50824
50825- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50826+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50827 spin_lock_irqsave(&h->lock, flags);
50828 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
50829 if (!lockup_detected) {
50830@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
50831 }
50832
50833 /* make sure the board interrupts are off */
50834- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50835+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50836
50837 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
50838 goto clean2;
50839@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
50840 * fake ones to scoop up any residual completions.
50841 */
50842 spin_lock_irqsave(&h->lock, flags);
50843- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50844+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50845 spin_unlock_irqrestore(&h->lock, flags);
50846 free_irqs(h);
50847 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
50848@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
50849 dev_info(&h->pdev->dev, "Board READY.\n");
50850 dev_info(&h->pdev->dev,
50851 "Waiting for stale completions to drain.\n");
50852- h->access.set_intr_mask(h, HPSA_INTR_ON);
50853+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50854 msleep(10000);
50855- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50856+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50857
50858 rc = controller_reset_failed(h->cfgtable);
50859 if (rc)
50860@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
50861 h->drv_req_rescan = 0;
50862
50863 /* Turn the interrupts on so we can service requests */
50864- h->access.set_intr_mask(h, HPSA_INTR_ON);
50865+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50866
50867 hpsa_hba_inquiry(h);
50868 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
50869@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
50870 * To write all data in the battery backed cache to disks
50871 */
50872 hpsa_flush_cache(h);
50873- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50874+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50875 hpsa_free_irqs_and_disable_msix(h);
50876 }
50877
50878@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50879 CFGTBL_Trans_enable_directed_msix |
50880 (trans_support & (CFGTBL_Trans_io_accel1 |
50881 CFGTBL_Trans_io_accel2));
50882- struct access_method access = SA5_performant_access;
50883+ struct access_method *access = &SA5_performant_access;
50884
50885 /* This is a bit complicated. There are 8 registers on
50886 * the controller which we write to to tell it 8 different
50887@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50888 * perform the superfluous readl() after each command submission.
50889 */
50890 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
50891- access = SA5_performant_access_no_read;
50892+ access = &SA5_performant_access_no_read;
50893
50894 /* Controller spec: zero out this buffer. */
50895 for (i = 0; i < h->nreply_queues; i++)
50896@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50897 * enable outbound interrupt coalescing in accelerator mode;
50898 */
50899 if (trans_support & CFGTBL_Trans_io_accel1) {
50900- access = SA5_ioaccel_mode1_access;
50901+ access = &SA5_ioaccel_mode1_access;
50902 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50903 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50904 } else {
50905 if (trans_support & CFGTBL_Trans_io_accel2) {
50906- access = SA5_ioaccel_mode2_access;
50907+ access = &SA5_ioaccel_mode2_access;
50908 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50909 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50910 }
50911diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
50912index 8e06d9e..396e0a1 100644
50913--- a/drivers/scsi/hpsa.h
50914+++ b/drivers/scsi/hpsa.h
50915@@ -127,7 +127,7 @@ struct ctlr_info {
50916 unsigned int msix_vector;
50917 unsigned int msi_vector;
50918 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
50919- struct access_method access;
50920+ struct access_method *access;
50921 char hba_mode_enabled;
50922
50923 /* queue and queue Info */
50924@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
50925 }
50926
50927 static struct access_method SA5_access = {
50928- SA5_submit_command,
50929- SA5_intr_mask,
50930- SA5_fifo_full,
50931- SA5_intr_pending,
50932- SA5_completed,
50933+ .submit_command = SA5_submit_command,
50934+ .set_intr_mask = SA5_intr_mask,
50935+ .fifo_full = SA5_fifo_full,
50936+ .intr_pending = SA5_intr_pending,
50937+ .command_completed = SA5_completed,
50938 };
50939
50940 static struct access_method SA5_ioaccel_mode1_access = {
50941- SA5_submit_command,
50942- SA5_performant_intr_mask,
50943- SA5_fifo_full,
50944- SA5_ioaccel_mode1_intr_pending,
50945- SA5_ioaccel_mode1_completed,
50946+ .submit_command = SA5_submit_command,
50947+ .set_intr_mask = SA5_performant_intr_mask,
50948+ .fifo_full = SA5_fifo_full,
50949+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
50950+ .command_completed = SA5_ioaccel_mode1_completed,
50951 };
50952
50953 static struct access_method SA5_ioaccel_mode2_access = {
50954- SA5_submit_command_ioaccel2,
50955- SA5_performant_intr_mask,
50956- SA5_fifo_full,
50957- SA5_performant_intr_pending,
50958- SA5_performant_completed,
50959+ .submit_command = SA5_submit_command_ioaccel2,
50960+ .set_intr_mask = SA5_performant_intr_mask,
50961+ .fifo_full = SA5_fifo_full,
50962+ .intr_pending = SA5_performant_intr_pending,
50963+ .command_completed = SA5_performant_completed,
50964 };
50965
50966 static struct access_method SA5_performant_access = {
50967- SA5_submit_command,
50968- SA5_performant_intr_mask,
50969- SA5_fifo_full,
50970- SA5_performant_intr_pending,
50971- SA5_performant_completed,
50972+ .submit_command = SA5_submit_command,
50973+ .set_intr_mask = SA5_performant_intr_mask,
50974+ .fifo_full = SA5_fifo_full,
50975+ .intr_pending = SA5_performant_intr_pending,
50976+ .command_completed = SA5_performant_completed,
50977 };
50978
50979 static struct access_method SA5_performant_access_no_read = {
50980- SA5_submit_command_no_read,
50981- SA5_performant_intr_mask,
50982- SA5_fifo_full,
50983- SA5_performant_intr_pending,
50984- SA5_performant_completed,
50985+ .submit_command = SA5_submit_command_no_read,
50986+ .set_intr_mask = SA5_performant_intr_mask,
50987+ .fifo_full = SA5_fifo_full,
50988+ .intr_pending = SA5_performant_intr_pending,
50989+ .command_completed = SA5_performant_completed,
50990 };
50991
50992 struct board_type {
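
The SA5_*_access conversions above (like the bfa ones earlier) replace positional initializers with designated ones. Positional initializers depend on field order, which is exactly what plugins such as randomize_layout shuffle; binding by name keeps the tables correct regardless of layout, and pairs with hpsa.c now holding a pointer to one shared, constifiable access_method instead of copying it into each ctlr_info. A sketch of the resulting shape:

struct access_method {
	void (*submit_command)(void *h, void *c);
	unsigned long (*fifo_full)(void *h);
};

static void submit(void *h, void *c) { (void)h; (void)c; }
static unsigned long fifo_full(void *h) { (void)h; return 0; }

/* before: { submit, fifo_full } -- silently wrong if fields are reordered */
static const struct access_method SA5_access = {
	.submit_command	= submit,	/* bound by name, order-independent */
	.fifo_full	= fifo_full,
};

int main(void)
{
	/* hold a pointer to the shared const table, as hpsa.c now does */
	const struct access_method *access = &SA5_access;
	access->submit_command(0, 0);
	return (int)access->fifo_full(0);
}
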
50993diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
50994index 1b3a094..068e683 100644
50995--- a/drivers/scsi/libfc/fc_exch.c
50996+++ b/drivers/scsi/libfc/fc_exch.c
50997@@ -101,12 +101,12 @@ struct fc_exch_mgr {
50998 u16 pool_max_index;
50999
51000 struct {
51001- atomic_t no_free_exch;
51002- atomic_t no_free_exch_xid;
51003- atomic_t xid_not_found;
51004- atomic_t xid_busy;
51005- atomic_t seq_not_found;
51006- atomic_t non_bls_resp;
51007+ atomic_unchecked_t no_free_exch;
51008+ atomic_unchecked_t no_free_exch_xid;
51009+ atomic_unchecked_t xid_not_found;
51010+ atomic_unchecked_t xid_busy;
51011+ atomic_unchecked_t seq_not_found;
51012+ atomic_unchecked_t non_bls_resp;
51013 } stats;
51014 };
51015
51016@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51017 /* allocate memory for exchange */
51018 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51019 if (!ep) {
51020- atomic_inc(&mp->stats.no_free_exch);
51021+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51022 goto out;
51023 }
51024 memset(ep, 0, sizeof(*ep));
51025@@ -874,7 +874,7 @@ out:
51026 return ep;
51027 err:
51028 spin_unlock_bh(&pool->lock);
51029- atomic_inc(&mp->stats.no_free_exch_xid);
51030+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51031 mempool_free(ep, mp->ep_pool);
51032 return NULL;
51033 }
51034@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51035 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51036 ep = fc_exch_find(mp, xid);
51037 if (!ep) {
51038- atomic_inc(&mp->stats.xid_not_found);
51039+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51040 reject = FC_RJT_OX_ID;
51041 goto out;
51042 }
51043@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51044 ep = fc_exch_find(mp, xid);
51045 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51046 if (ep) {
51047- atomic_inc(&mp->stats.xid_busy);
51048+ atomic_inc_unchecked(&mp->stats.xid_busy);
51049 reject = FC_RJT_RX_ID;
51050 goto rel;
51051 }
51052@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51053 }
51054 xid = ep->xid; /* get our XID */
51055 } else if (!ep) {
51056- atomic_inc(&mp->stats.xid_not_found);
51057+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51058 reject = FC_RJT_RX_ID; /* XID not found */
51059 goto out;
51060 }
51061@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51062 } else {
51063 sp = &ep->seq;
51064 if (sp->id != fh->fh_seq_id) {
51065- atomic_inc(&mp->stats.seq_not_found);
51066+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51067 if (f_ctl & FC_FC_END_SEQ) {
51068 /*
51069 * Update sequence_id based on incoming last
51070@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51071
51072 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51073 if (!ep) {
51074- atomic_inc(&mp->stats.xid_not_found);
51075+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51076 goto out;
51077 }
51078 if (ep->esb_stat & ESB_ST_COMPLETE) {
51079- atomic_inc(&mp->stats.xid_not_found);
51080+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51081 goto rel;
51082 }
51083 if (ep->rxid == FC_XID_UNKNOWN)
51084 ep->rxid = ntohs(fh->fh_rx_id);
51085 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51086- atomic_inc(&mp->stats.xid_not_found);
51087+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51088 goto rel;
51089 }
51090 if (ep->did != ntoh24(fh->fh_s_id) &&
51091 ep->did != FC_FID_FLOGI) {
51092- atomic_inc(&mp->stats.xid_not_found);
51093+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51094 goto rel;
51095 }
51096 sof = fr_sof(fp);
51097@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51098 sp->ssb_stat |= SSB_ST_RESP;
51099 sp->id = fh->fh_seq_id;
51100 } else if (sp->id != fh->fh_seq_id) {
51101- atomic_inc(&mp->stats.seq_not_found);
51102+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51103 goto rel;
51104 }
51105
51106@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51107 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51108
51109 if (!sp)
51110- atomic_inc(&mp->stats.xid_not_found);
51111+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51112 else
51113- atomic_inc(&mp->stats.non_bls_resp);
51114+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51115
51116 fc_frame_free(fp);
51117 }
51118@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51119
51120 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51121 mp = ema->mp;
51122- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51123+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51124 st->fc_no_free_exch_xid +=
51125- atomic_read(&mp->stats.no_free_exch_xid);
51126- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51127- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51128- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51129- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51130+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51131+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51132+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51133+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51134+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51135 }
51136 }
51137 EXPORT_SYMBOL(fc_exch_update_stats);
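
fc_exch.c, hosts.c, the lpfc hunks and others switch pure statistics counters from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT, plain atomic_inc() traps on wraparound to stop reference-count-overflow exploits; counters that only feed stats and may legitimately wrap opt out via the *_unchecked variants. A sketch of the assumed shape (the real implementation is per-arch assembly, so this is illustration only):

typedef struct {
	int counter;
} atomic_unchecked_t;

/* No overflow trap: wrapping a stats counter is harmless, unlike wrapping
 * a reference count. */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
	static atomic_unchecked_t xid_not_found;
	atomic_inc_unchecked(&xid_not_found);
	return atomic_read_unchecked(&xid_not_found);	/* 1 */
}
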
51138diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51139index 932d9cc..50c7ee9 100644
51140--- a/drivers/scsi/libsas/sas_ata.c
51141+++ b/drivers/scsi/libsas/sas_ata.c
51142@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51143 .postreset = ata_std_postreset,
51144 .error_handler = ata_std_error_handler,
51145 .post_internal_cmd = sas_ata_post_internal,
51146- .qc_defer = ata_std_qc_defer,
51147+ .qc_defer = ata_std_qc_defer,
51148 .qc_prep = ata_noop_qc_prep,
51149 .qc_issue = sas_ata_qc_issue,
51150 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51151diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51152index 434e903..5a4a79b 100644
51153--- a/drivers/scsi/lpfc/lpfc.h
51154+++ b/drivers/scsi/lpfc/lpfc.h
51155@@ -430,7 +430,7 @@ struct lpfc_vport {
51156 struct dentry *debug_nodelist;
51157 struct dentry *vport_debugfs_root;
51158 struct lpfc_debugfs_trc *disc_trc;
51159- atomic_t disc_trc_cnt;
51160+ atomic_unchecked_t disc_trc_cnt;
51161 #endif
51162 uint8_t stat_data_enabled;
51163 uint8_t stat_data_blocked;
51164@@ -880,8 +880,8 @@ struct lpfc_hba {
51165 struct timer_list fabric_block_timer;
51166 unsigned long bit_flags;
51167 #define FABRIC_COMANDS_BLOCKED 0
51168- atomic_t num_rsrc_err;
51169- atomic_t num_cmd_success;
51170+ atomic_unchecked_t num_rsrc_err;
51171+ atomic_unchecked_t num_cmd_success;
51172 unsigned long last_rsrc_error_time;
51173 unsigned long last_ramp_down_time;
51174 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51175@@ -916,7 +916,7 @@ struct lpfc_hba {
51176
51177 struct dentry *debug_slow_ring_trc;
51178 struct lpfc_debugfs_trc *slow_ring_trc;
51179- atomic_t slow_ring_trc_cnt;
51180+ atomic_unchecked_t slow_ring_trc_cnt;
51181 /* iDiag debugfs sub-directory */
51182 struct dentry *idiag_root;
51183 struct dentry *idiag_pci_cfg;
51184diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51185index 5633e7d..8272114 100644
51186--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51187+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51188@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51189
51190 #include <linux/debugfs.h>
51191
51192-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51193+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51194 static unsigned long lpfc_debugfs_start_time = 0L;
51195
51196 /* iDiag */
51197@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51198 lpfc_debugfs_enable = 0;
51199
51200 len = 0;
51201- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51202+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51203 (lpfc_debugfs_max_disc_trc - 1);
51204 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51205 dtp = vport->disc_trc + i;
51206@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51207 lpfc_debugfs_enable = 0;
51208
51209 len = 0;
51210- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51211+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51212 (lpfc_debugfs_max_slow_ring_trc - 1);
51213 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51214 dtp = phba->slow_ring_trc + i;
51215@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51216 !vport || !vport->disc_trc)
51217 return;
51218
51219- index = atomic_inc_return(&vport->disc_trc_cnt) &
51220+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51221 (lpfc_debugfs_max_disc_trc - 1);
51222 dtp = vport->disc_trc + index;
51223 dtp->fmt = fmt;
51224 dtp->data1 = data1;
51225 dtp->data2 = data2;
51226 dtp->data3 = data3;
51227- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51228+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51229 dtp->jif = jiffies;
51230 #endif
51231 return;
51232@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51233 !phba || !phba->slow_ring_trc)
51234 return;
51235
51236- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51237+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51238 (lpfc_debugfs_max_slow_ring_trc - 1);
51239 dtp = phba->slow_ring_trc + index;
51240 dtp->fmt = fmt;
51241 dtp->data1 = data1;
51242 dtp->data2 = data2;
51243 dtp->data3 = data3;
51244- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51245+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51246 dtp->jif = jiffies;
51247 #endif
51248 return;
51249@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51250 "slow_ring buffer\n");
51251 goto debug_failed;
51252 }
51253- atomic_set(&phba->slow_ring_trc_cnt, 0);
51254+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51255 memset(phba->slow_ring_trc, 0,
51256 (sizeof(struct lpfc_debugfs_trc) *
51257 lpfc_debugfs_max_slow_ring_trc));
51258@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51259 "buffer\n");
51260 goto debug_failed;
51261 }
51262- atomic_set(&vport->disc_trc_cnt, 0);
51263+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51264
51265 snprintf(name, sizeof(name), "discovery_trace");
51266 vport->debug_disc_trc =
51267diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51268index 0b2c53a..aec2b45 100644
51269--- a/drivers/scsi/lpfc/lpfc_init.c
51270+++ b/drivers/scsi/lpfc/lpfc_init.c
51271@@ -11290,8 +11290,10 @@ lpfc_init(void)
51272 "misc_register returned with status %d", error);
51273
51274 if (lpfc_enable_npiv) {
51275- lpfc_transport_functions.vport_create = lpfc_vport_create;
51276- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51277+ pax_open_kernel();
51278+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51279+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51280+ pax_close_kernel();
51281 }
51282 lpfc_transport_template =
51283 fc_attach_transport(&lpfc_transport_functions);
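
lpfc_transport_functions becomes read-only under the constify plugin, so the NPIV hooks can no longer be assigned through the struct directly; the pax_open_kernel()/pax_close_kernel() pair brackets the write, and the *(void **)& casts launder the const qualifier. On x86 the pair works by toggling the CR0 write-protect bit, roughly as below (a simplified sketch; the versions added elsewhere in this patch are arch-specific):

        static inline unsigned long pax_open_kernel(void)
        {
                preempt_disable();
                barrier();
                write_cr0(read_cr0() & ~X86_CR0_WP);    /* allow writes to RO pages */
                return 0;
        }

        static inline unsigned long pax_close_kernel(void)
        {
                write_cr0(read_cr0() | X86_CR0_WP);     /* re-arm write protection */
                barrier();
                preempt_enable();
                return 0;
        }
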
51284diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51285index 4f9222e..f1850e3 100644
51286--- a/drivers/scsi/lpfc/lpfc_scsi.c
51287+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51288@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51289 unsigned long expires;
51290
51291 spin_lock_irqsave(&phba->hbalock, flags);
51292- atomic_inc(&phba->num_rsrc_err);
51293+ atomic_inc_unchecked(&phba->num_rsrc_err);
51294 phba->last_rsrc_error_time = jiffies;
51295
51296 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51297@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51298 unsigned long num_rsrc_err, num_cmd_success;
51299 int i;
51300
51301- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51302- num_cmd_success = atomic_read(&phba->num_cmd_success);
51303+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51304+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51305
51306 /*
51307 * The error and success command counters are global per
51308@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51309 }
51310 }
51311 lpfc_destroy_vport_work_array(phba, vports);
51312- atomic_set(&phba->num_rsrc_err, 0);
51313- atomic_set(&phba->num_cmd_success, 0);
51314+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51315+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51316 }
51317
51318 /**
51319diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51320index 6a1c036..38e0e8d 100644
51321--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51322+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51323@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
51324 {
51325 struct scsi_device *sdev = to_scsi_device(dev);
51326 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51327- static struct _raid_device *raid_device;
51328+ struct _raid_device *raid_device;
51329 unsigned long flags;
51330 Mpi2RaidVolPage0_t vol_pg0;
51331 Mpi2ConfigReply_t mpi_reply;
51332@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
51333 {
51334 struct scsi_device *sdev = to_scsi_device(dev);
51335 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51336- static struct _raid_device *raid_device;
51337+ struct _raid_device *raid_device;
51338 unsigned long flags;
51339 Mpi2RaidVolPage0_t vol_pg0;
51340 Mpi2ConfigReply_t mpi_reply;
51341@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51342 Mpi2EventDataIrOperationStatus_t *event_data =
51343 (Mpi2EventDataIrOperationStatus_t *)
51344 fw_event->event_data;
51345- static struct _raid_device *raid_device;
51346+ struct _raid_device *raid_device;
51347 unsigned long flags;
51348 u16 handle;
51349
51350@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51351 u64 sas_address;
51352 struct _sas_device *sas_device;
51353 struct _sas_node *expander_device;
51354- static struct _raid_device *raid_device;
51355+ struct _raid_device *raid_device;
51356 u8 retry_count;
51357 unsigned long flags;
51358
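
The mpt2sas hunks are a plain bug fix rather than hardening: raid_device was declared static inside functions that can run concurrently for different devices, so every invocation shared one pointer slot and a lookup in one thread could be clobbered by another before it was used. Dropping static gives each call its own automatic variable. The hazard in miniature (hypothetical names):

        static void lookup_and_use(int id)
        {
                static struct dev *d;   /* one slot shared by all callers */

                d = find_dev(id);       /* thread B's store here can replace */
                use(d);                 /* ...the pointer thread A just found */
        }
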
51359diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51360index 8c27b6a..607f56e 100644
51361--- a/drivers/scsi/pmcraid.c
51362+++ b/drivers/scsi/pmcraid.c
51363@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51364 res->scsi_dev = scsi_dev;
51365 scsi_dev->hostdata = res;
51366 res->change_detected = 0;
51367- atomic_set(&res->read_failures, 0);
51368- atomic_set(&res->write_failures, 0);
51369+ atomic_set_unchecked(&res->read_failures, 0);
51370+ atomic_set_unchecked(&res->write_failures, 0);
51371 rc = 0;
51372 }
51373 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51374@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51375
51376 /* If this was a SCSI read/write command keep count of errors */
51377 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51378- atomic_inc(&res->read_failures);
51379+ atomic_inc_unchecked(&res->read_failures);
51380 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51381- atomic_inc(&res->write_failures);
51382+ atomic_inc_unchecked(&res->write_failures);
51383
51384 if (!RES_IS_GSCSI(res->cfg_entry) &&
51385 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51386@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
51387 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51388 * hrrq_id assigned here in queuecommand
51389 */
51390- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51391+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51392 pinstance->num_hrrq;
51393 cmd->cmd_done = pmcraid_io_done;
51394
51395@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
51396 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51397 * hrrq_id assigned here in queuecommand
51398 */
51399- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51400+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51401 pinstance->num_hrrq;
51402
51403 if (request_size) {
51404@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51405
51406 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51407 /* add resources only after host is added into system */
51408- if (!atomic_read(&pinstance->expose_resources))
51409+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51410 return;
51411
51412 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51413@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51414 init_waitqueue_head(&pinstance->reset_wait_q);
51415
51416 atomic_set(&pinstance->outstanding_cmds, 0);
51417- atomic_set(&pinstance->last_message_id, 0);
51418- atomic_set(&pinstance->expose_resources, 0);
51419+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51420+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51421
51422 INIT_LIST_HEAD(&pinstance->free_res_q);
51423 INIT_LIST_HEAD(&pinstance->used_res_q);
51424@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51425 /* Schedule worker thread to handle CCN and take care of adding and
51426 * removing devices to OS
51427 */
51428- atomic_set(&pinstance->expose_resources, 1);
51429+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51430 schedule_work(&pinstance->worker_q);
51431 return rc;
51432
51433diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51434index e1d150f..6c6df44 100644
51435--- a/drivers/scsi/pmcraid.h
51436+++ b/drivers/scsi/pmcraid.h
51437@@ -748,7 +748,7 @@ struct pmcraid_instance {
51438 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51439
51440 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51441- atomic_t last_message_id;
51442+ atomic_unchecked_t last_message_id;
51443
51444 /* configuration table */
51445 struct pmcraid_config_table *cfg_table;
51446@@ -777,7 +777,7 @@ struct pmcraid_instance {
51447 atomic_t outstanding_cmds;
51448
51449 /* should add/delete resources to mid-layer now ?*/
51450- atomic_t expose_resources;
51451+ atomic_unchecked_t expose_resources;
51452
51453
51454
51455@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51456 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51457 };
51458 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51459- atomic_t read_failures; /* count of failed READ commands */
51460- atomic_t write_failures; /* count of failed WRITE commands */
51461+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51462+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51463
51464 /* To indicate add/delete/modify during CCN */
51465 u8 change_detected;
51466diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51467index 82b92c4..3178171 100644
51468--- a/drivers/scsi/qla2xxx/qla_attr.c
51469+++ b/drivers/scsi/qla2xxx/qla_attr.c
51470@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51471 return 0;
51472 }
51473
51474-struct fc_function_template qla2xxx_transport_functions = {
51475+fc_function_template_no_const qla2xxx_transport_functions = {
51476
51477 .show_host_node_name = 1,
51478 .show_host_port_name = 1,
51479@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51480 .bsg_timeout = qla24xx_bsg_timeout,
51481 };
51482
51483-struct fc_function_template qla2xxx_transport_vport_functions = {
51484+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51485
51486 .show_host_node_name = 1,
51487 .show_host_port_name = 1,
51488diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51489index 7686bfe..4710893 100644
51490--- a/drivers/scsi/qla2xxx/qla_gbl.h
51491+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51492@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51493 struct device_attribute;
51494 extern struct device_attribute *qla2x00_host_attrs[];
51495 struct fc_function_template;
51496-extern struct fc_function_template qla2xxx_transport_functions;
51497-extern struct fc_function_template qla2xxx_transport_vport_functions;
51498+extern fc_function_template_no_const qla2xxx_transport_functions;
51499+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51500 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51501 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51502 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
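
qla2xxx patches its transport templates at runtime (qla_os.c below swaps in the 64-bit IOCB helpers), so the constify plugin must not make them const. fc_function_template_no_const is presumably a typedef added by this patch to scsi_transport_fc.h along the lines of:

        typedef struct fc_function_template __no_const fc_function_template_no_const;

The extern declarations in qla_gbl.h have to change in lockstep so the type seen by every translation unit matches the definition.
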
51503diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51504index cce1cbc..5b9f0fe 100644
51505--- a/drivers/scsi/qla2xxx/qla_os.c
51506+++ b/drivers/scsi/qla2xxx/qla_os.c
51507@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51508 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51509 /* Ok, a 64bit DMA mask is applicable. */
51510 ha->flags.enable_64bit_addressing = 1;
51511- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51512- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51513+ pax_open_kernel();
51514+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51515+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51516+ pax_close_kernel();
51517 return;
51518 }
51519 }
51520diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51521index 8f6d0fb..1b21097 100644
51522--- a/drivers/scsi/qla4xxx/ql4_def.h
51523+++ b/drivers/scsi/qla4xxx/ql4_def.h
51524@@ -305,7 +305,7 @@ struct ddb_entry {
51525 * (4000 only) */
51526 atomic_t relogin_timer; /* Max Time to wait for
51527 * relogin to complete */
51528- atomic_t relogin_retry_count; /* Num of times relogin has been
51529+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51530 * retried */
51531 uint32_t default_time2wait; /* Default Min time between
51532 * relogins (+aens) */
51533diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51534index 6d25879..3031a9f 100644
51535--- a/drivers/scsi/qla4xxx/ql4_os.c
51536+++ b/drivers/scsi/qla4xxx/ql4_os.c
51537@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51538 */
51539 if (!iscsi_is_session_online(cls_sess)) {
51540 /* Reset retry relogin timer */
51541- atomic_inc(&ddb_entry->relogin_retry_count);
51542+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51543 DEBUG2(ql4_printk(KERN_INFO, ha,
51544 "%s: index[%d] relogin timed out-retrying"
51545 " relogin (%d), retry (%d)\n", __func__,
51546 ddb_entry->fw_ddb_index,
51547- atomic_read(&ddb_entry->relogin_retry_count),
51548+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51549 ddb_entry->default_time2wait + 4));
51550 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51551 atomic_set(&ddb_entry->retry_relogin_timer,
51552@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51553
51554 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51555 atomic_set(&ddb_entry->relogin_timer, 0);
51556- atomic_set(&ddb_entry->relogin_retry_count, 0);
51557+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51558 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51559 ddb_entry->default_relogin_timeout =
51560 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51561diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51562index 17bb541..85f4508 100644
51563--- a/drivers/scsi/scsi_lib.c
51564+++ b/drivers/scsi/scsi_lib.c
51565@@ -1595,7 +1595,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51566 shost = sdev->host;
51567 scsi_init_cmd_errh(cmd);
51568 cmd->result = DID_NO_CONNECT << 16;
51569- atomic_inc(&cmd->device->iorequest_cnt);
51570+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51571
51572 /*
51573 * SCSI request completion path will do scsi_device_unbusy(),
51574@@ -1618,9 +1618,9 @@ static void scsi_softirq_done(struct request *rq)
51575
51576 INIT_LIST_HEAD(&cmd->eh_entry);
51577
51578- atomic_inc(&cmd->device->iodone_cnt);
51579+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51580 if (cmd->result)
51581- atomic_inc(&cmd->device->ioerr_cnt);
51582+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51583
51584 disposition = scsi_decide_disposition(cmd);
51585 if (disposition != SUCCESS &&
51586@@ -1661,7 +1661,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51587 struct Scsi_Host *host = cmd->device->host;
51588 int rtn = 0;
51589
51590- atomic_inc(&cmd->device->iorequest_cnt);
51591+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51592
51593 /* check if the device is still usable */
51594 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51595diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51596index 1ac38e7..6acc656 100644
51597--- a/drivers/scsi/scsi_sysfs.c
51598+++ b/drivers/scsi/scsi_sysfs.c
51599@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51600 char *buf) \
51601 { \
51602 struct scsi_device *sdev = to_scsi_device(dev); \
51603- unsigned long long count = atomic_read(&sdev->field); \
51604+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51605 return snprintf(buf, 20, "0x%llx\n", count); \
51606 } \
51607 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51608diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51609index 5d6f348..18778a6b 100644
51610--- a/drivers/scsi/scsi_transport_fc.c
51611+++ b/drivers/scsi/scsi_transport_fc.c
51612@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51613 * Netlink Infrastructure
51614 */
51615
51616-static atomic_t fc_event_seq;
51617+static atomic_unchecked_t fc_event_seq;
51618
51619 /**
51620 * fc_get_event_number - Obtain the next sequential FC event number
51621@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51622 u32
51623 fc_get_event_number(void)
51624 {
51625- return atomic_add_return(1, &fc_event_seq);
51626+ return atomic_add_return_unchecked(1, &fc_event_seq);
51627 }
51628 EXPORT_SYMBOL(fc_get_event_number);
51629
51630@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51631 {
51632 int error;
51633
51634- atomic_set(&fc_event_seq, 0);
51635+ atomic_set_unchecked(&fc_event_seq, 0);
51636
51637 error = transport_class_register(&fc_host_class);
51638 if (error)
51639@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51640 char *cp;
51641
51642 *val = simple_strtoul(buf, &cp, 0);
51643- if ((*cp && (*cp != '\n')) || (*val < 0))
51644+ if (*cp && (*cp != '\n'))
51645 return -EINVAL;
51646 /*
51647 * Check for overflow; dev_loss_tmo is u32
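
The last scsi_transport_fc.c hunk deletes dead code rather than changing behaviour: *val is an unsigned long, so (*val < 0) can never be true, and gcc's -Wtype-limits flags the comparison. The real range check is the u32 overflow test that follows. The removed pattern:

        unsigned long val = simple_strtoul(buf, &cp, 0);
        if (val < 0)            /* always false for an unsigned type */
                return -EINVAL; /* (-Wtype-limits: "comparison always false") */
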
51648diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51649index 67d43e3..8cee73c 100644
51650--- a/drivers/scsi/scsi_transport_iscsi.c
51651+++ b/drivers/scsi/scsi_transport_iscsi.c
51652@@ -79,7 +79,7 @@ struct iscsi_internal {
51653 struct transport_container session_cont;
51654 };
51655
51656-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51657+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51658 static struct workqueue_struct *iscsi_eh_timer_workq;
51659
51660 static DEFINE_IDA(iscsi_sess_ida);
51661@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51662 int err;
51663
51664 ihost = shost->shost_data;
51665- session->sid = atomic_add_return(1, &iscsi_session_nr);
51666+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51667
51668 if (target_id == ISCSI_MAX_TARGET) {
51669 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51670@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51671 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51672 ISCSI_TRANSPORT_VERSION);
51673
51674- atomic_set(&iscsi_session_nr, 0);
51675+ atomic_set_unchecked(&iscsi_session_nr, 0);
51676
51677 err = class_register(&iscsi_transport_class);
51678 if (err)
51679diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51680index ae45bd9..c32a586 100644
51681--- a/drivers/scsi/scsi_transport_srp.c
51682+++ b/drivers/scsi/scsi_transport_srp.c
51683@@ -35,7 +35,7 @@
51684 #include "scsi_priv.h"
51685
51686 struct srp_host_attrs {
51687- atomic_t next_port_id;
51688+ atomic_unchecked_t next_port_id;
51689 };
51690 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51691
51692@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51693 struct Scsi_Host *shost = dev_to_shost(dev);
51694 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51695
51696- atomic_set(&srp_host->next_port_id, 0);
51697+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51698 return 0;
51699 }
51700
51701@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51702 rport_fast_io_fail_timedout);
51703 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51704
51705- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51706+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51707 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51708
51709 transport_setup_device(&rport->dev);
51710diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51711index 05ea0d4..5af8049 100644
51712--- a/drivers/scsi/sd.c
51713+++ b/drivers/scsi/sd.c
51714@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51715 sdkp->disk = gd;
51716 sdkp->index = index;
51717 atomic_set(&sdkp->openers, 0);
51718- atomic_set(&sdkp->device->ioerr_cnt, 0);
51719+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
51720
51721 if (!sdp->request_queue->rq_timeout) {
51722 if (sdp->type != TYPE_MOD)
51723diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
51724index dbf8e77..0d565c7 100644
51725--- a/drivers/scsi/sg.c
51726+++ b/drivers/scsi/sg.c
51727@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
51728 sdp->disk->disk_name,
51729 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
51730 NULL,
51731- (char *)arg);
51732+ (char __user *)arg);
51733 case BLKTRACESTART:
51734 return blk_trace_startstop(sdp->device->request_queue, 1);
51735 case BLKTRACESTOP:
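
The sg.c change is a sparse-annotation fix: arg is a userspace pointer and the BLKTRACESETUP path expects char __user *, so casting to a plain char * hid an address-space mismatch from static checkers. __user has no effect on generated code; in include/linux/compiler.h it is approximately:

        #ifdef __CHECKER__
        # define __user __attribute__((noderef, address_space(1)))
        #else
        # define __user
        #endif
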
51736diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
51737index 011a336..fb2b7a0 100644
51738--- a/drivers/soc/tegra/fuse/fuse-tegra.c
51739+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
51740@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
51741 return i;
51742 }
51743
51744-static struct bin_attribute fuse_bin_attr = {
51745+static bin_attribute_no_const fuse_bin_attr = {
51746 .attr = { .name = "fuse", .mode = S_IRUGO, },
51747 .read = fuse_read,
51748 };
51749diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51750index 66a70e9..f82cea4 100644
51751--- a/drivers/spi/spi.c
51752+++ b/drivers/spi/spi.c
51753@@ -2238,7 +2238,7 @@ int spi_bus_unlock(struct spi_master *master)
51754 EXPORT_SYMBOL_GPL(spi_bus_unlock);
51755
51756 /* portable code must never pass more than 32 bytes */
51757-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
51758+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
51759
51760 static u8 *buf;
51761
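
Spelling the floor as 32UL matters because the kernel's max() macro rejects operands of different types, and SPI_BUFSIZ is compared against unsigned long (size-typed) values in spi_write_then_read(); with a plain int 32 the typecheck trips on configurations where SMP_CACHE_BYTES expands to an unsigned long. The mainline macro enforces this with a pointer-comparison trick, approximately:

        #define max(x, y) ({                    \
                typeof(x) _max1 = (x);          \
                typeof(y) _max2 = (y);          \
                (void) (&_max1 == &_max2);      /* warns if types differ */ \
                _max1 > _max2 ? _max1 : _max2; })
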
51762diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
51763index b41429f..2de5373 100644
51764--- a/drivers/staging/android/timed_output.c
51765+++ b/drivers/staging/android/timed_output.c
51766@@ -25,7 +25,7 @@
51767 #include "timed_output.h"
51768
51769 static struct class *timed_output_class;
51770-static atomic_t device_count;
51771+static atomic_unchecked_t device_count;
51772
51773 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
51774 char *buf)
51775@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
51776 timed_output_class = class_create(THIS_MODULE, "timed_output");
51777 if (IS_ERR(timed_output_class))
51778 return PTR_ERR(timed_output_class);
51779- atomic_set(&device_count, 0);
51780+ atomic_set_unchecked(&device_count, 0);
51781 timed_output_class->dev_groups = timed_output_groups;
51782 }
51783
51784@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
51785 if (ret < 0)
51786 return ret;
51787
51788- tdev->index = atomic_inc_return(&device_count);
51789+ tdev->index = atomic_inc_return_unchecked(&device_count);
51790 tdev->dev = device_create(timed_output_class, NULL,
51791 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
51792 if (IS_ERR(tdev->dev))
51793diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
51794index f143cb6..6fb8255 100644
51795--- a/drivers/staging/comedi/comedi_fops.c
51796+++ b/drivers/staging/comedi/comedi_fops.c
51797@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
51798 }
51799 cfp->last_attached = dev->attached;
51800 cfp->last_detach_count = dev->detach_count;
51801- ACCESS_ONCE(cfp->read_subdev) = read_s;
51802- ACCESS_ONCE(cfp->write_subdev) = write_s;
51803+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
51804+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
51805 }
51806
51807 static void comedi_file_check(struct file *file)
51808@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51809 !(s_old->async->cmd.flags & CMDF_WRITE))
51810 return -EBUSY;
51811
51812- ACCESS_ONCE(cfp->read_subdev) = s_new;
51813+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
51814 return 0;
51815 }
51816
51817@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51818 (s_old->async->cmd.flags & CMDF_WRITE))
51819 return -EBUSY;
51820
51821- ACCESS_ONCE(cfp->write_subdev) = s_new;
51822+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
51823 return 0;
51824 }
51825
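
This patch redefines ACCESS_ONCE() to produce a const-qualified lvalue, turning accidental writes through it into compile errors; sites that intentionally store through the macro, like comedi's subdevice pointers above, switch to ACCESS_ONCE_RW(). A sketch of the pair as redefined in compiler.h:

        #define ACCESS_ONCE(x)          (*(const volatile typeof(x) *)&(x))
        #define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))
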
51826diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
51827index 001348c..cfaac8a 100644
51828--- a/drivers/staging/gdm724x/gdm_tty.c
51829+++ b/drivers/staging/gdm724x/gdm_tty.c
51830@@ -44,7 +44,7 @@
51831 #define gdm_tty_send_control(n, r, v, d, l) (\
51832 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
51833
51834-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
51835+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
51836
51837 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
51838 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
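
GDM_TTY_READY now reads the open count atomically because this patch retypes struct tty_port's count from int to atomic_t tree-wide; the tty hunks below (cyclades, hvc, hvcs, hvsi, ipwireless, and moxa) apply the matching conversion on the driver side. The translation keeps the old semantics while making each update indivisible, e.g. from hvc_console.c below:

        /* before: read-modify-write, racy on some call paths */
        if (hp->port.count++ > 0)
                ...

        /* after: one atomic increment, then test the returned value */
        if (atomic_inc_return(&hp->port.count) > 1)
                ...
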
51839diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
51840index 503b2d7..c904931 100644
51841--- a/drivers/staging/line6/driver.c
51842+++ b/drivers/staging/line6/driver.c
51843@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51844 {
51845 struct usb_device *usbdev = line6->usbdev;
51846 int ret;
51847- unsigned char len;
51848+ unsigned char *plen;
51849
51850 /* query the serial number: */
51851 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51852@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51853 return ret;
51854 }
51855
51856+ plen = kmalloc(1, GFP_KERNEL);
51857+ if (plen == NULL)
51858+ return -ENOMEM;
51859+
51860 /* Wait for data length. We'll get 0xff until length arrives. */
51861 do {
51862 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51863 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51864 USB_DIR_IN,
51865- 0x0012, 0x0000, &len, 1,
51866+ 0x0012, 0x0000, plen, 1,
51867 LINE6_TIMEOUT * HZ);
51868 if (ret < 0) {
51869 dev_err(line6->ifcdev,
51870 "receive length failed (error %d)\n", ret);
51871+ kfree(plen);
51872 return ret;
51873 }
51874- } while (len == 0xff);
51875+ } while (*plen == 0xff);
51876
51877- if (len != datalen) {
51878+ if (*plen != datalen) {
51879 /* should be equal or something went wrong */
51880 dev_err(line6->ifcdev,
51881 "length mismatch (expected %d, got %d)\n",
51882- (int)datalen, (int)len);
51883+ (int)datalen, (int)*plen);
51884+ kfree(plen);
51885 return -EINVAL;
51886 }
51887+ kfree(plen);
51888
51889 /* receive the result: */
51890 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51891@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51892 {
51893 struct usb_device *usbdev = line6->usbdev;
51894 int ret;
51895- unsigned char status;
51896+ unsigned char *status;
51897
51898 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51899 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
51900@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51901 return ret;
51902 }
51903
51904+ status = kmalloc(1, GFP_KERNEL);
51905+ if (status == NULL)
51906+ return -ENOMEM;
51907+
51908 do {
51909 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
51910 0x67,
51911 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51912 USB_DIR_IN,
51913 0x0012, 0x0000,
51914- &status, 1, LINE6_TIMEOUT * HZ);
51915+ status, 1, LINE6_TIMEOUT * HZ);
51916
51917 if (ret < 0) {
51918 dev_err(line6->ifcdev,
51919 "receiving status failed (error %d)\n", ret);
51920+ kfree(status);
51921 return ret;
51922 }
51923- } while (status == 0xff);
51924+ } while (*status == 0xff);
51925
51926- if (status != 0) {
51927+ if (*status != 0) {
51928 dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
51929+ kfree(status);
51930 return -EINVAL;
51931 }
51932
51933+ kfree(status);
51934+
51935 return 0;
51936 }
51937
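
The line6 rework is a correctness fix: usb_control_msg() hands its buffer to the USB core for DMA, and variables on the kernel stack are not guaranteed to be DMA-safe, so the one-byte len and status locals move to kmalloc'd storage, which is. toneport.c below gets the same treatment for its ticks word. The general shape, with hypothetical names:

        u8 *buf = kmalloc(1, GFP_KERNEL);       /* heap memory is DMA-capable */
        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                              value, index, buf, 1, timeout);
        /* ... consume *buf ... */
        kfree(buf);
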
51938diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
51939index 6943715..0a93632 100644
51940--- a/drivers/staging/line6/toneport.c
51941+++ b/drivers/staging/line6/toneport.c
51942@@ -11,6 +11,7 @@
51943 */
51944
51945 #include <linux/wait.h>
51946+#include <linux/slab.h>
51947 #include <sound/control.h>
51948
51949 #include "audio.h"
51950@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
51951 */
51952 static void toneport_setup(struct usb_line6_toneport *toneport)
51953 {
51954- int ticks;
51955+ int *ticks;
51956 struct usb_line6 *line6 = &toneport->line6;
51957 struct usb_device *usbdev = line6->usbdev;
51958 u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
51959
51960+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
51961+ if (ticks == NULL)
51962+ return;
51963+
51964 /* sync time on device with host: */
51965- ticks = (int)get_seconds();
51966- line6_write_data(line6, 0x80c6, &ticks, 4);
51967+ *ticks = (int)get_seconds();
51968+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
51969+
51970+ kfree(ticks);
51971
51972 /* enable device: */
51973 toneport_send_cmd(usbdev, 0x0301, 0x0000);
51974diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
51975index 463da07..e791ce9 100644
51976--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
51977+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
51978@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
51979 return 0;
51980 }
51981
51982-sfw_test_client_ops_t brw_test_client;
51983-void brw_init_test_client(void)
51984-{
51985- brw_test_client.tso_init = brw_client_init;
51986- brw_test_client.tso_fini = brw_client_fini;
51987- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
51988- brw_test_client.tso_done_rpc = brw_client_done_rpc;
51989+sfw_test_client_ops_t brw_test_client = {
51990+ .tso_init = brw_client_init,
51991+ .tso_fini = brw_client_fini,
51992+ .tso_prep_rpc = brw_client_prep_rpc,
51993+ .tso_done_rpc = brw_client_done_rpc,
51994 };
51995
51996 srpc_service_t brw_test_service;
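
The selftest hunks replace runtime population of brw_test_client (and ping_test_client further down) with a C99 designated initializer, so the *_init_test_client() helpers disappear and framework.c drops their calls. Building the ops table at compile time is what allows the constify plugin to place it in read-only memory. The pattern, with hypothetical names:

        /* before: writable struct, filled in at startup */
        sfw_test_client_ops_t ops;
        void init_ops(void)
        {
                ops.tso_init = my_init;
                ops.tso_fini = my_fini;
        }

        /* after: complete at compile time, eligible to be made const/RO */
        sfw_test_client_ops_t ops = {
                .tso_init = my_init,
                .tso_fini = my_fini,
        };
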
51997diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
51998index cc9d182..8fabce3 100644
51999--- a/drivers/staging/lustre/lnet/selftest/framework.c
52000+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52001@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
52002
52003 extern sfw_test_client_ops_t ping_test_client;
52004 extern srpc_service_t ping_test_service;
52005-extern void ping_init_test_client(void);
52006 extern void ping_init_test_service(void);
52007
52008 extern sfw_test_client_ops_t brw_test_client;
52009 extern srpc_service_t brw_test_service;
52010-extern void brw_init_test_client(void);
52011 extern void brw_init_test_service(void);
52012
52013
52014@@ -1675,12 +1673,10 @@ sfw_startup (void)
52015 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52016 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52017
52018- brw_init_test_client();
52019 brw_init_test_service();
52020 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52021 LASSERT (rc == 0);
52022
52023- ping_init_test_client();
52024 ping_init_test_service();
52025 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52026 LASSERT (rc == 0);
52027diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52028index d8c0df6..5041cbb 100644
52029--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52030+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52031@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52032 return 0;
52033 }
52034
52035-sfw_test_client_ops_t ping_test_client;
52036-void ping_init_test_client(void)
52037-{
52038- ping_test_client.tso_init = ping_client_init;
52039- ping_test_client.tso_fini = ping_client_fini;
52040- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52041- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52042-}
52043+sfw_test_client_ops_t ping_test_client = {
52044+ .tso_init = ping_client_init,
52045+ .tso_fini = ping_client_fini,
52046+ .tso_prep_rpc = ping_client_prep_rpc,
52047+ .tso_done_rpc = ping_client_done_rpc,
52048+};
52049
52050 srpc_service_t ping_test_service;
52051 void ping_init_test_service(void)
52052diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52053index 83bc0a9..12ba00a 100644
52054--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52055+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52056@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52057 ldlm_completion_callback lcs_completion;
52058 ldlm_blocking_callback lcs_blocking;
52059 ldlm_glimpse_callback lcs_glimpse;
52060-};
52061+} __no_const;
52062
52063 /* ldlm_lockd.c */
52064 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
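
ldlm_callback_suite, and the staging ops structures annotated below (md_ops, hal_ops, _io_ops, the visorchipset notifier/responder tables), gain __no_const. The constify plugin treats structures consisting of function pointers as implicitly const; __no_const opts out the ones whose members really are assigned at runtime. Roughly, per the compiler.h support this patch adds:

        #ifdef CONSTIFY_PLUGIN
        # define __no_const __attribute__((no_const))
        #else
        # define __no_const
        #endif
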
52065diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52066index 2a88b80..62e7e5f 100644
52067--- a/drivers/staging/lustre/lustre/include/obd.h
52068+++ b/drivers/staging/lustre/lustre/include/obd.h
52069@@ -1362,7 +1362,7 @@ struct md_ops {
52070 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52071 * wrapper function in include/linux/obd_class.h.
52072 */
52073-};
52074+} __no_const;
52075
52076 struct lsm_operations {
52077 void (*lsm_free)(struct lov_stripe_md *);
52078diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52079index a4c252f..b21acac 100644
52080--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52081+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52082@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52083 int added = (mode == LCK_NL);
52084 int overlaps = 0;
52085 int splitted = 0;
52086- const struct ldlm_callback_suite null_cbs = { NULL };
52087+ const struct ldlm_callback_suite null_cbs = { };
52088
52089 CDEBUG(D_DLMTRACE,
52090 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52091diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52092index 83d3f08..b03adad 100644
52093--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52094+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52095@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52096 void __user *buffer, size_t *lenp, loff_t *ppos)
52097 {
52098 int rc, max_delay_cs;
52099- struct ctl_table dummy = *table;
52100+ ctl_table_no_const dummy = *table;
52101 long d;
52102
52103 dummy.data = &max_delay_cs;
52104@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52105 void __user *buffer, size_t *lenp, loff_t *ppos)
52106 {
52107 int rc, min_delay_cs;
52108- struct ctl_table dummy = *table;
52109+ ctl_table_no_const dummy = *table;
52110 long d;
52111
52112 dummy.data = &min_delay_cs;
52113@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52114 void __user *buffer, size_t *lenp, loff_t *ppos)
52115 {
52116 int rc, backoff;
52117- struct ctl_table dummy = *table;
52118+ ctl_table_no_const dummy = *table;
52119
52120 dummy.data = &backoff;
52121 dummy.proc_handler = &proc_dointvec;
52122diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52123index 2c4fc74..b04ca79 100644
52124--- a/drivers/staging/lustre/lustre/libcfs/module.c
52125+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52126@@ -315,11 +315,11 @@ out:
52127
52128
52129 struct cfs_psdev_ops libcfs_psdev_ops = {
52130- libcfs_psdev_open,
52131- libcfs_psdev_release,
52132- NULL,
52133- NULL,
52134- libcfs_ioctl
52135+ .p_open = libcfs_psdev_open,
52136+ .p_close = libcfs_psdev_release,
52137+ .p_read = NULL,
52138+ .p_write = NULL,
52139+ .p_ioctl = libcfs_ioctl
52140 };
52141
52142 extern int insert_proc(void);
52143diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52144index fcbe836..8a7ada4 100644
52145--- a/drivers/staging/octeon/ethernet-rx.c
52146+++ b/drivers/staging/octeon/ethernet-rx.c
52147@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52148 /* Increment RX stats for virtual ports */
52149 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52150 #ifdef CONFIG_64BIT
52151- atomic64_add(1,
52152+ atomic64_add_unchecked(1,
52153 (atomic64_t *)&priv->stats.rx_packets);
52154- atomic64_add(skb->len,
52155+ atomic64_add_unchecked(skb->len,
52156 (atomic64_t *)&priv->stats.rx_bytes);
52157 #else
52158- atomic_add(1,
52159+ atomic_add_unchecked(1,
52160 (atomic_t *)&priv->stats.rx_packets);
52161- atomic_add(skb->len,
52162+ atomic_add_unchecked(skb->len,
52163 (atomic_t *)&priv->stats.rx_bytes);
52164 #endif
52165 }
52166@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52167 dev->name);
52168 */
52169 #ifdef CONFIG_64BIT
52170- atomic64_add(1,
52171+ atomic64_add_unchecked(1,
52172 (atomic64_t *)&priv->stats.rx_dropped);
52173 #else
52174- atomic_add(1,
52175+ atomic_add_unchecked(1,
52176 (atomic_t *)&priv->stats.rx_dropped);
52177 #endif
52178 dev_kfree_skb_irq(skb);
52179diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52180index ee32149..052d1836 100644
52181--- a/drivers/staging/octeon/ethernet.c
52182+++ b/drivers/staging/octeon/ethernet.c
52183@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52184 * since the RX tasklet also increments it.
52185 */
52186 #ifdef CONFIG_64BIT
52187- atomic64_add(rx_status.dropped_packets,
52188- (atomic64_t *)&priv->stats.rx_dropped);
52189+ atomic64_add_unchecked(rx_status.dropped_packets,
52190+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52191 #else
52192- atomic_add(rx_status.dropped_packets,
52193- (atomic_t *)&priv->stats.rx_dropped);
52194+ atomic_add_unchecked(rx_status.dropped_packets,
52195+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52196 #endif
52197 }
52198
52199diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52200index 3b476d8..f522d68 100644
52201--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52202+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52203@@ -225,7 +225,7 @@ struct hal_ops {
52204
52205 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52206 void (*hal_reset_security_engine)(struct adapter *adapter);
52207-};
52208+} __no_const;
52209
52210 enum rt_eeprom_type {
52211 EEPROM_93C46,
52212diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52213index 070cc03..6806e37 100644
52214--- a/drivers/staging/rtl8712/rtl871x_io.h
52215+++ b/drivers/staging/rtl8712/rtl871x_io.h
52216@@ -108,7 +108,7 @@ struct _io_ops {
52217 u8 *pmem);
52218 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52219 u8 *pmem);
52220-};
52221+} __no_const;
52222
52223 struct io_req {
52224 struct list_head list;
52225diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52226index 46dad63..fe4acdc 100644
52227--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52228+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52229@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
52230 void (*device_resume)(ulong bus_no, ulong dev_no);
52231 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52232 ulong *max_size);
52233-};
52234+} __no_const;
52235
52236 /* These functions live inside visorchipset, and will be called to indicate
52237 * responses to specific events (by code outside of visorchipset).
52238@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
52239 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52240 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52241 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52242-};
52243+} __no_const;
52244
52245 /** Register functions (in the bus driver) to get called by visorchipset
52246 * whenever a bus or device appears for which this service partition is
52247diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52248index 9512af6..045bf5a 100644
52249--- a/drivers/target/sbp/sbp_target.c
52250+++ b/drivers/target/sbp/sbp_target.c
52251@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52252
52253 #define SESSION_MAINTENANCE_INTERVAL HZ
52254
52255-static atomic_t login_id = ATOMIC_INIT(0);
52256+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52257
52258 static void session_maintenance_work(struct work_struct *);
52259 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52260@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52261 login->lun = se_lun;
52262 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52263 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52264- login->login_id = atomic_inc_return(&login_id);
52265+ login->login_id = atomic_inc_return_unchecked(&login_id);
52266
52267 login->tgt_agt = sbp_target_agent_register(login);
52268 if (IS_ERR(login->tgt_agt)) {
52269diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52270index 54da2a4..3dd6f57 100644
52271--- a/drivers/target/target_core_device.c
52272+++ b/drivers/target/target_core_device.c
52273@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52274 spin_lock_init(&dev->se_tmr_lock);
52275 spin_lock_init(&dev->qf_cmd_lock);
52276 sema_init(&dev->caw_sem, 1);
52277- atomic_set(&dev->dev_ordered_id, 0);
52278+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52279 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52280 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52281 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52282diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52283index ac3cbab..f0d1dd2 100644
52284--- a/drivers/target/target_core_transport.c
52285+++ b/drivers/target/target_core_transport.c
52286@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52287 * Used to determine when ORDERED commands should go from
52288 * Dormant to Active status.
52289 */
52290- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52291+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52292 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52293 cmd->se_ordered_id, cmd->sam_task_attr,
52294 dev->transport->name);
52295diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52296index 65a98a9..d93d3a8 100644
52297--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52298+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52299@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52300 platform_set_drvdata(pdev, priv);
52301
52302 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52303- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52304- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52305+ pax_open_kernel();
52306+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52307+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52308+ pax_close_kernel();
52309 }
52310 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52311 priv, &int3400_thermal_ops,
52312diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52313index d717f3d..cae1cc3e 100644
52314--- a/drivers/thermal/of-thermal.c
52315+++ b/drivers/thermal/of-thermal.c
52316@@ -31,6 +31,7 @@
52317 #include <linux/export.h>
52318 #include <linux/string.h>
52319 #include <linux/thermal.h>
52320+#include <linux/mm.h>
52321
52322 #include "thermal_core.h"
52323
52324@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52325 tz->ops = ops;
52326 tz->sensor_data = data;
52327
52328- tzd->ops->get_temp = of_thermal_get_temp;
52329- tzd->ops->get_trend = of_thermal_get_trend;
52330- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52331+ pax_open_kernel();
52332+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52333+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52334+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52335+ pax_close_kernel();
52336 mutex_unlock(&tzd->lock);
52337
52338 return tzd;
52339@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52340 return;
52341
52342 mutex_lock(&tzd->lock);
52343- tzd->ops->get_temp = NULL;
52344- tzd->ops->get_trend = NULL;
52345- tzd->ops->set_emul_temp = NULL;
52346+ pax_open_kernel();
52347+ *(void **)&tzd->ops->get_temp = NULL;
52348+ *(void **)&tzd->ops->get_trend = NULL;
52349+ *(void **)&tzd->ops->set_emul_temp = NULL;
52350+ pax_close_kernel();
52351
52352 tz->ops = NULL;
52353 tz->sensor_data = NULL;
52354diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52355index fd66f57..48e6376 100644
52356--- a/drivers/tty/cyclades.c
52357+++ b/drivers/tty/cyclades.c
52358@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52359 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52360 info->port.count);
52361 #endif
52362- info->port.count++;
52363+ atomic_inc(&info->port.count);
52364 #ifdef CY_DEBUG_COUNT
52365 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52366- current->pid, info->port.count);
52367+ current->pid, atomic_read(&info->port.count));
52368 #endif
52369
52370 /*
52371@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52372 for (j = 0; j < cy_card[i].nports; j++) {
52373 info = &cy_card[i].ports[j];
52374
52375- if (info->port.count) {
52376+ if (atomic_read(&info->port.count)) {
52377 /* XXX is the ldisc num worth this? */
52378 struct tty_struct *tty;
52379 struct tty_ldisc *ld;
52380diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52381index 4fcec1d..5a036f7 100644
52382--- a/drivers/tty/hvc/hvc_console.c
52383+++ b/drivers/tty/hvc/hvc_console.c
52384@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52385
52386 spin_lock_irqsave(&hp->port.lock, flags);
52387 /* Check and then increment for fast path open. */
52388- if (hp->port.count++ > 0) {
52389+ if (atomic_inc_return(&hp->port.count) > 1) {
52390 spin_unlock_irqrestore(&hp->port.lock, flags);
52391 hvc_kick();
52392 return 0;
52393@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52394
52395 spin_lock_irqsave(&hp->port.lock, flags);
52396
52397- if (--hp->port.count == 0) {
52398+ if (atomic_dec_return(&hp->port.count) == 0) {
52399 spin_unlock_irqrestore(&hp->port.lock, flags);
52400 /* We are done with the tty pointer now. */
52401 tty_port_tty_set(&hp->port, NULL);
52402@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52403 */
52404 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52405 } else {
52406- if (hp->port.count < 0)
52407+ if (atomic_read(&hp->port.count) < 0)
52408 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52409- hp->vtermno, hp->port.count);
52410+ hp->vtermno, atomic_read(&hp->port.count));
52411 spin_unlock_irqrestore(&hp->port.lock, flags);
52412 }
52413 }
52414@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52415 * open->hangup case this can be called after the final close so prevent
52416 * that from happening for now.
52417 */
52418- if (hp->port.count <= 0) {
52419+ if (atomic_read(&hp->port.count) <= 0) {
52420 spin_unlock_irqrestore(&hp->port.lock, flags);
52421 return;
52422 }
52423
52424- hp->port.count = 0;
52425+ atomic_set(&hp->port.count, 0);
52426 spin_unlock_irqrestore(&hp->port.lock, flags);
52427 tty_port_tty_set(&hp->port, NULL);
52428
52429@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52430 return -EPIPE;
52431
52432 /* FIXME what's this (unprotected) check for? */
52433- if (hp->port.count <= 0)
52434+ if (atomic_read(&hp->port.count) <= 0)
52435 return -EIO;
52436
52437 spin_lock_irqsave(&hp->lock, flags);
52438diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52439index 81ff7e1..dfb7b71 100644
52440--- a/drivers/tty/hvc/hvcs.c
52441+++ b/drivers/tty/hvc/hvcs.c
52442@@ -83,6 +83,7 @@
52443 #include <asm/hvcserver.h>
52444 #include <asm/uaccess.h>
52445 #include <asm/vio.h>
52446+#include <asm/local.h>
52447
52448 /*
52449 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52450@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52451
52452 spin_lock_irqsave(&hvcsd->lock, flags);
52453
52454- if (hvcsd->port.count > 0) {
52455+ if (atomic_read(&hvcsd->port.count) > 0) {
52456 spin_unlock_irqrestore(&hvcsd->lock, flags);
52457 printk(KERN_INFO "HVCS: vterm state unchanged. "
52458 "The hvcs device node is still in use.\n");
52459@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52460 }
52461 }
52462
52463- hvcsd->port.count = 0;
52464+ atomic_set(&hvcsd->port.count, 0);
52465 hvcsd->port.tty = tty;
52466 tty->driver_data = hvcsd;
52467
52468@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52469 unsigned long flags;
52470
52471 spin_lock_irqsave(&hvcsd->lock, flags);
52472- hvcsd->port.count++;
52473+ atomic_inc(&hvcsd->port.count);
52474 hvcsd->todo_mask |= HVCS_SCHED_READ;
52475 spin_unlock_irqrestore(&hvcsd->lock, flags);
52476
52477@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52478 hvcsd = tty->driver_data;
52479
52480 spin_lock_irqsave(&hvcsd->lock, flags);
52481- if (--hvcsd->port.count == 0) {
52482+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52483
52484 vio_disable_interrupts(hvcsd->vdev);
52485
52486@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52487
52488 free_irq(irq, hvcsd);
52489 return;
52490- } else if (hvcsd->port.count < 0) {
52491+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52492 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52493 " is missmanaged.\n",
52494- hvcsd->vdev->unit_address, hvcsd->port.count);
52495+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52496 }
52497
52498 spin_unlock_irqrestore(&hvcsd->lock, flags);
52499@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52500
52501 spin_lock_irqsave(&hvcsd->lock, flags);
52502 /* Preserve this so that we know how many kref refs to put */
52503- temp_open_count = hvcsd->port.count;
52504+ temp_open_count = atomic_read(&hvcsd->port.count);
52505
52506 /*
52507 * Don't kref put inside the spinlock because the destruction
52508@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52509 tty->driver_data = NULL;
52510 hvcsd->port.tty = NULL;
52511
52512- hvcsd->port.count = 0;
52513+ atomic_set(&hvcsd->port.count, 0);
52514
52515 /* This will drop any buffered data on the floor which is OK in a hangup
52516 * scenario. */
52517@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52518 * the middle of a write operation? This is a crummy place to do this
52519 * but we want to keep it all in the spinlock.
52520 */
52521- if (hvcsd->port.count <= 0) {
52522+ if (atomic_read(&hvcsd->port.count) <= 0) {
52523 spin_unlock_irqrestore(&hvcsd->lock, flags);
52524 return -ENODEV;
52525 }
52526@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52527 {
52528 struct hvcs_struct *hvcsd = tty->driver_data;
52529
52530- if (!hvcsd || hvcsd->port.count <= 0)
52531+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52532 return 0;
52533
52534 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52535diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52536index 4190199..06d5bfa 100644
52537--- a/drivers/tty/hvc/hvsi.c
52538+++ b/drivers/tty/hvc/hvsi.c
52539@@ -85,7 +85,7 @@ struct hvsi_struct {
52540 int n_outbuf;
52541 uint32_t vtermno;
52542 uint32_t virq;
52543- atomic_t seqno; /* HVSI packet sequence number */
52544+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52545 uint16_t mctrl;
52546 uint8_t state; /* HVSI protocol state */
52547 uint8_t flags;
52548@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52549
52550 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52551 packet.hdr.len = sizeof(struct hvsi_query_response);
52552- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52553+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52554 packet.verb = VSV_SEND_VERSION_NUMBER;
52555 packet.u.version = HVSI_VERSION;
52556 packet.query_seqno = query_seqno+1;
52557@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52558
52559 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52560 packet.hdr.len = sizeof(struct hvsi_query);
52561- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52562+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52563 packet.verb = verb;
52564
52565 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52566@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52567 int wrote;
52568
52569 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52570- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52571+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52572 packet.hdr.len = sizeof(struct hvsi_control);
52573 packet.verb = VSV_SET_MODEM_CTL;
52574 packet.mask = HVSI_TSDTR;
52575@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52576 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52577
52578 packet.hdr.type = VS_DATA_PACKET_HEADER;
52579- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52580+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52581 packet.hdr.len = count + sizeof(struct hvsi_header);
52582 memcpy(&packet.data, buf, count);
52583
52584@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52585 struct hvsi_control packet __ALIGNED__;
52586
52587 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52588- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52589+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52590 packet.hdr.len = 6;
52591 packet.verb = VSV_CLOSE_PROTOCOL;
52592
52593@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52594
52595 tty_port_tty_set(&hp->port, tty);
52596 spin_lock_irqsave(&hp->lock, flags);
52597- hp->port.count++;
52598+ atomic_inc(&hp->port.count);
52599 atomic_set(&hp->seqno, 0);
52600 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52601 spin_unlock_irqrestore(&hp->lock, flags);
52602@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52603
52604 spin_lock_irqsave(&hp->lock, flags);
52605
52606- if (--hp->port.count == 0) {
52607+ if (atomic_dec_return(&hp->port.count) == 0) {
52608 tty_port_tty_set(&hp->port, NULL);
52609 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
52610
52611@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52612
52613 spin_lock_irqsave(&hp->lock, flags);
52614 }
52615- } else if (hp->port.count < 0)
52616+ } else if (atomic_read(&hp->port.count) < 0)
52617 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
52618- hp - hvsi_ports, hp->port.count);
52619+ hp - hvsi_ports, atomic_read(&hp->port.count));
52620
52621 spin_unlock_irqrestore(&hp->lock, flags);
52622 }
52623@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
52624 tty_port_tty_set(&hp->port, NULL);
52625
52626 spin_lock_irqsave(&hp->lock, flags);
52627- hp->port.count = 0;
52628+ atomic_set(&hp->port.count, 0);
52629 hp->n_outbuf = 0;
52630 spin_unlock_irqrestore(&hp->lock, flags);
52631 }
52632diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
52633index a270f04..7c77b5d 100644
52634--- a/drivers/tty/hvc/hvsi_lib.c
52635+++ b/drivers/tty/hvc/hvsi_lib.c
52636@@ -8,7 +8,7 @@
52637
52638 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
52639 {
52640- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
52641+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
52642
52643 /* Assumes that always succeeds, works in practice */
52644 return pv->put_chars(pv->termno, (char *)packet, packet->len);
52645@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
52646
52647 /* Reset state */
52648 pv->established = 0;
52649- atomic_set(&pv->seqno, 0);
52650+ atomic_set_unchecked(&pv->seqno, 0);
52651
52652 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
52653
52654diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
52655index 345cebb..d5a1e9e 100644
52656--- a/drivers/tty/ipwireless/tty.c
52657+++ b/drivers/tty/ipwireless/tty.c
52658@@ -28,6 +28,7 @@
52659 #include <linux/tty_driver.h>
52660 #include <linux/tty_flip.h>
52661 #include <linux/uaccess.h>
52662+#include <asm/local.h>
52663
52664 #include "tty.h"
52665 #include "network.h"
52666@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52667 return -ENODEV;
52668
52669 mutex_lock(&tty->ipw_tty_mutex);
52670- if (tty->port.count == 0)
52671+ if (atomic_read(&tty->port.count) == 0)
52672 tty->tx_bytes_queued = 0;
52673
52674- tty->port.count++;
52675+ atomic_inc(&tty->port.count);
52676
52677 tty->port.tty = linux_tty;
52678 linux_tty->driver_data = tty;
52679@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52680
52681 static void do_ipw_close(struct ipw_tty *tty)
52682 {
52683- tty->port.count--;
52684-
52685- if (tty->port.count == 0) {
52686+ if (atomic_dec_return(&tty->port.count) == 0) {
52687 struct tty_struct *linux_tty = tty->port.tty;
52688
52689 if (linux_tty != NULL) {
52690@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
52691 return;
52692
52693 mutex_lock(&tty->ipw_tty_mutex);
52694- if (tty->port.count == 0) {
52695+ if (atomic_read(&tty->port.count) == 0) {
52696 mutex_unlock(&tty->ipw_tty_mutex);
52697 return;
52698 }
52699@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
52700
52701 mutex_lock(&tty->ipw_tty_mutex);
52702
52703- if (!tty->port.count) {
52704+ if (!atomic_read(&tty->port.count)) {
52705 mutex_unlock(&tty->ipw_tty_mutex);
52706 return;
52707 }
52708@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
52709 return -ENODEV;
52710
52711 mutex_lock(&tty->ipw_tty_mutex);
52712- if (!tty->port.count) {
52713+ if (!atomic_read(&tty->port.count)) {
52714 mutex_unlock(&tty->ipw_tty_mutex);
52715 return -EINVAL;
52716 }
52717@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
52718 if (!tty)
52719 return -ENODEV;
52720
52721- if (!tty->port.count)
52722+ if (!atomic_read(&tty->port.count))
52723 return -EINVAL;
52724
52725 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
52726@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
52727 if (!tty)
52728 return 0;
52729
52730- if (!tty->port.count)
52731+ if (!atomic_read(&tty->port.count))
52732 return 0;
52733
52734 return tty->tx_bytes_queued;
52735@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
52736 if (!tty)
52737 return -ENODEV;
52738
52739- if (!tty->port.count)
52740+ if (!atomic_read(&tty->port.count))
52741 return -EINVAL;
52742
52743 return get_control_lines(tty);
52744@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
52745 if (!tty)
52746 return -ENODEV;
52747
52748- if (!tty->port.count)
52749+ if (!atomic_read(&tty->port.count))
52750 return -EINVAL;
52751
52752 return set_control_lines(tty, set, clear);
52753@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
52754 if (!tty)
52755 return -ENODEV;
52756
52757- if (!tty->port.count)
52758+ if (!atomic_read(&tty->port.count))
52759 return -EINVAL;
52760
52761 /* FIXME: Exactly how is the tty object locked here .. */
52762@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
52763 * are gone */
52764 mutex_lock(&ttyj->ipw_tty_mutex);
52765 }
52766- while (ttyj->port.count)
52767+ while (atomic_read(&ttyj->port.count))
52768 do_ipw_close(ttyj);
52769 ipwireless_disassociate_network_ttys(network,
52770 ttyj->channel_idx);
52771diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52772index 14c54e0..1efd4f2 100644
52773--- a/drivers/tty/moxa.c
52774+++ b/drivers/tty/moxa.c
52775@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
52776 }
52777
52778 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
52779- ch->port.count++;
52780+ atomic_inc(&ch->port.count);
52781 tty->driver_data = ch;
52782 tty_port_tty_set(&ch->port, tty);
52783 mutex_lock(&ch->port.mutex);
52784diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52785index c434376..114ce13 100644
52786--- a/drivers/tty/n_gsm.c
52787+++ b/drivers/tty/n_gsm.c
52788@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
52789 spin_lock_init(&dlci->lock);
52790 mutex_init(&dlci->mutex);
52791 dlci->fifo = &dlci->_fifo;
52792- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
52793+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
52794 kfree(dlci);
52795 return NULL;
52796 }
52797@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
52798 struct gsm_dlci *dlci = tty->driver_data;
52799 struct tty_port *port = &dlci->port;
52800
52801- port->count++;
52802+ atomic_inc(&port->count);
52803 tty_port_tty_set(port, tty);
52804
52805 dlci->modem_rx = 0;
52806diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
52807index 4ddfa60..1b7e112 100644
52808--- a/drivers/tty/n_tty.c
52809+++ b/drivers/tty/n_tty.c
52810@@ -115,7 +115,7 @@ struct n_tty_data {
52811 int minimum_to_wake;
52812
52813 /* consumer-published */
52814- size_t read_tail;
52815+ size_t read_tail __intentional_overflow(-1);
52816 size_t line_start;
52817
52818 /* protected by output lock */
52819@@ -2503,6 +2503,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
52820 {
52821 *ops = tty_ldisc_N_TTY;
52822 ops->owner = NULL;
52823- ops->refcount = ops->flags = 0;
52824+ atomic_set(&ops->refcount, 0);
52825+ ops->flags = 0;
52826 }
52827 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
52828diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
52829index 6e1f150..c3ba598 100644
52830--- a/drivers/tty/pty.c
52831+++ b/drivers/tty/pty.c
52832@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
52833 panic("Couldn't register Unix98 pts driver");
52834
52835 /* Now create the /dev/ptmx special device */
52836+ pax_open_kernel();
52837 tty_default_fops(&ptmx_fops);
52838- ptmx_fops.open = ptmx_open;
52839+ *(void **)&ptmx_fops.open = ptmx_open;
52840+ pax_close_kernel();
52841
52842 cdev_init(&ptmx_cdev, &ptmx_fops);
52843 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
52844diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
52845index 383c4c7..d408e21 100644
52846--- a/drivers/tty/rocket.c
52847+++ b/drivers/tty/rocket.c
52848@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52849 tty->driver_data = info;
52850 tty_port_tty_set(port, tty);
52851
52852- if (port->count++ == 0) {
52853+ if (atomic_inc_return(&port->count) == 1) {
52854 atomic_inc(&rp_num_ports_open);
52855
52856 #ifdef ROCKET_DEBUG_OPEN
52857@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52858 #endif
52859 }
52860 #ifdef ROCKET_DEBUG_OPEN
52861- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
52862+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
52863 #endif
52864
52865 /*
52866@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
52867 spin_unlock_irqrestore(&info->port.lock, flags);
52868 return;
52869 }
52870- if (info->port.count)
52871+ if (atomic_read(&info->port.count))
52872 atomic_dec(&rp_num_ports_open);
52873 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
52874 spin_unlock_irqrestore(&info->port.lock, flags);
52875diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
52876index aa28209..e08fb85 100644
52877--- a/drivers/tty/serial/ioc4_serial.c
52878+++ b/drivers/tty/serial/ioc4_serial.c
52879@@ -437,7 +437,7 @@ struct ioc4_soft {
52880 } is_intr_info[MAX_IOC4_INTR_ENTS];
52881
52882 /* Number of entries active in the above array */
52883- atomic_t is_num_intrs;
52884+ atomic_unchecked_t is_num_intrs;
52885 } is_intr_type[IOC4_NUM_INTR_TYPES];
52886
52887 /* is_ir_lock must be held while
52888@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
52889 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
52890 || (type == IOC4_OTHER_INTR_TYPE)));
52891
52892- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
52893+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
52894 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
52895
52896 /* Save off the lower level interrupt handler */
52897@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
52898
52899 soft = arg;
52900 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
52901- num_intrs = (int)atomic_read(
52902+ num_intrs = (int)atomic_read_unchecked(
52903 &soft->is_intr_type[intr_type].is_num_intrs);
52904
52905 this_mir = this_ir = pending_intrs(soft, intr_type);
52906diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
52907index 129dc5b..1da5bb8 100644
52908--- a/drivers/tty/serial/kgdb_nmi.c
52909+++ b/drivers/tty/serial/kgdb_nmi.c
52910@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
52911 * I/O utilities that messages sent to the console will automatically
52912 * be displayed on the dbg_io.
52913 */
52914- dbg_io_ops->is_console = true;
52915+ pax_open_kernel();
52916+ *(int *)&dbg_io_ops->is_console = true;
52917+ pax_close_kernel();
52918
52919 return 0;
52920 }
52921diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
52922index a260cde..6b2b5ce 100644
52923--- a/drivers/tty/serial/kgdboc.c
52924+++ b/drivers/tty/serial/kgdboc.c
52925@@ -24,8 +24,9 @@
52926 #define MAX_CONFIG_LEN 40
52927
52928 static struct kgdb_io kgdboc_io_ops;
52929+static struct kgdb_io kgdboc_io_ops_console;
52930
52931-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
52932+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
52933 static int configured = -1;
52934
52935 static char config[MAX_CONFIG_LEN];
52936@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
52937 kgdboc_unregister_kbd();
52938 if (configured == 1)
52939 kgdb_unregister_io_module(&kgdboc_io_ops);
52940+ else if (configured == 2)
52941+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
52942 }
52943
52944 static int configure_kgdboc(void)
52945@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
52946 int err;
52947 char *cptr = config;
52948 struct console *cons;
52949+ int is_console = 0;
52950
52951 err = kgdboc_option_setup(config);
52952 if (err || !strlen(config) || isspace(config[0]))
52953 goto noconfig;
52954
52955 err = -ENODEV;
52956- kgdboc_io_ops.is_console = 0;
52957 kgdb_tty_driver = NULL;
52958
52959 kgdboc_use_kms = 0;
52960@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
52961 int idx;
52962 if (cons->device && cons->device(cons, &idx) == p &&
52963 idx == tty_line) {
52964- kgdboc_io_ops.is_console = 1;
52965+ is_console = 1;
52966 break;
52967 }
52968 cons = cons->next;
52969@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
52970 kgdb_tty_line = tty_line;
52971
52972 do_register:
52973- err = kgdb_register_io_module(&kgdboc_io_ops);
52974+ if (is_console) {
52975+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
52976+ configured = 2;
52977+ } else {
52978+ err = kgdb_register_io_module(&kgdboc_io_ops);
52979+ configured = 1;
52980+ }
52981 if (err)
52982 goto noconfig;
52983
52984@@ -205,8 +214,6 @@ do_register:
52985 if (err)
52986 goto nmi_con_failed;
52987
52988- configured = 1;
52989-
52990 return 0;
52991
52992 nmi_con_failed:
52993@@ -223,7 +230,7 @@ noconfig:
52994 static int __init init_kgdboc(void)
52995 {
52996 /* Already configured? */
52997- if (configured == 1)
52998+ if (configured >= 1)
52999 return 0;
53000
53001 return configure_kgdboc();
53002@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53003 if (config[len - 1] == '\n')
53004 config[len - 1] = '\0';
53005
53006- if (configured == 1)
53007+ if (configured >= 1)
53008 cleanup_kgdboc();
53009
53010 /* Go and configure with the new params. */
53011@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53012 .post_exception = kgdboc_post_exp_handler,
53013 };
53014
53015+static struct kgdb_io kgdboc_io_ops_console = {
53016+ .name = "kgdboc",
53017+ .read_char = kgdboc_get_char,
53018+ .write_char = kgdboc_put_char,
53019+ .pre_exception = kgdboc_pre_exp_handler,
53020+ .post_exception = kgdboc_post_exp_handler,
53021+ .is_console = 1
53022+};
53023+
53024 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53025 /* This is only available if kgdboc is a built in for early debugging */
53026 static int __init kgdboc_early_init(char *opt)
53027diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53028index c88b522..e763029 100644
53029--- a/drivers/tty/serial/msm_serial.c
53030+++ b/drivers/tty/serial/msm_serial.c
53031@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
53032 .cons = MSM_CONSOLE,
53033 };
53034
53035-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53036+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53037
53038 static const struct of_device_id msm_uartdm_table[] = {
53039 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53040@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53041 line = pdev->id;
53042
53043 if (line < 0)
53044- line = atomic_inc_return(&msm_uart_next_id) - 1;
53045+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53046
53047 if (unlikely(line < 0 || line >= UART_NR))
53048 return -ENXIO;
53049diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53050index 107e807..d4a02fa 100644
53051--- a/drivers/tty/serial/samsung.c
53052+++ b/drivers/tty/serial/samsung.c
53053@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53054 }
53055 }
53056
53057+static int s3c64xx_serial_startup(struct uart_port *port);
53058 static int s3c24xx_serial_startup(struct uart_port *port)
53059 {
53060 struct s3c24xx_uart_port *ourport = to_ourport(port);
53061 int ret;
53062
53063+ /* Startup sequence is different for s3c64xx and higher SoC's */
53064+ if (s3c24xx_serial_has_interrupt_mask(port))
53065+ return s3c64xx_serial_startup(port);
53066+
53067 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53068 port, (unsigned long long)port->mapbase, port->membase);
53069
53070@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53071 /* setup info for port */
53072 port->dev = &platdev->dev;
53073
53074- /* Startup sequence is different for s3c64xx and higher SoC's */
53075- if (s3c24xx_serial_has_interrupt_mask(port))
53076- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53077-
53078 port->uartclk = 1;
53079
53080 if (cfg->uart_flags & UPF_CONS_FLOW) {
53081diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53082index 984605b..e538330 100644
53083--- a/drivers/tty/serial/serial_core.c
53084+++ b/drivers/tty/serial/serial_core.c
53085@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53086 state = drv->state + tty->index;
53087 port = &state->port;
53088 spin_lock_irq(&port->lock);
53089- --port->count;
53090+ atomic_dec(&port->count);
53091 spin_unlock_irq(&port->lock);
53092 return;
53093 }
53094@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53095
53096 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53097
53098- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53099+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53100 return;
53101
53102 /*
53103@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
53104 uart_flush_buffer(tty);
53105 uart_shutdown(tty, state);
53106 spin_lock_irqsave(&port->lock, flags);
53107- port->count = 0;
53108+ atomic_set(&port->count, 0);
53109 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53110 spin_unlock_irqrestore(&port->lock, flags);
53111 tty_port_tty_set(port, NULL);
53112@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53113 pr_debug("uart_open(%d) called\n", line);
53114
53115 spin_lock_irq(&port->lock);
53116- ++port->count;
53117+ atomic_inc(&port->count);
53118 spin_unlock_irq(&port->lock);
53119
53120 /*
53121diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53122index b799170..87dafd5 100644
53123--- a/drivers/tty/synclink.c
53124+++ b/drivers/tty/synclink.c
53125@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53126
53127 if (debug_level >= DEBUG_LEVEL_INFO)
53128 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53129- __FILE__,__LINE__, info->device_name, info->port.count);
53130+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53131
53132 if (tty_port_close_start(&info->port, tty, filp) == 0)
53133 goto cleanup;
53134@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53135 cleanup:
53136 if (debug_level >= DEBUG_LEVEL_INFO)
53137 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53138- tty->driver->name, info->port.count);
53139+ tty->driver->name, atomic_read(&info->port.count));
53140
53141 } /* end of mgsl_close() */
53142
53143@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53144
53145 mgsl_flush_buffer(tty);
53146 shutdown(info);
53147-
53148- info->port.count = 0;
53149+
53150+ atomic_set(&info->port.count, 0);
53151 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53152 info->port.tty = NULL;
53153
53154@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53155
53156 if (debug_level >= DEBUG_LEVEL_INFO)
53157 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53158- __FILE__,__LINE__, tty->driver->name, port->count );
53159+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53160
53161 spin_lock_irqsave(&info->irq_spinlock, flags);
53162- port->count--;
53163+ atomic_dec(&port->count);
53164 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53165 port->blocked_open++;
53166
53167@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53168
53169 if (debug_level >= DEBUG_LEVEL_INFO)
53170 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53171- __FILE__,__LINE__, tty->driver->name, port->count );
53172+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53173
53174 tty_unlock(tty);
53175 schedule();
53176@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53177
53178 /* FIXME: Racy on hangup during close wait */
53179 if (!tty_hung_up_p(filp))
53180- port->count++;
53181+ atomic_inc(&port->count);
53182 port->blocked_open--;
53183
53184 if (debug_level >= DEBUG_LEVEL_INFO)
53185 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53186- __FILE__,__LINE__, tty->driver->name, port->count );
53187+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53188
53189 if (!retval)
53190 port->flags |= ASYNC_NORMAL_ACTIVE;
53191@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53192
53193 if (debug_level >= DEBUG_LEVEL_INFO)
53194 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53195- __FILE__,__LINE__,tty->driver->name, info->port.count);
53196+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53197
53198 /* If port is closing, signal caller to try again */
53199 if (info->port.flags & ASYNC_CLOSING){
53200@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53201 spin_unlock_irqrestore(&info->netlock, flags);
53202 goto cleanup;
53203 }
53204- info->port.count++;
53205+ atomic_inc(&info->port.count);
53206 spin_unlock_irqrestore(&info->netlock, flags);
53207
53208- if (info->port.count == 1) {
53209+ if (atomic_read(&info->port.count) == 1) {
53210 /* 1st open on this device, init hardware */
53211 retval = startup(info);
53212 if (retval < 0)
53213@@ -3442,8 +3442,8 @@ cleanup:
53214 if (retval) {
53215 if (tty->count == 1)
53216 info->port.tty = NULL; /* tty layer will release tty struct */
53217- if(info->port.count)
53218- info->port.count--;
53219+ if (atomic_read(&info->port.count))
53220+ atomic_dec(&info->port.count);
53221 }
53222
53223 return retval;
53224@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53225 unsigned short new_crctype;
53226
53227 /* return error if TTY interface open */
53228- if (info->port.count)
53229+ if (atomic_read(&info->port.count))
53230 return -EBUSY;
53231
53232 switch (encoding)
53233@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53234
53235 /* arbitrate between network and tty opens */
53236 spin_lock_irqsave(&info->netlock, flags);
53237- if (info->port.count != 0 || info->netcount != 0) {
53238+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53239 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53240 spin_unlock_irqrestore(&info->netlock, flags);
53241 return -EBUSY;
53242@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53243 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53244
53245 /* return error if TTY interface open */
53246- if (info->port.count)
53247+ if (atomic_read(&info->port.count))
53248 return -EBUSY;
53249
53250 if (cmd != SIOCWANDEV)
53251diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53252index 0e8c39b..e0cb171 100644
53253--- a/drivers/tty/synclink_gt.c
53254+++ b/drivers/tty/synclink_gt.c
53255@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53256 tty->driver_data = info;
53257 info->port.tty = tty;
53258
53259- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53260+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53261
53262 /* If port is closing, signal caller to try again */
53263 if (info->port.flags & ASYNC_CLOSING){
53264@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53265 mutex_unlock(&info->port.mutex);
53266 goto cleanup;
53267 }
53268- info->port.count++;
53269+ atomic_inc(&info->port.count);
53270 spin_unlock_irqrestore(&info->netlock, flags);
53271
53272- if (info->port.count == 1) {
53273+ if (atomic_read(&info->port.count) == 1) {
53274 /* 1st open on this device, init hardware */
53275 retval = startup(info);
53276 if (retval < 0) {
53277@@ -715,8 +715,8 @@ cleanup:
53278 if (retval) {
53279 if (tty->count == 1)
53280 info->port.tty = NULL; /* tty layer will release tty struct */
53281- if(info->port.count)
53282- info->port.count--;
53283+ if(atomic_read(&info->port.count))
53284+ atomic_dec(&info->port.count);
53285 }
53286
53287 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53288@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53289
53290 if (sanity_check(info, tty->name, "close"))
53291 return;
53292- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53293+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53294
53295 if (tty_port_close_start(&info->port, tty, filp) == 0)
53296 goto cleanup;
53297@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53298 tty_port_close_end(&info->port, tty);
53299 info->port.tty = NULL;
53300 cleanup:
53301- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53302+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53303 }
53304
53305 static void hangup(struct tty_struct *tty)
53306@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53307 shutdown(info);
53308
53309 spin_lock_irqsave(&info->port.lock, flags);
53310- info->port.count = 0;
53311+ atomic_set(&info->port.count, 0);
53312 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53313 info->port.tty = NULL;
53314 spin_unlock_irqrestore(&info->port.lock, flags);
53315@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53316 unsigned short new_crctype;
53317
53318 /* return error if TTY interface open */
53319- if (info->port.count)
53320+ if (atomic_read(&info->port.count))
53321 return -EBUSY;
53322
53323 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53324@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53325
53326 /* arbitrate between network and tty opens */
53327 spin_lock_irqsave(&info->netlock, flags);
53328- if (info->port.count != 0 || info->netcount != 0) {
53329+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53330 DBGINFO(("%s hdlc_open busy\n", dev->name));
53331 spin_unlock_irqrestore(&info->netlock, flags);
53332 return -EBUSY;
53333@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53334 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53335
53336 /* return error if TTY interface open */
53337- if (info->port.count)
53338+ if (atomic_read(&info->port.count))
53339 return -EBUSY;
53340
53341 if (cmd != SIOCWANDEV)
53342@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53343 if (port == NULL)
53344 continue;
53345 spin_lock(&port->lock);
53346- if ((port->port.count || port->netcount) &&
53347+ if ((atomic_read(&port->port.count) || port->netcount) &&
53348 port->pending_bh && !port->bh_running &&
53349 !port->bh_requested) {
53350 DBGISR(("%s bh queued\n", port->device_name));
53351@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53352 add_wait_queue(&port->open_wait, &wait);
53353
53354 spin_lock_irqsave(&info->lock, flags);
53355- port->count--;
53356+ atomic_dec(&port->count);
53357 spin_unlock_irqrestore(&info->lock, flags);
53358 port->blocked_open++;
53359
53360@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53361 remove_wait_queue(&port->open_wait, &wait);
53362
53363 if (!tty_hung_up_p(filp))
53364- port->count++;
53365+ atomic_inc(&port->count);
53366 port->blocked_open--;
53367
53368 if (!retval)
53369diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53370index c3f9091..abe4601 100644
53371--- a/drivers/tty/synclinkmp.c
53372+++ b/drivers/tty/synclinkmp.c
53373@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53374
53375 if (debug_level >= DEBUG_LEVEL_INFO)
53376 printk("%s(%d):%s open(), old ref count = %d\n",
53377- __FILE__,__LINE__,tty->driver->name, info->port.count);
53378+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53379
53380 /* If port is closing, signal caller to try again */
53381 if (info->port.flags & ASYNC_CLOSING){
53382@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53383 spin_unlock_irqrestore(&info->netlock, flags);
53384 goto cleanup;
53385 }
53386- info->port.count++;
53387+ atomic_inc(&info->port.count);
53388 spin_unlock_irqrestore(&info->netlock, flags);
53389
53390- if (info->port.count == 1) {
53391+ if (atomic_read(&info->port.count) == 1) {
53392 /* 1st open on this device, init hardware */
53393 retval = startup(info);
53394 if (retval < 0)
53395@@ -796,8 +796,8 @@ cleanup:
53396 if (retval) {
53397 if (tty->count == 1)
53398 info->port.tty = NULL; /* tty layer will release tty struct */
53399- if(info->port.count)
53400- info->port.count--;
53401+ if(atomic_read(&info->port.count))
53402+ atomic_dec(&info->port.count);
53403 }
53404
53405 return retval;
53406@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53407
53408 if (debug_level >= DEBUG_LEVEL_INFO)
53409 printk("%s(%d):%s close() entry, count=%d\n",
53410- __FILE__,__LINE__, info->device_name, info->port.count);
53411+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53412
53413 if (tty_port_close_start(&info->port, tty, filp) == 0)
53414 goto cleanup;
53415@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53416 cleanup:
53417 if (debug_level >= DEBUG_LEVEL_INFO)
53418 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53419- tty->driver->name, info->port.count);
53420+ tty->driver->name, atomic_read(&info->port.count));
53421 }
53422
53423 /* Called by tty_hangup() when a hangup is signaled.
53424@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53425 shutdown(info);
53426
53427 spin_lock_irqsave(&info->port.lock, flags);
53428- info->port.count = 0;
53429+ atomic_set(&info->port.count, 0);
53430 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53431 info->port.tty = NULL;
53432 spin_unlock_irqrestore(&info->port.lock, flags);
53433@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53434 unsigned short new_crctype;
53435
53436 /* return error if TTY interface open */
53437- if (info->port.count)
53438+ if (atomic_read(&info->port.count))
53439 return -EBUSY;
53440
53441 switch (encoding)
53442@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53443
53444 /* arbitrate between network and tty opens */
53445 spin_lock_irqsave(&info->netlock, flags);
53446- if (info->port.count != 0 || info->netcount != 0) {
53447+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53448 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53449 spin_unlock_irqrestore(&info->netlock, flags);
53450 return -EBUSY;
53451@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53452 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53453
53454 /* return error if TTY interface open */
53455- if (info->port.count)
53456+ if (atomic_read(&info->port.count))
53457 return -EBUSY;
53458
53459 if (cmd != SIOCWANDEV)
53460@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53461 * do not request bottom half processing if the
53462 * device is not open in a normal mode.
53463 */
53464- if ( port && (port->port.count || port->netcount) &&
53465+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53466 port->pending_bh && !port->bh_running &&
53467 !port->bh_requested ) {
53468 if ( debug_level >= DEBUG_LEVEL_ISR )
53469@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53470
53471 if (debug_level >= DEBUG_LEVEL_INFO)
53472 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53473- __FILE__,__LINE__, tty->driver->name, port->count );
53474+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53475
53476 spin_lock_irqsave(&info->lock, flags);
53477- port->count--;
53478+ atomic_dec(&port->count);
53479 spin_unlock_irqrestore(&info->lock, flags);
53480 port->blocked_open++;
53481
53482@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53483
53484 if (debug_level >= DEBUG_LEVEL_INFO)
53485 printk("%s(%d):%s block_til_ready() count=%d\n",
53486- __FILE__,__LINE__, tty->driver->name, port->count );
53487+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53488
53489 tty_unlock(tty);
53490 schedule();
53491@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53492 set_current_state(TASK_RUNNING);
53493 remove_wait_queue(&port->open_wait, &wait);
53494 if (!tty_hung_up_p(filp))
53495- port->count++;
53496+ atomic_inc(&port->count);
53497 port->blocked_open--;
53498
53499 if (debug_level >= DEBUG_LEVEL_INFO)
53500 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53501- __FILE__,__LINE__, tty->driver->name, port->count );
53502+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53503
53504 if (!retval)
53505 port->flags |= ASYNC_NORMAL_ACTIVE;
53506diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53507index 42bad18..447d7a2 100644
53508--- a/drivers/tty/sysrq.c
53509+++ b/drivers/tty/sysrq.c
53510@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53511 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53512 size_t count, loff_t *ppos)
53513 {
53514- if (count) {
53515+ if (count && capable(CAP_SYS_ADMIN)) {
53516 char c;
53517
53518 if (get_user(c, buf))
53519diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53520index 2bb4dfc..a7f6e86 100644
53521--- a/drivers/tty/tty_io.c
53522+++ b/drivers/tty/tty_io.c
53523@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53524
53525 void tty_default_fops(struct file_operations *fops)
53526 {
53527- *fops = tty_fops;
53528+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53529 }
53530
53531 /*
53532diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53533index 3737f55..7cef448 100644
53534--- a/drivers/tty/tty_ldisc.c
53535+++ b/drivers/tty/tty_ldisc.c
53536@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53537 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53538 tty_ldiscs[disc] = new_ldisc;
53539 new_ldisc->num = disc;
53540- new_ldisc->refcount = 0;
53541+ atomic_set(&new_ldisc->refcount, 0);
53542 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53543
53544 return ret;
53545@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53546 return -EINVAL;
53547
53548 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53549- if (tty_ldiscs[disc]->refcount)
53550+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53551 ret = -EBUSY;
53552 else
53553 tty_ldiscs[disc] = NULL;
53554@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53555 if (ldops) {
53556 ret = ERR_PTR(-EAGAIN);
53557 if (try_module_get(ldops->owner)) {
53558- ldops->refcount++;
53559+ atomic_inc(&ldops->refcount);
53560 ret = ldops;
53561 }
53562 }
53563@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53564 unsigned long flags;
53565
53566 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53567- ldops->refcount--;
53568+ atomic_dec(&ldops->refcount);
53569 module_put(ldops->owner);
53570 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53571 }
53572diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53573index 40b31835..94d92ae 100644
53574--- a/drivers/tty/tty_port.c
53575+++ b/drivers/tty/tty_port.c
53576@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53577 unsigned long flags;
53578
53579 spin_lock_irqsave(&port->lock, flags);
53580- port->count = 0;
53581+ atomic_set(&port->count, 0);
53582 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53583 tty = port->tty;
53584 if (tty)
53585@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53586
53587 /* The port lock protects the port counts */
53588 spin_lock_irqsave(&port->lock, flags);
53589- port->count--;
53590+ atomic_dec(&port->count);
53591 port->blocked_open++;
53592 spin_unlock_irqrestore(&port->lock, flags);
53593
53594@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53595 we must not mess that up further */
53596 spin_lock_irqsave(&port->lock, flags);
53597 if (!tty_hung_up_p(filp))
53598- port->count++;
53599+ atomic_inc(&port->count);
53600 port->blocked_open--;
53601 if (retval == 0)
53602 port->flags |= ASYNC_NORMAL_ACTIVE;
53603@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53604 return 0;
53605
53606 spin_lock_irqsave(&port->lock, flags);
53607- if (tty->count == 1 && port->count != 1) {
53608+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
53609 printk(KERN_WARNING
53610 "tty_port_close_start: tty->count = 1 port count = %d.\n",
53611- port->count);
53612- port->count = 1;
53613+ atomic_read(&port->count));
53614+ atomic_set(&port->count, 1);
53615 }
53616- if (--port->count < 0) {
53617+ if (atomic_dec_return(&port->count) < 0) {
53618 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
53619- port->count);
53620- port->count = 0;
53621+ atomic_read(&port->count));
53622+ atomic_set(&port->count, 0);
53623 }
53624
53625- if (port->count) {
53626+ if (atomic_read(&port->count)) {
53627 spin_unlock_irqrestore(&port->lock, flags);
53628 return 0;
53629 }
53630@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
53631 struct file *filp)
53632 {
53633 spin_lock_irq(&port->lock);
53634- ++port->count;
53635+ atomic_inc(&port->count);
53636 spin_unlock_irq(&port->lock);
53637 tty_port_tty_set(port, tty);
53638
53639diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
53640index 8a89f6e..50b32af 100644
53641--- a/drivers/tty/vt/keyboard.c
53642+++ b/drivers/tty/vt/keyboard.c
53643@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
53644 kbd->kbdmode == VC_OFF) &&
53645 value != KVAL(K_SAK))
53646 return; /* SAK is allowed even in raw mode */
53647+
53648+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53649+ {
53650+ void *func = fn_handler[value];
53651+ if (func == fn_show_state || func == fn_show_ptregs ||
53652+ func == fn_show_mem)
53653+ return;
53654+ }
53655+#endif
53656+
53657 fn_handler[value](vc);
53658 }
53659
53660@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53661 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
53662 return -EFAULT;
53663
53664- if (!capable(CAP_SYS_TTY_CONFIG))
53665- perm = 0;
53666-
53667 switch (cmd) {
53668 case KDGKBENT:
53669 /* Ensure another thread doesn't free it under us */
53670@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53671 spin_unlock_irqrestore(&kbd_event_lock, flags);
53672 return put_user(val, &user_kbe->kb_value);
53673 case KDSKBENT:
53674+ if (!capable(CAP_SYS_TTY_CONFIG))
53675+ perm = 0;
53676+
53677 if (!perm)
53678 return -EPERM;
53679 if (!i && v == K_NOSUCHMAP) {
53680@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53681 int i, j, k;
53682 int ret;
53683
53684- if (!capable(CAP_SYS_TTY_CONFIG))
53685- perm = 0;
53686-
53687 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
53688 if (!kbs) {
53689 ret = -ENOMEM;
53690@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53691 kfree(kbs);
53692 return ((p && *p) ? -EOVERFLOW : 0);
53693 case KDSKBSENT:
53694+ if (!capable(CAP_SYS_TTY_CONFIG))
53695+ perm = 0;
53696+
53697 if (!perm) {
53698 ret = -EPERM;
53699 goto reterr;
53700diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
53701index 6276f13..84f2449 100644
53702--- a/drivers/uio/uio.c
53703+++ b/drivers/uio/uio.c
53704@@ -25,6 +25,7 @@
53705 #include <linux/kobject.h>
53706 #include <linux/cdev.h>
53707 #include <linux/uio_driver.h>
53708+#include <asm/local.h>
53709
53710 #define UIO_MAX_DEVICES (1U << MINORBITS)
53711
53712@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
53713 struct device_attribute *attr, char *buf)
53714 {
53715 struct uio_device *idev = dev_get_drvdata(dev);
53716- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
53717+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
53718 }
53719 static DEVICE_ATTR_RO(event);
53720
53721@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
53722 {
53723 struct uio_device *idev = info->uio_dev;
53724
53725- atomic_inc(&idev->event);
53726+ atomic_inc_unchecked(&idev->event);
53727 wake_up_interruptible(&idev->wait);
53728 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
53729 }
53730@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
53731 }
53732
53733 listener->dev = idev;
53734- listener->event_count = atomic_read(&idev->event);
53735+ listener->event_count = atomic_read_unchecked(&idev->event);
53736 filep->private_data = listener;
53737
53738 if (idev->info->open) {
53739@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
53740 return -EIO;
53741
53742 poll_wait(filep, &idev->wait, wait);
53743- if (listener->event_count != atomic_read(&idev->event))
53744+ if (listener->event_count != atomic_read_unchecked(&idev->event))
53745 return POLLIN | POLLRDNORM;
53746 return 0;
53747 }
53748@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
53749 do {
53750 set_current_state(TASK_INTERRUPTIBLE);
53751
53752- event_count = atomic_read(&idev->event);
53753+ event_count = atomic_read_unchecked(&idev->event);
53754 if (event_count != listener->event_count) {
53755 if (copy_to_user(buf, &event_count, count))
53756 retval = -EFAULT;
53757@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
53758 static int uio_find_mem_index(struct vm_area_struct *vma)
53759 {
53760 struct uio_device *idev = vma->vm_private_data;
53761+ unsigned long size;
53762
53763 if (vma->vm_pgoff < MAX_UIO_MAPS) {
53764- if (idev->info->mem[vma->vm_pgoff].size == 0)
53765+ size = idev->info->mem[vma->vm_pgoff].size;
53766+ if (size == 0)
53767+ return -1;
53768+ if (vma->vm_end - vma->vm_start > size)
53769 return -1;
53770 return (int)vma->vm_pgoff;
53771 }
53772@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
53773 idev->owner = owner;
53774 idev->info = info;
53775 init_waitqueue_head(&idev->wait);
53776- atomic_set(&idev->event, 0);
53777+ atomic_set_unchecked(&idev->event, 0);
53778
53779 ret = uio_get_minor(idev);
53780 if (ret)
53781diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
53782index 813d4d3..a71934f 100644
53783--- a/drivers/usb/atm/cxacru.c
53784+++ b/drivers/usb/atm/cxacru.c
53785@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
53786 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
53787 if (ret < 2)
53788 return -EINVAL;
53789- if (index < 0 || index > 0x7f)
53790+ if (index > 0x7f)
53791 return -EINVAL;
53792 pos += tmp;
53793
53794diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
53795index dada014..1d0d517 100644
53796--- a/drivers/usb/atm/usbatm.c
53797+++ b/drivers/usb/atm/usbatm.c
53798@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53799 if (printk_ratelimit())
53800 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
53801 __func__, vpi, vci);
53802- atomic_inc(&vcc->stats->rx_err);
53803+ atomic_inc_unchecked(&vcc->stats->rx_err);
53804 return;
53805 }
53806
53807@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53808 if (length > ATM_MAX_AAL5_PDU) {
53809 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
53810 __func__, length, vcc);
53811- atomic_inc(&vcc->stats->rx_err);
53812+ atomic_inc_unchecked(&vcc->stats->rx_err);
53813 goto out;
53814 }
53815
53816@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53817 if (sarb->len < pdu_length) {
53818 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
53819 __func__, pdu_length, sarb->len, vcc);
53820- atomic_inc(&vcc->stats->rx_err);
53821+ atomic_inc_unchecked(&vcc->stats->rx_err);
53822 goto out;
53823 }
53824
53825 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
53826 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
53827 __func__, vcc);
53828- atomic_inc(&vcc->stats->rx_err);
53829+ atomic_inc_unchecked(&vcc->stats->rx_err);
53830 goto out;
53831 }
53832
53833@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53834 if (printk_ratelimit())
53835 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
53836 __func__, length);
53837- atomic_inc(&vcc->stats->rx_drop);
53838+ atomic_inc_unchecked(&vcc->stats->rx_drop);
53839 goto out;
53840 }
53841
53842@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53843
53844 vcc->push(vcc, skb);
53845
53846- atomic_inc(&vcc->stats->rx);
53847+ atomic_inc_unchecked(&vcc->stats->rx);
53848 out:
53849 skb_trim(sarb, 0);
53850 }
53851@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
53852 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
53853
53854 usbatm_pop(vcc, skb);
53855- atomic_inc(&vcc->stats->tx);
53856+ atomic_inc_unchecked(&vcc->stats->tx);
53857
53858 skb = skb_dequeue(&instance->sndqueue);
53859 }
53860@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
53861 if (!left--)
53862 return sprintf(page,
53863 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
53864- atomic_read(&atm_dev->stats.aal5.tx),
53865- atomic_read(&atm_dev->stats.aal5.tx_err),
53866- atomic_read(&atm_dev->stats.aal5.rx),
53867- atomic_read(&atm_dev->stats.aal5.rx_err),
53868- atomic_read(&atm_dev->stats.aal5.rx_drop));
53869+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
53870+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
53871+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
53872+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
53873+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
53874
53875 if (!left--) {
53876 if (instance->disconnected)
53877diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
53878index 2a3bbdf..91d72cf 100644
53879--- a/drivers/usb/core/devices.c
53880+++ b/drivers/usb/core/devices.c
53881@@ -126,7 +126,7 @@ static const char format_endpt[] =
53882 * time it gets called.
53883 */
53884 static struct device_connect_event {
53885- atomic_t count;
53886+ atomic_unchecked_t count;
53887 wait_queue_head_t wait;
53888 } device_event = {
53889 .count = ATOMIC_INIT(1),
53890@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
53891
53892 void usbfs_conn_disc_event(void)
53893 {
53894- atomic_add(2, &device_event.count);
53895+ atomic_add_unchecked(2, &device_event.count);
53896 wake_up(&device_event.wait);
53897 }
53898
53899@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
53900
53901 poll_wait(file, &device_event.wait, wait);
53902
53903- event_count = atomic_read(&device_event.count);
53904+ event_count = atomic_read_unchecked(&device_event.count);
53905 if (file->f_version != event_count) {
53906 file->f_version = event_count;
53907 return POLLIN | POLLRDNORM;
53908diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
53909index e500243..401300f 100644
53910--- a/drivers/usb/core/devio.c
53911+++ b/drivers/usb/core/devio.c
53912@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53913 struct usb_dev_state *ps = file->private_data;
53914 struct usb_device *dev = ps->dev;
53915 ssize_t ret = 0;
53916- unsigned len;
53917+ size_t len;
53918 loff_t pos;
53919 int i;
53920
53921@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53922 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
53923 struct usb_config_descriptor *config =
53924 (struct usb_config_descriptor *)dev->rawdescriptors[i];
53925- unsigned int length = le16_to_cpu(config->wTotalLength);
53926+ size_t length = le16_to_cpu(config->wTotalLength);
53927
53928 if (*ppos < pos + length) {
53929
53930 /* The descriptor may claim to be longer than it
53931 * really is. Here is the actual allocated length. */
53932- unsigned alloclen =
53933+ size_t alloclen =
53934 le16_to_cpu(dev->config[i].desc.wTotalLength);
53935
53936- len = length - (*ppos - pos);
53937+ len = length + pos - *ppos;
53938 if (len > nbytes)
53939 len = nbytes;
53940
53941 /* Simply don't write (skip over) unallocated parts */
53942 if (alloclen > (*ppos - pos)) {
53943- alloclen -= (*ppos - pos);
53944+ alloclen = alloclen + pos - *ppos;
53945 if (copy_to_user(buf,
53946 dev->rawdescriptors[i] + (*ppos - pos),
53947 min(len, alloclen))) {
53948diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
53949index 45a915c..09f9735 100644
53950--- a/drivers/usb/core/hcd.c
53951+++ b/drivers/usb/core/hcd.c
53952@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53953 */
53954 usb_get_urb(urb);
53955 atomic_inc(&urb->use_count);
53956- atomic_inc(&urb->dev->urbnum);
53957+ atomic_inc_unchecked(&urb->dev->urbnum);
53958 usbmon_urb_submit(&hcd->self, urb);
53959
53960 /* NOTE requirements on root-hub callers (usbfs and the hub
53961@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53962 urb->hcpriv = NULL;
53963 INIT_LIST_HEAD(&urb->urb_list);
53964 atomic_dec(&urb->use_count);
53965- atomic_dec(&urb->dev->urbnum);
53966+ atomic_dec_unchecked(&urb->dev->urbnum);
53967 if (atomic_read(&urb->reject))
53968 wake_up(&usb_kill_urb_queue);
53969 usb_put_urb(urb);
53970diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
53971index b4bfa3a..008f926 100644
53972--- a/drivers/usb/core/hub.c
53973+++ b/drivers/usb/core/hub.c
53974@@ -26,6 +26,7 @@
53975 #include <linux/mutex.h>
53976 #include <linux/random.h>
53977 #include <linux/pm_qos.h>
53978+#include <linux/grsecurity.h>
53979
53980 #include <asm/uaccess.h>
53981 #include <asm/byteorder.h>
53982@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
53983 goto done;
53984 return;
53985 }
53986+
53987+ if (gr_handle_new_usb())
53988+ goto done;
53989+
53990 if (hub_is_superspeed(hub->hdev))
53991 unit_load = 150;
53992 else
53993diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
53994index f368d20..0c30ac5 100644
53995--- a/drivers/usb/core/message.c
53996+++ b/drivers/usb/core/message.c
53997@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
53998 * Return: If successful, the number of bytes transferred. Otherwise, a negative
53999 * error number.
54000 */
54001-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54002+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54003 __u8 requesttype, __u16 value, __u16 index, void *data,
54004 __u16 size, int timeout)
54005 {
54006@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54007 * If successful, 0. Otherwise a negative error number. The number of actual
54008 * bytes transferred will be stored in the @actual_length parameter.
54009 */
54010-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54011+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54012 void *data, int len, int *actual_length, int timeout)
54013 {
54014 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54015@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54016 * bytes transferred will be stored in the @actual_length parameter.
54017 *
54018 */
54019-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54020+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54021 void *data, int len, int *actual_length, int timeout)
54022 {
54023 struct urb *urb;
54024diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54025index d269738..7340cd7 100644
54026--- a/drivers/usb/core/sysfs.c
54027+++ b/drivers/usb/core/sysfs.c
54028@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54029 struct usb_device *udev;
54030
54031 udev = to_usb_device(dev);
54032- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54033+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54034 }
54035 static DEVICE_ATTR_RO(urbnum);
54036
54037diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54038index b1fb9ae..4224885 100644
54039--- a/drivers/usb/core/usb.c
54040+++ b/drivers/usb/core/usb.c
54041@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54042 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54043 dev->state = USB_STATE_ATTACHED;
54044 dev->lpm_disable_count = 1;
54045- atomic_set(&dev->urbnum, 0);
54046+ atomic_set_unchecked(&dev->urbnum, 0);
54047
54048 INIT_LIST_HEAD(&dev->ep0.urb_list);
54049 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54050diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54051index 8cfc319..4868255 100644
54052--- a/drivers/usb/early/ehci-dbgp.c
54053+++ b/drivers/usb/early/ehci-dbgp.c
54054@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54055
54056 #ifdef CONFIG_KGDB
54057 static struct kgdb_io kgdbdbgp_io_ops;
54058-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54059+static struct kgdb_io kgdbdbgp_io_ops_console;
54060+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54061 #else
54062 #define dbgp_kgdb_mode (0)
54063 #endif
54064@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54065 .write_char = kgdbdbgp_write_char,
54066 };
54067
54068+static struct kgdb_io kgdbdbgp_io_ops_console = {
54069+ .name = "kgdbdbgp",
54070+ .read_char = kgdbdbgp_read_char,
54071+ .write_char = kgdbdbgp_write_char,
54072+ .is_console = 1
54073+};
54074+
54075 static int kgdbdbgp_wait_time;
54076
54077 static int __init kgdbdbgp_parse_config(char *str)
54078@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54079 ptr++;
54080 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54081 }
54082- kgdb_register_io_module(&kgdbdbgp_io_ops);
54083- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54084+ if (early_dbgp_console.index != -1)
54085+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54086+ else
54087+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54088
54089 return 0;
54090 }
54091diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54092index e971584..03495ab 100644
54093--- a/drivers/usb/gadget/function/f_uac1.c
54094+++ b/drivers/usb/gadget/function/f_uac1.c
54095@@ -14,6 +14,7 @@
54096 #include <linux/module.h>
54097 #include <linux/device.h>
54098 #include <linux/atomic.h>
54099+#include <linux/module.h>
54100
54101 #include "u_uac1.h"
54102
54103diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54104index 491082a..dfd7d17 100644
54105--- a/drivers/usb/gadget/function/u_serial.c
54106+++ b/drivers/usb/gadget/function/u_serial.c
54107@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54108 spin_lock_irq(&port->port_lock);
54109
54110 /* already open? Great. */
54111- if (port->port.count) {
54112+ if (atomic_read(&port->port.count)) {
54113 status = 0;
54114- port->port.count++;
54115+ atomic_inc(&port->port.count);
54116
54117 /* currently opening/closing? wait ... */
54118 } else if (port->openclose) {
54119@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54120 tty->driver_data = port;
54121 port->port.tty = tty;
54122
54123- port->port.count = 1;
54124+ atomic_set(&port->port.count, 1);
54125 port->openclose = false;
54126
54127 /* if connected, start the I/O stream */
54128@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54129
54130 spin_lock_irq(&port->port_lock);
54131
54132- if (port->port.count != 1) {
54133- if (port->port.count == 0)
54134+ if (atomic_read(&port->port.count) != 1) {
54135+ if (atomic_read(&port->port.count) == 0)
54136 WARN_ON(1);
54137 else
54138- --port->port.count;
54139+ atomic_dec(&port->port.count);
54140 goto exit;
54141 }
54142
54143@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54144 * and sleep if necessary
54145 */
54146 port->openclose = true;
54147- port->port.count = 0;
54148+ atomic_set(&port->port.count, 0);
54149
54150 gser = port->port_usb;
54151 if (gser && gser->disconnect)
54152@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54153 int cond;
54154
54155 spin_lock_irq(&port->port_lock);
54156- cond = (port->port.count == 0) && !port->openclose;
54157+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54158 spin_unlock_irq(&port->port_lock);
54159 return cond;
54160 }
54161@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54162 /* if it's already open, start I/O ... and notify the serial
54163 * protocol about open/close status (connect/disconnect).
54164 */
54165- if (port->port.count) {
54166+ if (atomic_read(&port->port.count)) {
54167 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54168 gs_start_io(port);
54169 if (gser->connect)
54170@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54171
54172 port->port_usb = NULL;
54173 gser->ioport = NULL;
54174- if (port->port.count > 0 || port->openclose) {
54175+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54176 wake_up_interruptible(&port->drain_wait);
54177 if (port->port.tty)
54178 tty_hangup(port->port.tty);
54179@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54180
54181 /* finally, free any unused/unusable I/O buffers */
54182 spin_lock_irqsave(&port->port_lock, flags);
54183- if (port->port.count == 0 && !port->openclose)
54184+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54185 gs_buf_free(&port->port_write_buf);
54186 gs_free_requests(gser->out, &port->read_pool, NULL);
54187 gs_free_requests(gser->out, &port->read_queue, NULL);
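
u_serial's open/close refcount becomes an atomic_t so PaX's REFCOUNT feature can police it for overflow. Below is a minimal userspace model of the same open/close logic, using C11 atomics in place of atomic_t; gs_open_model/gs_close_model are illustrative names, and note that in the driver these paths also run under port->port_lock, so the atomicity is defensive rather than the only synchronization.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static int gs_open_model(void)
{
    if (atomic_load(&port_count)) {        /* already open? just take a ref */
        atomic_fetch_add(&port_count, 1);
        return 0;
    }
    atomic_store(&port_count, 1);          /* first opener */
    return 0;
}

static void gs_close_model(void)
{
    if (atomic_load(&port_count) != 1) {
        atomic_fetch_sub(&port_count, 1);  /* drop one of several refs */
        return;
    }
    atomic_store(&port_count, 0);          /* last close */
}

int main(void)
{
    gs_open_model();
    gs_open_model();
    gs_close_model();
    gs_close_model();
    printf("count=%d\n", atomic_load(&port_count));
    return 0;
}
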
54188diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54189index 53842a1..2bef3b6 100644
54190--- a/drivers/usb/gadget/function/u_uac1.c
54191+++ b/drivers/usb/gadget/function/u_uac1.c
54192@@ -17,6 +17,7 @@
54193 #include <linux/ctype.h>
54194 #include <linux/random.h>
54195 #include <linux/syscalls.h>
54196+#include <linux/module.h>
54197
54198 #include "u_uac1.h"
54199
54200diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54201index 118edb7..7a6415f 100644
54202--- a/drivers/usb/host/ehci-hub.c
54203+++ b/drivers/usb/host/ehci-hub.c
54204@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54205 urb->transfer_flags = URB_DIR_IN;
54206 usb_get_urb(urb);
54207 atomic_inc(&urb->use_count);
54208- atomic_inc(&urb->dev->urbnum);
54209+ atomic_inc_unchecked(&urb->dev->urbnum);
54210 urb->setup_dma = dma_map_single(
54211 hcd->self.controller,
54212 urb->setup_packet,
54213@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54214 urb->status = -EINPROGRESS;
54215 usb_get_urb(urb);
54216 atomic_inc(&urb->use_count);
54217- atomic_inc(&urb->dev->urbnum);
54218+ atomic_inc_unchecked(&urb->dev->urbnum);
54219 retval = submit_single_step_set_feature(hcd, urb, 0);
54220 if (!retval && !wait_for_completion_timeout(&done,
54221 msecs_to_jiffies(2000))) {
54222diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54223index 1db0626..4948782 100644
54224--- a/drivers/usb/host/hwa-hc.c
54225+++ b/drivers/usb/host/hwa-hc.c
54226@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54227 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54228 struct wahc *wa = &hwahc->wa;
54229 struct device *dev = &wa->usb_iface->dev;
54230- u8 mas_le[UWB_NUM_MAS/8];
54231+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54232+
54233+ if (mas_le == NULL)
54234+ return -ENOMEM;
54235
54236 /* Set the stream index */
54237 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54238@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54239 WUSB_REQ_SET_WUSB_MAS,
54240 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54241 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54242- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54243+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54244 if (result < 0)
54245 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54246 out:
54247+ kfree(mas_le);
54248+
54249 return result;
54250 }
54251
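
The __hwahc_op_bwa_set() change moves mas_le from the stack to kmalloc() and sizes the transfer with the same UWB_NUM_MAS/8 expression used for the allocation, instead of a hard-coded 32; buffers handed to usb_control_msg() may be DMA-mapped, so stack storage is unsafe for them in any case. A plain-C sketch of the resulting shape, where NUM_MAS, do_control_msg and op_bwa_set are stand-ins for the kernel names:

#include <stdlib.h>
#include <string.h>

#define NUM_MAS 256                     /* stands in for UWB_NUM_MAS */

static int do_control_msg(void *buf, size_t len) { (void)buf; return (int)len; }

static int op_bwa_set(void)
{
    int result;
    unsigned char *mas_le = malloc(NUM_MAS / 8);   /* was: u8 mas_le[...] on stack */

    if (!mas_le)
        return -12;                                /* -ENOMEM */

    memset(mas_le, 0, NUM_MAS / 8);
    result = do_control_msg(mas_le, NUM_MAS / 8);  /* length from the macro, not 32 */

    free(mas_le);                                  /* freed on every exit path */
    return result;
}

int main(void) { return op_bwa_set() < 0; }
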
54252diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54253index b3d245e..99549ed 100644
54254--- a/drivers/usb/misc/appledisplay.c
54255+++ b/drivers/usb/misc/appledisplay.c
54256@@ -84,7 +84,7 @@ struct appledisplay {
54257 struct mutex sysfslock; /* concurrent read and write */
54258 };
54259
54260-static atomic_t count_displays = ATOMIC_INIT(0);
54261+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54262 static struct workqueue_struct *wq;
54263
54264 static void appledisplay_complete(struct urb *urb)
54265@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54266
54267 /* Register backlight device */
54268 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54269- atomic_inc_return(&count_displays) - 1);
54270+ atomic_inc_return_unchecked(&count_displays) - 1);
54271 memset(&props, 0, sizeof(struct backlight_properties));
54272 props.type = BACKLIGHT_RAW;
54273 props.max_brightness = 0xff;
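
appledisplay's count_displays only generates device names, so it is switched to atomic_unchecked_t: under PaX REFCOUNT, plain atomic_t increments trap on signed overflow, and counters whose wraparound is harmless (statistics, IDs, the udlfb byte counters later in this patch) opt out via the *_unchecked variants. A single-threaded model of that distinction, assuming GCC's __builtin_add_overflow as a stand-in for the arch-specific overflow-trapping asm; the my_atomic_* types are illustrative and deliberately ignore real atomicity:

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } my_atomic_t;
typedef struct { volatile int counter; } my_atomic_unchecked_t;

static int my_atomic_inc_return(my_atomic_t *v)
{
    int new;
    if (__builtin_add_overflow(v->counter, 1, &new))
        abort();                      /* REFCOUNT: overflow is a kill condition */
    return v->counter = new;
}

static int my_atomic_inc_return_unchecked(my_atomic_unchecked_t *v)
{
    return ++v->counter;              /* statistics/IDs: wraparound is harmless */
}

int main(void)
{
    my_atomic_unchecked_t displays = { 0 };
    printf("appledisplay%d\n", my_atomic_inc_return_unchecked(&displays) - 1);
    return 0;
}
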
54274diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54275index 29fa1c3..a57b08e 100644
54276--- a/drivers/usb/serial/console.c
54277+++ b/drivers/usb/serial/console.c
54278@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
54279
54280 info->port = port;
54281
54282- ++port->port.count;
54283+ atomic_inc(&port->port.count);
54284 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54285 if (serial->type->set_termios) {
54286 /*
54287@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
54288 }
54289 /* Now that any required fake tty operations are completed restore
54290 * the tty port count */
54291- --port->port.count;
54292+ atomic_dec(&port->port.count);
54293 /* The console is special in terms of closing the device so
54294 * indicate this port is now acting as a system console. */
54295 port->port.console = 1;
54296@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
54297 put_tty:
54298 tty_kref_put(tty);
54299 reset_open_count:
54300- port->port.count = 0;
54301+ atomic_set(&port->port.count, 0);
54302 usb_autopm_put_interface(serial->interface);
54303 error_get_interface:
54304 usb_serial_put(serial);
54305@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
54306 static void usb_console_write(struct console *co,
54307 const char *buf, unsigned count)
54308 {
54309- static struct usbcons_info *info = &usbcons_info;
54310+ struct usbcons_info *info = &usbcons_info;
54311 struct usb_serial_port *port = info->port;
54312 struct usb_serial *serial;
54313 int retval = -ENODEV;
54314diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54315index 307e339..6aa97cb 100644
54316--- a/drivers/usb/storage/usb.h
54317+++ b/drivers/usb/storage/usb.h
54318@@ -63,7 +63,7 @@ struct us_unusual_dev {
54319 __u8 useProtocol;
54320 __u8 useTransport;
54321 int (*initFunction)(struct us_data *);
54322-};
54323+} __do_const;
54324
54325
54326 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
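
__do_const is a PaX constify-plugin attribute that moves structures made up of function pointers and init-time data into read-only memory. The plain-C equivalent of the discipline it enforces is simply declaring such tables const, sketched here with illustrative names modeled on the struct above:

#include <stdio.h>

struct us_unusual_dev_model {
    unsigned char useProtocol;
    unsigned char useTransport;
    int (*initFunction)(void *);
};

static int my_init(void *us) { (void)us; return 0; }

static const struct us_unusual_dev_model dev_table[] = {
    { 0x06, 0x50, my_init },   /* const: lives in .rodata, so a stray kernel
                                * write primitive cannot redirect initFunction */
};

int main(void)
{
    printf("init -> %d\n", dev_table[0].initFunction(NULL));
    return 0;
}
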
54327diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54328index a863a98..d272795 100644
54329--- a/drivers/usb/usbip/vhci.h
54330+++ b/drivers/usb/usbip/vhci.h
54331@@ -83,7 +83,7 @@ struct vhci_hcd {
54332 unsigned resuming:1;
54333 unsigned long re_timeout;
54334
54335- atomic_t seqnum;
54336+ atomic_unchecked_t seqnum;
54337
54338 /*
54339 * NOTE:
54340diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54341index 1ae9d40..c62604b 100644
54342--- a/drivers/usb/usbip/vhci_hcd.c
54343+++ b/drivers/usb/usbip/vhci_hcd.c
54344@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54345
54346 spin_lock(&vdev->priv_lock);
54347
54348- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54349+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54350 if (priv->seqnum == 0xffff)
54351 dev_info(&urb->dev->dev, "seqnum max\n");
54352
54353@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54354 return -ENOMEM;
54355 }
54356
54357- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54358+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54359 if (unlink->seqnum == 0xffff)
54360 pr_info("seqnum max\n");
54361
54362@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
54363 vdev->rhport = rhport;
54364 }
54365
54366- atomic_set(&vhci->seqnum, 0);
54367+ atomic_set_unchecked(&vhci->seqnum, 0);
54368 spin_lock_init(&vhci->lock);
54369
54370 hcd->power_budget = 0; /* no limit */
54371diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54372index 00e4a54..d676f85 100644
54373--- a/drivers/usb/usbip/vhci_rx.c
54374+++ b/drivers/usb/usbip/vhci_rx.c
54375@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54376 if (!urb) {
54377 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54378 pr_info("max seqnum %d\n",
54379- atomic_read(&the_controller->seqnum));
54380+ atomic_read_unchecked(&the_controller->seqnum));
54381 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54382 return;
54383 }
54384diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54385index edc7267..9f65ce2 100644
54386--- a/drivers/usb/wusbcore/wa-hc.h
54387+++ b/drivers/usb/wusbcore/wa-hc.h
54388@@ -240,7 +240,7 @@ struct wahc {
54389 spinlock_t xfer_list_lock;
54390 struct work_struct xfer_enqueue_work;
54391 struct work_struct xfer_error_work;
54392- atomic_t xfer_id_count;
54393+ atomic_unchecked_t xfer_id_count;
54394
54395 kernel_ulong_t quirks;
54396 };
54397@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54398 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54399 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54400 wa->dto_in_use = 0;
54401- atomic_set(&wa->xfer_id_count, 1);
54402+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54403 /* init the buf in URBs */
54404 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54405 usb_init_urb(&(wa->buf_in_urbs[index]));
54406diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54407index 69af4fd..da390d7 100644
54408--- a/drivers/usb/wusbcore/wa-xfer.c
54409+++ b/drivers/usb/wusbcore/wa-xfer.c
54410@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54411 */
54412 static void wa_xfer_id_init(struct wa_xfer *xfer)
54413 {
54414- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54415+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54416 }
54417
54418 /* Return the xfer's ID. */
54419diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54420index f018d8d..ccab63f 100644
54421--- a/drivers/vfio/vfio.c
54422+++ b/drivers/vfio/vfio.c
54423@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54424 return 0;
54425
54426 /* TODO Prevent device auto probing */
54427- WARN("Device %s added to live group %d!\n", dev_name(dev),
54428+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54429 iommu_group_id(group->iommu_group));
54430
54431 return 0;
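
The vfio fix supplies the missing condition argument: WARN(cond, fmt, ...) was being called with the format string in the condition slot, shifting every printf argument one position. A reduced model of the macro's shape; my_warn is illustrative, not the real WARN():

#include <stdio.h>

#define my_warn(condition, fmt, ...)                          \
    do {                                                      \
        if (condition)                                        \
            fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__);  \
    } while (0)    /* GNU-style ## swallows the comma when there are no varargs */

int main(void)
{
    const char *dev_name = "0000:00:02.0";
    int group_id = 3;

    /* buggy shape: my_warn("Device %s ...", dev_name, group_id) would make the
     * string the always-true condition and treat dev_name as the format. */
    my_warn(1, "Device %s added to live group %d!\n", dev_name, group_id);
    return 0;
}
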
54432diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
54433index 9484d56..d415d69 100644
54434--- a/drivers/vhost/net.c
54435+++ b/drivers/vhost/net.c
54436@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
54437 break;
54438 }
54439 /* TODO: Should check and handle checksum. */
54440-
54441- hdr.num_buffers = cpu_to_vhost16(vq, headcount);
54442 if (likely(mergeable) &&
54443- memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
54444+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
54445 offsetof(typeof(hdr), num_buffers),
54446 sizeof hdr.num_buffers)) {
54447 vq_err(vq, "Failed num_buffers write");
54448diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54449index 3bb02c6..a01ff38 100644
54450--- a/drivers/vhost/vringh.c
54451+++ b/drivers/vhost/vringh.c
54452@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54453 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54454 {
54455 __virtio16 v = 0;
54456- int rc = get_user(v, (__force __virtio16 __user *)p);
54457+ int rc = get_user(v, (__force_user __virtio16 *)p);
54458 *val = vringh16_to_cpu(vrh, v);
54459 return rc;
54460 }
54461@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54462 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54463 {
54464 __virtio16 v = cpu_to_vringh16(vrh, val);
54465- return put_user(v, (__force __virtio16 __user *)p);
54466+ return put_user(v, (__force_user __virtio16 *)p);
54467 }
54468
54469 static inline int copydesc_user(void *dst, const void *src, size_t len)
54470 {
54471- return copy_from_user(dst, (__force void __user *)src, len) ?
54472+ return copy_from_user(dst, (void __force_user *)src, len) ?
54473 -EFAULT : 0;
54474 }
54475
54476@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54477 const struct vring_used_elem *src,
54478 unsigned int num)
54479 {
54480- return copy_to_user((__force void __user *)dst, src,
54481+ return copy_to_user((void __force_user *)dst, src,
54482 sizeof(*dst) * num) ? -EFAULT : 0;
54483 }
54484
54485 static inline int xfer_from_user(void *src, void *dst, size_t len)
54486 {
54487- return copy_from_user(dst, (__force void __user *)src, len) ?
54488+ return copy_from_user(dst, (void __force_user *)src, len) ?
54489 -EFAULT : 0;
54490 }
54491
54492 static inline int xfer_to_user(void *dst, void *src, size_t len)
54493 {
54494- return copy_to_user((__force void __user *)dst, src, len) ?
54495+ return copy_to_user((void __force_user *)dst, src, len) ?
54496 -EFAULT : 0;
54497 }
54498
54499@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54500 vrh->last_used_idx = 0;
54501 vrh->vring.num = num;
54502 /* vring expects kernel addresses, but only used via accessors. */
54503- vrh->vring.desc = (__force struct vring_desc *)desc;
54504- vrh->vring.avail = (__force struct vring_avail *)avail;
54505- vrh->vring.used = (__force struct vring_used *)used;
54506+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54507+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54508+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54509 return 0;
54510 }
54511 EXPORT_SYMBOL(vringh_init_user);
54512@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54513
54514 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54515 {
54516- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54517+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54518 return 0;
54519 }
54520
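
The vringh changes swap open-coded (__force ... __user *) casts for the PaX __force_user/__force_kernel spellings, which bundle the cast override with the target address space for the static checker. A sketch of how such sparse annotations are commonly defined and used; the __force_user definition below is an illustration in the spirit of the PaX helper, not a quote of it:

/* when sparse is not running, the annotations compile away */
#ifdef __CHECKER__
# define __user        __attribute__((noderef, address_space(1)))
# define __force       __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user   __force __user   /* illustrative combination */

struct vring_desc;

static void init_user_ring(struct vring_desc **slot,
                           struct vring_desc __user *desc)
{
    /* deliberate crossing: store a user pointer in a kernel-typed field,
     * exactly the situation in vringh_init_user() above */
    *slot = (__force struct vring_desc *)desc;
}
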
54521diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54522index 84a110a..96312c3 100644
54523--- a/drivers/video/backlight/kb3886_bl.c
54524+++ b/drivers/video/backlight/kb3886_bl.c
54525@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54526 static unsigned long kb3886bl_flags;
54527 #define KB3886BL_SUSPENDED 0x01
54528
54529-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54530+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54531 {
54532 .ident = "Sahara Touch-iT",
54533 .matches = {
54534diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54535index 1b0b233..6f34c2c 100644
54536--- a/drivers/video/fbdev/arcfb.c
54537+++ b/drivers/video/fbdev/arcfb.c
54538@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54539 return -ENOSPC;
54540
54541 err = 0;
54542- if ((count + p) > fbmemlength) {
54543+ if (count > (fbmemlength - p)) {
54544 count = fbmemlength - p;
54545 err = -ENOSPC;
54546 }
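
arcfb's old bounds test (count + p) > fbmemlength can wrap in unsigned arithmetic and let an oversized write through; count > (fbmemlength - p) cannot, given that p has already been checked against fbmemlength (the -ENOSPC return just above). A small runnable demonstration with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned long fbmemlength = 4096;
    unsigned long p = 100;                     /* current file position */
    unsigned long count = (unsigned long)-64;  /* huge request from userspace */

    if ((count + p) > fbmemlength)             /* old: sum wraps to 36, test fails */
        puts("old check: caught");
    else
        puts("old check: MISSED the overflow");

    if (count > fbmemlength - p)               /* new: no wrap since p <= fbmemlength */
        puts("new check: caught");
    return 0;
}
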
54547diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54548index aedf2fb..47c9aca 100644
54549--- a/drivers/video/fbdev/aty/aty128fb.c
54550+++ b/drivers/video/fbdev/aty/aty128fb.c
54551@@ -149,7 +149,7 @@ enum {
54552 };
54553
54554 /* Must match above enum */
54555-static char * const r128_family[] = {
54556+static const char * const r128_family[] = {
54557 "AGP",
54558 "PCI",
54559 "PRO AGP",
54560diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54561index 37ec09b..98f8862 100644
54562--- a/drivers/video/fbdev/aty/atyfb_base.c
54563+++ b/drivers/video/fbdev/aty/atyfb_base.c
54564@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54565 par->accel_flags = var->accel_flags; /* hack */
54566
54567 if (var->accel_flags) {
54568- info->fbops->fb_sync = atyfb_sync;
54569+ pax_open_kernel();
54570+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54571+ pax_close_kernel();
54572 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54573 } else {
54574- info->fbops->fb_sync = NULL;
54575+ pax_open_kernel();
54576+ *(void **)&info->fbops->fb_sync = NULL;
54577+ pax_close_kernel();
54578 info->flags |= FBINFO_HWACCEL_DISABLED;
54579 }
54580
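
atyfb_set_par() now brackets writes to the const-ified fbops with pax_open_kernel()/pax_close_kernel(), and the *(void **)& cast discards the const qualifier on the slot being patched; the same bracket recurs in fb_defio, mb862xx, nvidia, smscufx, udlfb, uvesafb and vesafb below. A runnable userspace model of the bracket, assuming mprotect() as a stand-in for the kernel's write-protect toggle; struct fb_ops_model and my_sync are illustrative:

#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct fb_ops_model { int (*fb_sync)(void *info); };

static int my_sync(void *info) { (void)info; return 0; }

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    /* "constified" ops: one page holding the table, normally read-only */
    struct fb_ops_model *fbops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(fbops != MAP_FAILED);
    memset(fbops, 0, sizeof(*fbops));
    mprotect(fbops, pagesz, PROT_READ);              /* lock it down */

    mprotect(fbops, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
    *(void **)&fbops->fb_sync = (void *)my_sync;     /* the *(void **)& idiom */
    mprotect(fbops, pagesz, PROT_READ);              /* pax_close_kernel() */

    return fbops->fb_sync(NULL);
}
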
54581diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54582index 2fa0317..4983f2a 100644
54583--- a/drivers/video/fbdev/aty/mach64_cursor.c
54584+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54585@@ -8,6 +8,7 @@
54586 #include "../core/fb_draw.h"
54587
54588 #include <asm/io.h>
54589+#include <asm/pgtable.h>
54590
54591 #ifdef __sparc__
54592 #include <asm/fbio.h>
54593@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54594 info->sprite.buf_align = 16; /* and 64 lines tall. */
54595 info->sprite.flags = FB_PIXMAP_IO;
54596
54597- info->fbops->fb_cursor = atyfb_cursor;
54598+ pax_open_kernel();
54599+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54600+ pax_close_kernel();
54601
54602 return 0;
54603 }
54604diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54605index d6cab1f..112f680 100644
54606--- a/drivers/video/fbdev/core/fb_defio.c
54607+++ b/drivers/video/fbdev/core/fb_defio.c
54608@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54609
54610 BUG_ON(!fbdefio);
54611 mutex_init(&fbdefio->lock);
54612- info->fbops->fb_mmap = fb_deferred_io_mmap;
54613+ pax_open_kernel();
54614+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54615+ pax_close_kernel();
54616 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54617 INIT_LIST_HEAD(&fbdefio->pagelist);
54618 if (fbdefio->delay == 0) /* set a default of 1 s */
54619@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54620 page->mapping = NULL;
54621 }
54622
54623- info->fbops->fb_mmap = NULL;
54624+ *(void **)&info->fbops->fb_mmap = NULL;
54625 mutex_destroy(&fbdefio->lock);
54626 }
54627 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
54628diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
54629index 0705d88..d9429bf 100644
54630--- a/drivers/video/fbdev/core/fbmem.c
54631+++ b/drivers/video/fbdev/core/fbmem.c
54632@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
54633 __u32 data;
54634 int err;
54635
54636- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
54637+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
54638
54639 data = (__u32) (unsigned long) fix->smem_start;
54640 err |= put_user(data, &fix32->smem_start);
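
In do_fscreeninfo_to_user(), fix32->id is a character array, so &fix32->id and fix32->id denote the same address with different static types (pointer-to-array vs decayed pointer-to-element); the hunk normalizes the __user destination to the decayed form, which is presumably what the checker plugins expect. A short demonstration of the distinction:

#include <stdio.h>

int main(void)
{
    char id[16];

    /* same value, different static types: */
    printf("%p %p\n", (void *)id, (void *)&id);
    printf("sizeof id = %zu, sizeof &id = %zu\n",
           sizeof(id),        /* 16: the whole array */
           sizeof(&id));      /* pointer size: pointer to array of 16 */
    return 0;
}
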
54641diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
54642index 4254336..282567e 100644
54643--- a/drivers/video/fbdev/hyperv_fb.c
54644+++ b/drivers/video/fbdev/hyperv_fb.c
54645@@ -240,7 +240,7 @@ static uint screen_fb_size;
54646 static inline int synthvid_send(struct hv_device *hdev,
54647 struct synthvid_msg *msg)
54648 {
54649- static atomic64_t request_id = ATOMIC64_INIT(0);
54650+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
54651 int ret;
54652
54653 msg->pipe_hdr.type = PIPE_MSG_DATA;
54654@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
54655
54656 ret = vmbus_sendpacket(hdev->channel, msg,
54657 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
54658- atomic64_inc_return(&request_id),
54659+ atomic64_inc_return_unchecked(&request_id),
54660 VM_PKT_DATA_INBAND, 0);
54661
54662 if (ret)
54663diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
54664index 7672d2e..b56437f 100644
54665--- a/drivers/video/fbdev/i810/i810_accel.c
54666+++ b/drivers/video/fbdev/i810/i810_accel.c
54667@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
54668 }
54669 }
54670 printk("ringbuffer lockup!!!\n");
54671+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
54672 i810_report_error(mmio);
54673 par->dev_flags |= LOCKUP;
54674 info->pixmap.scan_align = 1;
54675diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54676index a01147f..5d896f8 100644
54677--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54678+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54679@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54680
54681 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54682 struct matrox_switch matrox_mystique = {
54683- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54684+ .preinit = MGA1064_preinit,
54685+ .reset = MGA1064_reset,
54686+ .init = MGA1064_init,
54687+ .restore = MGA1064_restore,
54688 };
54689 EXPORT_SYMBOL(matrox_mystique);
54690 #endif
54691
54692 #ifdef CONFIG_FB_MATROX_G
54693 struct matrox_switch matrox_G100 = {
54694- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54695+ .preinit = MGAG100_preinit,
54696+ .reset = MGAG100_reset,
54697+ .init = MGAG100_init,
54698+ .restore = MGAG100_restore,
54699 };
54700 EXPORT_SYMBOL(matrox_G100);
54701 #endif
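
Both matrox_switch instances (and matrox_millennium just below) move from positional to designated initializers; the named form survives field reordering and makes each slot's meaning explicit. Side by side, with illustrative names:

struct matrox_switch_model {
    int  (*preinit)(void *);
    void (*reset)(void *);
    int  (*init)(void *);
    void (*restore)(void *);
};

static int  m_preinit(void *m) { (void)m; return 0; }
static void m_reset(void *m)   { (void)m; }
static int  m_init(void *m)    { (void)m; return 0; }
static void m_restore(void *m) { (void)m; }

/* positional: correctness depends silently on declaration order */
static const struct matrox_switch_model old_style = {
    m_preinit, m_reset, m_init, m_restore,
};

/* designated: each slot named explicitly */
static const struct matrox_switch_model new_style = {
    .preinit = m_preinit,
    .reset   = m_reset,
    .init    = m_init,
    .restore = m_restore,
};
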
54702diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54703index 195ad7c..09743fc 100644
54704--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54705+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54706@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54707 }
54708
54709 struct matrox_switch matrox_millennium = {
54710- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54711+ .preinit = Ti3026_preinit,
54712+ .reset = Ti3026_reset,
54713+ .init = Ti3026_init,
54714+ .restore = Ti3026_restore
54715 };
54716 EXPORT_SYMBOL(matrox_millennium);
54717 #endif
54718diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54719index fe92eed..106e085 100644
54720--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54721+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54722@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54723 struct mb862xxfb_par *par = info->par;
54724
54725 if (info->var.bits_per_pixel == 32) {
54726- info->fbops->fb_fillrect = cfb_fillrect;
54727- info->fbops->fb_copyarea = cfb_copyarea;
54728- info->fbops->fb_imageblit = cfb_imageblit;
54729+ pax_open_kernel();
54730+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54731+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54732+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54733+ pax_close_kernel();
54734 } else {
54735 outreg(disp, GC_L0EM, 3);
54736- info->fbops->fb_fillrect = mb86290fb_fillrect;
54737- info->fbops->fb_copyarea = mb86290fb_copyarea;
54738- info->fbops->fb_imageblit = mb86290fb_imageblit;
54739+ pax_open_kernel();
54740+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54741+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54742+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54743+ pax_close_kernel();
54744 }
54745 outreg(draw, GDC_REG_DRAW_BASE, 0);
54746 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54747diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
54748index def0412..fed6529 100644
54749--- a/drivers/video/fbdev/nvidia/nvidia.c
54750+++ b/drivers/video/fbdev/nvidia/nvidia.c
54751@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54752 info->fix.line_length = (info->var.xres_virtual *
54753 info->var.bits_per_pixel) >> 3;
54754 if (info->var.accel_flags) {
54755- info->fbops->fb_imageblit = nvidiafb_imageblit;
54756- info->fbops->fb_fillrect = nvidiafb_fillrect;
54757- info->fbops->fb_copyarea = nvidiafb_copyarea;
54758- info->fbops->fb_sync = nvidiafb_sync;
54759+ pax_open_kernel();
54760+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54761+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54762+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54763+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54764+ pax_close_kernel();
54765 info->pixmap.scan_align = 4;
54766 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54767 info->flags |= FBINFO_READS_FAST;
54768 NVResetGraphics(info);
54769 } else {
54770- info->fbops->fb_imageblit = cfb_imageblit;
54771- info->fbops->fb_fillrect = cfb_fillrect;
54772- info->fbops->fb_copyarea = cfb_copyarea;
54773- info->fbops->fb_sync = NULL;
54774+ pax_open_kernel();
54775+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54776+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54777+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54778+ *(void **)&info->fbops->fb_sync = NULL;
54779+ pax_close_kernel();
54780 info->pixmap.scan_align = 1;
54781 info->flags |= FBINFO_HWACCEL_DISABLED;
54782 info->flags &= ~FBINFO_READS_FAST;
54783@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54784 info->pixmap.size = 8 * 1024;
54785 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54786
54787- if (!hwcur)
54788- info->fbops->fb_cursor = NULL;
54789+ if (!hwcur) {
54790+ pax_open_kernel();
54791+ *(void **)&info->fbops->fb_cursor = NULL;
54792+ pax_close_kernel();
54793+ }
54794
54795 info->var.accel_flags = (!noaccel);
54796
54797diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
54798index 2412a0d..294215b 100644
54799--- a/drivers/video/fbdev/omap2/dss/display.c
54800+++ b/drivers/video/fbdev/omap2/dss/display.c
54801@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54802 if (dssdev->name == NULL)
54803 dssdev->name = dssdev->alias;
54804
54805+ pax_open_kernel();
54806 if (drv && drv->get_resolution == NULL)
54807- drv->get_resolution = omapdss_default_get_resolution;
54808+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54809 if (drv && drv->get_recommended_bpp == NULL)
54810- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54811+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54812 if (drv && drv->get_timings == NULL)
54813- drv->get_timings = omapdss_default_get_timings;
54814+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54815+ pax_close_kernel();
54816
54817 mutex_lock(&panel_list_mutex);
54818 list_add_tail(&dssdev->panel_list, &panel_list);
54819diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
54820index 83433cb..71e9b98 100644
54821--- a/drivers/video/fbdev/s1d13xxxfb.c
54822+++ b/drivers/video/fbdev/s1d13xxxfb.c
54823@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54824
54825 switch(prod_id) {
54826 case S1D13506_PROD_ID: /* activate acceleration */
54827- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54828- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54829+ pax_open_kernel();
54830+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54831+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54832+ pax_close_kernel();
54833 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54834 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54835 break;
54836diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54837index d3013cd..95b8285 100644
54838--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
54839+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54840@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
54841 }
54842
54843 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
54844- lcdc_sys_write_index,
54845- lcdc_sys_write_data,
54846- lcdc_sys_read_data,
54847+ .write_index = lcdc_sys_write_index,
54848+ .write_data = lcdc_sys_write_data,
54849+ .read_data = lcdc_sys_read_data,
54850 };
54851
54852 static int sh_mobile_lcdc_sginit(struct fb_info *info,
54853diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
54854index 9279e5f..d5f5276 100644
54855--- a/drivers/video/fbdev/smscufx.c
54856+++ b/drivers/video/fbdev/smscufx.c
54857@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
54858 fb_deferred_io_cleanup(info);
54859 kfree(info->fbdefio);
54860 info->fbdefio = NULL;
54861- info->fbops->fb_mmap = ufx_ops_mmap;
54862+ pax_open_kernel();
54863+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
54864+ pax_close_kernel();
54865 }
54866
54867 pr_debug("released /dev/fb%d user=%d count=%d",
54868diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
54869index ff2b873..626a8d5 100644
54870--- a/drivers/video/fbdev/udlfb.c
54871+++ b/drivers/video/fbdev/udlfb.c
54872@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
54873 dlfb_urb_completion(urb);
54874
54875 error:
54876- atomic_add(bytes_sent, &dev->bytes_sent);
54877- atomic_add(bytes_identical, &dev->bytes_identical);
54878- atomic_add(width*height*2, &dev->bytes_rendered);
54879+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54880+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54881+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
54882 end_cycles = get_cycles();
54883- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54884+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54885 >> 10)), /* Kcycles */
54886 &dev->cpu_kcycles_used);
54887
54888@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
54889 dlfb_urb_completion(urb);
54890
54891 error:
54892- atomic_add(bytes_sent, &dev->bytes_sent);
54893- atomic_add(bytes_identical, &dev->bytes_identical);
54894- atomic_add(bytes_rendered, &dev->bytes_rendered);
54895+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54896+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54897+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
54898 end_cycles = get_cycles();
54899- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54900+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54901 >> 10)), /* Kcycles */
54902 &dev->cpu_kcycles_used);
54903 }
54904@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
54905 fb_deferred_io_cleanup(info);
54906 kfree(info->fbdefio);
54907 info->fbdefio = NULL;
54908- info->fbops->fb_mmap = dlfb_ops_mmap;
54909+ pax_open_kernel();
54910+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
54911+ pax_close_kernel();
54912 }
54913
54914 pr_warn("released /dev/fb%d user=%d count=%d\n",
54915@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
54916 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54917 struct dlfb_data *dev = fb_info->par;
54918 return snprintf(buf, PAGE_SIZE, "%u\n",
54919- atomic_read(&dev->bytes_rendered));
54920+ atomic_read_unchecked(&dev->bytes_rendered));
54921 }
54922
54923 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54924@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54925 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54926 struct dlfb_data *dev = fb_info->par;
54927 return snprintf(buf, PAGE_SIZE, "%u\n",
54928- atomic_read(&dev->bytes_identical));
54929+ atomic_read_unchecked(&dev->bytes_identical));
54930 }
54931
54932 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54933@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54934 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54935 struct dlfb_data *dev = fb_info->par;
54936 return snprintf(buf, PAGE_SIZE, "%u\n",
54937- atomic_read(&dev->bytes_sent));
54938+ atomic_read_unchecked(&dev->bytes_sent));
54939 }
54940
54941 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54942@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54943 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54944 struct dlfb_data *dev = fb_info->par;
54945 return snprintf(buf, PAGE_SIZE, "%u\n",
54946- atomic_read(&dev->cpu_kcycles_used));
54947+ atomic_read_unchecked(&dev->cpu_kcycles_used));
54948 }
54949
54950 static ssize_t edid_show(
54951@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
54952 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54953 struct dlfb_data *dev = fb_info->par;
54954
54955- atomic_set(&dev->bytes_rendered, 0);
54956- atomic_set(&dev->bytes_identical, 0);
54957- atomic_set(&dev->bytes_sent, 0);
54958- atomic_set(&dev->cpu_kcycles_used, 0);
54959+ atomic_set_unchecked(&dev->bytes_rendered, 0);
54960+ atomic_set_unchecked(&dev->bytes_identical, 0);
54961+ atomic_set_unchecked(&dev->bytes_sent, 0);
54962+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
54963
54964 return count;
54965 }
54966diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
54967index d32d1c4..46722e6 100644
54968--- a/drivers/video/fbdev/uvesafb.c
54969+++ b/drivers/video/fbdev/uvesafb.c
54970@@ -19,6 +19,7 @@
54971 #include <linux/io.h>
54972 #include <linux/mutex.h>
54973 #include <linux/slab.h>
54974+#include <linux/moduleloader.h>
54975 #include <video/edid.h>
54976 #include <video/uvesafb.h>
54977 #ifdef CONFIG_X86
54978@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
54979 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
54980 par->pmi_setpal = par->ypan = 0;
54981 } else {
54982+
54983+#ifdef CONFIG_PAX_KERNEXEC
54984+#ifdef CONFIG_MODULES
54985+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
54986+#endif
54987+ if (!par->pmi_code) {
54988+ par->pmi_setpal = par->ypan = 0;
54989+ return 0;
54990+ }
54991+#endif
54992+
54993 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
54994 + task->t.regs.edi);
54995+
54996+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54997+ pax_open_kernel();
54998+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
54999+ pax_close_kernel();
55000+
55001+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55002+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55003+#else
55004 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55005 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55006+#endif
55007+
55008 printk(KERN_INFO "uvesafb: protected mode interface info at "
55009 "%04x:%04x\n",
55010 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55011@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55012 par->ypan = ypan;
55013
55014 if (par->pmi_setpal || par->ypan) {
55015+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55016 if (__supported_pte_mask & _PAGE_NX) {
55017 par->pmi_setpal = par->ypan = 0;
55018 printk(KERN_WARNING "uvesafb: NX protection is active, "
55019 "better not use the PMI.\n");
55020- } else {
55021+ } else
55022+#endif
55023 uvesafb_vbe_getpmi(task, par);
55024- }
55025 }
55026 #else
55027 /* The protected mode interface is not available on non-x86. */
55028@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55029 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55030
55031 /* Disable blanking if the user requested so. */
55032- if (!blank)
55033- info->fbops->fb_blank = NULL;
55034+ if (!blank) {
55035+ pax_open_kernel();
55036+ *(void **)&info->fbops->fb_blank = NULL;
55037+ pax_close_kernel();
55038+ }
55039
55040 /*
55041 * Find out how much IO memory is required for the mode with
55042@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55043 info->flags = FBINFO_FLAG_DEFAULT |
55044 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55045
55046- if (!par->ypan)
55047- info->fbops->fb_pan_display = NULL;
55048+ if (!par->ypan) {
55049+ pax_open_kernel();
55050+ *(void **)&info->fbops->fb_pan_display = NULL;
55051+ pax_close_kernel();
55052+ }
55053 }
55054
55055 static void uvesafb_init_mtrr(struct fb_info *info)
55056@@ -1786,6 +1816,11 @@ out_mode:
55057 out:
55058 kfree(par->vbe_modes);
55059
55060+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55061+ if (par->pmi_code)
55062+ module_memfree_exec(par->pmi_code);
55063+#endif
55064+
55065 framebuffer_release(info);
55066 return err;
55067 }
55068@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55069 kfree(par->vbe_state_orig);
55070 kfree(par->vbe_state_saved);
55071
55072+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55073+ if (par->pmi_code)
55074+ module_memfree_exec(par->pmi_code);
55075+#endif
55076+
55077 framebuffer_release(info);
55078 }
55079 return 0;
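
Under KERNEXEC the VESA protected-mode interface blob cannot be executed in place from writable memory, so uvesafb copies it into an executable allocation (module_alloc_exec) and rebases pmi_start/pmi_pal from the offsets stored at pmi_base[1] and pmi_base[2]; vesafb below gets the same treatment. A userspace analogy of the write-then-execute handoff, assuming POSIX mmap()/mprotect() in place of the kernel allocators; install_pmi_code is an illustrative name:

#include <string.h>
#include <sys/mman.h>

void *install_pmi_code(const void *blob, size_t size)
{
    void *exec = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (exec == MAP_FAILED)
        return NULL;

    memcpy(exec, blob, size);                        /* write phase */

    if (mprotect(exec, size, PROT_READ | PROT_EXEC)) /* execute phase: never W+X */
        { munmap(exec, size); return NULL; }

    return exec;   /* entry points are blob-relative offsets into this copy */
}
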
55080diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55081index d79a0ac..2d0c3d4 100644
55082--- a/drivers/video/fbdev/vesafb.c
55083+++ b/drivers/video/fbdev/vesafb.c
55084@@ -9,6 +9,7 @@
55085 */
55086
55087 #include <linux/module.h>
55088+#include <linux/moduleloader.h>
55089 #include <linux/kernel.h>
55090 #include <linux/errno.h>
55091 #include <linux/string.h>
55092@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55093 static int vram_total; /* Set total amount of memory */
55094 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55095 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55096-static void (*pmi_start)(void) __read_mostly;
55097-static void (*pmi_pal) (void) __read_mostly;
55098+static void (*pmi_start)(void) __read_only;
55099+static void (*pmi_pal) (void) __read_only;
55100 static int depth __read_mostly;
55101 static int vga_compat __read_mostly;
55102 /* --------------------------------------------------------------------- */
55103@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55104 unsigned int size_remap;
55105 unsigned int size_total;
55106 char *option = NULL;
55107+ void *pmi_code = NULL;
55108
55109 /* ignore error return of fb_get_options */
55110 fb_get_options("vesafb", &option);
55111@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55112 size_remap = size_total;
55113 vesafb_fix.smem_len = size_remap;
55114
55115-#ifndef __i386__
55116- screen_info.vesapm_seg = 0;
55117-#endif
55118-
55119 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55120 printk(KERN_WARNING
55121 "vesafb: cannot reserve video memory at 0x%lx\n",
55122@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55123 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55124 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55125
55126+#ifdef __i386__
55127+
55128+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55129+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55130+ if (!pmi_code)
55131+#elif !defined(CONFIG_PAX_KERNEXEC)
55132+ if (0)
55133+#endif
55134+
55135+#endif
55136+ screen_info.vesapm_seg = 0;
55137+
55138 if (screen_info.vesapm_seg) {
55139- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55140- screen_info.vesapm_seg,screen_info.vesapm_off);
55141+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55142+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55143 }
55144
55145 if (screen_info.vesapm_seg < 0xc000)
55146@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55147
55148 if (ypan || pmi_setpal) {
55149 unsigned short *pmi_base;
55150+
55151 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55152- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55153- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55154+
55155+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55156+ pax_open_kernel();
55157+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55158+#else
55159+ pmi_code = pmi_base;
55160+#endif
55161+
55162+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55163+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55164+
55165+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55166+ pmi_start = ktva_ktla(pmi_start);
55167+ pmi_pal = ktva_ktla(pmi_pal);
55168+ pax_close_kernel();
55169+#endif
55170+
55171 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55172 if (pmi_base[3]) {
55173 printk(KERN_INFO "vesafb: pmi: ports = ");
55174@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55175 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55176 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55177
55178- if (!ypan)
55179- info->fbops->fb_pan_display = NULL;
55180+ if (!ypan) {
55181+ pax_open_kernel();
55182+ *(void **)&info->fbops->fb_pan_display = NULL;
55183+ pax_close_kernel();
55184+ }
55185
55186 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55187 err = -ENOMEM;
55188@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55189 fb_info(info, "%s frame buffer device\n", info->fix.id);
55190 return 0;
55191 err:
55192+
55193+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55194+ module_memfree_exec(pmi_code);
55195+#endif
55196+
55197 if (info->screen_base)
55198 iounmap(info->screen_base);
55199 framebuffer_release(info);
55200diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55201index 88714ae..16c2e11 100644
55202--- a/drivers/video/fbdev/via/via_clock.h
55203+++ b/drivers/video/fbdev/via/via_clock.h
55204@@ -56,7 +56,7 @@ struct via_clock {
55205
55206 void (*set_engine_pll_state)(u8 state);
55207 void (*set_engine_pll)(struct via_pll_config config);
55208-};
55209+} __no_const;
55210
55211
55212 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55213diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55214index 3c14e43..2630570 100644
55215--- a/drivers/video/logo/logo_linux_clut224.ppm
55216+++ b/drivers/video/logo/logo_linux_clut224.ppm
55217@@ -2,1603 +2,1123 @@ P3
55218 # Standard 224-color Linux logo
55219 80 80
55220 255
[1603 lines of the old 80x80 boot logo's raw PPM pixel triplets are deleted by this hunk, with 1123 replacement lines added per the hunk header; the RGB rows are elided here for readability.]
55757- 0 0 0 0 0 0 0 0 0 0 0 0
55758- 0 0 0 0 0 0 0 0 0 0 0 0
55759- 0 0 0 0 0 0 0 0 0 0 0 0
55760- 0 0 0 0 0 0 0 0 0 0 0 0
55761- 0 0 0 0 0 0 0 0 0 0 0 0
55762- 0 0 0 0 0 0 0 0 0 0 0 0
55763- 0 0 0 0 0 0 0 0 0 0 0 0
55764- 0 0 0 0 0 0 0 0 0 0 0 0
55765- 0 0 0 0 0 0 0 0 0 0 0 0
55766- 0 0 0 0 0 0 0 0 0 0 0 0
55767- 0 0 0 0 0 0 0 0 0 10 10 10
55768- 38 38 38 86 86 86 50 50 50 6 6 6
55769-128 128 128 174 154 114 156 107 11 168 122 10
55770-198 155 10 184 144 12 197 138 11 200 144 11
55771-206 145 10 206 145 10 197 138 11 188 164 115
55772-195 195 195 198 198 198 174 174 174 14 14 14
55773- 2 2 6 22 22 22 116 116 116 116 116 116
55774- 22 22 22 2 2 6 74 74 74 70 70 70
55775- 30 30 30 10 10 10 0 0 0 0 0 0
55776- 0 0 0 0 0 0 0 0 0 0 0 0
55777- 0 0 0 0 0 0 0 0 0 0 0 0
55778- 0 0 0 0 0 0 0 0 0 0 0 0
55779- 0 0 0 0 0 0 0 0 0 0 0 0
55780- 0 0 0 0 0 0 0 0 0 0 0 0
55781- 0 0 0 0 0 0 0 0 0 0 0 0
55782- 0 0 0 0 0 0 0 0 0 0 0 0
55783- 0 0 0 0 0 0 0 0 0 0 0 0
55784- 0 0 0 0 0 0 0 0 0 0 0 0
55785- 0 0 0 0 0 0 0 0 0 0 0 0
55786- 0 0 0 0 0 0 0 0 0 0 0 0
55787- 0 0 0 0 0 0 6 6 6 18 18 18
55788- 50 50 50 101 101 101 26 26 26 10 10 10
55789-138 138 138 190 190 190 174 154 114 156 107 11
55790-197 138 11 200 144 11 197 138 11 192 133 9
55791-180 123 7 190 142 34 190 178 144 187 187 187
55792-202 202 202 221 221 221 214 214 214 66 66 66
55793- 2 2 6 2 2 6 50 50 50 62 62 62
55794- 6 6 6 2 2 6 10 10 10 90 90 90
55795- 50 50 50 18 18 18 6 6 6 0 0 0
55796- 0 0 0 0 0 0 0 0 0 0 0 0
55797- 0 0 0 0 0 0 0 0 0 0 0 0
55798- 0 0 0 0 0 0 0 0 0 0 0 0
55799- 0 0 0 0 0 0 0 0 0 0 0 0
55800- 0 0 0 0 0 0 0 0 0 0 0 0
55801- 0 0 0 0 0 0 0 0 0 0 0 0
55802- 0 0 0 0 0 0 0 0 0 0 0 0
55803- 0 0 0 0 0 0 0 0 0 0 0 0
55804- 0 0 0 0 0 0 0 0 0 0 0 0
55805- 0 0 0 0 0 0 0 0 0 0 0 0
55806- 0 0 0 0 0 0 0 0 0 0 0 0
55807- 0 0 0 0 0 0 10 10 10 34 34 34
55808- 74 74 74 74 74 74 2 2 6 6 6 6
55809-144 144 144 198 198 198 190 190 190 178 166 146
55810-154 121 60 156 107 11 156 107 11 168 124 44
55811-174 154 114 187 187 187 190 190 190 210 210 210
55812-246 246 246 253 253 253 253 253 253 182 182 182
55813- 6 6 6 2 2 6 2 2 6 2 2 6
55814- 2 2 6 2 2 6 2 2 6 62 62 62
55815- 74 74 74 34 34 34 14 14 14 0 0 0
55816- 0 0 0 0 0 0 0 0 0 0 0 0
55817- 0 0 0 0 0 0 0 0 0 0 0 0
55818- 0 0 0 0 0 0 0 0 0 0 0 0
55819- 0 0 0 0 0 0 0 0 0 0 0 0
55820- 0 0 0 0 0 0 0 0 0 0 0 0
55821- 0 0 0 0 0 0 0 0 0 0 0 0
55822- 0 0 0 0 0 0 0 0 0 0 0 0
55823- 0 0 0 0 0 0 0 0 0 0 0 0
55824- 0 0 0 0 0 0 0 0 0 0 0 0
55825- 0 0 0 0 0 0 0 0 0 0 0 0
55826- 0 0 0 0 0 0 0 0 0 0 0 0
55827- 0 0 0 10 10 10 22 22 22 54 54 54
55828- 94 94 94 18 18 18 2 2 6 46 46 46
55829-234 234 234 221 221 221 190 190 190 190 190 190
55830-190 190 190 187 187 187 187 187 187 190 190 190
55831-190 190 190 195 195 195 214 214 214 242 242 242
55832-253 253 253 253 253 253 253 253 253 253 253 253
55833- 82 82 82 2 2 6 2 2 6 2 2 6
55834- 2 2 6 2 2 6 2 2 6 14 14 14
55835- 86 86 86 54 54 54 22 22 22 6 6 6
55836- 0 0 0 0 0 0 0 0 0 0 0 0
55837- 0 0 0 0 0 0 0 0 0 0 0 0
55838- 0 0 0 0 0 0 0 0 0 0 0 0
55839- 0 0 0 0 0 0 0 0 0 0 0 0
55840- 0 0 0 0 0 0 0 0 0 0 0 0
55841- 0 0 0 0 0 0 0 0 0 0 0 0
55842- 0 0 0 0 0 0 0 0 0 0 0 0
55843- 0 0 0 0 0 0 0 0 0 0 0 0
55844- 0 0 0 0 0 0 0 0 0 0 0 0
55845- 0 0 0 0 0 0 0 0 0 0 0 0
55846- 0 0 0 0 0 0 0 0 0 0 0 0
55847- 6 6 6 18 18 18 46 46 46 90 90 90
55848- 46 46 46 18 18 18 6 6 6 182 182 182
55849-253 253 253 246 246 246 206 206 206 190 190 190
55850-190 190 190 190 190 190 190 190 190 190 190 190
55851-206 206 206 231 231 231 250 250 250 253 253 253
55852-253 253 253 253 253 253 253 253 253 253 253 253
55853-202 202 202 14 14 14 2 2 6 2 2 6
55854- 2 2 6 2 2 6 2 2 6 2 2 6
55855- 42 42 42 86 86 86 42 42 42 18 18 18
55856- 6 6 6 0 0 0 0 0 0 0 0 0
55857- 0 0 0 0 0 0 0 0 0 0 0 0
55858- 0 0 0 0 0 0 0 0 0 0 0 0
55859- 0 0 0 0 0 0 0 0 0 0 0 0
55860- 0 0 0 0 0 0 0 0 0 0 0 0
55861- 0 0 0 0 0 0 0 0 0 0 0 0
55862- 0 0 0 0 0 0 0 0 0 0 0 0
55863- 0 0 0 0 0 0 0 0 0 0 0 0
55864- 0 0 0 0 0 0 0 0 0 0 0 0
55865- 0 0 0 0 0 0 0 0 0 0 0 0
55866- 0 0 0 0 0 0 0 0 0 6 6 6
55867- 14 14 14 38 38 38 74 74 74 66 66 66
55868- 2 2 6 6 6 6 90 90 90 250 250 250
55869-253 253 253 253 253 253 238 238 238 198 198 198
55870-190 190 190 190 190 190 195 195 195 221 221 221
55871-246 246 246 253 253 253 253 253 253 253 253 253
55872-253 253 253 253 253 253 253 253 253 253 253 253
55873-253 253 253 82 82 82 2 2 6 2 2 6
55874- 2 2 6 2 2 6 2 2 6 2 2 6
55875- 2 2 6 78 78 78 70 70 70 34 34 34
55876- 14 14 14 6 6 6 0 0 0 0 0 0
55877- 0 0 0 0 0 0 0 0 0 0 0 0
55878- 0 0 0 0 0 0 0 0 0 0 0 0
55879- 0 0 0 0 0 0 0 0 0 0 0 0
55880- 0 0 0 0 0 0 0 0 0 0 0 0
55881- 0 0 0 0 0 0 0 0 0 0 0 0
55882- 0 0 0 0 0 0 0 0 0 0 0 0
55883- 0 0 0 0 0 0 0 0 0 0 0 0
55884- 0 0 0 0 0 0 0 0 0 0 0 0
55885- 0 0 0 0 0 0 0 0 0 0 0 0
55886- 0 0 0 0 0 0 0 0 0 14 14 14
55887- 34 34 34 66 66 66 78 78 78 6 6 6
55888- 2 2 6 18 18 18 218 218 218 253 253 253
55889-253 253 253 253 253 253 253 253 253 246 246 246
55890-226 226 226 231 231 231 246 246 246 253 253 253
55891-253 253 253 253 253 253 253 253 253 253 253 253
55892-253 253 253 253 253 253 253 253 253 253 253 253
55893-253 253 253 178 178 178 2 2 6 2 2 6
55894- 2 2 6 2 2 6 2 2 6 2 2 6
55895- 2 2 6 18 18 18 90 90 90 62 62 62
55896- 30 30 30 10 10 10 0 0 0 0 0 0
55897- 0 0 0 0 0 0 0 0 0 0 0 0
55898- 0 0 0 0 0 0 0 0 0 0 0 0
55899- 0 0 0 0 0 0 0 0 0 0 0 0
55900- 0 0 0 0 0 0 0 0 0 0 0 0
55901- 0 0 0 0 0 0 0 0 0 0 0 0
55902- 0 0 0 0 0 0 0 0 0 0 0 0
55903- 0 0 0 0 0 0 0 0 0 0 0 0
55904- 0 0 0 0 0 0 0 0 0 0 0 0
55905- 0 0 0 0 0 0 0 0 0 0 0 0
55906- 0 0 0 0 0 0 10 10 10 26 26 26
55907- 58 58 58 90 90 90 18 18 18 2 2 6
55908- 2 2 6 110 110 110 253 253 253 253 253 253
55909-253 253 253 253 253 253 253 253 253 253 253 253
55910-250 250 250 253 253 253 253 253 253 253 253 253
55911-253 253 253 253 253 253 253 253 253 253 253 253
55912-253 253 253 253 253 253 253 253 253 253 253 253
55913-253 253 253 231 231 231 18 18 18 2 2 6
55914- 2 2 6 2 2 6 2 2 6 2 2 6
55915- 2 2 6 2 2 6 18 18 18 94 94 94
55916- 54 54 54 26 26 26 10 10 10 0 0 0
55917- 0 0 0 0 0 0 0 0 0 0 0 0
55918- 0 0 0 0 0 0 0 0 0 0 0 0
55919- 0 0 0 0 0 0 0 0 0 0 0 0
55920- 0 0 0 0 0 0 0 0 0 0 0 0
55921- 0 0 0 0 0 0 0 0 0 0 0 0
55922- 0 0 0 0 0 0 0 0 0 0 0 0
55923- 0 0 0 0 0 0 0 0 0 0 0 0
55924- 0 0 0 0 0 0 0 0 0 0 0 0
55925- 0 0 0 0 0 0 0 0 0 0 0 0
55926- 0 0 0 6 6 6 22 22 22 50 50 50
55927- 90 90 90 26 26 26 2 2 6 2 2 6
55928- 14 14 14 195 195 195 250 250 250 253 253 253
55929-253 253 253 253 253 253 253 253 253 253 253 253
55930-253 253 253 253 253 253 253 253 253 253 253 253
55931-253 253 253 253 253 253 253 253 253 253 253 253
55932-253 253 253 253 253 253 253 253 253 253 253 253
55933-250 250 250 242 242 242 54 54 54 2 2 6
55934- 2 2 6 2 2 6 2 2 6 2 2 6
55935- 2 2 6 2 2 6 2 2 6 38 38 38
55936- 86 86 86 50 50 50 22 22 22 6 6 6
55937- 0 0 0 0 0 0 0 0 0 0 0 0
55938- 0 0 0 0 0 0 0 0 0 0 0 0
55939- 0 0 0 0 0 0 0 0 0 0 0 0
55940- 0 0 0 0 0 0 0 0 0 0 0 0
55941- 0 0 0 0 0 0 0 0 0 0 0 0
55942- 0 0 0 0 0 0 0 0 0 0 0 0
55943- 0 0 0 0 0 0 0 0 0 0 0 0
55944- 0 0 0 0 0 0 0 0 0 0 0 0
55945- 0 0 0 0 0 0 0 0 0 0 0 0
55946- 6 6 6 14 14 14 38 38 38 82 82 82
55947- 34 34 34 2 2 6 2 2 6 2 2 6
55948- 42 42 42 195 195 195 246 246 246 253 253 253
55949-253 253 253 253 253 253 253 253 253 250 250 250
55950-242 242 242 242 242 242 250 250 250 253 253 253
55951-253 253 253 253 253 253 253 253 253 253 253 253
55952-253 253 253 250 250 250 246 246 246 238 238 238
55953-226 226 226 231 231 231 101 101 101 6 6 6
55954- 2 2 6 2 2 6 2 2 6 2 2 6
55955- 2 2 6 2 2 6 2 2 6 2 2 6
55956- 38 38 38 82 82 82 42 42 42 14 14 14
55957- 6 6 6 0 0 0 0 0 0 0 0 0
55958- 0 0 0 0 0 0 0 0 0 0 0 0
55959- 0 0 0 0 0 0 0 0 0 0 0 0
55960- 0 0 0 0 0 0 0 0 0 0 0 0
55961- 0 0 0 0 0 0 0 0 0 0 0 0
55962- 0 0 0 0 0 0 0 0 0 0 0 0
55963- 0 0 0 0 0 0 0 0 0 0 0 0
55964- 0 0 0 0 0 0 0 0 0 0 0 0
55965- 0 0 0 0 0 0 0 0 0 0 0 0
55966- 10 10 10 26 26 26 62 62 62 66 66 66
55967- 2 2 6 2 2 6 2 2 6 6 6 6
55968- 70 70 70 170 170 170 206 206 206 234 234 234
55969-246 246 246 250 250 250 250 250 250 238 238 238
55970-226 226 226 231 231 231 238 238 238 250 250 250
55971-250 250 250 250 250 250 246 246 246 231 231 231
55972-214 214 214 206 206 206 202 202 202 202 202 202
55973-198 198 198 202 202 202 182 182 182 18 18 18
55974- 2 2 6 2 2 6 2 2 6 2 2 6
55975- 2 2 6 2 2 6 2 2 6 2 2 6
55976- 2 2 6 62 62 62 66 66 66 30 30 30
55977- 10 10 10 0 0 0 0 0 0 0 0 0
55978- 0 0 0 0 0 0 0 0 0 0 0 0
55979- 0 0 0 0 0 0 0 0 0 0 0 0
55980- 0 0 0 0 0 0 0 0 0 0 0 0
55981- 0 0 0 0 0 0 0 0 0 0 0 0
55982- 0 0 0 0 0 0 0 0 0 0 0 0
55983- 0 0 0 0 0 0 0 0 0 0 0 0
55984- 0 0 0 0 0 0 0 0 0 0 0 0
55985- 0 0 0 0 0 0 0 0 0 0 0 0
55986- 14 14 14 42 42 42 82 82 82 18 18 18
55987- 2 2 6 2 2 6 2 2 6 10 10 10
55988- 94 94 94 182 182 182 218 218 218 242 242 242
55989-250 250 250 253 253 253 253 253 253 250 250 250
55990-234 234 234 253 253 253 253 253 253 253 253 253
55991-253 253 253 253 253 253 253 253 253 246 246 246
55992-238 238 238 226 226 226 210 210 210 202 202 202
55993-195 195 195 195 195 195 210 210 210 158 158 158
55994- 6 6 6 14 14 14 50 50 50 14 14 14
55995- 2 2 6 2 2 6 2 2 6 2 2 6
55996- 2 2 6 6 6 6 86 86 86 46 46 46
55997- 18 18 18 6 6 6 0 0 0 0 0 0
55998- 0 0 0 0 0 0 0 0 0 0 0 0
55999- 0 0 0 0 0 0 0 0 0 0 0 0
56000- 0 0 0 0 0 0 0 0 0 0 0 0
56001- 0 0 0 0 0 0 0 0 0 0 0 0
56002- 0 0 0 0 0 0 0 0 0 0 0 0
56003- 0 0 0 0 0 0 0 0 0 0 0 0
56004- 0 0 0 0 0 0 0 0 0 0 0 0
56005- 0 0 0 0 0 0 0 0 0 6 6 6
56006- 22 22 22 54 54 54 70 70 70 2 2 6
56007- 2 2 6 10 10 10 2 2 6 22 22 22
56008-166 166 166 231 231 231 250 250 250 253 253 253
56009-253 253 253 253 253 253 253 253 253 250 250 250
56010-242 242 242 253 253 253 253 253 253 253 253 253
56011-253 253 253 253 253 253 253 253 253 253 253 253
56012-253 253 253 253 253 253 253 253 253 246 246 246
56013-231 231 231 206 206 206 198 198 198 226 226 226
56014- 94 94 94 2 2 6 6 6 6 38 38 38
56015- 30 30 30 2 2 6 2 2 6 2 2 6
56016- 2 2 6 2 2 6 62 62 62 66 66 66
56017- 26 26 26 10 10 10 0 0 0 0 0 0
56018- 0 0 0 0 0 0 0 0 0 0 0 0
56019- 0 0 0 0 0 0 0 0 0 0 0 0
56020- 0 0 0 0 0 0 0 0 0 0 0 0
56021- 0 0 0 0 0 0 0 0 0 0 0 0
56022- 0 0 0 0 0 0 0 0 0 0 0 0
56023- 0 0 0 0 0 0 0 0 0 0 0 0
56024- 0 0 0 0 0 0 0 0 0 0 0 0
56025- 0 0 0 0 0 0 0 0 0 10 10 10
56026- 30 30 30 74 74 74 50 50 50 2 2 6
56027- 26 26 26 26 26 26 2 2 6 106 106 106
56028-238 238 238 253 253 253 253 253 253 253 253 253
56029-253 253 253 253 253 253 253 253 253 253 253 253
56030-253 253 253 253 253 253 253 253 253 253 253 253
56031-253 253 253 253 253 253 253 253 253 253 253 253
56032-253 253 253 253 253 253 253 253 253 253 253 253
56033-253 253 253 246 246 246 218 218 218 202 202 202
56034-210 210 210 14 14 14 2 2 6 2 2 6
56035- 30 30 30 22 22 22 2 2 6 2 2 6
56036- 2 2 6 2 2 6 18 18 18 86 86 86
56037- 42 42 42 14 14 14 0 0 0 0 0 0
56038- 0 0 0 0 0 0 0 0 0 0 0 0
56039- 0 0 0 0 0 0 0 0 0 0 0 0
56040- 0 0 0 0 0 0 0 0 0 0 0 0
56041- 0 0 0 0 0 0 0 0 0 0 0 0
56042- 0 0 0 0 0 0 0 0 0 0 0 0
56043- 0 0 0 0 0 0 0 0 0 0 0 0
56044- 0 0 0 0 0 0 0 0 0 0 0 0
56045- 0 0 0 0 0 0 0 0 0 14 14 14
56046- 42 42 42 90 90 90 22 22 22 2 2 6
56047- 42 42 42 2 2 6 18 18 18 218 218 218
56048-253 253 253 253 253 253 253 253 253 253 253 253
56049-253 253 253 253 253 253 253 253 253 253 253 253
56050-253 253 253 253 253 253 253 253 253 253 253 253
56051-253 253 253 253 253 253 253 253 253 253 253 253
56052-253 253 253 253 253 253 253 253 253 253 253 253
56053-253 253 253 253 253 253 250 250 250 221 221 221
56054-218 218 218 101 101 101 2 2 6 14 14 14
56055- 18 18 18 38 38 38 10 10 10 2 2 6
56056- 2 2 6 2 2 6 2 2 6 78 78 78
56057- 58 58 58 22 22 22 6 6 6 0 0 0
56058- 0 0 0 0 0 0 0 0 0 0 0 0
56059- 0 0 0 0 0 0 0 0 0 0 0 0
56060- 0 0 0 0 0 0 0 0 0 0 0 0
56061- 0 0 0 0 0 0 0 0 0 0 0 0
56062- 0 0 0 0 0 0 0 0 0 0 0 0
56063- 0 0 0 0 0 0 0 0 0 0 0 0
56064- 0 0 0 0 0 0 0 0 0 0 0 0
56065- 0 0 0 0 0 0 6 6 6 18 18 18
56066- 54 54 54 82 82 82 2 2 6 26 26 26
56067- 22 22 22 2 2 6 123 123 123 253 253 253
56068-253 253 253 253 253 253 253 253 253 253 253 253
56069-253 253 253 253 253 253 253 253 253 253 253 253
56070-253 253 253 253 253 253 253 253 253 253 253 253
56071-253 253 253 253 253 253 253 253 253 253 253 253
56072-253 253 253 253 253 253 253 253 253 253 253 253
56073-253 253 253 253 253 253 253 253 253 250 250 250
56074-238 238 238 198 198 198 6 6 6 38 38 38
56075- 58 58 58 26 26 26 38 38 38 2 2 6
56076- 2 2 6 2 2 6 2 2 6 46 46 46
56077- 78 78 78 30 30 30 10 10 10 0 0 0
56078- 0 0 0 0 0 0 0 0 0 0 0 0
56079- 0 0 0 0 0 0 0 0 0 0 0 0
56080- 0 0 0 0 0 0 0 0 0 0 0 0
56081- 0 0 0 0 0 0 0 0 0 0 0 0
56082- 0 0 0 0 0 0 0 0 0 0 0 0
56083- 0 0 0 0 0 0 0 0 0 0 0 0
56084- 0 0 0 0 0 0 0 0 0 0 0 0
56085- 0 0 0 0 0 0 10 10 10 30 30 30
56086- 74 74 74 58 58 58 2 2 6 42 42 42
56087- 2 2 6 22 22 22 231 231 231 253 253 253
56088-253 253 253 253 253 253 253 253 253 253 253 253
56089-253 253 253 253 253 253 253 253 253 250 250 250
56090-253 253 253 253 253 253 253 253 253 253 253 253
56091-253 253 253 253 253 253 253 253 253 253 253 253
56092-253 253 253 253 253 253 253 253 253 253 253 253
56093-253 253 253 253 253 253 253 253 253 253 253 253
56094-253 253 253 246 246 246 46 46 46 38 38 38
56095- 42 42 42 14 14 14 38 38 38 14 14 14
56096- 2 2 6 2 2 6 2 2 6 6 6 6
56097- 86 86 86 46 46 46 14 14 14 0 0 0
56098- 0 0 0 0 0 0 0 0 0 0 0 0
56099- 0 0 0 0 0 0 0 0 0 0 0 0
56100- 0 0 0 0 0 0 0 0 0 0 0 0
56101- 0 0 0 0 0 0 0 0 0 0 0 0
56102- 0 0 0 0 0 0 0 0 0 0 0 0
56103- 0 0 0 0 0 0 0 0 0 0 0 0
56104- 0 0 0 0 0 0 0 0 0 0 0 0
56105- 0 0 0 6 6 6 14 14 14 42 42 42
56106- 90 90 90 18 18 18 18 18 18 26 26 26
56107- 2 2 6 116 116 116 253 253 253 253 253 253
56108-253 253 253 253 253 253 253 253 253 253 253 253
56109-253 253 253 253 253 253 250 250 250 238 238 238
56110-253 253 253 253 253 253 253 253 253 253 253 253
56111-253 253 253 253 253 253 253 253 253 253 253 253
56112-253 253 253 253 253 253 253 253 253 253 253 253
56113-253 253 253 253 253 253 253 253 253 253 253 253
56114-253 253 253 253 253 253 94 94 94 6 6 6
56115- 2 2 6 2 2 6 10 10 10 34 34 34
56116- 2 2 6 2 2 6 2 2 6 2 2 6
56117- 74 74 74 58 58 58 22 22 22 6 6 6
56118- 0 0 0 0 0 0 0 0 0 0 0 0
56119- 0 0 0 0 0 0 0 0 0 0 0 0
56120- 0 0 0 0 0 0 0 0 0 0 0 0
56121- 0 0 0 0 0 0 0 0 0 0 0 0
56122- 0 0 0 0 0 0 0 0 0 0 0 0
56123- 0 0 0 0 0 0 0 0 0 0 0 0
56124- 0 0 0 0 0 0 0 0 0 0 0 0
56125- 0 0 0 10 10 10 26 26 26 66 66 66
56126- 82 82 82 2 2 6 38 38 38 6 6 6
56127- 14 14 14 210 210 210 253 253 253 253 253 253
56128-253 253 253 253 253 253 253 253 253 253 253 253
56129-253 253 253 253 253 253 246 246 246 242 242 242
56130-253 253 253 253 253 253 253 253 253 253 253 253
56131-253 253 253 253 253 253 253 253 253 253 253 253
56132-253 253 253 253 253 253 253 253 253 253 253 253
56133-253 253 253 253 253 253 253 253 253 253 253 253
56134-253 253 253 253 253 253 144 144 144 2 2 6
56135- 2 2 6 2 2 6 2 2 6 46 46 46
56136- 2 2 6 2 2 6 2 2 6 2 2 6
56137- 42 42 42 74 74 74 30 30 30 10 10 10
56138- 0 0 0 0 0 0 0 0 0 0 0 0
56139- 0 0 0 0 0 0 0 0 0 0 0 0
56140- 0 0 0 0 0 0 0 0 0 0 0 0
56141- 0 0 0 0 0 0 0 0 0 0 0 0
56142- 0 0 0 0 0 0 0 0 0 0 0 0
56143- 0 0 0 0 0 0 0 0 0 0 0 0
56144- 0 0 0 0 0 0 0 0 0 0 0 0
56145- 6 6 6 14 14 14 42 42 42 90 90 90
56146- 26 26 26 6 6 6 42 42 42 2 2 6
56147- 74 74 74 250 250 250 253 253 253 253 253 253
56148-253 253 253 253 253 253 253 253 253 253 253 253
56149-253 253 253 253 253 253 242 242 242 242 242 242
56150-253 253 253 253 253 253 253 253 253 253 253 253
56151-253 253 253 253 253 253 253 253 253 253 253 253
56152-253 253 253 253 253 253 253 253 253 253 253 253
56153-253 253 253 253 253 253 253 253 253 253 253 253
56154-253 253 253 253 253 253 182 182 182 2 2 6
56155- 2 2 6 2 2 6 2 2 6 46 46 46
56156- 2 2 6 2 2 6 2 2 6 2 2 6
56157- 10 10 10 86 86 86 38 38 38 10 10 10
56158- 0 0 0 0 0 0 0 0 0 0 0 0
56159- 0 0 0 0 0 0 0 0 0 0 0 0
56160- 0 0 0 0 0 0 0 0 0 0 0 0
56161- 0 0 0 0 0 0 0 0 0 0 0 0
56162- 0 0 0 0 0 0 0 0 0 0 0 0
56163- 0 0 0 0 0 0 0 0 0 0 0 0
56164- 0 0 0 0 0 0 0 0 0 0 0 0
56165- 10 10 10 26 26 26 66 66 66 82 82 82
56166- 2 2 6 22 22 22 18 18 18 2 2 6
56167-149 149 149 253 253 253 253 253 253 253 253 253
56168-253 253 253 253 253 253 253 253 253 253 253 253
56169-253 253 253 253 253 253 234 234 234 242 242 242
56170-253 253 253 253 253 253 253 253 253 253 253 253
56171-253 253 253 253 253 253 253 253 253 253 253 253
56172-253 253 253 253 253 253 253 253 253 253 253 253
56173-253 253 253 253 253 253 253 253 253 253 253 253
56174-253 253 253 253 253 253 206 206 206 2 2 6
56175- 2 2 6 2 2 6 2 2 6 38 38 38
56176- 2 2 6 2 2 6 2 2 6 2 2 6
56177- 6 6 6 86 86 86 46 46 46 14 14 14
56178- 0 0 0 0 0 0 0 0 0 0 0 0
56179- 0 0 0 0 0 0 0 0 0 0 0 0
56180- 0 0 0 0 0 0 0 0 0 0 0 0
56181- 0 0 0 0 0 0 0 0 0 0 0 0
56182- 0 0 0 0 0 0 0 0 0 0 0 0
56183- 0 0 0 0 0 0 0 0 0 0 0 0
56184- 0 0 0 0 0 0 0 0 0 6 6 6
56185- 18 18 18 46 46 46 86 86 86 18 18 18
56186- 2 2 6 34 34 34 10 10 10 6 6 6
56187-210 210 210 253 253 253 253 253 253 253 253 253
56188-253 253 253 253 253 253 253 253 253 253 253 253
56189-253 253 253 253 253 253 234 234 234 242 242 242
56190-253 253 253 253 253 253 253 253 253 253 253 253
56191-253 253 253 253 253 253 253 253 253 253 253 253
56192-253 253 253 253 253 253 253 253 253 253 253 253
56193-253 253 253 253 253 253 253 253 253 253 253 253
56194-253 253 253 253 253 253 221 221 221 6 6 6
56195- 2 2 6 2 2 6 6 6 6 30 30 30
56196- 2 2 6 2 2 6 2 2 6 2 2 6
56197- 2 2 6 82 82 82 54 54 54 18 18 18
56198- 6 6 6 0 0 0 0 0 0 0 0 0
56199- 0 0 0 0 0 0 0 0 0 0 0 0
56200- 0 0 0 0 0 0 0 0 0 0 0 0
56201- 0 0 0 0 0 0 0 0 0 0 0 0
56202- 0 0 0 0 0 0 0 0 0 0 0 0
56203- 0 0 0 0 0 0 0 0 0 0 0 0
56204- 0 0 0 0 0 0 0 0 0 10 10 10
56205- 26 26 26 66 66 66 62 62 62 2 2 6
56206- 2 2 6 38 38 38 10 10 10 26 26 26
56207-238 238 238 253 253 253 253 253 253 253 253 253
56208-253 253 253 253 253 253 253 253 253 253 253 253
56209-253 253 253 253 253 253 231 231 231 238 238 238
56210-253 253 253 253 253 253 253 253 253 253 253 253
56211-253 253 253 253 253 253 253 253 253 253 253 253
56212-253 253 253 253 253 253 253 253 253 253 253 253
56213-253 253 253 253 253 253 253 253 253 253 253 253
56214-253 253 253 253 253 253 231 231 231 6 6 6
56215- 2 2 6 2 2 6 10 10 10 30 30 30
56216- 2 2 6 2 2 6 2 2 6 2 2 6
56217- 2 2 6 66 66 66 58 58 58 22 22 22
56218- 6 6 6 0 0 0 0 0 0 0 0 0
56219- 0 0 0 0 0 0 0 0 0 0 0 0
56220- 0 0 0 0 0 0 0 0 0 0 0 0
56221- 0 0 0 0 0 0 0 0 0 0 0 0
56222- 0 0 0 0 0 0 0 0 0 0 0 0
56223- 0 0 0 0 0 0 0 0 0 0 0 0
56224- 0 0 0 0 0 0 0 0 0 10 10 10
56225- 38 38 38 78 78 78 6 6 6 2 2 6
56226- 2 2 6 46 46 46 14 14 14 42 42 42
56227-246 246 246 253 253 253 253 253 253 253 253 253
56228-253 253 253 253 253 253 253 253 253 253 253 253
56229-253 253 253 253 253 253 231 231 231 242 242 242
56230-253 253 253 253 253 253 253 253 253 253 253 253
56231-253 253 253 253 253 253 253 253 253 253 253 253
56232-253 253 253 253 253 253 253 253 253 253 253 253
56233-253 253 253 253 253 253 253 253 253 253 253 253
56234-253 253 253 253 253 253 234 234 234 10 10 10
56235- 2 2 6 2 2 6 22 22 22 14 14 14
56236- 2 2 6 2 2 6 2 2 6 2 2 6
56237- 2 2 6 66 66 66 62 62 62 22 22 22
56238- 6 6 6 0 0 0 0 0 0 0 0 0
56239- 0 0 0 0 0 0 0 0 0 0 0 0
56240- 0 0 0 0 0 0 0 0 0 0 0 0
56241- 0 0 0 0 0 0 0 0 0 0 0 0
56242- 0 0 0 0 0 0 0 0 0 0 0 0
56243- 0 0 0 0 0 0 0 0 0 0 0 0
56244- 0 0 0 0 0 0 6 6 6 18 18 18
56245- 50 50 50 74 74 74 2 2 6 2 2 6
56246- 14 14 14 70 70 70 34 34 34 62 62 62
56247-250 250 250 253 253 253 253 253 253 253 253 253
56248-253 253 253 253 253 253 253 253 253 253 253 253
56249-253 253 253 253 253 253 231 231 231 246 246 246
56250-253 253 253 253 253 253 253 253 253 253 253 253
56251-253 253 253 253 253 253 253 253 253 253 253 253
56252-253 253 253 253 253 253 253 253 253 253 253 253
56253-253 253 253 253 253 253 253 253 253 253 253 253
56254-253 253 253 253 253 253 234 234 234 14 14 14
56255- 2 2 6 2 2 6 30 30 30 2 2 6
56256- 2 2 6 2 2 6 2 2 6 2 2 6
56257- 2 2 6 66 66 66 62 62 62 22 22 22
56258- 6 6 6 0 0 0 0 0 0 0 0 0
56259- 0 0 0 0 0 0 0 0 0 0 0 0
56260- 0 0 0 0 0 0 0 0 0 0 0 0
56261- 0 0 0 0 0 0 0 0 0 0 0 0
56262- 0 0 0 0 0 0 0 0 0 0 0 0
56263- 0 0 0 0 0 0 0 0 0 0 0 0
56264- 0 0 0 0 0 0 6 6 6 18 18 18
56265- 54 54 54 62 62 62 2 2 6 2 2 6
56266- 2 2 6 30 30 30 46 46 46 70 70 70
56267-250 250 250 253 253 253 253 253 253 253 253 253
56268-253 253 253 253 253 253 253 253 253 253 253 253
56269-253 253 253 253 253 253 231 231 231 246 246 246
56270-253 253 253 253 253 253 253 253 253 253 253 253
56271-253 253 253 253 253 253 253 253 253 253 253 253
56272-253 253 253 253 253 253 253 253 253 253 253 253
56273-253 253 253 253 253 253 253 253 253 253 253 253
56274-253 253 253 253 253 253 226 226 226 10 10 10
56275- 2 2 6 6 6 6 30 30 30 2 2 6
56276- 2 2 6 2 2 6 2 2 6 2 2 6
56277- 2 2 6 66 66 66 58 58 58 22 22 22
56278- 6 6 6 0 0 0 0 0 0 0 0 0
56279- 0 0 0 0 0 0 0 0 0 0 0 0
56280- 0 0 0 0 0 0 0 0 0 0 0 0
56281- 0 0 0 0 0 0 0 0 0 0 0 0
56282- 0 0 0 0 0 0 0 0 0 0 0 0
56283- 0 0 0 0 0 0 0 0 0 0 0 0
56284- 0 0 0 0 0 0 6 6 6 22 22 22
56285- 58 58 58 62 62 62 2 2 6 2 2 6
56286- 2 2 6 2 2 6 30 30 30 78 78 78
56287-250 250 250 253 253 253 253 253 253 253 253 253
56288-253 253 253 253 253 253 253 253 253 253 253 253
56289-253 253 253 253 253 253 231 231 231 246 246 246
56290-253 253 253 253 253 253 253 253 253 253 253 253
56291-253 253 253 253 253 253 253 253 253 253 253 253
56292-253 253 253 253 253 253 253 253 253 253 253 253
56293-253 253 253 253 253 253 253 253 253 253 253 253
56294-253 253 253 253 253 253 206 206 206 2 2 6
56295- 22 22 22 34 34 34 18 14 6 22 22 22
56296- 26 26 26 18 18 18 6 6 6 2 2 6
56297- 2 2 6 82 82 82 54 54 54 18 18 18
56298- 6 6 6 0 0 0 0 0 0 0 0 0
56299- 0 0 0 0 0 0 0 0 0 0 0 0
56300- 0 0 0 0 0 0 0 0 0 0 0 0
56301- 0 0 0 0 0 0 0 0 0 0 0 0
56302- 0 0 0 0 0 0 0 0 0 0 0 0
56303- 0 0 0 0 0 0 0 0 0 0 0 0
56304- 0 0 0 0 0 0 6 6 6 26 26 26
56305- 62 62 62 106 106 106 74 54 14 185 133 11
56306-210 162 10 121 92 8 6 6 6 62 62 62
56307-238 238 238 253 253 253 253 253 253 253 253 253
56308-253 253 253 253 253 253 253 253 253 253 253 253
56309-253 253 253 253 253 253 231 231 231 246 246 246
56310-253 253 253 253 253 253 253 253 253 253 253 253
56311-253 253 253 253 253 253 253 253 253 253 253 253
56312-253 253 253 253 253 253 253 253 253 253 253 253
56313-253 253 253 253 253 253 253 253 253 253 253 253
56314-253 253 253 253 253 253 158 158 158 18 18 18
56315- 14 14 14 2 2 6 2 2 6 2 2 6
56316- 6 6 6 18 18 18 66 66 66 38 38 38
56317- 6 6 6 94 94 94 50 50 50 18 18 18
56318- 6 6 6 0 0 0 0 0 0 0 0 0
56319- 0 0 0 0 0 0 0 0 0 0 0 0
56320- 0 0 0 0 0 0 0 0 0 0 0 0
56321- 0 0 0 0 0 0 0 0 0 0 0 0
56322- 0 0 0 0 0 0 0 0 0 0 0 0
56323- 0 0 0 0 0 0 0 0 0 6 6 6
56324- 10 10 10 10 10 10 18 18 18 38 38 38
56325- 78 78 78 142 134 106 216 158 10 242 186 14
56326-246 190 14 246 190 14 156 118 10 10 10 10
56327- 90 90 90 238 238 238 253 253 253 253 253 253
56328-253 253 253 253 253 253 253 253 253 253 253 253
56329-253 253 253 253 253 253 231 231 231 250 250 250
56330-253 253 253 253 253 253 253 253 253 253 253 253
56331-253 253 253 253 253 253 253 253 253 253 253 253
56332-253 253 253 253 253 253 253 253 253 253 253 253
56333-253 253 253 253 253 253 253 253 253 246 230 190
56334-238 204 91 238 204 91 181 142 44 37 26 9
56335- 2 2 6 2 2 6 2 2 6 2 2 6
56336- 2 2 6 2 2 6 38 38 38 46 46 46
56337- 26 26 26 106 106 106 54 54 54 18 18 18
56338- 6 6 6 0 0 0 0 0 0 0 0 0
56339- 0 0 0 0 0 0 0 0 0 0 0 0
56340- 0 0 0 0 0 0 0 0 0 0 0 0
56341- 0 0 0 0 0 0 0 0 0 0 0 0
56342- 0 0 0 0 0 0 0 0 0 0 0 0
56343- 0 0 0 6 6 6 14 14 14 22 22 22
56344- 30 30 30 38 38 38 50 50 50 70 70 70
56345-106 106 106 190 142 34 226 170 11 242 186 14
56346-246 190 14 246 190 14 246 190 14 154 114 10
56347- 6 6 6 74 74 74 226 226 226 253 253 253
56348-253 253 253 253 253 253 253 253 253 253 253 253
56349-253 253 253 253 253 253 231 231 231 250 250 250
56350-253 253 253 253 253 253 253 253 253 253 253 253
56351-253 253 253 253 253 253 253 253 253 253 253 253
56352-253 253 253 253 253 253 253 253 253 253 253 253
56353-253 253 253 253 253 253 253 253 253 228 184 62
56354-241 196 14 241 208 19 232 195 16 38 30 10
56355- 2 2 6 2 2 6 2 2 6 2 2 6
56356- 2 2 6 6 6 6 30 30 30 26 26 26
56357-203 166 17 154 142 90 66 66 66 26 26 26
56358- 6 6 6 0 0 0 0 0 0 0 0 0
56359- 0 0 0 0 0 0 0 0 0 0 0 0
56360- 0 0 0 0 0 0 0 0 0 0 0 0
56361- 0 0 0 0 0 0 0 0 0 0 0 0
56362- 0 0 0 0 0 0 0 0 0 0 0 0
56363- 6 6 6 18 18 18 38 38 38 58 58 58
56364- 78 78 78 86 86 86 101 101 101 123 123 123
56365-175 146 61 210 150 10 234 174 13 246 186 14
56366-246 190 14 246 190 14 246 190 14 238 190 10
56367-102 78 10 2 2 6 46 46 46 198 198 198
56368-253 253 253 253 253 253 253 253 253 253 253 253
56369-253 253 253 253 253 253 234 234 234 242 242 242
56370-253 253 253 253 253 253 253 253 253 253 253 253
56371-253 253 253 253 253 253 253 253 253 253 253 253
56372-253 253 253 253 253 253 253 253 253 253 253 253
56373-253 253 253 253 253 253 253 253 253 224 178 62
56374-242 186 14 241 196 14 210 166 10 22 18 6
56375- 2 2 6 2 2 6 2 2 6 2 2 6
56376- 2 2 6 2 2 6 6 6 6 121 92 8
56377-238 202 15 232 195 16 82 82 82 34 34 34
56378- 10 10 10 0 0 0 0 0 0 0 0 0
56379- 0 0 0 0 0 0 0 0 0 0 0 0
56380- 0 0 0 0 0 0 0 0 0 0 0 0
56381- 0 0 0 0 0 0 0 0 0 0 0 0
56382- 0 0 0 0 0 0 0 0 0 0 0 0
56383- 14 14 14 38 38 38 70 70 70 154 122 46
56384-190 142 34 200 144 11 197 138 11 197 138 11
56385-213 154 11 226 170 11 242 186 14 246 190 14
56386-246 190 14 246 190 14 246 190 14 246 190 14
56387-225 175 15 46 32 6 2 2 6 22 22 22
56388-158 158 158 250 250 250 253 253 253 253 253 253
56389-253 253 253 253 253 253 253 253 253 253 253 253
56390-253 253 253 253 253 253 253 253 253 253 253 253
56391-253 253 253 253 253 253 253 253 253 253 253 253
56392-253 253 253 253 253 253 253 253 253 253 253 253
56393-253 253 253 250 250 250 242 242 242 224 178 62
56394-239 182 13 236 186 11 213 154 11 46 32 6
56395- 2 2 6 2 2 6 2 2 6 2 2 6
56396- 2 2 6 2 2 6 61 42 6 225 175 15
56397-238 190 10 236 186 11 112 100 78 42 42 42
56398- 14 14 14 0 0 0 0 0 0 0 0 0
56399- 0 0 0 0 0 0 0 0 0 0 0 0
56400- 0 0 0 0 0 0 0 0 0 0 0 0
56401- 0 0 0 0 0 0 0 0 0 0 0 0
56402- 0 0 0 0 0 0 0 0 0 6 6 6
56403- 22 22 22 54 54 54 154 122 46 213 154 11
56404-226 170 11 230 174 11 226 170 11 226 170 11
56405-236 178 12 242 186 14 246 190 14 246 190 14
56406-246 190 14 246 190 14 246 190 14 246 190 14
56407-241 196 14 184 144 12 10 10 10 2 2 6
56408- 6 6 6 116 116 116 242 242 242 253 253 253
56409-253 253 253 253 253 253 253 253 253 253 253 253
56410-253 253 253 253 253 253 253 253 253 253 253 253
56411-253 253 253 253 253 253 253 253 253 253 253 253
56412-253 253 253 253 253 253 253 253 253 253 253 253
56413-253 253 253 231 231 231 198 198 198 214 170 54
56414-236 178 12 236 178 12 210 150 10 137 92 6
56415- 18 14 6 2 2 6 2 2 6 2 2 6
56416- 6 6 6 70 47 6 200 144 11 236 178 12
56417-239 182 13 239 182 13 124 112 88 58 58 58
56418- 22 22 22 6 6 6 0 0 0 0 0 0
56419- 0 0 0 0 0 0 0 0 0 0 0 0
56420- 0 0 0 0 0 0 0 0 0 0 0 0
56421- 0 0 0 0 0 0 0 0 0 0 0 0
56422- 0 0 0 0 0 0 0 0 0 10 10 10
56423- 30 30 30 70 70 70 180 133 36 226 170 11
56424-239 182 13 242 186 14 242 186 14 246 186 14
56425-246 190 14 246 190 14 246 190 14 246 190 14
56426-246 190 14 246 190 14 246 190 14 246 190 14
56427-246 190 14 232 195 16 98 70 6 2 2 6
56428- 2 2 6 2 2 6 66 66 66 221 221 221
56429-253 253 253 253 253 253 253 253 253 253 253 253
56430-253 253 253 253 253 253 253 253 253 253 253 253
56431-253 253 253 253 253 253 253 253 253 253 253 253
56432-253 253 253 253 253 253 253 253 253 253 253 253
56433-253 253 253 206 206 206 198 198 198 214 166 58
56434-230 174 11 230 174 11 216 158 10 192 133 9
56435-163 110 8 116 81 8 102 78 10 116 81 8
56436-167 114 7 197 138 11 226 170 11 239 182 13
56437-242 186 14 242 186 14 162 146 94 78 78 78
56438- 34 34 34 14 14 14 6 6 6 0 0 0
56439- 0 0 0 0 0 0 0 0 0 0 0 0
56440- 0 0 0 0 0 0 0 0 0 0 0 0
56441- 0 0 0 0 0 0 0 0 0 0 0 0
56442- 0 0 0 0 0 0 0 0 0 6 6 6
56443- 30 30 30 78 78 78 190 142 34 226 170 11
56444-239 182 13 246 190 14 246 190 14 246 190 14
56445-246 190 14 246 190 14 246 190 14 246 190 14
56446-246 190 14 246 190 14 246 190 14 246 190 14
56447-246 190 14 241 196 14 203 166 17 22 18 6
56448- 2 2 6 2 2 6 2 2 6 38 38 38
56449-218 218 218 253 253 253 253 253 253 253 253 253
56450-253 253 253 253 253 253 253 253 253 253 253 253
56451-253 253 253 253 253 253 253 253 253 253 253 253
56452-253 253 253 253 253 253 253 253 253 253 253 253
56453-250 250 250 206 206 206 198 198 198 202 162 69
56454-226 170 11 236 178 12 224 166 10 210 150 10
56455-200 144 11 197 138 11 192 133 9 197 138 11
56456-210 150 10 226 170 11 242 186 14 246 190 14
56457-246 190 14 246 186 14 225 175 15 124 112 88
56458- 62 62 62 30 30 30 14 14 14 6 6 6
56459- 0 0 0 0 0 0 0 0 0 0 0 0
56460- 0 0 0 0 0 0 0 0 0 0 0 0
56461- 0 0 0 0 0 0 0 0 0 0 0 0
56462- 0 0 0 0 0 0 0 0 0 10 10 10
56463- 30 30 30 78 78 78 174 135 50 224 166 10
56464-239 182 13 246 190 14 246 190 14 246 190 14
56465-246 190 14 246 190 14 246 190 14 246 190 14
56466-246 190 14 246 190 14 246 190 14 246 190 14
56467-246 190 14 246 190 14 241 196 14 139 102 15
56468- 2 2 6 2 2 6 2 2 6 2 2 6
56469- 78 78 78 250 250 250 253 253 253 253 253 253
56470-253 253 253 253 253 253 253 253 253 253 253 253
56471-253 253 253 253 253 253 253 253 253 253 253 253
56472-253 253 253 253 253 253 253 253 253 253 253 253
56473-250 250 250 214 214 214 198 198 198 190 150 46
56474-219 162 10 236 178 12 234 174 13 224 166 10
56475-216 158 10 213 154 11 213 154 11 216 158 10
56476-226 170 11 239 182 13 246 190 14 246 190 14
56477-246 190 14 246 190 14 242 186 14 206 162 42
56478-101 101 101 58 58 58 30 30 30 14 14 14
56479- 6 6 6 0 0 0 0 0 0 0 0 0
56480- 0 0 0 0 0 0 0 0 0 0 0 0
56481- 0 0 0 0 0 0 0 0 0 0 0 0
56482- 0 0 0 0 0 0 0 0 0 10 10 10
56483- 30 30 30 74 74 74 174 135 50 216 158 10
56484-236 178 12 246 190 14 246 190 14 246 190 14
56485-246 190 14 246 190 14 246 190 14 246 190 14
56486-246 190 14 246 190 14 246 190 14 246 190 14
56487-246 190 14 246 190 14 241 196 14 226 184 13
56488- 61 42 6 2 2 6 2 2 6 2 2 6
56489- 22 22 22 238 238 238 253 253 253 253 253 253
56490-253 253 253 253 253 253 253 253 253 253 253 253
56491-253 253 253 253 253 253 253 253 253 253 253 253
56492-253 253 253 253 253 253 253 253 253 253 253 253
56493-253 253 253 226 226 226 187 187 187 180 133 36
56494-216 158 10 236 178 12 239 182 13 236 178 12
56495-230 174 11 226 170 11 226 170 11 230 174 11
56496-236 178 12 242 186 14 246 190 14 246 190 14
56497-246 190 14 246 190 14 246 186 14 239 182 13
56498-206 162 42 106 106 106 66 66 66 34 34 34
56499- 14 14 14 6 6 6 0 0 0 0 0 0
56500- 0 0 0 0 0 0 0 0 0 0 0 0
56501- 0 0 0 0 0 0 0 0 0 0 0 0
56502- 0 0 0 0 0 0 0 0 0 6 6 6
56503- 26 26 26 70 70 70 163 133 67 213 154 11
56504-236 178 12 246 190 14 246 190 14 246 190 14
56505-246 190 14 246 190 14 246 190 14 246 190 14
56506-246 190 14 246 190 14 246 190 14 246 190 14
56507-246 190 14 246 190 14 246 190 14 241 196 14
56508-190 146 13 18 14 6 2 2 6 2 2 6
56509- 46 46 46 246 246 246 253 253 253 253 253 253
56510-253 253 253 253 253 253 253 253 253 253 253 253
56511-253 253 253 253 253 253 253 253 253 253 253 253
56512-253 253 253 253 253 253 253 253 253 253 253 253
56513-253 253 253 221 221 221 86 86 86 156 107 11
56514-216 158 10 236 178 12 242 186 14 246 186 14
56515-242 186 14 239 182 13 239 182 13 242 186 14
56516-242 186 14 246 186 14 246 190 14 246 190 14
56517-246 190 14 246 190 14 246 190 14 246 190 14
56518-242 186 14 225 175 15 142 122 72 66 66 66
56519- 30 30 30 10 10 10 0 0 0 0 0 0
56520- 0 0 0 0 0 0 0 0 0 0 0 0
56521- 0 0 0 0 0 0 0 0 0 0 0 0
56522- 0 0 0 0 0 0 0 0 0 6 6 6
56523- 26 26 26 70 70 70 163 133 67 210 150 10
56524-236 178 12 246 190 14 246 190 14 246 190 14
56525-246 190 14 246 190 14 246 190 14 246 190 14
56526-246 190 14 246 190 14 246 190 14 246 190 14
56527-246 190 14 246 190 14 246 190 14 246 190 14
56528-232 195 16 121 92 8 34 34 34 106 106 106
56529-221 221 221 253 253 253 253 253 253 253 253 253
56530-253 253 253 253 253 253 253 253 253 253 253 253
56531-253 253 253 253 253 253 253 253 253 253 253 253
56532-253 253 253 253 253 253 253 253 253 253 253 253
56533-242 242 242 82 82 82 18 14 6 163 110 8
56534-216 158 10 236 178 12 242 186 14 246 190 14
56535-246 190 14 246 190 14 246 190 14 246 190 14
56536-246 190 14 246 190 14 246 190 14 246 190 14
56537-246 190 14 246 190 14 246 190 14 246 190 14
56538-246 190 14 246 190 14 242 186 14 163 133 67
56539- 46 46 46 18 18 18 6 6 6 0 0 0
56540- 0 0 0 0 0 0 0 0 0 0 0 0
56541- 0 0 0 0 0 0 0 0 0 0 0 0
56542- 0 0 0 0 0 0 0 0 0 10 10 10
56543- 30 30 30 78 78 78 163 133 67 210 150 10
56544-236 178 12 246 186 14 246 190 14 246 190 14
56545-246 190 14 246 190 14 246 190 14 246 190 14
56546-246 190 14 246 190 14 246 190 14 246 190 14
56547-246 190 14 246 190 14 246 190 14 246 190 14
56548-241 196 14 215 174 15 190 178 144 253 253 253
56549-253 253 253 253 253 253 253 253 253 253 253 253
56550-253 253 253 253 253 253 253 253 253 253 253 253
56551-253 253 253 253 253 253 253 253 253 253 253 253
56552-253 253 253 253 253 253 253 253 253 218 218 218
56553- 58 58 58 2 2 6 22 18 6 167 114 7
56554-216 158 10 236 178 12 246 186 14 246 190 14
56555-246 190 14 246 190 14 246 190 14 246 190 14
56556-246 190 14 246 190 14 246 190 14 246 190 14
56557-246 190 14 246 190 14 246 190 14 246 190 14
56558-246 190 14 246 186 14 242 186 14 190 150 46
56559- 54 54 54 22 22 22 6 6 6 0 0 0
56560- 0 0 0 0 0 0 0 0 0 0 0 0
56561- 0 0 0 0 0 0 0 0 0 0 0 0
56562- 0 0 0 0 0 0 0 0 0 14 14 14
56563- 38 38 38 86 86 86 180 133 36 213 154 11
56564-236 178 12 246 186 14 246 190 14 246 190 14
56565-246 190 14 246 190 14 246 190 14 246 190 14
56566-246 190 14 246 190 14 246 190 14 246 190 14
56567-246 190 14 246 190 14 246 190 14 246 190 14
56568-246 190 14 232 195 16 190 146 13 214 214 214
56569-253 253 253 253 253 253 253 253 253 253 253 253
56570-253 253 253 253 253 253 253 253 253 253 253 253
56571-253 253 253 253 253 253 253 253 253 253 253 253
56572-253 253 253 250 250 250 170 170 170 26 26 26
56573- 2 2 6 2 2 6 37 26 9 163 110 8
56574-219 162 10 239 182 13 246 186 14 246 190 14
56575-246 190 14 246 190 14 246 190 14 246 190 14
56576-246 190 14 246 190 14 246 190 14 246 190 14
56577-246 190 14 246 190 14 246 190 14 246 190 14
56578-246 186 14 236 178 12 224 166 10 142 122 72
56579- 46 46 46 18 18 18 6 6 6 0 0 0
56580- 0 0 0 0 0 0 0 0 0 0 0 0
56581- 0 0 0 0 0 0 0 0 0 0 0 0
56582- 0 0 0 0 0 0 6 6 6 18 18 18
56583- 50 50 50 109 106 95 192 133 9 224 166 10
56584-242 186 14 246 190 14 246 190 14 246 190 14
56585-246 190 14 246 190 14 246 190 14 246 190 14
56586-246 190 14 246 190 14 246 190 14 246 190 14
56587-246 190 14 246 190 14 246 190 14 246 190 14
56588-242 186 14 226 184 13 210 162 10 142 110 46
56589-226 226 226 253 253 253 253 253 253 253 253 253
56590-253 253 253 253 253 253 253 253 253 253 253 253
56591-253 253 253 253 253 253 253 253 253 253 253 253
56592-198 198 198 66 66 66 2 2 6 2 2 6
56593- 2 2 6 2 2 6 50 34 6 156 107 11
56594-219 162 10 239 182 13 246 186 14 246 190 14
56595-246 190 14 246 190 14 246 190 14 246 190 14
56596-246 190 14 246 190 14 246 190 14 246 190 14
56597-246 190 14 246 190 14 246 190 14 242 186 14
56598-234 174 13 213 154 11 154 122 46 66 66 66
56599- 30 30 30 10 10 10 0 0 0 0 0 0
56600- 0 0 0 0 0 0 0 0 0 0 0 0
56601- 0 0 0 0 0 0 0 0 0 0 0 0
56602- 0 0 0 0 0 0 6 6 6 22 22 22
56603- 58 58 58 154 121 60 206 145 10 234 174 13
56604-242 186 14 246 186 14 246 190 14 246 190 14
56605-246 190 14 246 190 14 246 190 14 246 190 14
56606-246 190 14 246 190 14 246 190 14 246 190 14
56607-246 190 14 246 190 14 246 190 14 246 190 14
56608-246 186 14 236 178 12 210 162 10 163 110 8
56609- 61 42 6 138 138 138 218 218 218 250 250 250
56610-253 253 253 253 253 253 253 253 253 250 250 250
56611-242 242 242 210 210 210 144 144 144 66 66 66
56612- 6 6 6 2 2 6 2 2 6 2 2 6
56613- 2 2 6 2 2 6 61 42 6 163 110 8
56614-216 158 10 236 178 12 246 190 14 246 190 14
56615-246 190 14 246 190 14 246 190 14 246 190 14
56616-246 190 14 246 190 14 246 190 14 246 190 14
56617-246 190 14 239 182 13 230 174 11 216 158 10
56618-190 142 34 124 112 88 70 70 70 38 38 38
56619- 18 18 18 6 6 6 0 0 0 0 0 0
56620- 0 0 0 0 0 0 0 0 0 0 0 0
56621- 0 0 0 0 0 0 0 0 0 0 0 0
56622- 0 0 0 0 0 0 6 6 6 22 22 22
56623- 62 62 62 168 124 44 206 145 10 224 166 10
56624-236 178 12 239 182 13 242 186 14 242 186 14
56625-246 186 14 246 190 14 246 190 14 246 190 14
56626-246 190 14 246 190 14 246 190 14 246 190 14
56627-246 190 14 246 190 14 246 190 14 246 190 14
56628-246 190 14 236 178 12 216 158 10 175 118 6
56629- 80 54 7 2 2 6 6 6 6 30 30 30
56630- 54 54 54 62 62 62 50 50 50 38 38 38
56631- 14 14 14 2 2 6 2 2 6 2 2 6
56632- 2 2 6 2 2 6 2 2 6 2 2 6
56633- 2 2 6 6 6 6 80 54 7 167 114 7
56634-213 154 11 236 178 12 246 190 14 246 190 14
56635-246 190 14 246 190 14 246 190 14 246 190 14
56636-246 190 14 242 186 14 239 182 13 239 182 13
56637-230 174 11 210 150 10 174 135 50 124 112 88
56638- 82 82 82 54 54 54 34 34 34 18 18 18
56639- 6 6 6 0 0 0 0 0 0 0 0 0
56640- 0 0 0 0 0 0 0 0 0 0 0 0
56641- 0 0 0 0 0 0 0 0 0 0 0 0
56642- 0 0 0 0 0 0 6 6 6 18 18 18
56643- 50 50 50 158 118 36 192 133 9 200 144 11
56644-216 158 10 219 162 10 224 166 10 226 170 11
56645-230 174 11 236 178 12 239 182 13 239 182 13
56646-242 186 14 246 186 14 246 190 14 246 190 14
56647-246 190 14 246 190 14 246 190 14 246 190 14
56648-246 186 14 230 174 11 210 150 10 163 110 8
56649-104 69 6 10 10 10 2 2 6 2 2 6
56650- 2 2 6 2 2 6 2 2 6 2 2 6
56651- 2 2 6 2 2 6 2 2 6 2 2 6
56652- 2 2 6 2 2 6 2 2 6 2 2 6
56653- 2 2 6 6 6 6 91 60 6 167 114 7
56654-206 145 10 230 174 11 242 186 14 246 190 14
56655-246 190 14 246 190 14 246 186 14 242 186 14
56656-239 182 13 230 174 11 224 166 10 213 154 11
56657-180 133 36 124 112 88 86 86 86 58 58 58
56658- 38 38 38 22 22 22 10 10 10 6 6 6
56659- 0 0 0 0 0 0 0 0 0 0 0 0
56660- 0 0 0 0 0 0 0 0 0 0 0 0
56661- 0 0 0 0 0 0 0 0 0 0 0 0
56662- 0 0 0 0 0 0 0 0 0 14 14 14
56663- 34 34 34 70 70 70 138 110 50 158 118 36
56664-167 114 7 180 123 7 192 133 9 197 138 11
56665-200 144 11 206 145 10 213 154 11 219 162 10
56666-224 166 10 230 174 11 239 182 13 242 186 14
56667-246 186 14 246 186 14 246 186 14 246 186 14
56668-239 182 13 216 158 10 185 133 11 152 99 6
56669-104 69 6 18 14 6 2 2 6 2 2 6
56670- 2 2 6 2 2 6 2 2 6 2 2 6
56671- 2 2 6 2 2 6 2 2 6 2 2 6
56672- 2 2 6 2 2 6 2 2 6 2 2 6
56673- 2 2 6 6 6 6 80 54 7 152 99 6
56674-192 133 9 219 162 10 236 178 12 239 182 13
56675-246 186 14 242 186 14 239 182 13 236 178 12
56676-224 166 10 206 145 10 192 133 9 154 121 60
56677- 94 94 94 62 62 62 42 42 42 22 22 22
56678- 14 14 14 6 6 6 0 0 0 0 0 0
56679- 0 0 0 0 0 0 0 0 0 0 0 0
56680- 0 0 0 0 0 0 0 0 0 0 0 0
56681- 0 0 0 0 0 0 0 0 0 0 0 0
56682- 0 0 0 0 0 0 0 0 0 6 6 6
56683- 18 18 18 34 34 34 58 58 58 78 78 78
56684-101 98 89 124 112 88 142 110 46 156 107 11
56685-163 110 8 167 114 7 175 118 6 180 123 7
56686-185 133 11 197 138 11 210 150 10 219 162 10
56687-226 170 11 236 178 12 236 178 12 234 174 13
56688-219 162 10 197 138 11 163 110 8 130 83 6
56689- 91 60 6 10 10 10 2 2 6 2 2 6
56690- 18 18 18 38 38 38 38 38 38 38 38 38
56691- 38 38 38 38 38 38 38 38 38 38 38 38
56692- 38 38 38 38 38 38 26 26 26 2 2 6
56693- 2 2 6 6 6 6 70 47 6 137 92 6
56694-175 118 6 200 144 11 219 162 10 230 174 11
56695-234 174 13 230 174 11 219 162 10 210 150 10
56696-192 133 9 163 110 8 124 112 88 82 82 82
56697- 50 50 50 30 30 30 14 14 14 6 6 6
56698- 0 0 0 0 0 0 0 0 0 0 0 0
56699- 0 0 0 0 0 0 0 0 0 0 0 0
56700- 0 0 0 0 0 0 0 0 0 0 0 0
56701- 0 0 0 0 0 0 0 0 0 0 0 0
56702- 0 0 0 0 0 0 0 0 0 0 0 0
56703- 6 6 6 14 14 14 22 22 22 34 34 34
56704- 42 42 42 58 58 58 74 74 74 86 86 86
56705-101 98 89 122 102 70 130 98 46 121 87 25
56706-137 92 6 152 99 6 163 110 8 180 123 7
56707-185 133 11 197 138 11 206 145 10 200 144 11
56708-180 123 7 156 107 11 130 83 6 104 69 6
56709- 50 34 6 54 54 54 110 110 110 101 98 89
56710- 86 86 86 82 82 82 78 78 78 78 78 78
56711- 78 78 78 78 78 78 78 78 78 78 78 78
56712- 78 78 78 82 82 82 86 86 86 94 94 94
56713-106 106 106 101 101 101 86 66 34 124 80 6
56714-156 107 11 180 123 7 192 133 9 200 144 11
56715-206 145 10 200 144 11 192 133 9 175 118 6
56716-139 102 15 109 106 95 70 70 70 42 42 42
56717- 22 22 22 10 10 10 0 0 0 0 0 0
56718- 0 0 0 0 0 0 0 0 0 0 0 0
56719- 0 0 0 0 0 0 0 0 0 0 0 0
56720- 0 0 0 0 0 0 0 0 0 0 0 0
56721- 0 0 0 0 0 0 0 0 0 0 0 0
56722- 0 0 0 0 0 0 0 0 0 0 0 0
56723- 0 0 0 0 0 0 6 6 6 10 10 10
56724- 14 14 14 22 22 22 30 30 30 38 38 38
56725- 50 50 50 62 62 62 74 74 74 90 90 90
56726-101 98 89 112 100 78 121 87 25 124 80 6
56727-137 92 6 152 99 6 152 99 6 152 99 6
56728-138 86 6 124 80 6 98 70 6 86 66 30
56729-101 98 89 82 82 82 58 58 58 46 46 46
56730- 38 38 38 34 34 34 34 34 34 34 34 34
56731- 34 34 34 34 34 34 34 34 34 34 34 34
56732- 34 34 34 34 34 34 38 38 38 42 42 42
56733- 54 54 54 82 82 82 94 86 76 91 60 6
56734-134 86 6 156 107 11 167 114 7 175 118 6
56735-175 118 6 167 114 7 152 99 6 121 87 25
56736-101 98 89 62 62 62 34 34 34 18 18 18
56737- 6 6 6 0 0 0 0 0 0 0 0 0
56738- 0 0 0 0 0 0 0 0 0 0 0 0
56739- 0 0 0 0 0 0 0 0 0 0 0 0
56740- 0 0 0 0 0 0 0 0 0 0 0 0
56741- 0 0 0 0 0 0 0 0 0 0 0 0
56742- 0 0 0 0 0 0 0 0 0 0 0 0
56743- 0 0 0 0 0 0 0 0 0 0 0 0
56744- 0 0 0 6 6 6 6 6 6 10 10 10
56745- 18 18 18 22 22 22 30 30 30 42 42 42
56746- 50 50 50 66 66 66 86 86 86 101 98 89
56747-106 86 58 98 70 6 104 69 6 104 69 6
56748-104 69 6 91 60 6 82 62 34 90 90 90
56749- 62 62 62 38 38 38 22 22 22 14 14 14
56750- 10 10 10 10 10 10 10 10 10 10 10 10
56751- 10 10 10 10 10 10 6 6 6 10 10 10
56752- 10 10 10 10 10 10 10 10 10 14 14 14
56753- 22 22 22 42 42 42 70 70 70 89 81 66
56754- 80 54 7 104 69 6 124 80 6 137 92 6
56755-134 86 6 116 81 8 100 82 52 86 86 86
56756- 58 58 58 30 30 30 14 14 14 6 6 6
56757- 0 0 0 0 0 0 0 0 0 0 0 0
56758- 0 0 0 0 0 0 0 0 0 0 0 0
56759- 0 0 0 0 0 0 0 0 0 0 0 0
56760- 0 0 0 0 0 0 0 0 0 0 0 0
56761- 0 0 0 0 0 0 0 0 0 0 0 0
56762- 0 0 0 0 0 0 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 0 0 0 6 6 6 10 10 10 14 14 14
56766- 18 18 18 26 26 26 38 38 38 54 54 54
56767- 70 70 70 86 86 86 94 86 76 89 81 66
56768- 89 81 66 86 86 86 74 74 74 50 50 50
56769- 30 30 30 14 14 14 6 6 6 0 0 0
56770- 0 0 0 0 0 0 0 0 0 0 0 0
56771- 0 0 0 0 0 0 0 0 0 0 0 0
56772- 0 0 0 0 0 0 0 0 0 0 0 0
56773- 6 6 6 18 18 18 34 34 34 58 58 58
56774- 82 82 82 89 81 66 89 81 66 89 81 66
56775- 94 86 66 94 86 76 74 74 74 50 50 50
56776- 26 26 26 14 14 14 6 6 6 0 0 0
56777- 0 0 0 0 0 0 0 0 0 0 0 0
56778- 0 0 0 0 0 0 0 0 0 0 0 0
56779- 0 0 0 0 0 0 0 0 0 0 0 0
56780- 0 0 0 0 0 0 0 0 0 0 0 0
56781- 0 0 0 0 0 0 0 0 0 0 0 0
56782- 0 0 0 0 0 0 0 0 0 0 0 0
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 0 0 0
56785- 0 0 0 0 0 0 0 0 0 0 0 0
56786- 6 6 6 6 6 6 14 14 14 18 18 18
56787- 30 30 30 38 38 38 46 46 46 54 54 54
56788- 50 50 50 42 42 42 30 30 30 18 18 18
56789- 10 10 10 0 0 0 0 0 0 0 0 0
56790- 0 0 0 0 0 0 0 0 0 0 0 0
56791- 0 0 0 0 0 0 0 0 0 0 0 0
56792- 0 0 0 0 0 0 0 0 0 0 0 0
56793- 0 0 0 6 6 6 14 14 14 26 26 26
56794- 38 38 38 50 50 50 58 58 58 58 58 58
56795- 54 54 54 42 42 42 30 30 30 18 18 18
56796- 10 10 10 0 0 0 0 0 0 0 0 0
56797- 0 0 0 0 0 0 0 0 0 0 0 0
56798- 0 0 0 0 0 0 0 0 0 0 0 0
56799- 0 0 0 0 0 0 0 0 0 0 0 0
56800- 0 0 0 0 0 0 0 0 0 0 0 0
56801- 0 0 0 0 0 0 0 0 0 0 0 0
56802- 0 0 0 0 0 0 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 0 0 0
56805- 0 0 0 0 0 0 0 0 0 0 0 0
56806- 0 0 0 0 0 0 0 0 0 6 6 6
56807- 6 6 6 10 10 10 14 14 14 18 18 18
56808- 18 18 18 14 14 14 10 10 10 6 6 6
56809- 0 0 0 0 0 0 0 0 0 0 0 0
56810- 0 0 0 0 0 0 0 0 0 0 0 0
56811- 0 0 0 0 0 0 0 0 0 0 0 0
56812- 0 0 0 0 0 0 0 0 0 0 0 0
56813- 0 0 0 0 0 0 0 0 0 6 6 6
56814- 14 14 14 18 18 18 22 22 22 22 22 22
56815- 18 18 18 14 14 14 10 10 10 6 6 6
56816- 0 0 0 0 0 0 0 0 0 0 0 0
56817- 0 0 0 0 0 0 0 0 0 0 0 0
56818- 0 0 0 0 0 0 0 0 0 0 0 0
56819- 0 0 0 0 0 0 0 0 0 0 0 0
56820- 0 0 0 0 0 0 0 0 0 0 0 0
56821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56834+4 4 4 4 4 4
56835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56848+4 4 4 4 4 4
56849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56862+4 4 4 4 4 4
56863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56876+4 4 4 4 4 4
56877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56890+4 4 4 4 4 4
56891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56904+4 4 4 4 4 4
56905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56909+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
56910+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
56911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56914+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
56915+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
56916+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
56917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56918+4 4 4 4 4 4
56919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56923+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
56924+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
56925+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56928+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
56929+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
56930+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
56931+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56932+4 4 4 4 4 4
56933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56937+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
56938+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
56939+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
56940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56942+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
56943+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
56944+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
56945+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
56946+4 4 4 4 4 4
56947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56950+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
56951+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
56952+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
56953+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
56954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56955+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
56956+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
56957+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
56958+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
56959+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
56960+4 4 4 4 4 4
56961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56964+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
56965+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
56966+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
56967+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
56968+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
[Roughly a thousand lines of '+'-prefixed numeric triplets elided here, continuing from above: at this point the patch replaces what appears to be a plain-text PPM logo image in the kernel tree, so the hunk body is raw RGB pixel data rather than reviewable code. The code portion of the diff resumes below.]
57941diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
57942index fef20db..d28b1ab 100644
57943--- a/drivers/xen/xenfs/xenstored.c
57944+++ b/drivers/xen/xenfs/xenstored.c
57945@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
57946 static int xsd_kva_open(struct inode *inode, struct file *file)
57947 {
57948 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
57949+#ifdef CONFIG_GRKERNSEC_HIDESYM
57950+ NULL);
57951+#else
57952 xen_store_interface);
57953+#endif
57954+
57955 if (!file->private_data)
57956 return -ENOMEM;
57957 return 0;
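Note: the hunk above is the canonical GRKERNSEC_HIDESYM pattern used throughout this patch: wherever a kernel pointer would be formatted into data that userspace can read back, the real address is replaced by NULL so the output reveals nothing about kernel layout. A minimal sketch of the idiom, assuming ordinary kernel context (kasprintf, GFP_KERNEL); the function name is illustrative, not from the patch:

    /* Format a kernel pointer for userspace consumption; with HIDESYM
     * enabled the printed value is the NULL pointer, not the real one. */
    static char *format_handle(const void *kernel_ptr)
    {
    	return kasprintf(GFP_KERNEL, "0x%p",
    #ifdef CONFIG_GRKERNSEC_HIDESYM
    			 NULL);
    #else
    			 kernel_ptr);
    #endif
    }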
57958diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
57959index eb14e05..5156de7 100644
57960--- a/fs/9p/vfs_addr.c
57961+++ b/fs/9p/vfs_addr.c
57962@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
57963
57964 retval = v9fs_file_write_internal(inode,
57965 v9inode->writeback_fid,
57966- (__force const char __user *)buffer,
57967+ (const char __force_user *)buffer,
57968 len, &offset, 0);
57969 if (retval > 0)
57970 retval = 0;
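Note: __force_user is grsecurity's spelling of the combined `__force __user` cast. It is functionally the same sparse annotation as the line it replaces; keeping it as one dedicated macro lets the patch's GCC checker plugins recognize deliberate kernel-to-user address-space casts. The classic pattern it annotates is a kernel buffer being fed through an API that takes a user pointer; a sketch under the old set_fs() regime (write_fn and the surrounding variables are hypothetical):

    /* Temporarily widen the address limit so a user-pointer API will
     * accept a kernel buffer; the __force_user cast documents intent. */
    mm_segment_t old_fs = get_fs();
    set_fs(KERNEL_DS);
    ret = write_fn(file, (const char __force_user *)kbuf, len, &pos);
    set_fs(old_fs);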
57971diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
57972index 9ee5343..5165e3c 100644
57973--- a/fs/9p/vfs_inode.c
57974+++ b/fs/9p/vfs_inode.c
57975@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
57976 void
57977 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
57978 {
57979- char *s = nd_get_link(nd);
57980+ const char *s = nd_get_link(nd);
57981
57982 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
57983 dentry, IS_ERR(s) ? "<error>" : s);
57984diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
57985index c055d56e..a46f4f5 100644
57986--- a/fs/Kconfig.binfmt
57987+++ b/fs/Kconfig.binfmt
57988@@ -106,7 +106,7 @@ config HAVE_AOUT
57989
57990 config BINFMT_AOUT
57991 tristate "Kernel support for a.out and ECOFF binaries"
57992- depends on HAVE_AOUT
57993+ depends on HAVE_AOUT && BROKEN
57994 ---help---
57995 A.out (Assembler.OUTput) is a set of formats for libraries and
57996 executables used in the earliest versions of UNIX. Linux used
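Note: BROKEN is a Kconfig symbol with no prompt, so it can never be switched on from menuconfig; adding it to the dependency list is the patch's way of disabling a.out support outright while leaving the option text in the tree. The a.out format predates the protections modern loaders depend on, which is presumably why grsecurity treats it as unsupportable rather than merely off by default.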
57997diff --git a/fs/afs/inode.c b/fs/afs/inode.c
57998index 8a1d38e..300a14e 100644
57999--- a/fs/afs/inode.c
58000+++ b/fs/afs/inode.c
58001@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58002 struct afs_vnode *vnode;
58003 struct super_block *sb;
58004 struct inode *inode;
58005- static atomic_t afs_autocell_ino;
58006+ static atomic_unchecked_t afs_autocell_ino;
58007
58008 _enter("{%x:%u},%*.*s,",
58009 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58010@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58011 data.fid.unique = 0;
58012 data.fid.vnode = 0;
58013
58014- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58015+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58016 afs_iget5_autocell_test, afs_iget5_set,
58017 &data);
58018 if (!inode) {
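Note: atomic_unchecked_t is the escape hatch for PaX's REFCOUNT hardening. With REFCOUNT active, plain atomic_t arithmetic traps on overflow to stop refcount wraparound bugs; counters where wraparound is harmless — here a generator for synthetic inode numbers — are converted to the unchecked type so a long-lived counter cannot trip a false positive. A sketch of the pattern (the type and helper exist only with the grsecurity/PaX patch applied):

    /* Wrap-tolerant ID generator: overflow is by design, so use the
     * variant that PaX REFCOUNT does not instrument. */
    static atomic_unchecked_t next_ino = ATOMIC_INIT(0);

    static unsigned long new_autocell_ino(void)
    {
    	return atomic_inc_return_unchecked(&next_ino);
    }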
58019diff --git a/fs/aio.c b/fs/aio.c
58020index c428871..3f3041b 100644
58021--- a/fs/aio.c
58022+++ b/fs/aio.c
58023@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58024 size += sizeof(struct io_event) * nr_events;
58025
58026 nr_pages = PFN_UP(size);
58027- if (nr_pages < 0)
58028+ if (nr_pages <= 0)
58029 return -EINVAL;
58030
58031 file = aio_private_file(ctx, nr_pages);
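Note: nr_pages is a signed int computed from a size that scales with the caller-supplied nr_events, so the multiplication can in principle wrap. A wrapped size of zero gives PFN_UP(size) == 0, which the old `< 0` test accepted; `<= 0` rejects it as well. A sketch of the failure mode (types and base_size illustrative):

    /* If "size" wraps to 0, PFN_UP(0) is 0 — not negative — so only
     * the <= 0 comparison catches the overflowed request. */
    size_t size = base_size + sizeof(struct io_event) * nr_events; /* may wrap */
    int nr_pages = PFN_UP(size);
    if (nr_pages <= 0)
    	return -EINVAL;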
58032diff --git a/fs/attr.c b/fs/attr.c
58033index 6530ced..4a827e2 100644
58034--- a/fs/attr.c
58035+++ b/fs/attr.c
58036@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58037 unsigned long limit;
58038
58039 limit = rlimit(RLIMIT_FSIZE);
58040+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58041 if (limit != RLIM_INFINITY && offset > limit)
58042 goto out_sig;
58043 if (offset > inode->i_sb->s_maxbytes)
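Note: gr_learn_resource() is the hook for grsecurity's RBAC learning mode. When learning is enabled it records that the current task attempted to use this much of the given resource (here RLIMIT_FSIZE for a size change to `offset`), so the policy generator can later emit realistic per-subject limits; with learning off it is effectively a no-op. The call sits before the limit check so that even requests that would be refused are still learned.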
58044diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58045index 116fd38..c04182da 100644
58046--- a/fs/autofs4/waitq.c
58047+++ b/fs/autofs4/waitq.c
58048@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58049 {
58050 unsigned long sigpipe, flags;
58051 mm_segment_t fs;
58052- const char *data = (const char *)addr;
58053+ const char __user *data = (const char __force_user *)addr;
58054 ssize_t wr = 0;
58055
58056 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58057@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58058 return 1;
58059 }
58060
58061+#ifdef CONFIG_GRKERNSEC_HIDESYM
58062+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58063+#endif
58064+
58065 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58066 enum autofs_notify notify)
58067 {
58068@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58069
58070 /* If this is a direct mount request create a dummy name */
58071 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58072+#ifdef CONFIG_GRKERNSEC_HIDESYM
58073+ /* this name does get written to userland via autofs4_write() */
58074+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58075+#else
58076 qstr.len = sprintf(name, "%p", dentry);
58077+#endif
58078 else {
58079 qstr.len = autofs4_getpath(sbi, dentry, &name);
58080 if (!qstr.len) {
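Note: the unpatched code names direct-mount wait entries after the dentry's kernel address ("%p"), and that string travels to the automount daemon through autofs4_write() — a straightforward infoleak under HIDESYM's threat model. The patch substitutes a monotonically increasing counter, which keeps the names unique without exposing an address. Reduced to its essentials:

    /* Unique but opaque dummy names for direct mount requests; the
     * buffer must hold 8 hex digits plus the terminating NUL. */
    static atomic_unchecked_t dummy_name_id = ATOMIC_INIT(0);

    static int make_dummy_name(char name[9])
    {
    	return sprintf(name, "%08x", atomic_inc_return_unchecked(&dummy_name_id));
    }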
58081diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58082index 2722387..56059b5 100644
58083--- a/fs/befs/endian.h
58084+++ b/fs/befs/endian.h
58085@@ -11,7 +11,7 @@
58086
58087 #include <asm/byteorder.h>
58088
58089-static inline u64
58090+static inline u64 __intentional_overflow(-1)
58091 fs64_to_cpu(const struct super_block *sb, fs64 n)
58092 {
58093 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58094@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58095 return (__force fs64)cpu_to_be64(n);
58096 }
58097
58098-static inline u32
58099+static inline u32 __intentional_overflow(-1)
58100 fs32_to_cpu(const struct super_block *sb, fs32 n)
58101 {
58102 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58103@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58104 return (__force fs32)cpu_to_be32(n);
58105 }
58106
58107-static inline u16
58108+static inline u16 __intentional_overflow(-1)
58109 fs16_to_cpu(const struct super_block *sb, fs16 n)
58110 {
58111 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
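Note: __intentional_overflow() is an annotation consumed by the size_overflow GCC plugin that grsecurity builds with. The plugin instruments integer arithmetic that feeds size calculations; marking a function with argument position -1 tells it the return value may legitimately wrap (byte-swap helpers like these are modular arithmetic by nature), so no overflow report is generated there. Without the plugin the macro should expand to nothing; a plausible shape — the guard macro name below is a guess, not taken from this patch:

    #ifdef SIZE_OVERFLOW_PLUGIN   /* illustrative guard */
    #define __intentional_overflow(...) \
    	__attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif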
58112diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58113index 4c55668..eeae150 100644
58114--- a/fs/binfmt_aout.c
58115+++ b/fs/binfmt_aout.c
58116@@ -16,6 +16,7 @@
58117 #include <linux/string.h>
58118 #include <linux/fs.h>
58119 #include <linux/file.h>
58120+#include <linux/security.h>
58121 #include <linux/stat.h>
58122 #include <linux/fcntl.h>
58123 #include <linux/ptrace.h>
58124@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58125 #endif
58126 # define START_STACK(u) ((void __user *)u.start_stack)
58127
58128+ memset(&dump, 0, sizeof(dump));
58129+
58130 fs = get_fs();
58131 set_fs(KERNEL_DS);
58132 has_dumped = 1;
58133@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58134
58135 /* If the size of the dump file exceeds the rlimit, then see what would happen
58136 if we wrote the stack, but not the data area. */
58137+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58138 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58139 dump.u_dsize = 0;
58140
58141 /* Make sure we have enough room to write the stack and data areas. */
58142+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58143 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58144 dump.u_ssize = 0;
58145
58146@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58147 rlim = rlimit(RLIMIT_DATA);
58148 if (rlim >= RLIM_INFINITY)
58149 rlim = ~0;
58150+
58151+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58152 if (ex.a_data + ex.a_bss > rlim)
58153 return -ENOMEM;
58154
58155@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58156
58157 install_exec_creds(bprm);
58158
58159+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58160+ current->mm->pax_flags = 0UL;
58161+#endif
58162+
58163+#ifdef CONFIG_PAX_PAGEEXEC
58164+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58165+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58166+
58167+#ifdef CONFIG_PAX_EMUTRAMP
58168+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58169+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58170+#endif
58171+
58172+#ifdef CONFIG_PAX_MPROTECT
58173+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58174+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58175+#endif
58176+
58177+ }
58178+#endif
58179+
58180 if (N_MAGIC(ex) == OMAGIC) {
58181 unsigned long text_addr, map_size;
58182 loff_t pos;
58183@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58184 return error;
58185
58186 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58187- PROT_READ | PROT_WRITE | PROT_EXEC,
58188+ PROT_READ | PROT_WRITE,
58189 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58190 fd_offset + ex.a_text);
58191 if (error != N_DATADDR(ex))
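Note: the a.out loader changes do three things: zero the coredump header before use so uninitialized stack bytes never reach the dump; feed the RLIMIT_CORE and RLIMIT_DATA checks through the learning hook; and derive the task's PaX flags from the binary, with the data segment now mapped without PROT_EXEC so PAGEEXEC has teeth. The flag logic is opt-out, applied only when the binary has not disabled PAGEEXEC; condensed from the hunk above:

    /* PaX flags from the a.out header: a protection is on unless the
     * F_PAX_* bit asks it off; EMUTRAMP alone is opt-in. */
    if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
    	current->mm->pax_flags |= MF_PAX_PAGEEXEC;
    	if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
    		current->mm->pax_flags |= MF_PAX_EMUTRAMP;
    	if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
    		current->mm->pax_flags |= MF_PAX_MPROTECT;
    }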
58192diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58193index 995986b..dcc4ef2 100644
58194--- a/fs/binfmt_elf.c
58195+++ b/fs/binfmt_elf.c
58196@@ -34,6 +34,7 @@
58197 #include <linux/utsname.h>
58198 #include <linux/coredump.h>
58199 #include <linux/sched.h>
58200+#include <linux/xattr.h>
58201 #include <asm/uaccess.h>
58202 #include <asm/param.h>
58203 #include <asm/page.h>
58204@@ -47,7 +48,7 @@
58205
58206 static int load_elf_binary(struct linux_binprm *bprm);
58207 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58208- int, int, unsigned long);
58209+ int, int, unsigned long) __intentional_overflow(-1);
58210
58211 #ifdef CONFIG_USELIB
58212 static int load_elf_library(struct file *);
58213@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58214 #define elf_core_dump NULL
58215 #endif
58216
58217+#ifdef CONFIG_PAX_MPROTECT
58218+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58219+#endif
58220+
58221+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58222+static void elf_handle_mmap(struct file *file);
58223+#endif
58224+
58225 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58226 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58227 #else
58228@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58229 .load_binary = load_elf_binary,
58230 .load_shlib = load_elf_library,
58231 .core_dump = elf_core_dump,
58232+
58233+#ifdef CONFIG_PAX_MPROTECT
58234+ .handle_mprotect= elf_handle_mprotect,
58235+#endif
58236+
58237+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58238+ .handle_mmap = elf_handle_mmap,
58239+#endif
58240+
58241 .min_coredump = ELF_EXEC_PAGESIZE,
58242 };
58243
58244@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58245
58246 static int set_brk(unsigned long start, unsigned long end)
58247 {
58248+ unsigned long e = end;
58249+
58250 start = ELF_PAGEALIGN(start);
58251 end = ELF_PAGEALIGN(end);
58252 if (end > start) {
58253@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58254 if (BAD_ADDR(addr))
58255 return addr;
58256 }
58257- current->mm->start_brk = current->mm->brk = end;
58258+ current->mm->start_brk = current->mm->brk = e;
58259 return 0;
58260 }
58261
58262@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58263 elf_addr_t __user *u_rand_bytes;
58264 const char *k_platform = ELF_PLATFORM;
58265 const char *k_base_platform = ELF_BASE_PLATFORM;
58266- unsigned char k_rand_bytes[16];
58267+ u32 k_rand_bytes[4];
58268 int items;
58269 elf_addr_t *elf_info;
58270 int ei_index = 0;
58271 const struct cred *cred = current_cred();
58272 struct vm_area_struct *vma;
58273+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58274
58275 /*
58276 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58277@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58278 * Generate 16 random bytes for userspace PRNG seeding.
58279 */
58280 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58281- u_rand_bytes = (elf_addr_t __user *)
58282- STACK_ALLOC(p, sizeof(k_rand_bytes));
58283+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58284+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58285+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58286+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58287+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58288+ u_rand_bytes = (elf_addr_t __user *) p;
58289 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58290 return -EFAULT;
58291
58292@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58293 return -EFAULT;
58294 current->mm->env_end = p;
58295
58296+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58297+
58298 /* Put the elf_info on the stack in the right place. */
58299 sp = (elf_addr_t __user *)envp + 1;
58300- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58301+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58302 return -EFAULT;
58303 return 0;
58304 }
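Besides widening k_rand_bytes to four u32 words, the hunk folds each word back into the kernel's prandom state, XORed with prandom_u32() so the reseed can only add entropy, never overwrite it; the auxv vector is likewise staged in saved_auxv before the user copy. A small userspace analogue of the XOR-mix pattern (the PRNG state variable here is illustrative):

#include <stdint.h>
#include <sys/types.h>
#include <sys/random.h>

static uint32_t prng_state = 1;	/* illustrative PRNG state */

/* XOR-mix fresh entropy into the existing state, as the hunk does with
 * prandom_seed(k_rand_bytes[i] ^ prandom_u32()): a weak fresh word can
 * only add to the state, never replace it. */
static void mix_in_entropy(void)
{
	uint32_t fresh[4];

	if (getrandom(fresh, sizeof(fresh), 0) != (ssize_t)sizeof(fresh))
		return;
	for (int i = 0; i < 4; i++)
		prng_state ^= fresh[i];
}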
58305@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58306 an ELF header */
58307
58308 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58309- struct file *interpreter, unsigned long *interp_map_addr,
58310+ struct file *interpreter,
58311 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58312 {
58313 struct elf_phdr *eppnt;
58314- unsigned long load_addr = 0;
58315+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58316 int load_addr_set = 0;
58317 unsigned long last_bss = 0, elf_bss = 0;
58318- unsigned long error = ~0UL;
58319+ unsigned long error = -EINVAL;
58320 unsigned long total_size;
58321 int i;
58322
58323@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58324 goto out;
58325 }
58326
58327+#ifdef CONFIG_PAX_SEGMEXEC
58328+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58329+ pax_task_size = SEGMEXEC_TASK_SIZE;
58330+#endif
58331+
58332 eppnt = interp_elf_phdata;
58333 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58334 if (eppnt->p_type == PT_LOAD) {
58335@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58336 map_addr = elf_map(interpreter, load_addr + vaddr,
58337 eppnt, elf_prot, elf_type, total_size);
58338 total_size = 0;
58339- if (!*interp_map_addr)
58340- *interp_map_addr = map_addr;
58341 error = map_addr;
58342 if (BAD_ADDR(map_addr))
58343 goto out;
58344@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58345 k = load_addr + eppnt->p_vaddr;
58346 if (BAD_ADDR(k) ||
58347 eppnt->p_filesz > eppnt->p_memsz ||
58348- eppnt->p_memsz > TASK_SIZE ||
58349- TASK_SIZE - eppnt->p_memsz < k) {
58350+ eppnt->p_memsz > pax_task_size ||
58351+ pax_task_size - eppnt->p_memsz < k) {
58352 error = -ENOMEM;
58353 goto out;
58354 }
58355@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58356 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58357
58358 /* Map the last of the bss segment */
58359- error = vm_brk(elf_bss, last_bss - elf_bss);
58360- if (BAD_ADDR(error))
58361- goto out;
58362+ if (last_bss > elf_bss) {
58363+ error = vm_brk(elf_bss, last_bss - elf_bss);
58364+ if (BAD_ADDR(error))
58365+ goto out;
58366+ }
58367 }
58368
58369 error = load_addr;
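The added last_bss > elf_bss guard matters because both values are unsigned: a swapped ordering would make last_bss - elf_bss wrap to an enormous mapping length instead of going negative. A compilable illustration:

#include <stdio.h>

int main(void)
{
	unsigned long elf_bss = 0x2000, last_bss = 0x1000;

	/* Without the guard, the subtraction wraps: */
	printf("%lx\n", last_bss - elf_bss);	/* 0xfffffffffffff000 on LP64 */

	/* The patched form only maps when there is really a tail to map: */
	if (last_bss > elf_bss)
		printf("map %lx bytes\n", last_bss - elf_bss);
	return 0;
}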
58370@@ -634,6 +666,336 @@ out:
58371 return error;
58372 }
58373
58374+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58375+#ifdef CONFIG_PAX_SOFTMODE
58376+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58377+{
58378+ unsigned long pax_flags = 0UL;
58379+
58380+#ifdef CONFIG_PAX_PAGEEXEC
58381+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58382+ pax_flags |= MF_PAX_PAGEEXEC;
58383+#endif
58384+
58385+#ifdef CONFIG_PAX_SEGMEXEC
58386+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58387+ pax_flags |= MF_PAX_SEGMEXEC;
58388+#endif
58389+
58390+#ifdef CONFIG_PAX_EMUTRAMP
58391+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58392+ pax_flags |= MF_PAX_EMUTRAMP;
58393+#endif
58394+
58395+#ifdef CONFIG_PAX_MPROTECT
58396+ if (elf_phdata->p_flags & PF_MPROTECT)
58397+ pax_flags |= MF_PAX_MPROTECT;
58398+#endif
58399+
58400+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58401+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58402+ pax_flags |= MF_PAX_RANDMMAP;
58403+#endif
58404+
58405+ return pax_flags;
58406+}
58407+#endif
58408+
58409+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58410+{
58411+ unsigned long pax_flags = 0UL;
58412+
58413+#ifdef CONFIG_PAX_PAGEEXEC
58414+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58415+ pax_flags |= MF_PAX_PAGEEXEC;
58416+#endif
58417+
58418+#ifdef CONFIG_PAX_SEGMEXEC
58419+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58420+ pax_flags |= MF_PAX_SEGMEXEC;
58421+#endif
58422+
58423+#ifdef CONFIG_PAX_EMUTRAMP
58424+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58425+ pax_flags |= MF_PAX_EMUTRAMP;
58426+#endif
58427+
58428+#ifdef CONFIG_PAX_MPROTECT
58429+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58430+ pax_flags |= MF_PAX_MPROTECT;
58431+#endif
58432+
58433+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58434+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58435+ pax_flags |= MF_PAX_RANDMMAP;
58436+#endif
58437+
58438+ return pax_flags;
58439+}
58440+#endif
58441+
58442+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58443+#ifdef CONFIG_PAX_SOFTMODE
58444+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58445+{
58446+ unsigned long pax_flags = 0UL;
58447+
58448+#ifdef CONFIG_PAX_PAGEEXEC
58449+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58450+ pax_flags |= MF_PAX_PAGEEXEC;
58451+#endif
58452+
58453+#ifdef CONFIG_PAX_SEGMEXEC
58454+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58455+ pax_flags |= MF_PAX_SEGMEXEC;
58456+#endif
58457+
58458+#ifdef CONFIG_PAX_EMUTRAMP
58459+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58460+ pax_flags |= MF_PAX_EMUTRAMP;
58461+#endif
58462+
58463+#ifdef CONFIG_PAX_MPROTECT
58464+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58465+ pax_flags |= MF_PAX_MPROTECT;
58466+#endif
58467+
58468+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58469+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58470+ pax_flags |= MF_PAX_RANDMMAP;
58471+#endif
58472+
58473+ return pax_flags;
58474+}
58475+#endif
58476+
58477+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58478+{
58479+ unsigned long pax_flags = 0UL;
58480+
58481+#ifdef CONFIG_PAX_PAGEEXEC
58482+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58483+ pax_flags |= MF_PAX_PAGEEXEC;
58484+#endif
58485+
58486+#ifdef CONFIG_PAX_SEGMEXEC
58487+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58488+ pax_flags |= MF_PAX_SEGMEXEC;
58489+#endif
58490+
58491+#ifdef CONFIG_PAX_EMUTRAMP
58492+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58493+ pax_flags |= MF_PAX_EMUTRAMP;
58494+#endif
58495+
58496+#ifdef CONFIG_PAX_MPROTECT
58497+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58498+ pax_flags |= MF_PAX_MPROTECT;
58499+#endif
58500+
58501+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58502+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58503+ pax_flags |= MF_PAX_RANDMMAP;
58504+#endif
58505+
58506+ return pax_flags;
58507+}
58508+#endif
58509+
58510+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58511+static unsigned long pax_parse_defaults(void)
58512+{
58513+ unsigned long pax_flags = 0UL;
58514+
58515+#ifdef CONFIG_PAX_SOFTMODE
58516+ if (pax_softmode)
58517+ return pax_flags;
58518+#endif
58519+
58520+#ifdef CONFIG_PAX_PAGEEXEC
58521+ pax_flags |= MF_PAX_PAGEEXEC;
58522+#endif
58523+
58524+#ifdef CONFIG_PAX_SEGMEXEC
58525+ pax_flags |= MF_PAX_SEGMEXEC;
58526+#endif
58527+
58528+#ifdef CONFIG_PAX_MPROTECT
58529+ pax_flags |= MF_PAX_MPROTECT;
58530+#endif
58531+
58532+#ifdef CONFIG_PAX_RANDMMAP
58533+ if (randomize_va_space)
58534+ pax_flags |= MF_PAX_RANDMMAP;
58535+#endif
58536+
58537+ return pax_flags;
58538+}
58539+
58540+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58541+{
58542+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58543+
58544+#ifdef CONFIG_PAX_EI_PAX
58545+
58546+#ifdef CONFIG_PAX_SOFTMODE
58547+ if (pax_softmode)
58548+ return pax_flags;
58549+#endif
58550+
58551+ pax_flags = 0UL;
58552+
58553+#ifdef CONFIG_PAX_PAGEEXEC
58554+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58555+ pax_flags |= MF_PAX_PAGEEXEC;
58556+#endif
58557+
58558+#ifdef CONFIG_PAX_SEGMEXEC
58559+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58560+ pax_flags |= MF_PAX_SEGMEXEC;
58561+#endif
58562+
58563+#ifdef CONFIG_PAX_EMUTRAMP
58564+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58565+ pax_flags |= MF_PAX_EMUTRAMP;
58566+#endif
58567+
58568+#ifdef CONFIG_PAX_MPROTECT
58569+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58570+ pax_flags |= MF_PAX_MPROTECT;
58571+#endif
58572+
58573+#ifdef CONFIG_PAX_ASLR
58574+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58575+ pax_flags |= MF_PAX_RANDMMAP;
58576+#endif
58577+
58578+#endif
58579+
58580+ return pax_flags;
58581+
58582+}
58583+
58584+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58585+{
58586+
58587+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58588+ unsigned long i;
58589+
58590+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58591+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58592+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58593+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58594+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58595+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58596+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
58597+ return PAX_PARSE_FLAGS_FALLBACK;
58598+
58599+#ifdef CONFIG_PAX_SOFTMODE
58600+ if (pax_softmode)
58601+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58602+ else
58603+#endif
58604+
58605+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58606+ break;
58607+ }
58608+#endif
58609+
58610+ return PAX_PARSE_FLAGS_FALLBACK;
58611+}
58612+
58613+static unsigned long pax_parse_xattr_pax(struct file * const file)
58614+{
58615+
58616+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58617+ ssize_t xattr_size, i;
58618+ unsigned char xattr_value[sizeof("pemrs") - 1];
58619+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
58620+
58621+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
58622+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
58623+ return PAX_PARSE_FLAGS_FALLBACK;
58624+
58625+ for (i = 0; i < xattr_size; i++)
58626+ switch (xattr_value[i]) {
58627+ default:
58628+ return PAX_PARSE_FLAGS_FALLBACK;
58629+
58630+#define parse_flag(option1, option2, flag) \
58631+ case option1: \
58632+ if (pax_flags_hardmode & MF_PAX_##flag) \
58633+ return PAX_PARSE_FLAGS_FALLBACK;\
58634+ pax_flags_hardmode |= MF_PAX_##flag; \
58635+ break; \
58636+ case option2: \
58637+ if (pax_flags_softmode & MF_PAX_##flag) \
58638+ return PAX_PARSE_FLAGS_FALLBACK;\
58639+ pax_flags_softmode |= MF_PAX_##flag; \
58640+ break;
58641+
58642+ parse_flag('p', 'P', PAGEEXEC);
58643+ parse_flag('e', 'E', EMUTRAMP);
58644+ parse_flag('m', 'M', MPROTECT);
58645+ parse_flag('r', 'R', RANDMMAP);
58646+ parse_flag('s', 'S', SEGMEXEC);
58647+
58648+#undef parse_flag
58649+ }
58650+
58651+ if (pax_flags_hardmode & pax_flags_softmode)
58652+ return PAX_PARSE_FLAGS_FALLBACK;
58653+
58654+#ifdef CONFIG_PAX_SOFTMODE
58655+ if (pax_softmode)
58656+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
58657+ else
58658+#endif
58659+
58660+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
58661+#else
58662+ return PAX_PARSE_FLAGS_FALLBACK;
58663+#endif
58664+
58665+}
58666+
58667+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
58668+{
58669+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
58670+
58671+ pax_flags = pax_parse_defaults();
58672+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
58673+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
58674+ xattr_pax_flags = pax_parse_xattr_pax(file);
58675+
58676+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58677+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58678+ pt_pax_flags != xattr_pax_flags)
58679+ return -EINVAL;
58680+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58681+ pax_flags = xattr_pax_flags;
58682+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58683+ pax_flags = pt_pax_flags;
58684+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58685+ pax_flags = ei_pax_flags;
58686+
58687+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
58688+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58689+ if ((__supported_pte_mask & _PAGE_NX))
58690+ pax_flags &= ~MF_PAX_SEGMEXEC;
58691+ else
58692+ pax_flags &= ~MF_PAX_PAGEEXEC;
58693+ }
58694+#endif
58695+
58696+ if (0 > pax_check_flags(&pax_flags))
58697+ return -EINVAL;
58698+
58699+ current->mm->pax_flags = pax_flags;
58700+ return 0;
58701+}
58702+#endif
58703+
58704 /*
58705 * These are the functions used to load ELF style executables and shared
58706 * libraries. There is no binary dependent code anywhere else.
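Taken together, pax_parse_pax_flags() resolves one flag word from four sources with fixed precedence: built-in defaults, then EI_PAX header bits, then a PT_PAX_FLAGS program header, then the filesystem xattr, and it refuses the exec outright when the two explicit markings disagree. A condensed sketch of that resolution order (FLAGS_FALLBACK plays the role of PAX_PARSE_FLAGS_FALLBACK):

#define FLAGS_FALLBACK (~0UL)

/* xattr beats PT_PAX_FLAGS beats EI_PAX beats built-in defaults, and a
 * PT/xattr disagreement is an error rather than a silent pick. */
static long resolve_flags(unsigned long def, unsigned long ei,
			  unsigned long pt, unsigned long xattr,
			  unsigned long *out)
{
	unsigned long flags = def;

	if (pt != FLAGS_FALLBACK && xattr != FLAGS_FALLBACK && pt != xattr)
		return -1;		/* -EINVAL in the kernel */
	if (xattr != FLAGS_FALLBACK)
		flags = xattr;
	else if (pt != FLAGS_FALLBACK)
		flags = pt;
	else if (ei != FLAGS_FALLBACK)
		flags = ei;
	*out = flags;
	return 0;
}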
58707@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58708 {
58709 unsigned long random_variable = 0;
58710
58711+#ifdef CONFIG_PAX_RANDUSTACK
58712+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
58713+ return stack_top - current->mm->delta_stack;
58714+#endif
58715+
58716 if ((current->flags & PF_RANDOMIZE) &&
58717 !(current->personality & ADDR_NO_RANDOMIZE)) {
58718 random_variable = (unsigned long) get_random_int();
58719@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58720 unsigned long load_addr = 0, load_bias = 0;
58721 int load_addr_set = 0;
58722 char * elf_interpreter = NULL;
58723- unsigned long error;
58724+ unsigned long error = 0;
58725 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
58726 unsigned long elf_bss, elf_brk;
58727 int retval, i;
58728@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58729 struct elfhdr interp_elf_ex;
58730 } *loc;
58731 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
58732+ unsigned long pax_task_size;
58733
58734 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
58735 if (!loc) {
58736@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
58737 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
58738 may depend on the personality. */
58739 SET_PERSONALITY2(loc->elf_ex, &arch_state);
58740+
58741+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58742+ current->mm->pax_flags = 0UL;
58743+#endif
58744+
58745+#ifdef CONFIG_PAX_DLRESOLVE
58746+ current->mm->call_dl_resolve = 0UL;
58747+#endif
58748+
58749+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
58750+ current->mm->call_syscall = 0UL;
58751+#endif
58752+
58753+#ifdef CONFIG_PAX_ASLR
58754+ current->mm->delta_mmap = 0UL;
58755+ current->mm->delta_stack = 0UL;
58756+#endif
58757+
58758+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58759+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58760+ send_sig(SIGKILL, current, 0);
58761+ goto out_free_dentry;
58762+ }
58763+#endif
58764+
58765+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58766+ pax_set_initial_flags(bprm);
58767+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58768+ if (pax_set_initial_flags_func)
58769+ (pax_set_initial_flags_func)(bprm);
58770+#endif
58771+
58772+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58773+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58774+ current->mm->context.user_cs_limit = PAGE_SIZE;
58775+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
58776+ }
58777+#endif
58778+
58779+#ifdef CONFIG_PAX_SEGMEXEC
58780+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
58781+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
58782+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
58783+ pax_task_size = SEGMEXEC_TASK_SIZE;
58784+ current->mm->def_flags |= VM_NOHUGEPAGE;
58785+ } else
58786+#endif
58787+
58788+ pax_task_size = TASK_SIZE;
58789+
58790+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
58791+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58792+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
58793+ put_cpu();
58794+ }
58795+#endif
58796+
58797+#ifdef CONFIG_PAX_ASLR
58798+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58799+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
58800+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
58801+ }
58802+#endif
58803+
58804+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58805+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58806+ executable_stack = EXSTACK_DISABLE_X;
58807+ current->personality &= ~READ_IMPLIES_EXEC;
58808+ } else
58809+#endif
58810+
58811 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
58812 current->personality |= READ_IMPLIES_EXEC;
58813
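Under MF_PAX_RANDMMAP the mmap and stack deltas are drawn as PAX_DELTA_*_LEN random bits shifted into page units, which keeps every randomized base page-aligned. A sketch of that arithmetic (random() stands in for pax_get_random_long(), and 12 for PAGE_SHIFT):

#include <stdlib.h>

#define PAGE_SHIFT 12	/* illustrative */

/* Keep len_bits of randomness and move them into page units: a
 * page-aligned offset in [0, 2^len_bits) pages, mirroring
 * (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT. */
static unsigned long aslr_delta(unsigned int len_bits)
{
	unsigned long r = ((unsigned long)random() << 31) ^ (unsigned long)random();

	return (r & ((1UL << len_bits) - 1)) << PAGE_SHIFT;
}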
58814@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
58815 #else
58816 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
58817 #endif
58818+
58819+#ifdef CONFIG_PAX_RANDMMAP
58820+ /* PaX: randomize base address at the default exe base if requested */
58821+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
58822+#ifdef CONFIG_SPARC64
58823+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
58824+#else
58825+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
58826+#endif
58827+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
58828+ elf_flags |= MAP_FIXED;
58829+ }
58830+#endif
58831+
58832 }
58833
58834 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
58835@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
58836 * allowed task size. Note that p_filesz must always be
58837 * <= p_memsz so it is only necessary to check p_memsz.
58838 */
58839- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58840- elf_ppnt->p_memsz > TASK_SIZE ||
58841- TASK_SIZE - elf_ppnt->p_memsz < k) {
58842+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58843+ elf_ppnt->p_memsz > pax_task_size ||
58844+ pax_task_size - elf_ppnt->p_memsz < k) {
58845 /* set_brk can never work. Avoid overflows. */
58846 retval = -EINVAL;
58847 goto out_free_dentry;
58848@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
58849 if (retval)
58850 goto out_free_dentry;
58851 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
58852- retval = -EFAULT; /* Nobody gets to see this, but.. */
58853- goto out_free_dentry;
58854+ /*
58855+ * This bss-zeroing can fail if the ELF
58856+ * file specifies odd protections. So
58857+ * we don't check the return value
58858+ */
58859 }
58860
58861+#ifdef CONFIG_PAX_RANDMMAP
58862+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58863+ unsigned long start, size, flags;
58864+ vm_flags_t vm_flags;
58865+
58866+ start = ELF_PAGEALIGN(elf_brk);
58867+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
58868+ flags = MAP_FIXED | MAP_PRIVATE;
58869+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
58870+
58871+ down_write(&current->mm->mmap_sem);
58872+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
58873+ retval = -ENOMEM;
58874+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
58875+// if (current->personality & ADDR_NO_RANDOMIZE)
58876+// vm_flags |= VM_READ | VM_MAYREAD;
58877+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
58878+ retval = IS_ERR_VALUE(start) ? start : 0;
58879+ }
58880+ up_write(&current->mm->mmap_sem);
58881+ if (retval == 0)
58882+ retval = set_brk(start + size, start + size + PAGE_SIZE);
58883+ if (retval < 0)
58884+ goto out_free_dentry;
58885+ }
58886+#endif
58887+
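This RANDMMAP block parks a dummy, non-expandable mapping just past the ELF brk so the heap base lands at an unpredictable offset. The gap size formula, pulled out for inspection:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL	/* illustrative */

int main(void)
{
	/* One guard page plus up to (2^22 - 1) * 16 bytes of slack, so the
	 * heap base moves by as much as roughly 64 MiB: */
	unsigned long gap = PAGE_SIZE +
		(((unsigned long)random() & ((1UL << 22) - 1UL)) << 4);

	printf("brk gap: %lu bytes (max %lu)\n",
	       gap, PAGE_SIZE + (((1UL << 22) - 1UL) << 4));
	return 0;
}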
58888 if (elf_interpreter) {
58889- unsigned long interp_map_addr = 0;
58890-
58891 elf_entry = load_elf_interp(&loc->interp_elf_ex,
58892 interpreter,
58893- &interp_map_addr,
58894 load_bias, interp_elf_phdata);
58895 if (!IS_ERR((void *)elf_entry)) {
58896 /*
58897@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
58898 * Decide what to dump of a segment, part, all or none.
58899 */
58900 static unsigned long vma_dump_size(struct vm_area_struct *vma,
58901- unsigned long mm_flags)
58902+ unsigned long mm_flags, long signr)
58903 {
58904 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
58905
58906@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
58907 if (vma->vm_file == NULL)
58908 return 0;
58909
58910- if (FILTER(MAPPED_PRIVATE))
58911+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
58912 goto whole;
58913
58914 /*
58915@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
58916 {
58917 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
58918 int i = 0;
58919- do
58920+ do {
58921 i += 2;
58922- while (auxv[i - 2] != AT_NULL);
58923+ } while (auxv[i - 2] != AT_NULL);
58924 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
58925 }
58926
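The fill_auxv_note() change only adds the braces the do/while always needed; the loop walks (key, value) pairs until the key just stepped over is AT_NULL, so the count includes the terminating pair. Extracted as a standalone helper:

#include <elf.h>
#include <stddef.h>

static size_t auxv_len(const unsigned long *auxv)
{
	size_t i = 0;

	do {
		i += 2;
	} while (auxv[i - 2] != AT_NULL);
	return i;
}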
58927@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
58928 {
58929 mm_segment_t old_fs = get_fs();
58930 set_fs(KERNEL_DS);
58931- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
58932+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
58933 set_fs(old_fs);
58934 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
58935 }
58936@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
58937 vma = next_vma(vma, gate_vma)) {
58938 unsigned long dump_size;
58939
58940- dump_size = vma_dump_size(vma, cprm->mm_flags);
58941+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
58942 vma_filesz[i++] = dump_size;
58943 vma_data_size += dump_size;
58944 }
58945@@ -2314,6 +2794,167 @@ out:
58946
58947 #endif /* CONFIG_ELF_CORE */
58948
58949+#ifdef CONFIG_PAX_MPROTECT
58950+/* PaX: non-PIC ELF libraries need relocations on their executable segments;
58951+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
58952+ * we'll remove VM_MAYWRITE for good on RELRO segments.
58953+ *
58954+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
58955+ * basis because we want to allow the common case and not the special ones.
58956+ */
58957+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
58958+{
58959+ struct elfhdr elf_h;
58960+ struct elf_phdr elf_p;
58961+ unsigned long i;
58962+ unsigned long oldflags;
58963+ bool is_textrel_rw, is_textrel_rx, is_relro;
58964+
58965+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
58966+ return;
58967+
58968+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
58969+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
58970+
58971+#ifdef CONFIG_PAX_ELFRELOCS
58972+ /* possible TEXTREL */
58973+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
58974+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
58975+#else
58976+ is_textrel_rw = false;
58977+ is_textrel_rx = false;
58978+#endif
58979+
58980+ /* possible RELRO */
58981+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
58982+
58983+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
58984+ return;
58985+
58986+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
58987+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
58988+
58989+#ifdef CONFIG_PAX_ETEXECRELOCS
58990+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
58991+#else
58992+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
58993+#endif
58994+
58995+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
58996+ !elf_check_arch(&elf_h) ||
58997+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
58998+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
58999+ return;
59000+
59001+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59002+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59003+ return;
59004+ switch (elf_p.p_type) {
59005+ case PT_DYNAMIC:
59006+ if (!is_textrel_rw && !is_textrel_rx)
59007+ continue;
59008+ i = 0UL;
59009+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59010+ elf_dyn dyn;
59011+
59012+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59013+ break;
59014+ if (dyn.d_tag == DT_NULL)
59015+ break;
59016+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59017+ gr_log_textrel(vma);
59018+ if (is_textrel_rw)
59019+ vma->vm_flags |= VM_MAYWRITE;
59020+ else
59021+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
59022+ vma->vm_flags &= ~VM_MAYWRITE;
59023+ break;
59024+ }
59025+ i++;
59026+ }
59027+ is_textrel_rw = false;
59028+ is_textrel_rx = false;
59029+ continue;
59030+
59031+ case PT_GNU_RELRO:
59032+ if (!is_relro)
59033+ continue;
59034+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59035+ vma->vm_flags &= ~VM_MAYWRITE;
59036+ is_relro = false;
59037+ continue;
59038+
59039+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59040+ case PT_PAX_FLAGS: {
59041+ const char *msg_mprotect = "", *msg_emutramp = "";
59042+ char *buffer_lib, *buffer_exe;
59043+
59044+ if (elf_p.p_flags & PF_NOMPROTECT)
59045+ msg_mprotect = "MPROTECT disabled";
59046+
59047+#ifdef CONFIG_PAX_EMUTRAMP
59048+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59049+ msg_emutramp = "EMUTRAMP enabled";
59050+#endif
59051+
59052+ if (!msg_mprotect[0] && !msg_emutramp[0])
59053+ continue;
59054+
59055+ if (!printk_ratelimit())
59056+ continue;
59057+
59058+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59059+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59060+ if (buffer_lib && buffer_exe) {
59061+ char *path_lib, *path_exe;
59062+
59063+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59064+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59065+
59066+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59067+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59068+
59069+ }
59070+ free_page((unsigned long)buffer_exe);
59071+ free_page((unsigned long)buffer_lib);
59072+ continue;
59073+ }
59074+#endif
59075+
59076+ }
59077+ }
59078+}
59079+#endif
59080+
59081+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59082+
59083+extern int grsec_enable_log_rwxmaps;
59084+
59085+static void elf_handle_mmap(struct file *file)
59086+{
59087+ struct elfhdr elf_h;
59088+ struct elf_phdr elf_p;
59089+ unsigned long i;
59090+
59091+ if (!grsec_enable_log_rwxmaps)
59092+ return;
59093+
59094+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59095+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59096+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59097+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59098+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59099+ return;
59100+
59101+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59102+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59103+ return;
59104+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59105+ gr_log_ptgnustack(file);
59106+ }
59107+}
59108+#endif
59109+
59110 static int __init init_elf_binfmt(void)
59111 {
59112 register_binfmt(&elf_format);
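Both handlers re-read the ELF and program headers straight from the backing file, since VMA flags alone cannot prove a region really is a TEXTREL or RELRO segment (or an executable PT_GNU_STACK). A hedged userspace version of the same phdr walk, 64-bit ELF only and with error handling trimmed:

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the header sanity checks performed in elf_handle_mprotect()
 * and elf_handle_mmap() before any flags are touched. */
static int has_relro(FILE *f)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;

	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh.e_phentsize != sizeof(ph))
		return 0;

	for (unsigned int i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, (long)(eh.e_phoff + i * sizeof(ph)), SEEK_SET) != 0 ||
		    fread(&ph, sizeof(ph), 1, f) != 1)
			return 0;
		if (ph.p_type == PT_GNU_RELRO)
			return 1;
	}
	return 0;
}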
59113diff --git a/fs/block_dev.c b/fs/block_dev.c
59114index b48c41b..e070416 100644
59115--- a/fs/block_dev.c
59116+++ b/fs/block_dev.c
59117@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59118 else if (bdev->bd_contains == bdev)
59119 return true; /* is a whole device which isn't held */
59120
59121- else if (whole->bd_holder == bd_may_claim)
59122+ else if (whole->bd_holder == (void *)bd_may_claim)
59123 return true; /* is a partition of a device that is being partitioned */
59124 else if (whole->bd_holder != NULL)
59125 return false; /* is a partition of a held device */
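bd_may_claim's own address is used as a sentinel bd_holder value; the added cast only makes the pointer kinds agree, which matters once stricter type checking treats a function-pointer/void* comparison as an error. The pattern in isolation:

#include <stdio.h>

static int sentinel_fn(void) { return 0; }

int main(void)
{
	void *holder = (void *)sentinel_fn;	/* function address used as a tag */

	if (holder == (void *)sentinel_fn)	/* cast makes the pointer kinds match */
		puts("held by the claiming path itself");
	return 0;
}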
59126diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59127index f54511d..58acdec 100644
59128--- a/fs/btrfs/ctree.c
59129+++ b/fs/btrfs/ctree.c
59130@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59131 free_extent_buffer(buf);
59132 add_root_to_dirty_list(root);
59133 } else {
59134- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59135- parent_start = parent->start;
59136- else
59137+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59138+ if (parent)
59139+ parent_start = parent->start;
59140+ else
59141+ parent_start = 0;
59142+ } else
59143 parent_start = 0;
59144
59145 WARN_ON(trans->transid != btrfs_header_generation(parent));
59146diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59147index de4e70f..b41dc45 100644
59148--- a/fs/btrfs/delayed-inode.c
59149+++ b/fs/btrfs/delayed-inode.c
59150@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59151
59152 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59153 {
59154- int seq = atomic_inc_return(&delayed_root->items_seq);
59155+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59156 if ((atomic_dec_return(&delayed_root->items) <
59157 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59158 waitqueue_active(&delayed_root->wait))
59159@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59160
59161 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59162 {
59163- int val = atomic_read(&delayed_root->items_seq);
59164+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59165
59166 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59167 return 1;
59168@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59169 int seq;
59170 int ret;
59171
59172- seq = atomic_read(&delayed_root->items_seq);
59173+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59174
59175 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59176 if (ret)
59177diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59178index f70119f..ab5894d 100644
59179--- a/fs/btrfs/delayed-inode.h
59180+++ b/fs/btrfs/delayed-inode.h
59181@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59182 */
59183 struct list_head prepare_list;
59184 atomic_t items; /* for delayed items */
59185- atomic_t items_seq; /* for delayed items */
59186+ atomic_unchecked_t items_seq; /* for delayed items */
59187 int nodes; /* for delayed nodes */
59188 wait_queue_head_t wait;
59189 };
59190@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59191 struct btrfs_delayed_root *delayed_root)
59192 {
59193 atomic_set(&delayed_root->items, 0);
59194- atomic_set(&delayed_root->items_seq, 0);
59195+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59196 delayed_root->nodes = 0;
59197 spin_lock_init(&delayed_root->lock);
59198 init_waitqueue_head(&delayed_root->wait);
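items_seq is a pure sequence counter, so wraparound is harmless; converting it to atomic_unchecked_t exempts it from PAX_REFCOUNT's overflow trap while keeping ordinary atomic semantics. A hedged sketch of what the _unchecked variants amount to (real kernels implement these per-architecture):

typedef struct { int counter; } atomic_unchecked_t;

/* Ordinary atomics with no overflow protection, for counters where
 * wraparound is benign (sequence numbers, statistics). */
static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_SEQ_CST);
}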
59199diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59200index 6f49b28..483410f 100644
59201--- a/fs/btrfs/super.c
59202+++ b/fs/btrfs/super.c
59203@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59204 function, line, errstr);
59205 return;
59206 }
59207- ACCESS_ONCE(trans->transaction->aborted) = errno;
59208+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59209 /* Wake up anybody who may be waiting on this transaction */
59210 wake_up(&root->fs_info->transaction_wait);
59211 wake_up(&root->fs_info->transaction_blocked_wait);
59212diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59213index 92db3f6..898a561 100644
59214--- a/fs/btrfs/sysfs.c
59215+++ b/fs/btrfs/sysfs.c
59216@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59217 for (set = 0; set < FEAT_MAX; set++) {
59218 int i;
59219 struct attribute *attrs[2];
59220- struct attribute_group agroup = {
59221+ attribute_group_no_const agroup = {
59222 .name = "features",
59223 .attrs = attrs,
59224 };
59225diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59226index 2299bfd..4098e72 100644
59227--- a/fs/btrfs/tests/free-space-tests.c
59228+++ b/fs/btrfs/tests/free-space-tests.c
59229@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59230 * extent entry.
59231 */
59232 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59233- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59234+ pax_open_kernel();
59235+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59236+ pax_close_kernel();
59237
59238 /*
59239 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59240@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59241 if (ret)
59242 return ret;
59243
59244- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59245+ pax_open_kernel();
59246+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59247+ pax_close_kernel();
59248 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59249
59250 return 0;
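The constify plugin makes ops structures like free_space_ctl->op read-only after boot, so the test has to open a write window with pax_open_kernel() and store through a void ** cast to get past the const qualifier. A rough userspace analogue using mprotect (assumes the ops object does not straddle a page):

#include <sys/mman.h>
#include <unistd.h>

struct ops { int (*use_bitmap)(void); };	/* illustrative ops table */

/* Lift write protection only around the single pointer store, then put
 * it back, much as pax_open_kernel()/pax_close_kernel() bracket the
 * store in the hunk. */
static int patch_op(struct ops *ro_ops, int (*fn)(void))
{
	unsigned long pagesz = (unsigned long)sysconf(_SC_PAGESIZE);
	void *page = (void *)((unsigned long)ro_ops & ~(pagesz - 1));

	if (mprotect(page, pagesz, PROT_READ | PROT_WRITE) != 0)
		return -1;
	*(void **)&ro_ops->use_bitmap = (void *)fn;	/* the cast from the patch */
	return mprotect(page, pagesz, PROT_READ);
}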
59251diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59252index 154990c..d0cf699 100644
59253--- a/fs/btrfs/tree-log.h
59254+++ b/fs/btrfs/tree-log.h
59255@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59256 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59257 struct btrfs_trans_handle *trans)
59258 {
59259- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59260+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59261 }
59262
59263 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59264diff --git a/fs/buffer.c b/fs/buffer.c
59265index 20805db..2e8fc69 100644
59266--- a/fs/buffer.c
59267+++ b/fs/buffer.c
59268@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59269 bh_cachep = kmem_cache_create("buffer_head",
59270 sizeof(struct buffer_head), 0,
59271 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59272- SLAB_MEM_SPREAD),
59273+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59274 NULL);
59275
59276 /*
59277diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59278index fbb08e9..0fda764 100644
59279--- a/fs/cachefiles/bind.c
59280+++ b/fs/cachefiles/bind.c
59281@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59282 args);
59283
59284 /* start by checking things over */
59285- ASSERT(cache->fstop_percent >= 0 &&
59286- cache->fstop_percent < cache->fcull_percent &&
59287+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59288 cache->fcull_percent < cache->frun_percent &&
59289 cache->frun_percent < 100);
59290
59291- ASSERT(cache->bstop_percent >= 0 &&
59292- cache->bstop_percent < cache->bcull_percent &&
59293+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59294 cache->bcull_percent < cache->brun_percent &&
59295 cache->brun_percent < 100);
59296
59297diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59298index ce1b115..4a6852c 100644
59299--- a/fs/cachefiles/daemon.c
59300+++ b/fs/cachefiles/daemon.c
59301@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59302 if (n > buflen)
59303 return -EMSGSIZE;
59304
59305- if (copy_to_user(_buffer, buffer, n) != 0)
59306+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59307 return -EFAULT;
59308
59309 return n;
59310@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59311 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59312 return -EIO;
59313
59314- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59315+ if (datalen > PAGE_SIZE - 1)
59316 return -EOPNOTSUPP;
59317
59318 /* drag the command string into the kernel so we can parse it */
59319@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59320 if (args[0] != '%' || args[1] != '\0')
59321 return -EINVAL;
59322
59323- if (fstop < 0 || fstop >= cache->fcull_percent)
59324+ if (fstop >= cache->fcull_percent)
59325 return cachefiles_daemon_range_error(cache, args);
59326
59327 cache->fstop_percent = fstop;
59328@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59329 if (args[0] != '%' || args[1] != '\0')
59330 return -EINVAL;
59331
59332- if (bstop < 0 || bstop >= cache->bcull_percent)
59333+ if (bstop >= cache->bcull_percent)
59334 return cachefiles_daemon_range_error(cache, args);
59335
59336 cache->bstop_percent = bstop;
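datalen, fstop and bstop are unsigned, so every dropped 'x < 0' branch was dead code that compilers flag with -Wtype-limits; only the upper bounds can actually fire. For instance:

#include <stddef.h>

static int validate_len(size_t datalen, size_t page_size)
{
	/* 'datalen < 0' can never be true for an unsigned type; only the
	 * upper bound is a real check. */
	return datalen <= page_size - 1;
}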
59337diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59338index 8c52472..c4e3a69 100644
59339--- a/fs/cachefiles/internal.h
59340+++ b/fs/cachefiles/internal.h
59341@@ -66,7 +66,7 @@ struct cachefiles_cache {
59342 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59343 struct rb_root active_nodes; /* active nodes (can't be culled) */
59344 rwlock_t active_lock; /* lock for active_nodes */
59345- atomic_t gravecounter; /* graveyard uniquifier */
59346+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59347 unsigned frun_percent; /* when to stop culling (% files) */
59348 unsigned fcull_percent; /* when to start culling (% files) */
59349 unsigned fstop_percent; /* when to stop allocating (% files) */
59350@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59351 * proc.c
59352 */
59353 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59354-extern atomic_t cachefiles_lookup_histogram[HZ];
59355-extern atomic_t cachefiles_mkdir_histogram[HZ];
59356-extern atomic_t cachefiles_create_histogram[HZ];
59357+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59358+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59359+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59360
59361 extern int __init cachefiles_proc_init(void);
59362 extern void cachefiles_proc_cleanup(void);
59363 static inline
59364-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59365+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59366 {
59367 unsigned long jif = jiffies - start_jif;
59368 if (jif >= HZ)
59369 jif = HZ - 1;
59370- atomic_inc(&histogram[jif]);
59371+ atomic_inc_unchecked(&histogram[jif]);
59372 }
59373
59374 #else
59375diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59376index 7f8e83f..8951aa4 100644
59377--- a/fs/cachefiles/namei.c
59378+++ b/fs/cachefiles/namei.c
59379@@ -309,7 +309,7 @@ try_again:
59380 /* first step is to make up a grave dentry in the graveyard */
59381 sprintf(nbuffer, "%08x%08x",
59382 (uint32_t) get_seconds(),
59383- (uint32_t) atomic_inc_return(&cache->gravecounter));
59384+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59385
59386 /* do the multiway lock magic */
59387 trap = lock_rename(cache->graveyard, dir);
59388diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59389index eccd339..4c1d995 100644
59390--- a/fs/cachefiles/proc.c
59391+++ b/fs/cachefiles/proc.c
59392@@ -14,9 +14,9 @@
59393 #include <linux/seq_file.h>
59394 #include "internal.h"
59395
59396-atomic_t cachefiles_lookup_histogram[HZ];
59397-atomic_t cachefiles_mkdir_histogram[HZ];
59398-atomic_t cachefiles_create_histogram[HZ];
59399+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59400+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59401+atomic_unchecked_t cachefiles_create_histogram[HZ];
59402
59403 /*
59404 * display the latency histogram
59405@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59406 return 0;
59407 default:
59408 index = (unsigned long) v - 3;
59409- x = atomic_read(&cachefiles_lookup_histogram[index]);
59410- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59411- z = atomic_read(&cachefiles_create_histogram[index]);
59412+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59413+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59414+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59415 if (x == 0 && y == 0 && z == 0)
59416 return 0;
59417
59418diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59419index c241603..56bae60 100644
59420--- a/fs/ceph/dir.c
59421+++ b/fs/ceph/dir.c
59422@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59423 struct dentry *dentry, *last;
59424 struct ceph_dentry_info *di;
59425 int err = 0;
59426+ char d_name[DNAME_INLINE_LEN];
59427+ const unsigned char *name;
59428
59429 /* claim ref on last dentry we returned */
59430 last = fi->dentry;
59431@@ -192,7 +194,12 @@ more:
59432
59433 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59434 dentry, dentry, dentry->d_inode);
59435- if (!dir_emit(ctx, dentry->d_name.name,
59436+ name = dentry->d_name.name;
59437+ if (name == dentry->d_iname) {
59438+ memcpy(d_name, name, dentry->d_name.len);
59439+ name = d_name;
59440+ }
59441+ if (!dir_emit(ctx, name,
59442 dentry->d_name.len,
59443 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59444 dentry->d_inode->i_mode >> 12)) {
59445@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59446 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59447 struct ceph_mds_client *mdsc = fsc->mdsc;
59448 unsigned frag = fpos_frag(ctx->pos);
59449- int off = fpos_off(ctx->pos);
59450+ unsigned int off = fpos_off(ctx->pos);
59451 int err;
59452 u32 ftype;
59453 struct ceph_mds_reply_info_parsed *rinfo;
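Short dentry names live inline in the dentry (d_iname) and a concurrent rename can rewrite them while dir_emit() copies, so the hunk snapshots such names into a stack buffer first. The snapshot pattern, reduced (DNAME_INLINE_LEN is illustrative here):

#include <string.h>

#define DNAME_INLINE_LEN 32	/* illustrative; the kernel value is arch-dependent */

/* If the name points into storage a concurrent rename may rewrite, copy
 * it to the caller's buffer first and hand back the stable copy. */
static const char *stable_name(const char *name, const char *inline_buf,
			       size_t len, char snapshot[DNAME_INLINE_LEN])
{
	if (name == inline_buf && len < DNAME_INLINE_LEN) {
		memcpy(snapshot, name, len);
		snapshot[len] = '\0';
		return snapshot;
	}
	return name;
}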
59454diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59455index 50f06cd..c7eba3e 100644
59456--- a/fs/ceph/super.c
59457+++ b/fs/ceph/super.c
59458@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59459 /*
59460 * construct our own bdi so we can control readahead, etc.
59461 */
59462-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59463+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59464
59465 static int ceph_register_bdi(struct super_block *sb,
59466 struct ceph_fs_client *fsc)
59467@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
59468 default_backing_dev_info.ra_pages;
59469
59470 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59471- atomic_long_inc_return(&bdi_seq));
59472+ atomic_long_inc_return_unchecked(&bdi_seq));
59473 if (!err)
59474 sb->s_bdi = &fsc->backing_dev_info;
59475 return err;
59476diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59477index 7febcf2..62a5721 100644
59478--- a/fs/cifs/cifs_debug.c
59479+++ b/fs/cifs/cifs_debug.c
59480@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59481
59482 if (strtobool(&c, &bv) == 0) {
59483 #ifdef CONFIG_CIFS_STATS2
59484- atomic_set(&totBufAllocCount, 0);
59485- atomic_set(&totSmBufAllocCount, 0);
59486+ atomic_set_unchecked(&totBufAllocCount, 0);
59487+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59488 #endif /* CONFIG_CIFS_STATS2 */
59489 spin_lock(&cifs_tcp_ses_lock);
59490 list_for_each(tmp1, &cifs_tcp_ses_list) {
59491@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59492 tcon = list_entry(tmp3,
59493 struct cifs_tcon,
59494 tcon_list);
59495- atomic_set(&tcon->num_smbs_sent, 0);
59496+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59497 if (server->ops->clear_stats)
59498 server->ops->clear_stats(tcon);
59499 }
59500@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59501 smBufAllocCount.counter, cifs_min_small);
59502 #ifdef CONFIG_CIFS_STATS2
59503 seq_printf(m, "Total Large %d Small %d Allocations\n",
59504- atomic_read(&totBufAllocCount),
59505- atomic_read(&totSmBufAllocCount));
59506+ atomic_read_unchecked(&totBufAllocCount),
59507+ atomic_read_unchecked(&totSmBufAllocCount));
59508 #endif /* CONFIG_CIFS_STATS2 */
59509
59510 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59511@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59512 if (tcon->need_reconnect)
59513 seq_puts(m, "\tDISCONNECTED ");
59514 seq_printf(m, "\nSMBs: %d",
59515- atomic_read(&tcon->num_smbs_sent));
59516+ atomic_read_unchecked(&tcon->num_smbs_sent));
59517 if (server->ops->print_stats)
59518 server->ops->print_stats(m, tcon);
59519 }
59520diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59521index d72fe37..ded5511 100644
59522--- a/fs/cifs/cifsfs.c
59523+++ b/fs/cifs/cifsfs.c
59524@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59525 */
59526 cifs_req_cachep = kmem_cache_create("cifs_request",
59527 CIFSMaxBufSize + max_hdr_size, 0,
59528- SLAB_HWCACHE_ALIGN, NULL);
59529+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59530 if (cifs_req_cachep == NULL)
59531 return -ENOMEM;
59532
59533@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59534 efficient to alloc 1 per page off the slab compared to 17K (5page)
59535 alloc of large cifs buffers even when page debugging is on */
59536 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59537- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59538+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59539 NULL);
59540 if (cifs_sm_req_cachep == NULL) {
59541 mempool_destroy(cifs_req_poolp);
59542@@ -1204,8 +1204,8 @@ init_cifs(void)
59543 atomic_set(&bufAllocCount, 0);
59544 atomic_set(&smBufAllocCount, 0);
59545 #ifdef CONFIG_CIFS_STATS2
59546- atomic_set(&totBufAllocCount, 0);
59547- atomic_set(&totSmBufAllocCount, 0);
59548+ atomic_set_unchecked(&totBufAllocCount, 0);
59549+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59550 #endif /* CONFIG_CIFS_STATS2 */
59551
59552 atomic_set(&midCount, 0);
59553diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59554index 22b289a..bbbba08 100644
59555--- a/fs/cifs/cifsglob.h
59556+++ b/fs/cifs/cifsglob.h
59557@@ -823,35 +823,35 @@ struct cifs_tcon {
59558 __u16 Flags; /* optional support bits */
59559 enum statusEnum tidStatus;
59560 #ifdef CONFIG_CIFS_STATS
59561- atomic_t num_smbs_sent;
59562+ atomic_unchecked_t num_smbs_sent;
59563 union {
59564 struct {
59565- atomic_t num_writes;
59566- atomic_t num_reads;
59567- atomic_t num_flushes;
59568- atomic_t num_oplock_brks;
59569- atomic_t num_opens;
59570- atomic_t num_closes;
59571- atomic_t num_deletes;
59572- atomic_t num_mkdirs;
59573- atomic_t num_posixopens;
59574- atomic_t num_posixmkdirs;
59575- atomic_t num_rmdirs;
59576- atomic_t num_renames;
59577- atomic_t num_t2renames;
59578- atomic_t num_ffirst;
59579- atomic_t num_fnext;
59580- atomic_t num_fclose;
59581- atomic_t num_hardlinks;
59582- atomic_t num_symlinks;
59583- atomic_t num_locks;
59584- atomic_t num_acl_get;
59585- atomic_t num_acl_set;
59586+ atomic_unchecked_t num_writes;
59587+ atomic_unchecked_t num_reads;
59588+ atomic_unchecked_t num_flushes;
59589+ atomic_unchecked_t num_oplock_brks;
59590+ atomic_unchecked_t num_opens;
59591+ atomic_unchecked_t num_closes;
59592+ atomic_unchecked_t num_deletes;
59593+ atomic_unchecked_t num_mkdirs;
59594+ atomic_unchecked_t num_posixopens;
59595+ atomic_unchecked_t num_posixmkdirs;
59596+ atomic_unchecked_t num_rmdirs;
59597+ atomic_unchecked_t num_renames;
59598+ atomic_unchecked_t num_t2renames;
59599+ atomic_unchecked_t num_ffirst;
59600+ atomic_unchecked_t num_fnext;
59601+ atomic_unchecked_t num_fclose;
59602+ atomic_unchecked_t num_hardlinks;
59603+ atomic_unchecked_t num_symlinks;
59604+ atomic_unchecked_t num_locks;
59605+ atomic_unchecked_t num_acl_get;
59606+ atomic_unchecked_t num_acl_set;
59607 } cifs_stats;
59608 #ifdef CONFIG_CIFS_SMB2
59609 struct {
59610- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59611- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59612+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59613+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59614 } smb2_stats;
59615 #endif /* CONFIG_CIFS_SMB2 */
59616 } stats;
59617@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
59618 }
59619
59620 #ifdef CONFIG_CIFS_STATS
59621-#define cifs_stats_inc atomic_inc
59622+#define cifs_stats_inc atomic_inc_unchecked
59623
59624 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
59625 unsigned int bytes)
59626@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
59627 /* Various Debug counters */
59628 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
59629 #ifdef CONFIG_CIFS_STATS2
59630-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
59631-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
59632+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
59633+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
59634 #endif
59635 GLOBAL_EXTERN atomic_t smBufAllocCount;
59636 GLOBAL_EXTERN atomic_t midCount;
59637diff --git a/fs/cifs/file.c b/fs/cifs/file.c
59638index 74f1287..7ef0237 100644
59639--- a/fs/cifs/file.c
59640+++ b/fs/cifs/file.c
59641@@ -2060,10 +2060,14 @@ static int cifs_writepages(struct address_space *mapping,
59642 index = mapping->writeback_index; /* Start from prev offset */
59643 end = -1;
59644 } else {
59645- index = wbc->range_start >> PAGE_CACHE_SHIFT;
59646- end = wbc->range_end >> PAGE_CACHE_SHIFT;
59647- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
59648+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
59649 range_whole = true;
59650+ index = 0;
59651+ end = ULONG_MAX;
59652+ } else {
59653+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
59654+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
59655+ }
59656 scanned = true;
59657 }
59658 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
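The cifs_writepages() fix is about the whole-file case: deriving the end index from LLONG_MAX >> PAGE_CACHE_SHIFT yields a value well short of the maximal page index, so the patched branch pins the range to 0..ULONG_MAX explicitly. A quick check of the two values:

#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* illustrative */

int main(void)
{
	unsigned long end_old = (unsigned long)(LLONG_MAX >> PAGE_SHIFT);

	/* The shifted byte limit is far below the maximal page index: */
	printf("old end: %lx\nfixed end: %lx\n", end_old, ULONG_MAX);
	return 0;
}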
59659diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
59660index 3379463..3af418a 100644
59661--- a/fs/cifs/misc.c
59662+++ b/fs/cifs/misc.c
59663@@ -170,7 +170,7 @@ cifs_buf_get(void)
59664 memset(ret_buf, 0, buf_size + 3);
59665 atomic_inc(&bufAllocCount);
59666 #ifdef CONFIG_CIFS_STATS2
59667- atomic_inc(&totBufAllocCount);
59668+ atomic_inc_unchecked(&totBufAllocCount);
59669 #endif /* CONFIG_CIFS_STATS2 */
59670 }
59671
59672@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
59673 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
59674 atomic_inc(&smBufAllocCount);
59675 #ifdef CONFIG_CIFS_STATS2
59676- atomic_inc(&totSmBufAllocCount);
59677+ atomic_inc_unchecked(&totSmBufAllocCount);
59678 #endif /* CONFIG_CIFS_STATS2 */
59679
59680 }
59681diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
59682index d297903..1cb7516 100644
59683--- a/fs/cifs/smb1ops.c
59684+++ b/fs/cifs/smb1ops.c
59685@@ -622,27 +622,27 @@ static void
59686 cifs_clear_stats(struct cifs_tcon *tcon)
59687 {
59688 #ifdef CONFIG_CIFS_STATS
59689- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
59690- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
59691- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
59692- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59693- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
59694- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
59695- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59696- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
59697- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
59698- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
59699- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
59700- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
59701- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
59702- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
59703- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
59704- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
59705- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
59706- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
59707- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
59708- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
59709- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
59710+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
59711+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
59712+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
59713+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59714+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
59715+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
59716+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59717+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
59718+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
59719+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
59720+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
59721+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
59722+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
59723+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
59724+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
59725+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
59726+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
59727+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
59728+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
59729+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
59730+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
59731 #endif
59732 }
59733
59734@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59735 {
59736 #ifdef CONFIG_CIFS_STATS
59737 seq_printf(m, " Oplocks breaks: %d",
59738- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
59739+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
59740 seq_printf(m, "\nReads: %d Bytes: %llu",
59741- atomic_read(&tcon->stats.cifs_stats.num_reads),
59742+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
59743 (long long)(tcon->bytes_read));
59744 seq_printf(m, "\nWrites: %d Bytes: %llu",
59745- atomic_read(&tcon->stats.cifs_stats.num_writes),
59746+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
59747 (long long)(tcon->bytes_written));
59748 seq_printf(m, "\nFlushes: %d",
59749- atomic_read(&tcon->stats.cifs_stats.num_flushes));
59750+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
59751 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
59752- atomic_read(&tcon->stats.cifs_stats.num_locks),
59753- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
59754- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
59755+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
59756+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
59757+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
59758 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
59759- atomic_read(&tcon->stats.cifs_stats.num_opens),
59760- atomic_read(&tcon->stats.cifs_stats.num_closes),
59761- atomic_read(&tcon->stats.cifs_stats.num_deletes));
59762+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
59763+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
59764+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
59765 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
59766- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
59767- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
59768+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
59769+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
59770 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
59771- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
59772- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
59773+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
59774+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
59775 seq_printf(m, "\nRenames: %d T2 Renames %d",
59776- atomic_read(&tcon->stats.cifs_stats.num_renames),
59777- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
59778+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
59779+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
59780 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
59781- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
59782- atomic_read(&tcon->stats.cifs_stats.num_fnext),
59783- atomic_read(&tcon->stats.cifs_stats.num_fclose));
59784+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
59785+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
59786+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
59787 #endif
59788 }
59789
59790diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59791index 96b5d40..e5db0c1 100644
59792--- a/fs/cifs/smb2ops.c
59793+++ b/fs/cifs/smb2ops.c
59794@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
59795 #ifdef CONFIG_CIFS_STATS
59796 int i;
59797 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
59798- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59799- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59800+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59801+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59802 }
59803 #endif
59804 }
59805@@ -459,65 +459,65 @@ static void
59806 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59807 {
59808 #ifdef CONFIG_CIFS_STATS
59809- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59810- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59811+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59812+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59813 seq_printf(m, "\nNegotiates: %d sent %d failed",
59814- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
59815- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
59816+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
59817+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
59818 seq_printf(m, "\nSessionSetups: %d sent %d failed",
59819- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
59820- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
59821+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
59822+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
59823 seq_printf(m, "\nLogoffs: %d sent %d failed",
59824- atomic_read(&sent[SMB2_LOGOFF_HE]),
59825- atomic_read(&failed[SMB2_LOGOFF_HE]));
59826+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
59827+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
59828 seq_printf(m, "\nTreeConnects: %d sent %d failed",
59829- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
59830- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
59831+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
59832+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
59833 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
59834- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
59835- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
59836+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
59837+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
59838 seq_printf(m, "\nCreates: %d sent %d failed",
59839- atomic_read(&sent[SMB2_CREATE_HE]),
59840- atomic_read(&failed[SMB2_CREATE_HE]));
59841+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
59842+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
59843 seq_printf(m, "\nCloses: %d sent %d failed",
59844- atomic_read(&sent[SMB2_CLOSE_HE]),
59845- atomic_read(&failed[SMB2_CLOSE_HE]));
59846+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
59847+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
59848 seq_printf(m, "\nFlushes: %d sent %d failed",
59849- atomic_read(&sent[SMB2_FLUSH_HE]),
59850- atomic_read(&failed[SMB2_FLUSH_HE]));
59851+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
59852+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
59853 seq_printf(m, "\nReads: %d sent %d failed",
59854- atomic_read(&sent[SMB2_READ_HE]),
59855- atomic_read(&failed[SMB2_READ_HE]));
59856+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
59857+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
59858 seq_printf(m, "\nWrites: %d sent %d failed",
59859- atomic_read(&sent[SMB2_WRITE_HE]),
59860- atomic_read(&failed[SMB2_WRITE_HE]));
59861+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
59862+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
59863 seq_printf(m, "\nLocks: %d sent %d failed",
59864- atomic_read(&sent[SMB2_LOCK_HE]),
59865- atomic_read(&failed[SMB2_LOCK_HE]));
59866+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
59867+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
59868 seq_printf(m, "\nIOCTLs: %d sent %d failed",
59869- atomic_read(&sent[SMB2_IOCTL_HE]),
59870- atomic_read(&failed[SMB2_IOCTL_HE]));
59871+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
59872+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
59873 seq_printf(m, "\nCancels: %d sent %d failed",
59874- atomic_read(&sent[SMB2_CANCEL_HE]),
59875- atomic_read(&failed[SMB2_CANCEL_HE]));
59876+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
59877+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
59878 seq_printf(m, "\nEchos: %d sent %d failed",
59879- atomic_read(&sent[SMB2_ECHO_HE]),
59880- atomic_read(&failed[SMB2_ECHO_HE]));
59881+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
59882+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
59883 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
59884- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
59885- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
59886+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
59887+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
59888 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
59889- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
59890- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
59891+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
59892+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
59893 seq_printf(m, "\nQueryInfos: %d sent %d failed",
59894- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
59895- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
59896+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
59897+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
59898 seq_printf(m, "\nSetInfos: %d sent %d failed",
59899- atomic_read(&sent[SMB2_SET_INFO_HE]),
59900- atomic_read(&failed[SMB2_SET_INFO_HE]));
59901+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
59902+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
59903 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
59904- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
59905- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
59906+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
59907+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
59908 #endif
59909 }
59910
59911diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
59912index 3417340..b942390 100644
59913--- a/fs/cifs/smb2pdu.c
59914+++ b/fs/cifs/smb2pdu.c
59915@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
59916 default:
59917 cifs_dbg(VFS, "info level %u isn't supported\n",
59918 srch_inf->info_level);
59919- rc = -EINVAL;
59920- goto qdir_exit;
59921+ return -EINVAL;
59922 }
59923
59924 req->FileIndex = cpu_to_le32(index);
59925diff --git a/fs/coda/cache.c b/fs/coda/cache.c
59926index 46ee6f2..89a9e7f 100644
59927--- a/fs/coda/cache.c
59928+++ b/fs/coda/cache.c
59929@@ -24,7 +24,7 @@
59930 #include "coda_linux.h"
59931 #include "coda_cache.h"
59932
59933-static atomic_t permission_epoch = ATOMIC_INIT(0);
59934+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
59935
59936 /* replace or extend an acl cache hit */
59937 void coda_cache_enter(struct inode *inode, int mask)
59938@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
59939 struct coda_inode_info *cii = ITOC(inode);
59940
59941 spin_lock(&cii->c_lock);
59942- cii->c_cached_epoch = atomic_read(&permission_epoch);
59943+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
59944 if (!uid_eq(cii->c_uid, current_fsuid())) {
59945 cii->c_uid = current_fsuid();
59946 cii->c_cached_perm = mask;
59947@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
59948 {
59949 struct coda_inode_info *cii = ITOC(inode);
59950 spin_lock(&cii->c_lock);
59951- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
59952+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
59953 spin_unlock(&cii->c_lock);
59954 }
59955
59956 /* remove all acl caches */
59957 void coda_cache_clear_all(struct super_block *sb)
59958 {
59959- atomic_inc(&permission_epoch);
59960+ atomic_inc_unchecked(&permission_epoch);
59961 }
59962
59963
59964@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
59965 spin_lock(&cii->c_lock);
59966 hit = (mask & cii->c_cached_perm) == mask &&
59967 uid_eq(cii->c_uid, current_fsuid()) &&
59968- cii->c_cached_epoch == atomic_read(&permission_epoch);
59969+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
59970 spin_unlock(&cii->c_lock);
59971
59972 return hit;
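
The coda hunks keep the existing epoch-invalidation scheme and only retype its counter: each cache fill stamps the entry with the current permission_epoch, and coda_cache_clear_all() invalidates every entry at once by bumping that one counter rather than walking the cache. A standalone sketch of the pattern (field and function names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* One global epoch; bumping it invalidates every cached entry at once. */
    static unsigned int permission_epoch;

    struct cache_entry {
            unsigned int cached_epoch;      /* epoch at fill time */
            int cached_perm;                /* the cached value */
    };

    static void cache_fill(struct cache_entry *e, int perm)
    {
            e->cached_perm = perm;
            e->cached_epoch = permission_epoch;
    }

    static bool cache_valid(const struct cache_entry *e)
    {
            return e->cached_epoch == permission_epoch;
    }

    static void cache_clear_all(void)
    {
            permission_epoch++;             /* O(1): stale entries now mismatch */
    }

    int main(void)
    {
            struct cache_entry e;

            cache_fill(&e, 0644);
            printf("valid before flush: %d\n", cache_valid(&e));
            cache_clear_all();
            printf("valid after flush:  %d\n", cache_valid(&e));
            return 0;
    }
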
59973diff --git a/fs/compat.c b/fs/compat.c
59974index 6fd272d..dd34ba2 100644
59975--- a/fs/compat.c
59976+++ b/fs/compat.c
59977@@ -54,7 +54,7 @@
59978 #include <asm/ioctls.h>
59979 #include "internal.h"
59980
59981-int compat_log = 1;
59982+int compat_log = 0;
59983
59984 int compat_printk(const char *fmt, ...)
59985 {
59986@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
59987
59988 set_fs(KERNEL_DS);
59989 /* The __user pointer cast is valid because of the set_fs() */
59990- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
59991+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
59992 set_fs(oldfs);
59993 /* truncating is ok because it's a user address */
59994 if (!ret)
59995@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
59996 goto out;
59997
59998 ret = -EINVAL;
59999- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60000+ if (nr_segs > UIO_MAXIOV)
60001 goto out;
60002 if (nr_segs > fast_segs) {
60003 ret = -ENOMEM;
60004@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60005 struct compat_readdir_callback {
60006 struct dir_context ctx;
60007 struct compat_old_linux_dirent __user *dirent;
60008+ struct file * file;
60009 int result;
60010 };
60011
60012@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60013 buf->result = -EOVERFLOW;
60014 return -EOVERFLOW;
60015 }
60016+
60017+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60018+ return 0;
60019+
60020 buf->result++;
60021 dirent = buf->dirent;
60022 if (!access_ok(VERIFY_WRITE, dirent,
60023@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60024 if (!f.file)
60025 return -EBADF;
60026
60027+ buf.file = f.file;
60028 error = iterate_dir(f.file, &buf.ctx);
60029 if (buf.result)
60030 error = buf.result;
60031@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60032 struct dir_context ctx;
60033 struct compat_linux_dirent __user *current_dir;
60034 struct compat_linux_dirent __user *previous;
60035+ struct file * file;
60036 int count;
60037 int error;
60038 };
60039@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60040 buf->error = -EOVERFLOW;
60041 return -EOVERFLOW;
60042 }
60043+
60044+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60045+ return 0;
60046+
60047 dirent = buf->previous;
60048 if (dirent) {
60049 if (__put_user(offset, &dirent->d_off))
60050@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60051 if (!f.file)
60052 return -EBADF;
60053
60054+ buf.file = f.file;
60055 error = iterate_dir(f.file, &buf.ctx);
60056 if (error >= 0)
60057 error = buf.error;
60058@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60059 struct dir_context ctx;
60060 struct linux_dirent64 __user *current_dir;
60061 struct linux_dirent64 __user *previous;
60062+ struct file * file;
60063 int count;
60064 int error;
60065 };
60066@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60067 buf->error = -EINVAL; /* only used if we fail.. */
60068 if (reclen > buf->count)
60069 return -EINVAL;
60070+
60071+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60072+ return 0;
60073+
60074 dirent = buf->previous;
60075
60076 if (dirent) {
60077@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60078 if (!f.file)
60079 return -EBADF;
60080
60081+ buf.file = f.file;
60082 error = iterate_dir(f.file, &buf.ctx);
60083 if (error >= 0)
60084 error = buf.error;
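
The fs/compat.c hunks thread a struct file pointer into each readdir callback buffer so that gr_acl_handle_filldir() can veto individual entries; returning 0 from the callback silently skips the entry while iteration continues. A userspace sketch of the same callback-context pattern, with the hypothetical filter_allows() standing in for the grsecurity hook:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct dir_ctx {
            const char *who;                /* extra state threaded to the callback */
            int shown;
    };

    /* Hypothetical policy hook standing in for gr_acl_handle_filldir(). */
    static bool filter_allows(const char *who, const char *name)
    {
            (void)who;
            return name[0] != '.';          /* e.g. hide dotfiles */
    }

    static int filldir(struct dir_ctx *ctx, const char *name)
    {
            if (!filter_allows(ctx->who, name))
                    return 0;               /* skip the entry, keep iterating */
            printf("%s sees: %s\n", ctx->who, name);
            ctx->shown++;
            return 0;
    }

    int main(void)
    {
            const char *entries[] = { ".hidden", "a.txt", "b.txt" };
            struct dir_ctx ctx = { .who = "user", .shown = 0 };

            for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
                    filldir(&ctx, entries[i]);
            printf("%d entries shown\n", ctx.shown);
            return 0;
    }
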
60085diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60086index 4d24d17..4f8c09e 100644
60087--- a/fs/compat_binfmt_elf.c
60088+++ b/fs/compat_binfmt_elf.c
60089@@ -30,11 +30,13 @@
60090 #undef elf_phdr
60091 #undef elf_shdr
60092 #undef elf_note
60093+#undef elf_dyn
60094 #undef elf_addr_t
60095 #define elfhdr elf32_hdr
60096 #define elf_phdr elf32_phdr
60097 #define elf_shdr elf32_shdr
60098 #define elf_note elf32_note
60099+#define elf_dyn Elf32_Dyn
60100 #define elf_addr_t Elf32_Addr
60101
60102 /*
60103diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60104index afec645..9c65620 100644
60105--- a/fs/compat_ioctl.c
60106+++ b/fs/compat_ioctl.c
60107@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60108 return -EFAULT;
60109 if (__get_user(udata, &ss32->iomem_base))
60110 return -EFAULT;
60111- ss.iomem_base = compat_ptr(udata);
60112+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60113 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60114 __get_user(ss.port_high, &ss32->port_high))
60115 return -EFAULT;
60116@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60117 for (i = 0; i < nmsgs; i++) {
60118 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60119 return -EFAULT;
60120- if (get_user(datap, &umsgs[i].buf) ||
60121- put_user(compat_ptr(datap), &tmsgs[i].buf))
60122+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60123+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60124 return -EFAULT;
60125 }
60126 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60127@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60128 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60129 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60130 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60131- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60132+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60133 return -EFAULT;
60134
60135 return ioctl_preallocate(file, p);
60136@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60137 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60138 {
60139 unsigned int a, b;
60140- a = *(unsigned int *)p;
60141- b = *(unsigned int *)q;
60142+ a = *(const unsigned int *)p;
60143+ b = *(const unsigned int *)q;
60144 if (a > b)
60145 return 1;
60146 if (a < b)
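
The compat_ioctl.c comparator change is purely const-correctness: the casts now match the const void * parameters a sort comparator receives; the explicit -1/0/1 results avoid the wraparound a subtraction would risk. A minimal userspace comparator in the same shape:

    #include <stdio.h>
    #include <stdlib.h>

    /* Same shape as init_sys32_ioctl_cmp: const-correct casts of the
     * const void * parameters, explicit -1/0/1 results. */
    static int cmp_uint(const void *p, const void *q)
    {
            unsigned int a = *(const unsigned int *)p;
            unsigned int b = *(const unsigned int *)q;

            if (a > b)
                    return 1;
            if (a < b)
                    return -1;
            return 0;
    }

    int main(void)
    {
            unsigned int cmds[] = { 42, 7, 19 };

            qsort(cmds, 3, sizeof(cmds[0]), cmp_uint);
            for (int i = 0; i < 3; i++)
                    printf("%u\n", cmds[i]);
            return 0;
    }
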
60147diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60148index c9c298b..544d100 100644
60149--- a/fs/configfs/dir.c
60150+++ b/fs/configfs/dir.c
60151@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60152 }
60153 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60154 struct configfs_dirent *next;
60155- const char *name;
60156+ const unsigned char * name;
60157+ char d_name[sizeof(next->s_dentry->d_iname)];
60158 int len;
60159 struct inode *inode = NULL;
60160
60161@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60162 continue;
60163
60164 name = configfs_get_name(next);
60165- len = strlen(name);
60166+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60167+ len = next->s_dentry->d_name.len;
60168+ memcpy(d_name, name, len);
60169+ name = d_name;
60170+ } else
60171+ len = strlen(name);
60172
60173 /*
60174 * We'll have a dentry and an inode for
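
The configfs_readdir hunk guards against a concurrent rename rewriting d_iname while it is being read: the name is snapshotted into a stack buffer using the dentry's stored length instead of calling strlen() on bytes that may change underneath. A single-threaded sketch of the snapshot idiom (the race itself is not reproduced; the types are stand-ins):

    #include <stdio.h>
    #include <string.h>

    #define INLINE_NAME_LEN 32

    struct dent {
            unsigned int len;               /* authoritative length */
            char iname[INLINE_NAME_LEN];    /* may be rewritten by a rename */
    };

    /* Snapshot the name using the stored length; never strlen() bytes
     * another thread may be rewriting (they can briefly lack a NUL). */
    static void emit_name(const struct dent *d)
    {
            char snap[INLINE_NAME_LEN];
            unsigned int len = d->len;

            if (len >= sizeof(snap))
                    len = sizeof(snap) - 1;
            memcpy(snap, d->iname, len);
            snap[len] = '\0';
            printf("entry: %s (len %u)\n", snap, len);
    }

    int main(void)
    {
            struct dent d = { .len = 5 };

            memcpy(d.iname, "hello", 5);
            emit_name(&d);
            return 0;
    }
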
60175diff --git a/fs/coredump.c b/fs/coredump.c
60176index b5c86ff..0dac262 100644
60177--- a/fs/coredump.c
60178+++ b/fs/coredump.c
60179@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60180 struct pipe_inode_info *pipe = file->private_data;
60181
60182 pipe_lock(pipe);
60183- pipe->readers++;
60184- pipe->writers--;
60185+ atomic_inc(&pipe->readers);
60186+ atomic_dec(&pipe->writers);
60187 wake_up_interruptible_sync(&pipe->wait);
60188 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60189 pipe_unlock(pipe);
60190@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60191 * We actually want wait_event_freezable() but then we need
60192 * to clear TIF_SIGPENDING and improve dump_interrupted().
60193 */
60194- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60195+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60196
60197 pipe_lock(pipe);
60198- pipe->readers--;
60199- pipe->writers++;
60200+ atomic_dec(&pipe->readers);
60201+ atomic_inc(&pipe->writers);
60202 pipe_unlock(pipe);
60203 }
60204
60205@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60206 struct files_struct *displaced;
60207 bool need_nonrelative = false;
60208 bool core_dumped = false;
60209- static atomic_t core_dump_count = ATOMIC_INIT(0);
60210+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60211+ long signr = siginfo->si_signo;
60212+ int dumpable;
60213 struct coredump_params cprm = {
60214 .siginfo = siginfo,
60215 .regs = signal_pt_regs(),
60216@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60217 .mm_flags = mm->flags,
60218 };
60219
60220- audit_core_dumps(siginfo->si_signo);
60221+ audit_core_dumps(signr);
60222+
60223+ dumpable = __get_dumpable(cprm.mm_flags);
60224+
60225+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60226+ gr_handle_brute_attach(dumpable);
60227
60228 binfmt = mm->binfmt;
60229 if (!binfmt || !binfmt->core_dump)
60230 goto fail;
60231- if (!__get_dumpable(cprm.mm_flags))
60232+ if (!dumpable)
60233 goto fail;
60234
60235 cred = prepare_creds();
60236@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60237 need_nonrelative = true;
60238 }
60239
60240- retval = coredump_wait(siginfo->si_signo, &core_state);
60241+ retval = coredump_wait(signr, &core_state);
60242 if (retval < 0)
60243 goto fail_creds;
60244
60245@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60246 }
60247 cprm.limit = RLIM_INFINITY;
60248
60249- dump_count = atomic_inc_return(&core_dump_count);
60250+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60251 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60252 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60253 task_tgid_vnr(current), current->comm);
60254@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60255 } else {
60256 struct inode *inode;
60257
60258+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60259+
60260 if (cprm.limit < binfmt->min_coredump)
60261 goto fail_unlock;
60262
60263@@ -681,7 +690,7 @@ close_fail:
60264 filp_close(cprm.file, NULL);
60265 fail_dropcount:
60266 if (ispipe)
60267- atomic_dec(&core_dump_count);
60268+ atomic_dec_unchecked(&core_dump_count);
60269 fail_unlock:
60270 kfree(cn.corename);
60271 coredump_finish(mm, core_dumped);
60272@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60273 struct file *file = cprm->file;
60274 loff_t pos = file->f_pos;
60275 ssize_t n;
60276+
60277+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60278 if (cprm->written + nr > cprm->limit)
60279 return 0;
60280 while (nr) {
60281diff --git a/fs/dcache.c b/fs/dcache.c
60282index e368d4f..b40ba59 100644
60283--- a/fs/dcache.c
60284+++ b/fs/dcache.c
60285@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
60286 * dentry_iput drops the locks, at which point nobody (except
60287 * transient RCU lookups) can reach this dentry.
60288 */
60289- BUG_ON((int)dentry->d_lockref.count > 0);
60290+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60291 this_cpu_dec(nr_dentry);
60292 if (dentry->d_op && dentry->d_op->d_release)
60293 dentry->d_op->d_release(dentry);
60294@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60295 struct dentry *parent = dentry->d_parent;
60296 if (IS_ROOT(dentry))
60297 return NULL;
60298- if (unlikely((int)dentry->d_lockref.count < 0))
60299+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60300 return NULL;
60301 if (likely(spin_trylock(&parent->d_lock)))
60302 return parent;
60303@@ -638,7 +638,7 @@ repeat:
60304 dentry->d_flags |= DCACHE_REFERENCED;
60305 dentry_lru_add(dentry);
60306
60307- dentry->d_lockref.count--;
60308+ __lockref_dec(&dentry->d_lockref);
60309 spin_unlock(&dentry->d_lock);
60310 return;
60311
60312@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
60313 /* This must be called with d_lock held */
60314 static inline void __dget_dlock(struct dentry *dentry)
60315 {
60316- dentry->d_lockref.count++;
60317+ __lockref_inc(&dentry->d_lockref);
60318 }
60319
60320 static inline void __dget(struct dentry *dentry)
60321@@ -694,8 +694,8 @@ repeat:
60322 goto repeat;
60323 }
60324 rcu_read_unlock();
60325- BUG_ON(!ret->d_lockref.count);
60326- ret->d_lockref.count++;
60327+ BUG_ON(!__lockref_read(&ret->d_lockref));
60328+ __lockref_inc(&ret->d_lockref);
60329 spin_unlock(&ret->d_lock);
60330 return ret;
60331 }
60332@@ -773,9 +773,9 @@ restart:
60333 spin_lock(&inode->i_lock);
60334 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60335 spin_lock(&dentry->d_lock);
60336- if (!dentry->d_lockref.count) {
60337+ if (!__lockref_read(&dentry->d_lockref)) {
60338 struct dentry *parent = lock_parent(dentry);
60339- if (likely(!dentry->d_lockref.count)) {
60340+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60341 __dentry_kill(dentry);
60342 dput(parent);
60343 goto restart;
60344@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
60345 * We found an inuse dentry which was not removed from
60346 * the LRU because of laziness during lookup. Do not free it.
60347 */
60348- if ((int)dentry->d_lockref.count > 0) {
60349+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60350 spin_unlock(&dentry->d_lock);
60351 if (parent)
60352 spin_unlock(&parent->d_lock);
60353@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
60354 dentry = parent;
60355 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60356 parent = lock_parent(dentry);
60357- if (dentry->d_lockref.count != 1) {
60358- dentry->d_lockref.count--;
60359+ if (__lockref_read(&dentry->d_lockref) != 1) {
60360+			__lockref_dec(&dentry->d_lockref);
60361 spin_unlock(&dentry->d_lock);
60362 if (parent)
60363 spin_unlock(&parent->d_lock);
60364@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60365 * counts, just remove them from the LRU. Otherwise give them
60366 * another pass through the LRU.
60367 */
60368- if (dentry->d_lockref.count) {
60369+ if (__lockref_read(&dentry->d_lockref) > 0) {
60370 d_lru_isolate(dentry);
60371 spin_unlock(&dentry->d_lock);
60372 return LRU_REMOVED;
60373@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60374 } else {
60375 if (dentry->d_flags & DCACHE_LRU_LIST)
60376 d_lru_del(dentry);
60377- if (!dentry->d_lockref.count) {
60378+ if (!__lockref_read(&dentry->d_lockref)) {
60379 d_shrink_add(dentry, &data->dispose);
60380 data->found++;
60381 }
60382@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60383 return D_WALK_CONTINUE;
60384
60385 /* root with refcount 1 is fine */
60386- if (dentry == _data && dentry->d_lockref.count == 1)
60387+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60388 return D_WALK_CONTINUE;
60389
60390 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60391@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60392 dentry->d_inode ?
60393 dentry->d_inode->i_ino : 0UL,
60394 dentry,
60395- dentry->d_lockref.count,
60396+ __lockref_read(&dentry->d_lockref),
60397 dentry->d_sb->s_type->name,
60398 dentry->d_sb->s_id);
60399 WARN_ON(1);
60400@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60401 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60402 if (name->len > DNAME_INLINE_LEN-1) {
60403 size_t size = offsetof(struct external_name, name[1]);
60404- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60405+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60406 if (!p) {
60407 kmem_cache_free(dentry_cache, dentry);
60408 return NULL;
60409@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60410 smp_wmb();
60411 dentry->d_name.name = dname;
60412
60413- dentry->d_lockref.count = 1;
60414+ __lockref_set(&dentry->d_lockref, 1);
60415 dentry->d_flags = 0;
60416 spin_lock_init(&dentry->d_lock);
60417 seqcount_init(&dentry->d_seq);
60418@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60419 dentry->d_sb = sb;
60420 dentry->d_op = NULL;
60421 dentry->d_fsdata = NULL;
60422+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60423+ atomic_set(&dentry->chroot_refcnt, 0);
60424+#endif
60425 INIT_HLIST_BL_NODE(&dentry->d_hash);
60426 INIT_LIST_HEAD(&dentry->d_lru);
60427 INIT_LIST_HEAD(&dentry->d_subdirs);
60428@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60429 goto next;
60430 }
60431
60432- dentry->d_lockref.count++;
60433+ __lockref_inc(&dentry->d_lockref);
60434 found = dentry;
60435 spin_unlock(&dentry->d_lock);
60436 break;
60437@@ -2250,7 +2253,7 @@ again:
60438 spin_lock(&dentry->d_lock);
60439 inode = dentry->d_inode;
60440 isdir = S_ISDIR(inode->i_mode);
60441- if (dentry->d_lockref.count == 1) {
60442+ if (__lockref_read(&dentry->d_lockref) == 1) {
60443 if (!spin_trylock(&inode->i_lock)) {
60444 spin_unlock(&dentry->d_lock);
60445 cpu_relax();
60446@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60447
60448 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60449 dentry->d_flags |= DCACHE_GENOCIDE;
60450- dentry->d_lockref.count--;
60451+ __lockref_dec(&dentry->d_lockref);
60452 }
60453 }
60454 return D_WALK_CONTINUE;
60455@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
60456 mempages -= reserve;
60457
60458 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60459- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60460+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60461+ SLAB_NO_SANITIZE, NULL);
60462
60463 dcache_init();
60464 inode_init();
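
The dcache hunks replace every direct d_lockref.count access with __lockref_read/__lockref_set/__lockref_inc/__lockref_dec wrappers, so the count's representation can be hardened without revisiting each call site. A userspace sketch of such an accessor layer, using C11 atomics as a stand-in for the kernel type:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for struct lockref; the patch changes the count's type,
     * so call sites must never touch the field directly. */
    struct lockref {
            atomic_int count;
    };

    static int  lockref_read(struct lockref *l) { return atomic_load(&l->count); }
    static void lockref_set(struct lockref *l, int v) { atomic_store(&l->count, v); }
    static void lockref_inc(struct lockref *l) { atomic_fetch_add(&l->count, 1); }
    static void lockref_dec(struct lockref *l) { atomic_fetch_sub(&l->count, 1); }

    int main(void)
    {
            struct lockref ref;

            lockref_set(&ref, 1);           /* new dentry starts with one ref */
            lockref_inc(&ref);              /* a lookup takes a reference */
            lockref_dec(&ref);              /* dput releases it */
            printf("count = %d\n", lockref_read(&ref));
            return 0;
    }
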
60465diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60466index 6f0ce53..780f4f8 100644
60467--- a/fs/debugfs/inode.c
60468+++ b/fs/debugfs/inode.c
60469@@ -425,7 +425,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60470 */
60471 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60472 {
60473+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60474+ return __create_file(name, S_IFDIR | S_IRWXU,
60475+#else
60476 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60477+#endif
60478 parent, NULL, NULL);
60479 }
60480 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60481diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60482index 1686dc2..9611c50 100644
60483--- a/fs/ecryptfs/inode.c
60484+++ b/fs/ecryptfs/inode.c
60485@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60486 old_fs = get_fs();
60487 set_fs(get_ds());
60488 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60489- (char __user *)lower_buf,
60490+ (char __force_user *)lower_buf,
60491 PATH_MAX);
60492 set_fs(old_fs);
60493 if (rc < 0)
60494diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60495index e4141f2..d8263e8 100644
60496--- a/fs/ecryptfs/miscdev.c
60497+++ b/fs/ecryptfs/miscdev.c
60498@@ -304,7 +304,7 @@ check_list:
60499 goto out_unlock_msg_ctx;
60500 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60501 if (msg_ctx->msg) {
60502- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60503+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60504 goto out_unlock_msg_ctx;
60505 i += packet_length_size;
60506 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
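
The miscdev hunk rejects the copy when packet_length_size exceeds sizeof(packet_length), bounding copy_to_user() by the size of the source buffer rather than trusting the computed length. A userspace sketch of the same guard (the function and its names are hypothetical; only the bounds check mirrors the hunk):

    #include <stdio.h>
    #include <string.h>

    /* Copy a caller-chosen number of bytes out of a fixed-size source,
     * refusing any request longer than the source actually is. */
    static int copy_packet_length(char *dst, size_t dst_size,
                                  const char packet_length[8], size_t want)
    {
            if (want > 8 || want > dst_size)
                    return -1;              /* would read or write out of bounds */
            memcpy(dst, packet_length, want);
            return 0;
    }

    int main(void)
    {
            char src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
            char dst[8];

            printf("ok:  %d\n", copy_packet_length(dst, sizeof(dst), src, 4));
            printf("bad: %d\n", copy_packet_length(dst, sizeof(dst), src, 64));
            return 0;
    }
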
60507diff --git a/fs/exec.c b/fs/exec.c
60508index ad8798e..5f872c9 100644
60509--- a/fs/exec.c
60510+++ b/fs/exec.c
60511@@ -56,8 +56,20 @@
60512 #include <linux/pipe_fs_i.h>
60513 #include <linux/oom.h>
60514 #include <linux/compat.h>
60515+#include <linux/random.h>
60516+#include <linux/seq_file.h>
60517+#include <linux/coredump.h>
60518+#include <linux/mman.h>
60519+
60520+#ifdef CONFIG_PAX_REFCOUNT
60521+#include <linux/kallsyms.h>
60522+#include <linux/kdebug.h>
60523+#endif
60524+
60525+#include <trace/events/fs.h>
60526
60527 #include <asm/uaccess.h>
60528+#include <asm/sections.h>
60529 #include <asm/mmu_context.h>
60530 #include <asm/tlb.h>
60531
60532@@ -66,19 +78,34 @@
60533
60534 #include <trace/events/sched.h>
60535
60536+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60537+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60538+{
60539+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
60540+}
60541+#endif
60542+
60543+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
60544+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60545+EXPORT_SYMBOL(pax_set_initial_flags_func);
60546+#endif
60547+
60548 int suid_dumpable = 0;
60549
60550 static LIST_HEAD(formats);
60551 static DEFINE_RWLOCK(binfmt_lock);
60552
60553+extern int gr_process_kernel_exec_ban(void);
60554+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
60555+
60556 void __register_binfmt(struct linux_binfmt * fmt, int insert)
60557 {
60558 BUG_ON(!fmt);
60559 if (WARN_ON(!fmt->load_binary))
60560 return;
60561 write_lock(&binfmt_lock);
60562- insert ? list_add(&fmt->lh, &formats) :
60563- list_add_tail(&fmt->lh, &formats);
60564+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
60565+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
60566 write_unlock(&binfmt_lock);
60567 }
60568
60569@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
60570 void unregister_binfmt(struct linux_binfmt * fmt)
60571 {
60572 write_lock(&binfmt_lock);
60573- list_del(&fmt->lh);
60574+ pax_list_del((struct list_head *)&fmt->lh);
60575 write_unlock(&binfmt_lock);
60576 }
60577
60578@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60579 int write)
60580 {
60581 struct page *page;
60582- int ret;
60583
60584-#ifdef CONFIG_STACK_GROWSUP
60585- if (write) {
60586- ret = expand_downwards(bprm->vma, pos);
60587- if (ret < 0)
60588- return NULL;
60589- }
60590-#endif
60591- ret = get_user_pages(current, bprm->mm, pos,
60592- 1, write, 1, &page, NULL);
60593- if (ret <= 0)
60594+ if (0 > expand_downwards(bprm->vma, pos))
60595+ return NULL;
60596+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
60597 return NULL;
60598
60599 if (write) {
60600@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60601 if (size <= ARG_MAX)
60602 return page;
60603
60604+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60605+ // only allow 512KB for argv+env on suid/sgid binaries
60606+ // to prevent easy ASLR exhaustion
60607+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
60608+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
60609+ (size > (512 * 1024))) {
60610+ put_page(page);
60611+ return NULL;
60612+ }
60613+#endif
60614+
60615 /*
60616 * Limit to 1/4-th the stack size for the argv+env strings.
60617 * This ensures that:
60618@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60619 vma->vm_end = STACK_TOP_MAX;
60620 vma->vm_start = vma->vm_end - PAGE_SIZE;
60621 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
60622+
60623+#ifdef CONFIG_PAX_SEGMEXEC
60624+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
60625+#endif
60626+
60627 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
60628 INIT_LIST_HEAD(&vma->anon_vma_chain);
60629
60630@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60631 arch_bprm_mm_init(mm, vma);
60632 up_write(&mm->mmap_sem);
60633 bprm->p = vma->vm_end - sizeof(void *);
60634+
60635+#ifdef CONFIG_PAX_RANDUSTACK
60636+ if (randomize_va_space)
60637+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
60638+#endif
60639+
60640 return 0;
60641 err:
60642 up_write(&mm->mmap_sem);
60643@@ -396,7 +437,7 @@ struct user_arg_ptr {
60644 } ptr;
60645 };
60646
60647-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60648+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60649 {
60650 const char __user *native;
60651
60652@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60653 compat_uptr_t compat;
60654
60655 if (get_user(compat, argv.ptr.compat + nr))
60656- return ERR_PTR(-EFAULT);
60657+ return (const char __force_user *)ERR_PTR(-EFAULT);
60658
60659 return compat_ptr(compat);
60660 }
60661 #endif
60662
60663 if (get_user(native, argv.ptr.native + nr))
60664- return ERR_PTR(-EFAULT);
60665+ return (const char __force_user *)ERR_PTR(-EFAULT);
60666
60667 return native;
60668 }
60669@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
60670 if (!p)
60671 break;
60672
60673- if (IS_ERR(p))
60674+ if (IS_ERR((const char __force_kernel *)p))
60675 return -EFAULT;
60676
60677 if (i >= max)
60678@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
60679
60680 ret = -EFAULT;
60681 str = get_user_arg_ptr(argv, argc);
60682- if (IS_ERR(str))
60683+ if (IS_ERR((const char __force_kernel *)str))
60684 goto out;
60685
60686 len = strnlen_user(str, MAX_ARG_STRLEN);
60687@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
60688 int r;
60689 mm_segment_t oldfs = get_fs();
60690 struct user_arg_ptr argv = {
60691- .ptr.native = (const char __user *const __user *)__argv,
60692+ .ptr.native = (const char __user * const __force_user *)__argv,
60693 };
60694
60695 set_fs(KERNEL_DS);
60696@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60697 unsigned long new_end = old_end - shift;
60698 struct mmu_gather tlb;
60699
60700- BUG_ON(new_start > new_end);
60701+ if (new_start >= new_end || new_start < mmap_min_addr)
60702+ return -ENOMEM;
60703
60704 /*
60705 * ensure there are no vmas between where we want to go
60706@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60707 if (vma != find_vma(mm, new_start))
60708 return -EFAULT;
60709
60710+#ifdef CONFIG_PAX_SEGMEXEC
60711+ BUG_ON(pax_find_mirror_vma(vma));
60712+#endif
60713+
60714 /*
60715 * cover the whole range: [new_start, old_end)
60716 */
60717@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60718 stack_top = arch_align_stack(stack_top);
60719 stack_top = PAGE_ALIGN(stack_top);
60720
60721- if (unlikely(stack_top < mmap_min_addr) ||
60722- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
60723- return -ENOMEM;
60724-
60725 stack_shift = vma->vm_end - stack_top;
60726
60727 bprm->p -= stack_shift;
60728@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
60729 bprm->exec -= stack_shift;
60730
60731 down_write(&mm->mmap_sem);
60732+
60733+ /* Move stack pages down in memory. */
60734+ if (stack_shift) {
60735+ ret = shift_arg_pages(vma, stack_shift);
60736+ if (ret)
60737+ goto out_unlock;
60738+ }
60739+
60740 vm_flags = VM_STACK_FLAGS;
60741
60742+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60743+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60744+ vm_flags &= ~VM_EXEC;
60745+
60746+#ifdef CONFIG_PAX_MPROTECT
60747+ if (mm->pax_flags & MF_PAX_MPROTECT)
60748+ vm_flags &= ~VM_MAYEXEC;
60749+#endif
60750+
60751+ }
60752+#endif
60753+
60754 /*
60755 * Adjust stack execute permissions; explicitly enable for
60756 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
60757@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60758 goto out_unlock;
60759 BUG_ON(prev != vma);
60760
60761- /* Move stack pages down in memory. */
60762- if (stack_shift) {
60763- ret = shift_arg_pages(vma, stack_shift);
60764- if (ret)
60765- goto out_unlock;
60766- }
60767-
60768 /* mprotect_fixup is overkill to remove the temporary stack flags */
60769 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
60770
60771@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
60772 #endif
60773 current->mm->start_stack = bprm->p;
60774 ret = expand_stack(vma, stack_base);
60775+
60776+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
60777+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
60778+ unsigned long size;
60779+ vm_flags_t vm_flags;
60780+
60781+ size = STACK_TOP - vma->vm_end;
60782+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
60783+
60784+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
60785+
60786+#ifdef CONFIG_X86
60787+ if (!ret) {
60788+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
60789+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
60790+ }
60791+#endif
60792+
60793+ }
60794+#endif
60795+
60796 if (ret)
60797 ret = -EFAULT;
60798
60799@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
60800 if (err)
60801 goto exit;
60802
60803- if (name->name[0] != '\0')
60804+ if (name->name[0] != '\0') {
60805 fsnotify_open(file);
60806+ trace_open_exec(name->name);
60807+ }
60808
60809 out:
60810 return file;
60811@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
60812 old_fs = get_fs();
60813 set_fs(get_ds());
60814 /* The cast to a user pointer is valid due to the set_fs() */
60815- result = vfs_read(file, (void __user *)addr, count, &pos);
60816+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
60817 set_fs(old_fs);
60818 return result;
60819 }
60820@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
60821 tsk->mm = mm;
60822 tsk->active_mm = mm;
60823 activate_mm(active_mm, mm);
60824+ populate_stack();
60825 tsk->mm->vmacache_seqnum = 0;
60826 vmacache_flush(tsk);
60827 task_unlock(tsk);
60828@@ -1252,7 +1331,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
60829 }
60830 rcu_read_unlock();
60831
60832- if (p->fs->users > n_fs)
60833+ if (atomic_read(&p->fs->users) > n_fs)
60834 bprm->unsafe |= LSM_UNSAFE_SHARE;
60835 else
60836 p->fs->in_exec = 1;
60837@@ -1433,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
60838 return ret;
60839 }
60840
60841+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60842+static DEFINE_PER_CPU(u64, exec_counter);
60843+static int __init init_exec_counters(void)
60844+{
60845+ unsigned int cpu;
60846+
60847+ for_each_possible_cpu(cpu) {
60848+ per_cpu(exec_counter, cpu) = (u64)cpu;
60849+ }
60850+
60851+ return 0;
60852+}
60853+early_initcall(init_exec_counters);
60854+static inline void increment_exec_counter(void)
60855+{
60856+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
60857+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
60858+}
60859+#else
60860+static inline void increment_exec_counter(void) {}
60861+#endif
60862+
60863+extern void gr_handle_exec_args(struct linux_binprm *bprm,
60864+ struct user_arg_ptr argv);
60865+
60866 /*
60867 * sys_execve() executes a new program.
60868 */
60869@@ -1441,6 +1545,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60870 struct user_arg_ptr envp,
60871 int flags)
60872 {
60873+#ifdef CONFIG_GRKERNSEC
60874+ struct file *old_exec_file;
60875+ struct acl_subject_label *old_acl;
60876+ struct rlimit old_rlim[RLIM_NLIMITS];
60877+#endif
60878 char *pathbuf = NULL;
60879 struct linux_binprm *bprm;
60880 struct file *file;
60881@@ -1450,6 +1559,8 @@ static int do_execveat_common(int fd, struct filename *filename,
60882 if (IS_ERR(filename))
60883 return PTR_ERR(filename);
60884
60885+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
60886+
60887 /*
60888 * We move the actual failure in case of RLIMIT_NPROC excess from
60889 * set*uid() to execve() because too many poorly written programs
60890@@ -1487,6 +1598,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60891 if (IS_ERR(file))
60892 goto out_unmark;
60893
60894+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
60895+ retval = -EPERM;
60896+ goto out_unmark;
60897+ }
60898+
60899 sched_exec();
60900
60901 bprm->file = file;
60902@@ -1513,6 +1629,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60903 }
60904 bprm->interp = bprm->filename;
60905
60906+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
60907+ retval = -EACCES;
60908+ goto out_unmark;
60909+ }
60910+
60911 retval = bprm_mm_init(bprm);
60912 if (retval)
60913 goto out_unmark;
60914@@ -1529,24 +1650,70 @@ static int do_execveat_common(int fd, struct filename *filename,
60915 if (retval < 0)
60916 goto out;
60917
60918+#ifdef CONFIG_GRKERNSEC
60919+ old_acl = current->acl;
60920+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
60921+ old_exec_file = current->exec_file;
60922+ get_file(file);
60923+ current->exec_file = file;
60924+#endif
60925+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60926+ /* limit suid stack to 8MB
60927+ * we saved the old limits above and will restore them if this exec fails
60928+ */
60929+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
60930+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
60931+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
60932+#endif
60933+
60934+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
60935+ retval = -EPERM;
60936+ goto out_fail;
60937+ }
60938+
60939+ if (!gr_tpe_allow(file)) {
60940+ retval = -EACCES;
60941+ goto out_fail;
60942+ }
60943+
60944+ if (gr_check_crash_exec(file)) {
60945+ retval = -EACCES;
60946+ goto out_fail;
60947+ }
60948+
60949+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
60950+ bprm->unsafe);
60951+ if (retval < 0)
60952+ goto out_fail;
60953+
60954 retval = copy_strings_kernel(1, &bprm->filename, bprm);
60955 if (retval < 0)
60956- goto out;
60957+ goto out_fail;
60958
60959 bprm->exec = bprm->p;
60960 retval = copy_strings(bprm->envc, envp, bprm);
60961 if (retval < 0)
60962- goto out;
60963+ goto out_fail;
60964
60965 retval = copy_strings(bprm->argc, argv, bprm);
60966 if (retval < 0)
60967- goto out;
60968+ goto out_fail;
60969+
60970+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
60971+
60972+ gr_handle_exec_args(bprm, argv);
60973
60974 retval = exec_binprm(bprm);
60975 if (retval < 0)
60976- goto out;
60977+ goto out_fail;
60978+#ifdef CONFIG_GRKERNSEC
60979+ if (old_exec_file)
60980+ fput(old_exec_file);
60981+#endif
60982
60983 /* execve succeeded */
60984+
60985+ increment_exec_counter();
60986 current->fs->in_exec = 0;
60987 current->in_execve = 0;
60988 acct_update_integrals(current);
60989@@ -1558,6 +1725,14 @@ static int do_execveat_common(int fd, struct filename *filename,
60990 put_files_struct(displaced);
60991 return retval;
60992
60993+out_fail:
60994+#ifdef CONFIG_GRKERNSEC
60995+ current->acl = old_acl;
60996+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
60997+ fput(current->exec_file);
60998+ current->exec_file = old_exec_file;
60999+#endif
61000+
61001 out:
61002 if (bprm->mm) {
61003 acct_arg_size(bprm, 0);
61004@@ -1704,3 +1879,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61005 argv, envp, flags);
61006 }
61007 #endif
61008+
61009+int pax_check_flags(unsigned long *flags)
61010+{
61011+ int retval = 0;
61012+
61013+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61014+ if (*flags & MF_PAX_SEGMEXEC)
61015+ {
61016+ *flags &= ~MF_PAX_SEGMEXEC;
61017+ retval = -EINVAL;
61018+ }
61019+#endif
61020+
61021+ if ((*flags & MF_PAX_PAGEEXEC)
61022+
61023+#ifdef CONFIG_PAX_PAGEEXEC
61024+ && (*flags & MF_PAX_SEGMEXEC)
61025+#endif
61026+
61027+ )
61028+ {
61029+ *flags &= ~MF_PAX_PAGEEXEC;
61030+ retval = -EINVAL;
61031+ }
61032+
61033+ if ((*flags & MF_PAX_MPROTECT)
61034+
61035+#ifdef CONFIG_PAX_MPROTECT
61036+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61037+#endif
61038+
61039+ )
61040+ {
61041+ *flags &= ~MF_PAX_MPROTECT;
61042+ retval = -EINVAL;
61043+ }
61044+
61045+ if ((*flags & MF_PAX_EMUTRAMP)
61046+
61047+#ifdef CONFIG_PAX_EMUTRAMP
61048+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61049+#endif
61050+
61051+ )
61052+ {
61053+ *flags &= ~MF_PAX_EMUTRAMP;
61054+ retval = -EINVAL;
61055+ }
61056+
61057+ return retval;
61058+}
61059+
61060+EXPORT_SYMBOL(pax_check_flags);
61061+
61062+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61063+char *pax_get_path(const struct path *path, char *buf, int buflen)
61064+{
61065+ char *pathname = d_path(path, buf, buflen);
61066+
61067+ if (IS_ERR(pathname))
61068+ goto toolong;
61069+
61070+ pathname = mangle_path(buf, pathname, "\t\n\\");
61071+ if (!pathname)
61072+ goto toolong;
61073+
61074+ *pathname = 0;
61075+ return buf;
61076+
61077+toolong:
61078+ return "<path too long>";
61079+}
61080+EXPORT_SYMBOL(pax_get_path);
61081+
61082+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61083+{
61084+ struct task_struct *tsk = current;
61085+ struct mm_struct *mm = current->mm;
61086+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61087+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61088+ char *path_exec = NULL;
61089+ char *path_fault = NULL;
61090+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61091+ siginfo_t info = { };
61092+
61093+ if (buffer_exec && buffer_fault) {
61094+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61095+
61096+ down_read(&mm->mmap_sem);
61097+ vma = mm->mmap;
61098+ while (vma && (!vma_exec || !vma_fault)) {
61099+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61100+ vma_exec = vma;
61101+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61102+ vma_fault = vma;
61103+ vma = vma->vm_next;
61104+ }
61105+ if (vma_exec)
61106+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61107+ if (vma_fault) {
61108+ start = vma_fault->vm_start;
61109+ end = vma_fault->vm_end;
61110+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61111+ if (vma_fault->vm_file)
61112+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61113+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61114+ path_fault = "<heap>";
61115+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61116+ path_fault = "<stack>";
61117+ else
61118+ path_fault = "<anonymous mapping>";
61119+ }
61120+ up_read(&mm->mmap_sem);
61121+ }
61122+ if (tsk->signal->curr_ip)
61123+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61124+ else
61125+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61126+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61127+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61128+ free_page((unsigned long)buffer_exec);
61129+ free_page((unsigned long)buffer_fault);
61130+ pax_report_insns(regs, pc, sp);
61131+ info.si_signo = SIGKILL;
61132+ info.si_errno = 0;
61133+ info.si_code = SI_KERNEL;
61134+ info.si_pid = 0;
61135+ info.si_uid = 0;
61136+ do_coredump(&info);
61137+}
61138+#endif
61139+
61140+#ifdef CONFIG_PAX_REFCOUNT
61141+void pax_report_refcount_overflow(struct pt_regs *regs)
61142+{
61143+ if (current->signal->curr_ip)
61144+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61145+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61146+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61147+ else
61148+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61149+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61150+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
61151+ preempt_disable();
61152+ show_regs(regs);
61153+ preempt_enable();
61154+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61155+}
61156+#endif
61157+
61158+#ifdef CONFIG_PAX_USERCOPY
61159+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61160+static noinline int check_stack_object(const void *obj, unsigned long len)
61161+{
61162+ const void * const stack = task_stack_page(current);
61163+ const void * const stackend = stack + THREAD_SIZE;
61164+
61165+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61166+ const void *frame = NULL;
61167+ const void *oldframe;
61168+#endif
61169+
61170+ if (obj + len < obj)
61171+ return -1;
61172+
61173+ if (obj + len <= stack || stackend <= obj)
61174+ return 0;
61175+
61176+ if (obj < stack || stackend < obj + len)
61177+ return -1;
61178+
61179+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61180+ oldframe = __builtin_frame_address(1);
61181+ if (oldframe)
61182+ frame = __builtin_frame_address(2);
61183+ /*
61184+ low ----------------------------------------------> high
61185+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61186+ ^----------------^
61187+ allow copies only within here
61188+ */
61189+ while (stack <= frame && frame < stackend) {
61190+ /* if obj + len extends past the last frame, this
61191+ check won't pass and the next frame will be 0,
61192+ causing us to bail out and correctly report
61193+ the copy as invalid
61194+ */
61195+ if (obj + len <= frame)
61196+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61197+ oldframe = frame;
61198+ frame = *(const void * const *)frame;
61199+ }
61200+ return -1;
61201+#else
61202+ return 1;
61203+#endif
61204+}
61205+
61206+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61207+{
61208+ if (current->signal->curr_ip)
61209+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61210+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61211+ else
61212+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61213+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61214+ dump_stack();
61215+ gr_handle_kernel_exploit();
61216+ do_group_exit(SIGKILL);
61217+}
61218+#endif
61219+
61220+#ifdef CONFIG_PAX_USERCOPY
61221+
61222+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61223+{
61224+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61225+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61226+#ifdef CONFIG_MODULES
61227+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61228+#else
61229+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61230+#endif
61231+
61232+#else
61233+ unsigned long textlow = (unsigned long)_stext;
61234+ unsigned long texthigh = (unsigned long)_etext;
61235+
61236+#ifdef CONFIG_X86_64
61237+ /* check against linear mapping as well */
61238+ if (high > (unsigned long)__va(__pa(textlow)) &&
61239+ low < (unsigned long)__va(__pa(texthigh)))
61240+ return true;
61241+#endif
61242+
61243+#endif
61244+
61245+ if (high <= textlow || low >= texthigh)
61246+ return false;
61247+ else
61248+ return true;
61249+}
61250+#endif
61251+
61252+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61253+{
61254+#ifdef CONFIG_PAX_USERCOPY
61255+ const char *type;
61256+#endif
61257+
61258+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61259+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61260+ unsigned long currentsp = (unsigned long)&stackstart;
61261+ if (unlikely((currentsp < stackstart + 512 ||
61262+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61263+ BUG();
61264+#endif
61265+
61266+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61267+ if (const_size)
61268+ return;
61269+#endif
61270+
61271+#ifdef CONFIG_PAX_USERCOPY
61272+ if (!n)
61273+ return;
61274+
61275+ type = check_heap_object(ptr, n);
61276+ if (!type) {
61277+ int ret = check_stack_object(ptr, n);
61278+ if (ret == 1 || ret == 2)
61279+ return;
61280+ if (ret == 0) {
61281+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61282+ type = "<kernel text>";
61283+ else
61284+ return;
61285+ } else
61286+ type = "<process stack>";
61287+ }
61288+
61289+ pax_report_usercopy(ptr, n, to_user, type);
61290+#endif
61291+
61292+}
61293+EXPORT_SYMBOL(__check_object_size);
61294+
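__check_object_size() is meant to be invoked from the arch uaccess wrappers just before the raw copy. A simplified sketch of that call pattern — not the exact x86 wrapper; the name and the __builtin_constant_p() gating are assumptions based on how const_size is consumed above:

    /* sketch: validate the kernel-side object, then do the raw copy */
    static inline unsigned long
    checked_copy_to_user(void __user *to, const void *from, unsigned long n)
    {
            /* constant sizes are skipped above unless USERCOPY_DEBUG */
            __check_object_size(from, n, true, __builtin_constant_p(n));
            return __copy_to_user(to, from, n);
    }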
61295+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61296+void pax_track_stack(void)
61297+{
61298+ unsigned long sp = (unsigned long)&sp;
61299+ if (sp < current_thread_info()->lowest_stack &&
61300+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61301+ current_thread_info()->lowest_stack = sp;
61302+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61303+ BUG();
61304+}
61305+EXPORT_SYMBOL(pax_track_stack);
61306+#endif
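
pax_track_stack() keeps a per-thread low-water mark of the kernel stack; the STACKLEAK erase on the way back to userspace then only has to poison the span the syscall actually reached, instead of all of THREAD_SIZE. Conceptually (the real erase lives in arch assembly; the poison value here is illustrative):

    /* sketch: clear just the used region [lowest_stack, sp) */
    static void erase_used_stack(unsigned long *lowest_stack, unsigned long *sp)
    {
            for (unsigned long *p = lowest_stack; p < sp; p++)
                    *p = 0xdead10ccUL;      /* poison, so stale kernel data
                                               can't leak into later frames */
    }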
61307+
61308+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61309+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61310+{
61311+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
61312+ dump_stack();
61313+ do_group_exit(SIGKILL);
61314+}
61315+EXPORT_SYMBOL(report_size_overflow);
61316+#endif
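
report_size_overflow() is the runtime landing pad for the size_overflow GCC plugin, which instruments expansions of size expressions and branches here when one wraps. The check the plugin emits is morally the compiler builtin below (a sketch; the plugin works on GIMPLE, not via this builtin):

    #include <stddef.h>

    /* sketch: detect the wrap the plugin guards against */
    static int checked_mul(size_t a, size_t b, size_t *out)
    {
            if (__builtin_mul_overflow(a, b, out))
                    return -1;      /* in-kernel: report_size_overflow(),
                                       then do_group_exit(SIGKILL) */
            return 0;
    }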
61317diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61318index 9f9992b..8b59411 100644
61319--- a/fs/ext2/balloc.c
61320+++ b/fs/ext2/balloc.c
61321@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61322
61323 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61324 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61325- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61326+ if (free_blocks < root_blocks + 1 &&
61327 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61328 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61329- !in_group_p (sbi->s_resgid))) {
61330+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61331 return 0;
61332 }
61333 return 1;
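
The ext2/ext3/ext4 balloc hunks all make the same move: the capability test goes from first to last and from capable() to grsec's non-logging capable_nolog(). Since && short-circuits left to right, the audited capability lookup now runs only when the cheaper uid/gid exemptions have already failed, and routine reserved-block probing stays out of the logs. The shape of the test, schematically:

    /* sketch: order the test so the side-effectful capability lookup
     * runs only when it is actually decisive */
    static int has_free_blocks(int low, int resuid_ok, int resgid_ok)
    {
            if (low && !resuid_ok && !resgid_ok &&
                !capable_nolog(CAP_SYS_RESOURCE))   /* evaluated last */
                    return 0;                       /* deny the allocation */
            return 1;
    }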
61334diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61335index ae55fdd..5e64c27 100644
61336--- a/fs/ext2/super.c
61337+++ b/fs/ext2/super.c
61338@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61339 #ifdef CONFIG_EXT2_FS_XATTR
61340 if (test_opt(sb, XATTR_USER))
61341 seq_puts(seq, ",user_xattr");
61342- if (!test_opt(sb, XATTR_USER) &&
61343- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61344+ if (!test_opt(sb, XATTR_USER))
61345 seq_puts(seq, ",nouser_xattr");
61346- }
61347 #endif
61348
61349 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61350@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61351 if (def_mount_opts & EXT2_DEFM_UID16)
61352 set_opt(sbi->s_mount_opt, NO_UID32);
61353 #ifdef CONFIG_EXT2_FS_XATTR
61354- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61355- set_opt(sbi->s_mount_opt, XATTR_USER);
61356+ /* always enable user xattrs */
61357+ set_opt(sbi->s_mount_opt, XATTR_USER);
61358 #endif
61359 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61360 if (def_mount_opts & EXT2_DEFM_ACL)
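
Forcing XATTR_USER on (and reducing nouser_xattr to an explicit-option display) is likely in support of PaX's xattr-based flag marking, which stores per-binary flags in a user-namespace attribute. A userspace sketch under that assumption — the user.pax.flags name and the lowercase-disables convention come from the PaX tooling, not from this hunk:

    #include <sys/xattr.h>

    /* sketch: mark a binary's PaX flags via a user xattr;
     * 'm' disables MPROTECT for that binary, as one example */
    static int mark_binary(const char *path)
    {
            return setxattr(path, "user.pax.flags", "m", 1, 0);
    }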
61361diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61362index 9142614..97484fa 100644
61363--- a/fs/ext2/xattr.c
61364+++ b/fs/ext2/xattr.c
61365@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61366 struct buffer_head *bh = NULL;
61367 struct ext2_xattr_entry *entry;
61368 char *end;
61369- size_t rest = buffer_size;
61370+ size_t rest = buffer_size, total_size = 0;
61371 int error;
61372
61373 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61374@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61375 buffer += size;
61376 }
61377 rest -= size;
61378+ total_size += size;
61379 }
61380 }
61381- error = buffer_size - rest; /* total size */
61382+ error = total_size;
61383
61384 cleanup:
61385 brelse(bh);
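
The xattr list hunks (repeated for ext3 and ext4 below) replace the derive-by-subtraction total with an explicit accumulator. In the size-query path the caller passes a NULL buffer and buffer_size == 0, so every `rest -= size` wraps below zero; the subtraction still yields the right answer via modular arithmetic, but that wrap is exactly the kind of thing the size_overflow instrumentation flags, and total_size sidesteps it. A small demonstration:

    #include <stdio.h>

    int main(void)
    {
            size_t buffer_size = 0;                 /* size-query path */
            size_t rest = buffer_size, total_size = 0;
            size_t sizes[] = { 16, 24, 8 };

            for (int i = 0; i < 3; i++) {
                    rest -= sizes[i];               /* wraps through SIZE_MAX */
                    total_size += sizes[i];         /* never wraps */
            }
            printf("by subtraction: %zu\n", buffer_size - rest);  /* 48 */
            printf("accumulated:    %zu\n", total_size);          /* 48 */
            return 0;
    }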
61386diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61387index 158b5d4..2432610 100644
61388--- a/fs/ext3/balloc.c
61389+++ b/fs/ext3/balloc.c
61390@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61391
61392 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61393 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61394- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61395+ if (free_blocks < root_blocks + 1 &&
61396 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61397 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61398- !in_group_p (sbi->s_resgid))) {
61399+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61400 return 0;
61401 }
61402 return 1;
61403diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61404index 9b4e7d7..048d025 100644
61405--- a/fs/ext3/super.c
61406+++ b/fs/ext3/super.c
61407@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61408 #ifdef CONFIG_EXT3_FS_XATTR
61409 if (test_opt(sb, XATTR_USER))
61410 seq_puts(seq, ",user_xattr");
61411- if (!test_opt(sb, XATTR_USER) &&
61412- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61413+ if (!test_opt(sb, XATTR_USER))
61414 seq_puts(seq, ",nouser_xattr");
61415- }
61416 #endif
61417 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61418 if (test_opt(sb, POSIX_ACL))
61419@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61420 if (def_mount_opts & EXT3_DEFM_UID16)
61421 set_opt(sbi->s_mount_opt, NO_UID32);
61422 #ifdef CONFIG_EXT3_FS_XATTR
61423- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61424- set_opt(sbi->s_mount_opt, XATTR_USER);
61425+ /* always enable user xattrs */
61426+ set_opt(sbi->s_mount_opt, XATTR_USER);
61427 #endif
61428 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61429 if (def_mount_opts & EXT3_DEFM_ACL)
61430diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61431index c6874be..f8a6ae8 100644
61432--- a/fs/ext3/xattr.c
61433+++ b/fs/ext3/xattr.c
61434@@ -330,7 +330,7 @@ static int
61435 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61436 char *buffer, size_t buffer_size)
61437 {
61438- size_t rest = buffer_size;
61439+ size_t rest = buffer_size, total_size = 0;
61440
61441 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61442 const struct xattr_handler *handler =
61443@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61444 buffer += size;
61445 }
61446 rest -= size;
61447+ total_size += size;
61448 }
61449 }
61450- return buffer_size - rest;
61451+ return total_size;
61452 }
61453
61454 static int
61455diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61456index 83a6f49..d4e4d03 100644
61457--- a/fs/ext4/balloc.c
61458+++ b/fs/ext4/balloc.c
61459@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61460 /* Hm, nope. Are (enough) root reserved clusters available? */
61461 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61462 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61463- capable(CAP_SYS_RESOURCE) ||
61464- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61465+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61466+ capable_nolog(CAP_SYS_RESOURCE)) {
61467
61468 if (free_clusters >= (nclusters + dirty_clusters +
61469 resv_clusters))
61470diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61471index a75fba6..8235fca 100644
61472--- a/fs/ext4/ext4.h
61473+++ b/fs/ext4/ext4.h
61474@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
61475 unsigned long s_mb_last_start;
61476
61477 /* stats for buddy allocator */
61478- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61479- atomic_t s_bal_success; /* we found long enough chunks */
61480- atomic_t s_bal_allocated; /* in blocks */
61481- atomic_t s_bal_ex_scanned; /* total extents scanned */
61482- atomic_t s_bal_goals; /* goal hits */
61483- atomic_t s_bal_breaks; /* too long searches */
61484- atomic_t s_bal_2orders; /* 2^order hits */
61485+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61486+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61487+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61488+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61489+ atomic_unchecked_t s_bal_goals; /* goal hits */
61490+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61491+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61492 spinlock_t s_bal_lock;
61493 unsigned long s_mb_buddies_generated;
61494 unsigned long long s_mb_generation_time;
61495- atomic_t s_mb_lost_chunks;
61496- atomic_t s_mb_preallocated;
61497- atomic_t s_mb_discarded;
61498+ atomic_unchecked_t s_mb_lost_chunks;
61499+ atomic_unchecked_t s_mb_preallocated;
61500+ atomic_unchecked_t s_mb_discarded;
61501 atomic_t s_lock_busy;
61502
61503 /* locality groups */
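
The s_bal_*/s_mb_* fields are pure statistics, so under PAX_REFCOUNT — which makes atomic_t trap on overflow to catch reference-count wraps — they move to the unchecked variant that keeps wrapping semantics; note that s_lock_busy stays a plain atomic_t. A simplified model of the type split, using GCC builtins rather than the kernel's per-arch implementations:

    /* sketch: a wrap-tolerant counter type for statistics */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            /* plain wrapping add -- no overflow trap, unlike the
             * PAX_REFCOUNT-protected atomic_t operations */
            __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
    }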
61504diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61505index 8d1e602..abf497b 100644
61506--- a/fs/ext4/mballoc.c
61507+++ b/fs/ext4/mballoc.c
61508@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61509 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61510
61511 if (EXT4_SB(sb)->s_mb_stats)
61512- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61513+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61514
61515 break;
61516 }
61517@@ -2211,7 +2211,7 @@ repeat:
61518 ac->ac_status = AC_STATUS_CONTINUE;
61519 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61520 cr = 3;
61521- atomic_inc(&sbi->s_mb_lost_chunks);
61522+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61523 goto repeat;
61524 }
61525 }
61526@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
61527 if (sbi->s_mb_stats) {
61528 ext4_msg(sb, KERN_INFO,
61529 "mballoc: %u blocks %u reqs (%u success)",
61530- atomic_read(&sbi->s_bal_allocated),
61531- atomic_read(&sbi->s_bal_reqs),
61532- atomic_read(&sbi->s_bal_success));
61533+ atomic_read_unchecked(&sbi->s_bal_allocated),
61534+ atomic_read_unchecked(&sbi->s_bal_reqs),
61535+ atomic_read_unchecked(&sbi->s_bal_success));
61536 ext4_msg(sb, KERN_INFO,
61537 "mballoc: %u extents scanned, %u goal hits, "
61538 "%u 2^N hits, %u breaks, %u lost",
61539- atomic_read(&sbi->s_bal_ex_scanned),
61540- atomic_read(&sbi->s_bal_goals),
61541- atomic_read(&sbi->s_bal_2orders),
61542- atomic_read(&sbi->s_bal_breaks),
61543- atomic_read(&sbi->s_mb_lost_chunks));
61544+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61545+ atomic_read_unchecked(&sbi->s_bal_goals),
61546+ atomic_read_unchecked(&sbi->s_bal_2orders),
61547+ atomic_read_unchecked(&sbi->s_bal_breaks),
61548+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61549 ext4_msg(sb, KERN_INFO,
61550 "mballoc: %lu generated and it took %Lu",
61551 sbi->s_mb_buddies_generated,
61552 sbi->s_mb_generation_time);
61553 ext4_msg(sb, KERN_INFO,
61554 "mballoc: %u preallocated, %u discarded",
61555- atomic_read(&sbi->s_mb_preallocated),
61556- atomic_read(&sbi->s_mb_discarded));
61557+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61558+ atomic_read_unchecked(&sbi->s_mb_discarded));
61559 }
61560
61561 free_percpu(sbi->s_locality_groups);
61562@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61563 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61564
61565 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61566- atomic_inc(&sbi->s_bal_reqs);
61567- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61568+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61569+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61570 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61571- atomic_inc(&sbi->s_bal_success);
61572- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61573+ atomic_inc_unchecked(&sbi->s_bal_success);
61574+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61575 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61576 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61577- atomic_inc(&sbi->s_bal_goals);
61578+ atomic_inc_unchecked(&sbi->s_bal_goals);
61579 if (ac->ac_found > sbi->s_mb_max_to_scan)
61580- atomic_inc(&sbi->s_bal_breaks);
61581+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61582 }
61583
61584 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61585@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
61586 trace_ext4_mb_new_inode_pa(ac, pa);
61587
61588 ext4_mb_use_inode_pa(ac, pa);
61589- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
61590+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
61591
61592 ei = EXT4_I(ac->ac_inode);
61593 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61594@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
61595 trace_ext4_mb_new_group_pa(ac, pa);
61596
61597 ext4_mb_use_group_pa(ac, pa);
61598- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61599+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61600
61601 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61602 lg = ac->ac_lg;
61603@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
61604 * from the bitmap and continue.
61605 */
61606 }
61607- atomic_add(free, &sbi->s_mb_discarded);
61608+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
61609
61610 return err;
61611 }
61612@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
61613 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
61614 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
61615 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
61616- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61617+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61618 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
61619
61620 return 0;
61621diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
61622index 8313ca3..8a37d08 100644
61623--- a/fs/ext4/mmp.c
61624+++ b/fs/ext4/mmp.c
61625@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
61626 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
61627 const char *function, unsigned int line, const char *msg)
61628 {
61629- __ext4_warning(sb, function, line, msg);
61630+ __ext4_warning(sb, function, line, "%s", msg);
61631 __ext4_warning(sb, function, line,
61632 "MMP failure info: last update time: %llu, last update "
61633 "node: %s, last update device: %s\n",
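
The mmp.c change is a classic format-string fix: msg arrives from the caller, and feeding it to the format argument would let any '%' specifiers in it walk nonexistent varargs. The pattern in miniature:

    #include <stdio.h>

    static void log_msg(const char *msg)
    {
            /* printf(msg);       bad: '%' sequences in msg get interpreted */
            printf("%s", msg);  /* good: msg is treated as plain data */
    }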
61634diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
61635index 8a8ec62..1b02de5 100644
61636--- a/fs/ext4/resize.c
61637+++ b/fs/ext4/resize.c
61638@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61639
61640 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
61641 for (count2 = count; count > 0; count -= count2, block += count2) {
61642- ext4_fsblk_t start;
61643+ ext4_fsblk_t start, diff;
61644 struct buffer_head *bh;
61645 ext4_group_t group;
61646 int err;
61647@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61648 start = ext4_group_first_block_no(sb, group);
61649 group -= flex_gd->groups[0].group;
61650
61651- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
61652- if (count2 > count)
61653- count2 = count;
61654-
61655 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
61656 BUG_ON(flex_gd->count > 1);
61657 continue;
61658@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61659 err = ext4_journal_get_write_access(handle, bh);
61660 if (err)
61661 return err;
61662+
61663+ diff = block - start;
61664+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
61665+ if (count2 > count)
61666+ count2 = count;
61667+
61668 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
61669- block - start, count2);
61670- ext4_set_bits(bh->b_data, block - start, count2);
61671+ diff, count2);
61672+ ext4_set_bits(bh->b_data, diff, count2);
61673
61674 err = ext4_handle_dirty_metadata(handle, NULL, bh);
61675 if (unlikely(err))
61676diff --git a/fs/ext4/super.c b/fs/ext4/super.c
61677index fc29b2c..6c8b255 100644
61678--- a/fs/ext4/super.c
61679+++ b/fs/ext4/super.c
61680@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
61681 }
61682
61683 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
61684-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61685+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61686 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
61687
61688 #ifdef CONFIG_QUOTA
61689@@ -2440,7 +2440,7 @@ struct ext4_attr {
61690 int offset;
61691 int deprecated_val;
61692 } u;
61693-};
61694+} __do_const;
61695
61696 static int parse_strtoull(const char *buf,
61697 unsigned long long max, unsigned long long *value)
61698diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
61699index 1e09fc7..0400dd4 100644
61700--- a/fs/ext4/xattr.c
61701+++ b/fs/ext4/xattr.c
61702@@ -399,7 +399,7 @@ static int
61703 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61704 char *buffer, size_t buffer_size)
61705 {
61706- size_t rest = buffer_size;
61707+ size_t rest = buffer_size, total_size = 0;
61708
61709 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
61710 const struct xattr_handler *handler =
61711@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61712 buffer += size;
61713 }
61714 rest -= size;
61715+ total_size += size;
61716 }
61717 }
61718- return buffer_size - rest;
61719+ return total_size;
61720 }
61721
61722 static int
61723diff --git a/fs/fcntl.c b/fs/fcntl.c
61724index ee85cd4..9dd0d20 100644
61725--- a/fs/fcntl.c
61726+++ b/fs/fcntl.c
61727@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
61728 int force)
61729 {
61730 security_file_set_fowner(filp);
61731+ if (gr_handle_chroot_fowner(pid, type))
61732+ return;
61733+ if (gr_check_protected_task_fowner(pid, type))
61734+ return;
61735 f_modown(filp, pid, type, force);
61736 }
61737 EXPORT_SYMBOL(__f_setown);
61738diff --git a/fs/fhandle.c b/fs/fhandle.c
61739index 999ff5c..2281df9 100644
61740--- a/fs/fhandle.c
61741+++ b/fs/fhandle.c
61742@@ -8,6 +8,7 @@
61743 #include <linux/fs_struct.h>
61744 #include <linux/fsnotify.h>
61745 #include <linux/personality.h>
61746+#include <linux/grsecurity.h>
61747 #include <asm/uaccess.h>
61748 #include "internal.h"
61749 #include "mount.h"
61750@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
61751 } else
61752 retval = 0;
61753 /* copy the mount id */
61754- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
61755- sizeof(*mnt_id)) ||
61756+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
61757 copy_to_user(ufh, handle,
61758 sizeof(struct file_handle) + handle_bytes))
61759 retval = -EFAULT;
61760@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61761 * the directory. Ideally we would like CAP_DAC_SEARCH.
61762 * But we don't have that
61763 */
61764- if (!capable(CAP_DAC_READ_SEARCH)) {
61765+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
61766 retval = -EPERM;
61767 goto out_err;
61768 }
61769@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61770 goto out_err;
61771 }
61772 /* copy the full handle */
61773- if (copy_from_user(handle, ufh,
61774- sizeof(struct file_handle) +
61775+ *handle = f_handle;
61776+ if (copy_from_user(&handle->f_handle,
61777+ &ufh->f_handle,
61778 f_handle.handle_bytes)) {
61779 retval = -EFAULT;
61780 goto out_handle;
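
Two fixes in this hunk: put_user() replaces a copy_to_user() of a single int (same effect, simpler fast path), and the handle copy is restructured to avoid a double fetch — the original re-copied the whole struct, header included, so a racing thread could grow handle_bytes between the validated first read and the second one. The safe shape, sketched kernel-style with the names from the hunk:

    /* sketch: trust only the header validated on the first fetch */
    static long fetch_handle(struct file_handle *handle,
                             struct file_handle __user *ufh,
                             struct file_handle f_handle /* checked earlier */)
    {
            *handle = f_handle;     /* header comes from kernel memory */
            if (copy_from_user(&handle->f_handle, &ufh->f_handle,
                               f_handle.handle_bytes))
                    return -EFAULT; /* payload only -- handle_bytes can no
                                       longer change under us */
            return 0;
    }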
61781diff --git a/fs/file.c b/fs/file.c
61782index ee738ea..f6c1562 100644
61783--- a/fs/file.c
61784+++ b/fs/file.c
61785@@ -16,6 +16,7 @@
61786 #include <linux/slab.h>
61787 #include <linux/vmalloc.h>
61788 #include <linux/file.h>
61789+#include <linux/security.h>
61790 #include <linux/fdtable.h>
61791 #include <linux/bitops.h>
61792 #include <linux/interrupt.h>
61793@@ -139,7 +140,7 @@ out:
61794 * Return <0 error code on error; 1 on successful completion.
61795 * The files->file_lock should be held on entry, and will be held on exit.
61796 */
61797-static int expand_fdtable(struct files_struct *files, int nr)
61798+static int expand_fdtable(struct files_struct *files, unsigned int nr)
61799 __releases(files->file_lock)
61800 __acquires(files->file_lock)
61801 {
61802@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
61803 * expanded and execution may have blocked.
61804 * The files->file_lock should be held on entry, and will be held on exit.
61805 */
61806-static int expand_files(struct files_struct *files, int nr)
61807+static int expand_files(struct files_struct *files, unsigned int nr)
61808 {
61809 struct fdtable *fdt;
61810
61811@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
61812 if (!file)
61813 return __close_fd(files, fd);
61814
61815+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
61816 if (fd >= rlimit(RLIMIT_NOFILE))
61817 return -EBADF;
61818
61819@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
61820 if (unlikely(oldfd == newfd))
61821 return -EINVAL;
61822
61823+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
61824 if (newfd >= rlimit(RLIMIT_NOFILE))
61825 return -EBADF;
61826
61827@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
61828 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
61829 {
61830 int err;
61831+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
61832 if (from >= rlimit(RLIMIT_NOFILE))
61833 return -EINVAL;
61834 err = alloc_fd(from, flags);
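
expand_fdtable()/expand_files() now take the descriptor number as unsigned int: with a signed nr, a negative value can compare as less-than a limit and slip through, whereas the unsigned conversion turns it into a huge value that the bound check rejects. The gr_learn_resource() calls feed grsec's resource-learning mode before each RLIMIT_NOFILE check. The signedness point, demonstrated in miniature:

    #include <stdio.h>

    static int ok_signed(int nr, int limit)             { return nr < limit; }
    static int ok_unsigned(unsigned nr, unsigned limit) { return nr < limit; }

    int main(void)
    {
            printf("signed -1:   %d\n", ok_signed(-1, 1024));   /* 1: passes */
            printf("unsigned -1: %d\n", ok_unsigned(-1, 1024)); /* 0: rejected */
            return 0;
    }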
61835diff --git a/fs/filesystems.c b/fs/filesystems.c
61836index 5797d45..7d7d79a 100644
61837--- a/fs/filesystems.c
61838+++ b/fs/filesystems.c
61839@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
61840 int len = dot ? dot - name : strlen(name);
61841
61842 fs = __get_fs_type(name, len);
61843+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61844+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
61845+#else
61846 if (!fs && (request_module("fs-%.*s", len, name) == 0))
61847+#endif
61848 fs = __get_fs_type(name, len);
61849
61850 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
61851diff --git a/fs/fs_struct.c b/fs/fs_struct.c
61852index 7dca743..2f2786d 100644
61853--- a/fs/fs_struct.c
61854+++ b/fs/fs_struct.c
61855@@ -4,6 +4,7 @@
61856 #include <linux/path.h>
61857 #include <linux/slab.h>
61858 #include <linux/fs_struct.h>
61859+#include <linux/grsecurity.h>
61860 #include "internal.h"
61861
61862 /*
61863@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
61864 struct path old_root;
61865
61866 path_get(path);
61867+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
61868 spin_lock(&fs->lock);
61869 write_seqcount_begin(&fs->seq);
61870 old_root = fs->root;
61871 fs->root = *path;
61872+ gr_set_chroot_entries(current, path);
61873 write_seqcount_end(&fs->seq);
61874 spin_unlock(&fs->lock);
61875- if (old_root.dentry)
61876+ if (old_root.dentry) {
61877+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
61878 path_put(&old_root);
61879+ }
61880 }
61881
61882 /*
61883@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61884 int hits = 0;
61885 spin_lock(&fs->lock);
61886 write_seqcount_begin(&fs->seq);
61887+ /* this root replacement is only done by pivot_root,
61888+ leave grsec's chroot tagging alone for this task
61889+ so that a pivoted root isn't treated as a chroot
61890+ */
61891 hits += replace_path(&fs->root, old_root, new_root);
61892 hits += replace_path(&fs->pwd, old_root, new_root);
61893 write_seqcount_end(&fs->seq);
61894@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61895
61896 void free_fs_struct(struct fs_struct *fs)
61897 {
61898+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
61899 path_put(&fs->root);
61900 path_put(&fs->pwd);
61901 kmem_cache_free(fs_cachep, fs);
61902@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
61903 task_lock(tsk);
61904 spin_lock(&fs->lock);
61905 tsk->fs = NULL;
61906- kill = !--fs->users;
61907+ gr_clear_chroot_entries(tsk);
61908+ kill = !atomic_dec_return(&fs->users);
61909 spin_unlock(&fs->lock);
61910 task_unlock(tsk);
61911 if (kill)
61912@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
61913 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
61914 /* We don't need to lock fs - think why ;-) */
61915 if (fs) {
61916- fs->users = 1;
61917+ atomic_set(&fs->users, 1);
61918 fs->in_exec = 0;
61919 spin_lock_init(&fs->lock);
61920 seqcount_init(&fs->seq);
61921@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
61922 spin_lock(&old->lock);
61923 fs->root = old->root;
61924 path_get(&fs->root);
61925+ /* instead of calling gr_set_chroot_entries here,
61926+ we call it from every caller of this function
61927+ */
61928 fs->pwd = old->pwd;
61929 path_get(&fs->pwd);
61930 spin_unlock(&old->lock);
61931@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
61932
61933 task_lock(current);
61934 spin_lock(&fs->lock);
61935- kill = !--fs->users;
61936+ kill = !atomic_dec_return(&fs->users);
61937 current->fs = new_fs;
61938+ gr_set_chroot_entries(current, &new_fs->root);
61939 spin_unlock(&fs->lock);
61940 task_unlock(current);
61941
61942@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
61943
61944 int current_umask(void)
61945 {
61946- return current->fs->umask;
61947+ return current->fs->umask | gr_acl_umask();
61948 }
61949 EXPORT_SYMBOL(current_umask);
61950
61951 /* to be mentioned only in INIT_TASK */
61952 struct fs_struct init_fs = {
61953- .users = 1,
61954+ .users = ATOMIC_INIT(1),
61955 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
61956 .seq = SEQCNT_ZERO(init_fs.seq),
61957 .umask = 0022,
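
fs->users becomes an atomic_t, presumably so grsec code can adjust it outside fs->lock, and the release sites switch to the drop-last-reference idiom kill = !atomic_dec_return(&fs->users). The same idiom in C11 form (fetch_sub returns the value before the decrement, so old == 1 means the final reference was just dropped):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct shared { atomic_int users; /* ... payload ... */ };

    static void put_shared(struct shared *s)
    {
            /* returns the value before the decrement */
            if (atomic_fetch_sub(&s->users, 1) == 1)
                    free(s);        /* we held the last reference */
    }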
61958diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
61959index 89acec7..a575262 100644
61960--- a/fs/fscache/cookie.c
61961+++ b/fs/fscache/cookie.c
61962@@ -19,7 +19,7 @@
61963
61964 struct kmem_cache *fscache_cookie_jar;
61965
61966-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
61967+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
61968
61969 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
61970 static int fscache_alloc_object(struct fscache_cache *cache,
61971@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
61972 parent ? (char *) parent->def->name : "<no-parent>",
61973 def->name, netfs_data, enable);
61974
61975- fscache_stat(&fscache_n_acquires);
61976+ fscache_stat_unchecked(&fscache_n_acquires);
61977
61978 /* if there's no parent cookie, then we don't create one here either */
61979 if (!parent) {
61980- fscache_stat(&fscache_n_acquires_null);
61981+ fscache_stat_unchecked(&fscache_n_acquires_null);
61982 _leave(" [no parent]");
61983 return NULL;
61984 }
61985@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
61986 /* allocate and initialise a cookie */
61987 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
61988 if (!cookie) {
61989- fscache_stat(&fscache_n_acquires_oom);
61990+ fscache_stat_unchecked(&fscache_n_acquires_oom);
61991 _leave(" [ENOMEM]");
61992 return NULL;
61993 }
61994@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
61995
61996 switch (cookie->def->type) {
61997 case FSCACHE_COOKIE_TYPE_INDEX:
61998- fscache_stat(&fscache_n_cookie_index);
61999+ fscache_stat_unchecked(&fscache_n_cookie_index);
62000 break;
62001 case FSCACHE_COOKIE_TYPE_DATAFILE:
62002- fscache_stat(&fscache_n_cookie_data);
62003+ fscache_stat_unchecked(&fscache_n_cookie_data);
62004 break;
62005 default:
62006- fscache_stat(&fscache_n_cookie_special);
62007+ fscache_stat_unchecked(&fscache_n_cookie_special);
62008 break;
62009 }
62010
62011@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62012 } else {
62013 atomic_dec(&parent->n_children);
62014 __fscache_cookie_put(cookie);
62015- fscache_stat(&fscache_n_acquires_nobufs);
62016+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62017 _leave(" = NULL");
62018 return NULL;
62019 }
62020@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62021 }
62022 }
62023
62024- fscache_stat(&fscache_n_acquires_ok);
62025+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62026 _leave(" = %p", cookie);
62027 return cookie;
62028 }
62029@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62030 cache = fscache_select_cache_for_object(cookie->parent);
62031 if (!cache) {
62032 up_read(&fscache_addremove_sem);
62033- fscache_stat(&fscache_n_acquires_no_cache);
62034+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62035 _leave(" = -ENOMEDIUM [no cache]");
62036 return -ENOMEDIUM;
62037 }
62038@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62039 object = cache->ops->alloc_object(cache, cookie);
62040 fscache_stat_d(&fscache_n_cop_alloc_object);
62041 if (IS_ERR(object)) {
62042- fscache_stat(&fscache_n_object_no_alloc);
62043+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62044 ret = PTR_ERR(object);
62045 goto error;
62046 }
62047
62048- fscache_stat(&fscache_n_object_alloc);
62049+ fscache_stat_unchecked(&fscache_n_object_alloc);
62050
62051- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62052+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62053
62054 _debug("ALLOC OBJ%x: %s {%lx}",
62055 object->debug_id, cookie->def->name, object->events);
62056@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62057
62058 _enter("{%s}", cookie->def->name);
62059
62060- fscache_stat(&fscache_n_invalidates);
62061+ fscache_stat_unchecked(&fscache_n_invalidates);
62062
62063 /* Only permit invalidation of data files. Invalidating an index will
62064 * require the caller to release all its attachments to the tree rooted
62065@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62066 {
62067 struct fscache_object *object;
62068
62069- fscache_stat(&fscache_n_updates);
62070+ fscache_stat_unchecked(&fscache_n_updates);
62071
62072 if (!cookie) {
62073- fscache_stat(&fscache_n_updates_null);
62074+ fscache_stat_unchecked(&fscache_n_updates_null);
62075 _leave(" [no cookie]");
62076 return;
62077 }
62078@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62079 */
62080 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62081 {
62082- fscache_stat(&fscache_n_relinquishes);
62083+ fscache_stat_unchecked(&fscache_n_relinquishes);
62084 if (retire)
62085- fscache_stat(&fscache_n_relinquishes_retire);
62086+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62087
62088 if (!cookie) {
62089- fscache_stat(&fscache_n_relinquishes_null);
62090+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62091 _leave(" [no cookie]");
62092 return;
62093 }
62094@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62095 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62096 goto inconsistent;
62097
62098- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62099+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62100
62101 __fscache_use_cookie(cookie);
62102 if (fscache_submit_op(object, op) < 0)
62103diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62104index 7872a62..d91b19f 100644
62105--- a/fs/fscache/internal.h
62106+++ b/fs/fscache/internal.h
62107@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62108 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62109 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62110 struct fscache_operation *,
62111- atomic_t *,
62112- atomic_t *,
62113+ atomic_unchecked_t *,
62114+ atomic_unchecked_t *,
62115 void (*)(struct fscache_operation *));
62116 extern void fscache_invalidate_writes(struct fscache_cookie *);
62117
62118@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62119 * stats.c
62120 */
62121 #ifdef CONFIG_FSCACHE_STATS
62122-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62123-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62124+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62125+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62126
62127-extern atomic_t fscache_n_op_pend;
62128-extern atomic_t fscache_n_op_run;
62129-extern atomic_t fscache_n_op_enqueue;
62130-extern atomic_t fscache_n_op_deferred_release;
62131-extern atomic_t fscache_n_op_release;
62132-extern atomic_t fscache_n_op_gc;
62133-extern atomic_t fscache_n_op_cancelled;
62134-extern atomic_t fscache_n_op_rejected;
62135+extern atomic_unchecked_t fscache_n_op_pend;
62136+extern atomic_unchecked_t fscache_n_op_run;
62137+extern atomic_unchecked_t fscache_n_op_enqueue;
62138+extern atomic_unchecked_t fscache_n_op_deferred_release;
62139+extern atomic_unchecked_t fscache_n_op_release;
62140+extern atomic_unchecked_t fscache_n_op_gc;
62141+extern atomic_unchecked_t fscache_n_op_cancelled;
62142+extern atomic_unchecked_t fscache_n_op_rejected;
62143
62144-extern atomic_t fscache_n_attr_changed;
62145-extern atomic_t fscache_n_attr_changed_ok;
62146-extern atomic_t fscache_n_attr_changed_nobufs;
62147-extern atomic_t fscache_n_attr_changed_nomem;
62148-extern atomic_t fscache_n_attr_changed_calls;
62149+extern atomic_unchecked_t fscache_n_attr_changed;
62150+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62151+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62152+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62153+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62154
62155-extern atomic_t fscache_n_allocs;
62156-extern atomic_t fscache_n_allocs_ok;
62157-extern atomic_t fscache_n_allocs_wait;
62158-extern atomic_t fscache_n_allocs_nobufs;
62159-extern atomic_t fscache_n_allocs_intr;
62160-extern atomic_t fscache_n_allocs_object_dead;
62161-extern atomic_t fscache_n_alloc_ops;
62162-extern atomic_t fscache_n_alloc_op_waits;
62163+extern atomic_unchecked_t fscache_n_allocs;
62164+extern atomic_unchecked_t fscache_n_allocs_ok;
62165+extern atomic_unchecked_t fscache_n_allocs_wait;
62166+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62167+extern atomic_unchecked_t fscache_n_allocs_intr;
62168+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62169+extern atomic_unchecked_t fscache_n_alloc_ops;
62170+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62171
62172-extern atomic_t fscache_n_retrievals;
62173-extern atomic_t fscache_n_retrievals_ok;
62174-extern atomic_t fscache_n_retrievals_wait;
62175-extern atomic_t fscache_n_retrievals_nodata;
62176-extern atomic_t fscache_n_retrievals_nobufs;
62177-extern atomic_t fscache_n_retrievals_intr;
62178-extern atomic_t fscache_n_retrievals_nomem;
62179-extern atomic_t fscache_n_retrievals_object_dead;
62180-extern atomic_t fscache_n_retrieval_ops;
62181-extern atomic_t fscache_n_retrieval_op_waits;
62182+extern atomic_unchecked_t fscache_n_retrievals;
62183+extern atomic_unchecked_t fscache_n_retrievals_ok;
62184+extern atomic_unchecked_t fscache_n_retrievals_wait;
62185+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62186+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62187+extern atomic_unchecked_t fscache_n_retrievals_intr;
62188+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62189+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62190+extern atomic_unchecked_t fscache_n_retrieval_ops;
62191+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62192
62193-extern atomic_t fscache_n_stores;
62194-extern atomic_t fscache_n_stores_ok;
62195-extern atomic_t fscache_n_stores_again;
62196-extern atomic_t fscache_n_stores_nobufs;
62197-extern atomic_t fscache_n_stores_oom;
62198-extern atomic_t fscache_n_store_ops;
62199-extern atomic_t fscache_n_store_calls;
62200-extern atomic_t fscache_n_store_pages;
62201-extern atomic_t fscache_n_store_radix_deletes;
62202-extern atomic_t fscache_n_store_pages_over_limit;
62203+extern atomic_unchecked_t fscache_n_stores;
62204+extern atomic_unchecked_t fscache_n_stores_ok;
62205+extern atomic_unchecked_t fscache_n_stores_again;
62206+extern atomic_unchecked_t fscache_n_stores_nobufs;
62207+extern atomic_unchecked_t fscache_n_stores_oom;
62208+extern atomic_unchecked_t fscache_n_store_ops;
62209+extern atomic_unchecked_t fscache_n_store_calls;
62210+extern atomic_unchecked_t fscache_n_store_pages;
62211+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62212+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62213
62214-extern atomic_t fscache_n_store_vmscan_not_storing;
62215-extern atomic_t fscache_n_store_vmscan_gone;
62216-extern atomic_t fscache_n_store_vmscan_busy;
62217-extern atomic_t fscache_n_store_vmscan_cancelled;
62218-extern atomic_t fscache_n_store_vmscan_wait;
62219+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62220+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62221+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62222+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62223+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62224
62225-extern atomic_t fscache_n_marks;
62226-extern atomic_t fscache_n_uncaches;
62227+extern atomic_unchecked_t fscache_n_marks;
62228+extern atomic_unchecked_t fscache_n_uncaches;
62229
62230-extern atomic_t fscache_n_acquires;
62231-extern atomic_t fscache_n_acquires_null;
62232-extern atomic_t fscache_n_acquires_no_cache;
62233-extern atomic_t fscache_n_acquires_ok;
62234-extern atomic_t fscache_n_acquires_nobufs;
62235-extern atomic_t fscache_n_acquires_oom;
62236+extern atomic_unchecked_t fscache_n_acquires;
62237+extern atomic_unchecked_t fscache_n_acquires_null;
62238+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62239+extern atomic_unchecked_t fscache_n_acquires_ok;
62240+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62241+extern atomic_unchecked_t fscache_n_acquires_oom;
62242
62243-extern atomic_t fscache_n_invalidates;
62244-extern atomic_t fscache_n_invalidates_run;
62245+extern atomic_unchecked_t fscache_n_invalidates;
62246+extern atomic_unchecked_t fscache_n_invalidates_run;
62247
62248-extern atomic_t fscache_n_updates;
62249-extern atomic_t fscache_n_updates_null;
62250-extern atomic_t fscache_n_updates_run;
62251+extern atomic_unchecked_t fscache_n_updates;
62252+extern atomic_unchecked_t fscache_n_updates_null;
62253+extern atomic_unchecked_t fscache_n_updates_run;
62254
62255-extern atomic_t fscache_n_relinquishes;
62256-extern atomic_t fscache_n_relinquishes_null;
62257-extern atomic_t fscache_n_relinquishes_waitcrt;
62258-extern atomic_t fscache_n_relinquishes_retire;
62259+extern atomic_unchecked_t fscache_n_relinquishes;
62260+extern atomic_unchecked_t fscache_n_relinquishes_null;
62261+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62262+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62263
62264-extern atomic_t fscache_n_cookie_index;
62265-extern atomic_t fscache_n_cookie_data;
62266-extern atomic_t fscache_n_cookie_special;
62267+extern atomic_unchecked_t fscache_n_cookie_index;
62268+extern atomic_unchecked_t fscache_n_cookie_data;
62269+extern atomic_unchecked_t fscache_n_cookie_special;
62270
62271-extern atomic_t fscache_n_object_alloc;
62272-extern atomic_t fscache_n_object_no_alloc;
62273-extern atomic_t fscache_n_object_lookups;
62274-extern atomic_t fscache_n_object_lookups_negative;
62275-extern atomic_t fscache_n_object_lookups_positive;
62276-extern atomic_t fscache_n_object_lookups_timed_out;
62277-extern atomic_t fscache_n_object_created;
62278-extern atomic_t fscache_n_object_avail;
62279-extern atomic_t fscache_n_object_dead;
62280+extern atomic_unchecked_t fscache_n_object_alloc;
62281+extern atomic_unchecked_t fscache_n_object_no_alloc;
62282+extern atomic_unchecked_t fscache_n_object_lookups;
62283+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62284+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62285+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62286+extern atomic_unchecked_t fscache_n_object_created;
62287+extern atomic_unchecked_t fscache_n_object_avail;
62288+extern atomic_unchecked_t fscache_n_object_dead;
62289
62290-extern atomic_t fscache_n_checkaux_none;
62291-extern atomic_t fscache_n_checkaux_okay;
62292-extern atomic_t fscache_n_checkaux_update;
62293-extern atomic_t fscache_n_checkaux_obsolete;
62294+extern atomic_unchecked_t fscache_n_checkaux_none;
62295+extern atomic_unchecked_t fscache_n_checkaux_okay;
62296+extern atomic_unchecked_t fscache_n_checkaux_update;
62297+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62298
62299 extern atomic_t fscache_n_cop_alloc_object;
62300 extern atomic_t fscache_n_cop_lookup_object;
62301@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62302 atomic_inc(stat);
62303 }
62304
62305+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62306+{
62307+ atomic_inc_unchecked(stat);
62308+}
62309+
62310 static inline void fscache_stat_d(atomic_t *stat)
62311 {
62312 atomic_dec(stat);
62313@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62314
62315 #define __fscache_stat(stat) (NULL)
62316 #define fscache_stat(stat) do {} while (0)
62317+#define fscache_stat_unchecked(stat) do {} while (0)
62318 #define fscache_stat_d(stat) do {} while (0)
62319 #endif
62320
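
From here on the fscache changes are uniform: every statistics counter becomes atomic_unchecked_t and every fscache_stat() on one becomes fscache_stat_unchecked(), with a matching no-op macro for !CONFIG_FSCACHE_STATS builds. The no-op is spelled do {} while (0) for the usual reason — it stays a single statement, so it composes with unbraced if/else:

    /* sketch: why the disabled-stats stub is 'do {} while (0)' */
    #define stat_inc(stat) do {} while (0)

    static int hits, misses;

    static void record(int hit)
    {
            if (hit)
                    stat_inc(&hits);        /* still one statement, so the
                                               else binds correctly */
            else
                    misses++;
    }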
62321diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62322index da032da..0076ce7 100644
62323--- a/fs/fscache/object.c
62324+++ b/fs/fscache/object.c
62325@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62326 _debug("LOOKUP \"%s\" in \"%s\"",
62327 cookie->def->name, object->cache->tag->name);
62328
62329- fscache_stat(&fscache_n_object_lookups);
62330+ fscache_stat_unchecked(&fscache_n_object_lookups);
62331 fscache_stat(&fscache_n_cop_lookup_object);
62332 ret = object->cache->ops->lookup_object(object);
62333 fscache_stat_d(&fscache_n_cop_lookup_object);
62334@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62335 if (ret == -ETIMEDOUT) {
62336 /* probably stuck behind another object, so move this one to
62337 * the back of the queue */
62338- fscache_stat(&fscache_n_object_lookups_timed_out);
62339+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62340 _leave(" [timeout]");
62341 return NO_TRANSIT;
62342 }
62343@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62344 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62345
62346 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62347- fscache_stat(&fscache_n_object_lookups_negative);
62348+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62349
62350 /* Allow write requests to begin stacking up and read requests to begin
62351 * returning ENODATA.
62352@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62353 /* if we were still looking up, then we must have a positive lookup
62354 * result, in which case there may be data available */
62355 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62356- fscache_stat(&fscache_n_object_lookups_positive);
62357+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62358
62359 /* We do (presumably) have data */
62360 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62361@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62362 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62363 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62364 } else {
62365- fscache_stat(&fscache_n_object_created);
62366+ fscache_stat_unchecked(&fscache_n_object_created);
62367 }
62368
62369 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62370@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62371 fscache_stat_d(&fscache_n_cop_lookup_complete);
62372
62373 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62374- fscache_stat(&fscache_n_object_avail);
62375+ fscache_stat_unchecked(&fscache_n_object_avail);
62376
62377 _leave("");
62378 return transit_to(JUMPSTART_DEPS);
62379@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62380
62381 /* this just shifts the object release to the work processor */
62382 fscache_put_object(object);
62383- fscache_stat(&fscache_n_object_dead);
62384+ fscache_stat_unchecked(&fscache_n_object_dead);
62385
62386 _leave("");
62387 return transit_to(OBJECT_DEAD);
62388@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62389 enum fscache_checkaux result;
62390
62391 if (!object->cookie->def->check_aux) {
62392- fscache_stat(&fscache_n_checkaux_none);
62393+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62394 return FSCACHE_CHECKAUX_OKAY;
62395 }
62396
62397@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62398 switch (result) {
62399 /* entry okay as is */
62400 case FSCACHE_CHECKAUX_OKAY:
62401- fscache_stat(&fscache_n_checkaux_okay);
62402+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62403 break;
62404
62405 /* entry requires update */
62406 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62407- fscache_stat(&fscache_n_checkaux_update);
62408+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62409 break;
62410
62411 /* entry requires deletion */
62412 case FSCACHE_CHECKAUX_OBSOLETE:
62413- fscache_stat(&fscache_n_checkaux_obsolete);
62414+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62415 break;
62416
62417 default:
62418@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62419 {
62420 const struct fscache_state *s;
62421
62422- fscache_stat(&fscache_n_invalidates_run);
62423+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62424 fscache_stat(&fscache_n_cop_invalidate_object);
62425 s = _fscache_invalidate_object(object, event);
62426 fscache_stat_d(&fscache_n_cop_invalidate_object);
62427@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62428 {
62429 _enter("{OBJ%x},%d", object->debug_id, event);
62430
62431- fscache_stat(&fscache_n_updates_run);
62432+ fscache_stat_unchecked(&fscache_n_updates_run);
62433 fscache_stat(&fscache_n_cop_update_object);
62434 object->cache->ops->update_object(object);
62435 fscache_stat_d(&fscache_n_cop_update_object);
62436diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62437index e7b87a0..a85d47a 100644
62438--- a/fs/fscache/operation.c
62439+++ b/fs/fscache/operation.c
62440@@ -17,7 +17,7 @@
62441 #include <linux/slab.h>
62442 #include "internal.h"
62443
62444-atomic_t fscache_op_debug_id;
62445+atomic_unchecked_t fscache_op_debug_id;
62446 EXPORT_SYMBOL(fscache_op_debug_id);
62447
62448 /**
62449@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62450 ASSERTCMP(atomic_read(&op->usage), >, 0);
62451 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62452
62453- fscache_stat(&fscache_n_op_enqueue);
62454+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62455 switch (op->flags & FSCACHE_OP_TYPE) {
62456 case FSCACHE_OP_ASYNC:
62457 _debug("queue async");
62458@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62459 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62460 if (op->processor)
62461 fscache_enqueue_operation(op);
62462- fscache_stat(&fscache_n_op_run);
62463+ fscache_stat_unchecked(&fscache_n_op_run);
62464 }
62465
62466 /*
62467@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62468 if (object->n_in_progress > 0) {
62469 atomic_inc(&op->usage);
62470 list_add_tail(&op->pend_link, &object->pending_ops);
62471- fscache_stat(&fscache_n_op_pend);
62472+ fscache_stat_unchecked(&fscache_n_op_pend);
62473 } else if (!list_empty(&object->pending_ops)) {
62474 atomic_inc(&op->usage);
62475 list_add_tail(&op->pend_link, &object->pending_ops);
62476- fscache_stat(&fscache_n_op_pend);
62477+ fscache_stat_unchecked(&fscache_n_op_pend);
62478 fscache_start_operations(object);
62479 } else {
62480 ASSERTCMP(object->n_in_progress, ==, 0);
62481@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62482 object->n_exclusive++; /* reads and writes must wait */
62483 atomic_inc(&op->usage);
62484 list_add_tail(&op->pend_link, &object->pending_ops);
62485- fscache_stat(&fscache_n_op_pend);
62486+ fscache_stat_unchecked(&fscache_n_op_pend);
62487 ret = 0;
62488 } else {
62489 /* If we're in any other state, there must have been an I/O
62490@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62491 if (object->n_exclusive > 0) {
62492 atomic_inc(&op->usage);
62493 list_add_tail(&op->pend_link, &object->pending_ops);
62494- fscache_stat(&fscache_n_op_pend);
62495+ fscache_stat_unchecked(&fscache_n_op_pend);
62496 } else if (!list_empty(&object->pending_ops)) {
62497 atomic_inc(&op->usage);
62498 list_add_tail(&op->pend_link, &object->pending_ops);
62499- fscache_stat(&fscache_n_op_pend);
62500+ fscache_stat_unchecked(&fscache_n_op_pend);
62501 fscache_start_operations(object);
62502 } else {
62503 ASSERTCMP(object->n_exclusive, ==, 0);
62504@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62505 object->n_ops++;
62506 atomic_inc(&op->usage);
62507 list_add_tail(&op->pend_link, &object->pending_ops);
62508- fscache_stat(&fscache_n_op_pend);
62509+ fscache_stat_unchecked(&fscache_n_op_pend);
62510 ret = 0;
62511 } else if (fscache_object_is_dying(object)) {
62512- fscache_stat(&fscache_n_op_rejected);
62513+ fscache_stat_unchecked(&fscache_n_op_rejected);
62514 op->state = FSCACHE_OP_ST_CANCELLED;
62515 ret = -ENOBUFS;
62516 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62517@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62518 ret = -EBUSY;
62519 if (op->state == FSCACHE_OP_ST_PENDING) {
62520 ASSERT(!list_empty(&op->pend_link));
62521- fscache_stat(&fscache_n_op_cancelled);
62522+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62523 list_del_init(&op->pend_link);
62524 if (do_cancel)
62525 do_cancel(op);
62526@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62527 while (!list_empty(&object->pending_ops)) {
62528 op = list_entry(object->pending_ops.next,
62529 struct fscache_operation, pend_link);
62530- fscache_stat(&fscache_n_op_cancelled);
62531+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62532 list_del_init(&op->pend_link);
62533
62534 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62535@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62536 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62537 op->state = FSCACHE_OP_ST_DEAD;
62538
62539- fscache_stat(&fscache_n_op_release);
62540+ fscache_stat_unchecked(&fscache_n_op_release);
62541
62542 if (op->release) {
62543 op->release(op);
62544@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62545 * lock, and defer it otherwise */
62546 if (!spin_trylock(&object->lock)) {
62547 _debug("defer put");
62548- fscache_stat(&fscache_n_op_deferred_release);
62549+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62550
62551 cache = object->cache;
62552 spin_lock(&cache->op_gc_list_lock);
62553@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62554
62555 _debug("GC DEFERRED REL OBJ%x OP%x",
62556 object->debug_id, op->debug_id);
62557- fscache_stat(&fscache_n_op_gc);
62558+ fscache_stat_unchecked(&fscache_n_op_gc);
62559
62560 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62561 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62562diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62563index de33b3f..8be4d29 100644
62564--- a/fs/fscache/page.c
62565+++ b/fs/fscache/page.c
62566@@ -74,7 +74,7 @@ try_again:
62567 val = radix_tree_lookup(&cookie->stores, page->index);
62568 if (!val) {
62569 rcu_read_unlock();
62570- fscache_stat(&fscache_n_store_vmscan_not_storing);
62571+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62572 __fscache_uncache_page(cookie, page);
62573 return true;
62574 }
62575@@ -104,11 +104,11 @@ try_again:
62576 spin_unlock(&cookie->stores_lock);
62577
62578 if (xpage) {
62579- fscache_stat(&fscache_n_store_vmscan_cancelled);
62580- fscache_stat(&fscache_n_store_radix_deletes);
62581+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62582+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62583 ASSERTCMP(xpage, ==, page);
62584 } else {
62585- fscache_stat(&fscache_n_store_vmscan_gone);
62586+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62587 }
62588
62589 wake_up_bit(&cookie->flags, 0);
62590@@ -123,11 +123,11 @@ page_busy:
62591 * sleeping on memory allocation, so we may need to impose a timeout
62592 * too. */
62593 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
62594- fscache_stat(&fscache_n_store_vmscan_busy);
62595+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
62596 return false;
62597 }
62598
62599- fscache_stat(&fscache_n_store_vmscan_wait);
62600+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
62601 if (!release_page_wait_timeout(cookie, page))
62602 _debug("fscache writeout timeout page: %p{%lx}",
62603 page, page->index);
62604@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
62605 FSCACHE_COOKIE_STORING_TAG);
62606 if (!radix_tree_tag_get(&cookie->stores, page->index,
62607 FSCACHE_COOKIE_PENDING_TAG)) {
62608- fscache_stat(&fscache_n_store_radix_deletes);
62609+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62610 xpage = radix_tree_delete(&cookie->stores, page->index);
62611 }
62612 spin_unlock(&cookie->stores_lock);
62613@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
62614
62615 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
62616
62617- fscache_stat(&fscache_n_attr_changed_calls);
62618+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
62619
62620 if (fscache_object_is_active(object)) {
62621 fscache_stat(&fscache_n_cop_attr_changed);
62622@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62623
62624 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62625
62626- fscache_stat(&fscache_n_attr_changed);
62627+ fscache_stat_unchecked(&fscache_n_attr_changed);
62628
62629 op = kzalloc(sizeof(*op), GFP_KERNEL);
62630 if (!op) {
62631- fscache_stat(&fscache_n_attr_changed_nomem);
62632+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
62633 _leave(" = -ENOMEM");
62634 return -ENOMEM;
62635 }
62636@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62637 if (fscache_submit_exclusive_op(object, op) < 0)
62638 goto nobufs_dec;
62639 spin_unlock(&cookie->lock);
62640- fscache_stat(&fscache_n_attr_changed_ok);
62641+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
62642 fscache_put_operation(op);
62643 _leave(" = 0");
62644 return 0;
62645@@ -242,7 +242,7 @@ nobufs:
62646 kfree(op);
62647 if (wake_cookie)
62648 __fscache_wake_unused_cookie(cookie);
62649- fscache_stat(&fscache_n_attr_changed_nobufs);
62650+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
62651 _leave(" = %d", -ENOBUFS);
62652 return -ENOBUFS;
62653 }
62654@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
62655 /* allocate a retrieval operation and attempt to submit it */
62656 op = kzalloc(sizeof(*op), GFP_NOIO);
62657 if (!op) {
62658- fscache_stat(&fscache_n_retrievals_nomem);
62659+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62660 return NULL;
62661 }
62662
62663@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
62664 return 0;
62665 }
62666
62667- fscache_stat(&fscache_n_retrievals_wait);
62668+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
62669
62670 jif = jiffies;
62671 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
62672 TASK_INTERRUPTIBLE) != 0) {
62673- fscache_stat(&fscache_n_retrievals_intr);
62674+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62675 _leave(" = -ERESTARTSYS");
62676 return -ERESTARTSYS;
62677 }
62678@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
62679 */
62680 int fscache_wait_for_operation_activation(struct fscache_object *object,
62681 struct fscache_operation *op,
62682- atomic_t *stat_op_waits,
62683- atomic_t *stat_object_dead,
62684+ atomic_unchecked_t *stat_op_waits,
62685+ atomic_unchecked_t *stat_object_dead,
62686 void (*do_cancel)(struct fscache_operation *))
62687 {
62688 int ret;
62689@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62690
62691 _debug(">>> WT");
62692 if (stat_op_waits)
62693- fscache_stat(stat_op_waits);
62694+ fscache_stat_unchecked(stat_op_waits);
62695 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
62696 TASK_INTERRUPTIBLE) != 0) {
62697 ret = fscache_cancel_op(op, do_cancel);
62698@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62699 check_if_dead:
62700 if (op->state == FSCACHE_OP_ST_CANCELLED) {
62701 if (stat_object_dead)
62702- fscache_stat(stat_object_dead);
62703+ fscache_stat_unchecked(stat_object_dead);
62704 _leave(" = -ENOBUFS [cancelled]");
62705 return -ENOBUFS;
62706 }
62707@@ -381,7 +381,7 @@ check_if_dead:
62708 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
62709 fscache_cancel_op(op, do_cancel);
62710 if (stat_object_dead)
62711- fscache_stat(stat_object_dead);
62712+ fscache_stat_unchecked(stat_object_dead);
62713 return -ENOBUFS;
62714 }
62715 return 0;
62716@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62717
62718 _enter("%p,%p,,,", cookie, page);
62719
62720- fscache_stat(&fscache_n_retrievals);
62721+ fscache_stat_unchecked(&fscache_n_retrievals);
62722
62723 if (hlist_empty(&cookie->backing_objects))
62724 goto nobufs;
62725@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62726 goto nobufs_unlock_dec;
62727 spin_unlock(&cookie->lock);
62728
62729- fscache_stat(&fscache_n_retrieval_ops);
62730+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62731
62732 /* pin the netfs read context in case we need to do the actual netfs
62733 * read because we've encountered a cache read failure */
62734@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62735
62736 error:
62737 if (ret == -ENOMEM)
62738- fscache_stat(&fscache_n_retrievals_nomem);
62739+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62740 else if (ret == -ERESTARTSYS)
62741- fscache_stat(&fscache_n_retrievals_intr);
62742+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62743 else if (ret == -ENODATA)
62744- fscache_stat(&fscache_n_retrievals_nodata);
62745+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62746 else if (ret < 0)
62747- fscache_stat(&fscache_n_retrievals_nobufs);
62748+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62749 else
62750- fscache_stat(&fscache_n_retrievals_ok);
62751+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62752
62753 fscache_put_retrieval(op);
62754 _leave(" = %d", ret);
62755@@ -505,7 +505,7 @@ nobufs_unlock:
62756 __fscache_wake_unused_cookie(cookie);
62757 kfree(op);
62758 nobufs:
62759- fscache_stat(&fscache_n_retrievals_nobufs);
62760+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62761 _leave(" = -ENOBUFS");
62762 return -ENOBUFS;
62763 }
62764@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62765
62766 _enter("%p,,%d,,,", cookie, *nr_pages);
62767
62768- fscache_stat(&fscache_n_retrievals);
62769+ fscache_stat_unchecked(&fscache_n_retrievals);
62770
62771 if (hlist_empty(&cookie->backing_objects))
62772 goto nobufs;
62773@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62774 goto nobufs_unlock_dec;
62775 spin_unlock(&cookie->lock);
62776
62777- fscache_stat(&fscache_n_retrieval_ops);
62778+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62779
62780 /* pin the netfs read context in case we need to do the actual netfs
62781 * read because we've encountered a cache read failure */
62782@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62783
62784 error:
62785 if (ret == -ENOMEM)
62786- fscache_stat(&fscache_n_retrievals_nomem);
62787+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62788 else if (ret == -ERESTARTSYS)
62789- fscache_stat(&fscache_n_retrievals_intr);
62790+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62791 else if (ret == -ENODATA)
62792- fscache_stat(&fscache_n_retrievals_nodata);
62793+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62794 else if (ret < 0)
62795- fscache_stat(&fscache_n_retrievals_nobufs);
62796+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62797 else
62798- fscache_stat(&fscache_n_retrievals_ok);
62799+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62800
62801 fscache_put_retrieval(op);
62802 _leave(" = %d", ret);
62803@@ -636,7 +636,7 @@ nobufs_unlock:
62804 if (wake_cookie)
62805 __fscache_wake_unused_cookie(cookie);
62806 nobufs:
62807- fscache_stat(&fscache_n_retrievals_nobufs);
62808+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62809 _leave(" = -ENOBUFS");
62810 return -ENOBUFS;
62811 }
62812@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62813
62814 _enter("%p,%p,,,", cookie, page);
62815
62816- fscache_stat(&fscache_n_allocs);
62817+ fscache_stat_unchecked(&fscache_n_allocs);
62818
62819 if (hlist_empty(&cookie->backing_objects))
62820 goto nobufs;
62821@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62822 goto nobufs_unlock_dec;
62823 spin_unlock(&cookie->lock);
62824
62825- fscache_stat(&fscache_n_alloc_ops);
62826+ fscache_stat_unchecked(&fscache_n_alloc_ops);
62827
62828 ret = fscache_wait_for_operation_activation(
62829 object, &op->op,
62830@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62831
62832 error:
62833 if (ret == -ERESTARTSYS)
62834- fscache_stat(&fscache_n_allocs_intr);
62835+ fscache_stat_unchecked(&fscache_n_allocs_intr);
62836 else if (ret < 0)
62837- fscache_stat(&fscache_n_allocs_nobufs);
62838+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62839 else
62840- fscache_stat(&fscache_n_allocs_ok);
62841+ fscache_stat_unchecked(&fscache_n_allocs_ok);
62842
62843 fscache_put_retrieval(op);
62844 _leave(" = %d", ret);
62845@@ -730,7 +730,7 @@ nobufs_unlock:
62846 if (wake_cookie)
62847 __fscache_wake_unused_cookie(cookie);
62848 nobufs:
62849- fscache_stat(&fscache_n_allocs_nobufs);
62850+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62851 _leave(" = -ENOBUFS");
62852 return -ENOBUFS;
62853 }
62854@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62855
62856 spin_lock(&cookie->stores_lock);
62857
62858- fscache_stat(&fscache_n_store_calls);
62859+ fscache_stat_unchecked(&fscache_n_store_calls);
62860
62861 /* find a page to store */
62862 page = NULL;
62863@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62864 page = results[0];
62865 _debug("gang %d [%lx]", n, page->index);
62866 if (page->index > op->store_limit) {
62867- fscache_stat(&fscache_n_store_pages_over_limit);
62868+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
62869 goto superseded;
62870 }
62871
62872@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62873 spin_unlock(&cookie->stores_lock);
62874 spin_unlock(&object->lock);
62875
62876- fscache_stat(&fscache_n_store_pages);
62877+ fscache_stat_unchecked(&fscache_n_store_pages);
62878 fscache_stat(&fscache_n_cop_write_page);
62879 ret = object->cache->ops->write_page(op, page);
62880 fscache_stat_d(&fscache_n_cop_write_page);
62881@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62882 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62883 ASSERT(PageFsCache(page));
62884
62885- fscache_stat(&fscache_n_stores);
62886+ fscache_stat_unchecked(&fscache_n_stores);
62887
62888 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
62889 _leave(" = -ENOBUFS [invalidating]");
62890@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62891 spin_unlock(&cookie->stores_lock);
62892 spin_unlock(&object->lock);
62893
62894- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
62895+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62896 op->store_limit = object->store_limit;
62897
62898 __fscache_use_cookie(cookie);
62899@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62900
62901 spin_unlock(&cookie->lock);
62902 radix_tree_preload_end();
62903- fscache_stat(&fscache_n_store_ops);
62904- fscache_stat(&fscache_n_stores_ok);
62905+ fscache_stat_unchecked(&fscache_n_store_ops);
62906+ fscache_stat_unchecked(&fscache_n_stores_ok);
62907
62908 /* the work queue now carries its own ref on the object */
62909 fscache_put_operation(&op->op);
62910@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62911 return 0;
62912
62913 already_queued:
62914- fscache_stat(&fscache_n_stores_again);
62915+ fscache_stat_unchecked(&fscache_n_stores_again);
62916 already_pending:
62917 spin_unlock(&cookie->stores_lock);
62918 spin_unlock(&object->lock);
62919 spin_unlock(&cookie->lock);
62920 radix_tree_preload_end();
62921 kfree(op);
62922- fscache_stat(&fscache_n_stores_ok);
62923+ fscache_stat_unchecked(&fscache_n_stores_ok);
62924 _leave(" = 0");
62925 return 0;
62926
62927@@ -1039,14 +1039,14 @@ nobufs:
62928 kfree(op);
62929 if (wake_cookie)
62930 __fscache_wake_unused_cookie(cookie);
62931- fscache_stat(&fscache_n_stores_nobufs);
62932+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
62933 _leave(" = -ENOBUFS");
62934 return -ENOBUFS;
62935
62936 nomem_free:
62937 kfree(op);
62938 nomem:
62939- fscache_stat(&fscache_n_stores_oom);
62940+ fscache_stat_unchecked(&fscache_n_stores_oom);
62941 _leave(" = -ENOMEM");
62942 return -ENOMEM;
62943 }
62944@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
62945 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62946 ASSERTCMP(page, !=, NULL);
62947
62948- fscache_stat(&fscache_n_uncaches);
62949+ fscache_stat_unchecked(&fscache_n_uncaches);
62950
62951 /* cache withdrawal may beat us to it */
62952 if (!PageFsCache(page))
62953@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
62954 struct fscache_cookie *cookie = op->op.object->cookie;
62955
62956 #ifdef CONFIG_FSCACHE_STATS
62957- atomic_inc(&fscache_n_marks);
62958+ atomic_inc_unchecked(&fscache_n_marks);
62959 #endif
62960
62961 _debug("- mark %p{%lx}", page, page->index);
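
Every fscache_stat() bump in page.c above is switched to fscache_stat_unchecked(). The _unchecked atomics are defined elsewhere in this patch; as a minimal sketch (names from the patch, implementation approximated here with plain GCC builtins), they behave like atomic_t minus the PAX_REFCOUNT overflow trap, which is the point: pure event counters may wrap harmlessly and need no saturation check.

	/* sketch only: an approximation of the patch's unchecked atomics */
	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* no overflow detection: wrapping a statistic is harmless */
		__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	}

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	}
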
62962diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
62963index 40d13c7..ddf52b9 100644
62964--- a/fs/fscache/stats.c
62965+++ b/fs/fscache/stats.c
62966@@ -18,99 +18,99 @@
62967 /*
62968 * operation counters
62969 */
62970-atomic_t fscache_n_op_pend;
62971-atomic_t fscache_n_op_run;
62972-atomic_t fscache_n_op_enqueue;
62973-atomic_t fscache_n_op_requeue;
62974-atomic_t fscache_n_op_deferred_release;
62975-atomic_t fscache_n_op_release;
62976-atomic_t fscache_n_op_gc;
62977-atomic_t fscache_n_op_cancelled;
62978-atomic_t fscache_n_op_rejected;
62979+atomic_unchecked_t fscache_n_op_pend;
62980+atomic_unchecked_t fscache_n_op_run;
62981+atomic_unchecked_t fscache_n_op_enqueue;
62982+atomic_unchecked_t fscache_n_op_requeue;
62983+atomic_unchecked_t fscache_n_op_deferred_release;
62984+atomic_unchecked_t fscache_n_op_release;
62985+atomic_unchecked_t fscache_n_op_gc;
62986+atomic_unchecked_t fscache_n_op_cancelled;
62987+atomic_unchecked_t fscache_n_op_rejected;
62988
62989-atomic_t fscache_n_attr_changed;
62990-atomic_t fscache_n_attr_changed_ok;
62991-atomic_t fscache_n_attr_changed_nobufs;
62992-atomic_t fscache_n_attr_changed_nomem;
62993-atomic_t fscache_n_attr_changed_calls;
62994+atomic_unchecked_t fscache_n_attr_changed;
62995+atomic_unchecked_t fscache_n_attr_changed_ok;
62996+atomic_unchecked_t fscache_n_attr_changed_nobufs;
62997+atomic_unchecked_t fscache_n_attr_changed_nomem;
62998+atomic_unchecked_t fscache_n_attr_changed_calls;
62999
63000-atomic_t fscache_n_allocs;
63001-atomic_t fscache_n_allocs_ok;
63002-atomic_t fscache_n_allocs_wait;
63003-atomic_t fscache_n_allocs_nobufs;
63004-atomic_t fscache_n_allocs_intr;
63005-atomic_t fscache_n_allocs_object_dead;
63006-atomic_t fscache_n_alloc_ops;
63007-atomic_t fscache_n_alloc_op_waits;
63008+atomic_unchecked_t fscache_n_allocs;
63009+atomic_unchecked_t fscache_n_allocs_ok;
63010+atomic_unchecked_t fscache_n_allocs_wait;
63011+atomic_unchecked_t fscache_n_allocs_nobufs;
63012+atomic_unchecked_t fscache_n_allocs_intr;
63013+atomic_unchecked_t fscache_n_allocs_object_dead;
63014+atomic_unchecked_t fscache_n_alloc_ops;
63015+atomic_unchecked_t fscache_n_alloc_op_waits;
63016
63017-atomic_t fscache_n_retrievals;
63018-atomic_t fscache_n_retrievals_ok;
63019-atomic_t fscache_n_retrievals_wait;
63020-atomic_t fscache_n_retrievals_nodata;
63021-atomic_t fscache_n_retrievals_nobufs;
63022-atomic_t fscache_n_retrievals_intr;
63023-atomic_t fscache_n_retrievals_nomem;
63024-atomic_t fscache_n_retrievals_object_dead;
63025-atomic_t fscache_n_retrieval_ops;
63026-atomic_t fscache_n_retrieval_op_waits;
63027+atomic_unchecked_t fscache_n_retrievals;
63028+atomic_unchecked_t fscache_n_retrievals_ok;
63029+atomic_unchecked_t fscache_n_retrievals_wait;
63030+atomic_unchecked_t fscache_n_retrievals_nodata;
63031+atomic_unchecked_t fscache_n_retrievals_nobufs;
63032+atomic_unchecked_t fscache_n_retrievals_intr;
63033+atomic_unchecked_t fscache_n_retrievals_nomem;
63034+atomic_unchecked_t fscache_n_retrievals_object_dead;
63035+atomic_unchecked_t fscache_n_retrieval_ops;
63036+atomic_unchecked_t fscache_n_retrieval_op_waits;
63037
63038-atomic_t fscache_n_stores;
63039-atomic_t fscache_n_stores_ok;
63040-atomic_t fscache_n_stores_again;
63041-atomic_t fscache_n_stores_nobufs;
63042-atomic_t fscache_n_stores_oom;
63043-atomic_t fscache_n_store_ops;
63044-atomic_t fscache_n_store_calls;
63045-atomic_t fscache_n_store_pages;
63046-atomic_t fscache_n_store_radix_deletes;
63047-atomic_t fscache_n_store_pages_over_limit;
63048+atomic_unchecked_t fscache_n_stores;
63049+atomic_unchecked_t fscache_n_stores_ok;
63050+atomic_unchecked_t fscache_n_stores_again;
63051+atomic_unchecked_t fscache_n_stores_nobufs;
63052+atomic_unchecked_t fscache_n_stores_oom;
63053+atomic_unchecked_t fscache_n_store_ops;
63054+atomic_unchecked_t fscache_n_store_calls;
63055+atomic_unchecked_t fscache_n_store_pages;
63056+atomic_unchecked_t fscache_n_store_radix_deletes;
63057+atomic_unchecked_t fscache_n_store_pages_over_limit;
63058
63059-atomic_t fscache_n_store_vmscan_not_storing;
63060-atomic_t fscache_n_store_vmscan_gone;
63061-atomic_t fscache_n_store_vmscan_busy;
63062-atomic_t fscache_n_store_vmscan_cancelled;
63063-atomic_t fscache_n_store_vmscan_wait;
63064+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63065+atomic_unchecked_t fscache_n_store_vmscan_gone;
63066+atomic_unchecked_t fscache_n_store_vmscan_busy;
63067+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63068+atomic_unchecked_t fscache_n_store_vmscan_wait;
63069
63070-atomic_t fscache_n_marks;
63071-atomic_t fscache_n_uncaches;
63072+atomic_unchecked_t fscache_n_marks;
63073+atomic_unchecked_t fscache_n_uncaches;
63074
63075-atomic_t fscache_n_acquires;
63076-atomic_t fscache_n_acquires_null;
63077-atomic_t fscache_n_acquires_no_cache;
63078-atomic_t fscache_n_acquires_ok;
63079-atomic_t fscache_n_acquires_nobufs;
63080-atomic_t fscache_n_acquires_oom;
63081+atomic_unchecked_t fscache_n_acquires;
63082+atomic_unchecked_t fscache_n_acquires_null;
63083+atomic_unchecked_t fscache_n_acquires_no_cache;
63084+atomic_unchecked_t fscache_n_acquires_ok;
63085+atomic_unchecked_t fscache_n_acquires_nobufs;
63086+atomic_unchecked_t fscache_n_acquires_oom;
63087
63088-atomic_t fscache_n_invalidates;
63089-atomic_t fscache_n_invalidates_run;
63090+atomic_unchecked_t fscache_n_invalidates;
63091+atomic_unchecked_t fscache_n_invalidates_run;
63092
63093-atomic_t fscache_n_updates;
63094-atomic_t fscache_n_updates_null;
63095-atomic_t fscache_n_updates_run;
63096+atomic_unchecked_t fscache_n_updates;
63097+atomic_unchecked_t fscache_n_updates_null;
63098+atomic_unchecked_t fscache_n_updates_run;
63099
63100-atomic_t fscache_n_relinquishes;
63101-atomic_t fscache_n_relinquishes_null;
63102-atomic_t fscache_n_relinquishes_waitcrt;
63103-atomic_t fscache_n_relinquishes_retire;
63104+atomic_unchecked_t fscache_n_relinquishes;
63105+atomic_unchecked_t fscache_n_relinquishes_null;
63106+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63107+atomic_unchecked_t fscache_n_relinquishes_retire;
63108
63109-atomic_t fscache_n_cookie_index;
63110-atomic_t fscache_n_cookie_data;
63111-atomic_t fscache_n_cookie_special;
63112+atomic_unchecked_t fscache_n_cookie_index;
63113+atomic_unchecked_t fscache_n_cookie_data;
63114+atomic_unchecked_t fscache_n_cookie_special;
63115
63116-atomic_t fscache_n_object_alloc;
63117-atomic_t fscache_n_object_no_alloc;
63118-atomic_t fscache_n_object_lookups;
63119-atomic_t fscache_n_object_lookups_negative;
63120-atomic_t fscache_n_object_lookups_positive;
63121-atomic_t fscache_n_object_lookups_timed_out;
63122-atomic_t fscache_n_object_created;
63123-atomic_t fscache_n_object_avail;
63124-atomic_t fscache_n_object_dead;
63125+atomic_unchecked_t fscache_n_object_alloc;
63126+atomic_unchecked_t fscache_n_object_no_alloc;
63127+atomic_unchecked_t fscache_n_object_lookups;
63128+atomic_unchecked_t fscache_n_object_lookups_negative;
63129+atomic_unchecked_t fscache_n_object_lookups_positive;
63130+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63131+atomic_unchecked_t fscache_n_object_created;
63132+atomic_unchecked_t fscache_n_object_avail;
63133+atomic_unchecked_t fscache_n_object_dead;
63134
63135-atomic_t fscache_n_checkaux_none;
63136-atomic_t fscache_n_checkaux_okay;
63137-atomic_t fscache_n_checkaux_update;
63138-atomic_t fscache_n_checkaux_obsolete;
63139+atomic_unchecked_t fscache_n_checkaux_none;
63140+atomic_unchecked_t fscache_n_checkaux_okay;
63141+atomic_unchecked_t fscache_n_checkaux_update;
63142+atomic_unchecked_t fscache_n_checkaux_obsolete;
63143
63144 atomic_t fscache_n_cop_alloc_object;
63145 atomic_t fscache_n_cop_lookup_object;
63146@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63147 seq_puts(m, "FS-Cache statistics\n");
63148
63149 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63150- atomic_read(&fscache_n_cookie_index),
63151- atomic_read(&fscache_n_cookie_data),
63152- atomic_read(&fscache_n_cookie_special));
63153+ atomic_read_unchecked(&fscache_n_cookie_index),
63154+ atomic_read_unchecked(&fscache_n_cookie_data),
63155+ atomic_read_unchecked(&fscache_n_cookie_special));
63156
63157 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63158- atomic_read(&fscache_n_object_alloc),
63159- atomic_read(&fscache_n_object_no_alloc),
63160- atomic_read(&fscache_n_object_avail),
63161- atomic_read(&fscache_n_object_dead));
63162+ atomic_read_unchecked(&fscache_n_object_alloc),
63163+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63164+ atomic_read_unchecked(&fscache_n_object_avail),
63165+ atomic_read_unchecked(&fscache_n_object_dead));
63166 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63167- atomic_read(&fscache_n_checkaux_none),
63168- atomic_read(&fscache_n_checkaux_okay),
63169- atomic_read(&fscache_n_checkaux_update),
63170- atomic_read(&fscache_n_checkaux_obsolete));
63171+ atomic_read_unchecked(&fscache_n_checkaux_none),
63172+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63173+ atomic_read_unchecked(&fscache_n_checkaux_update),
63174+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63175
63176 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63177- atomic_read(&fscache_n_marks),
63178- atomic_read(&fscache_n_uncaches));
63179+ atomic_read_unchecked(&fscache_n_marks),
63180+ atomic_read_unchecked(&fscache_n_uncaches));
63181
63182 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63183 " oom=%u\n",
63184- atomic_read(&fscache_n_acquires),
63185- atomic_read(&fscache_n_acquires_null),
63186- atomic_read(&fscache_n_acquires_no_cache),
63187- atomic_read(&fscache_n_acquires_ok),
63188- atomic_read(&fscache_n_acquires_nobufs),
63189- atomic_read(&fscache_n_acquires_oom));
63190+ atomic_read_unchecked(&fscache_n_acquires),
63191+ atomic_read_unchecked(&fscache_n_acquires_null),
63192+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63193+ atomic_read_unchecked(&fscache_n_acquires_ok),
63194+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63195+ atomic_read_unchecked(&fscache_n_acquires_oom));
63196
63197 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63198- atomic_read(&fscache_n_object_lookups),
63199- atomic_read(&fscache_n_object_lookups_negative),
63200- atomic_read(&fscache_n_object_lookups_positive),
63201- atomic_read(&fscache_n_object_created),
63202- atomic_read(&fscache_n_object_lookups_timed_out));
63203+ atomic_read_unchecked(&fscache_n_object_lookups),
63204+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63205+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63206+ atomic_read_unchecked(&fscache_n_object_created),
63207+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63208
63209 seq_printf(m, "Invals : n=%u run=%u\n",
63210- atomic_read(&fscache_n_invalidates),
63211- atomic_read(&fscache_n_invalidates_run));
63212+ atomic_read_unchecked(&fscache_n_invalidates),
63213+ atomic_read_unchecked(&fscache_n_invalidates_run));
63214
63215 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63216- atomic_read(&fscache_n_updates),
63217- atomic_read(&fscache_n_updates_null),
63218- atomic_read(&fscache_n_updates_run));
63219+ atomic_read_unchecked(&fscache_n_updates),
63220+ atomic_read_unchecked(&fscache_n_updates_null),
63221+ atomic_read_unchecked(&fscache_n_updates_run));
63222
63223 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63224- atomic_read(&fscache_n_relinquishes),
63225- atomic_read(&fscache_n_relinquishes_null),
63226- atomic_read(&fscache_n_relinquishes_waitcrt),
63227- atomic_read(&fscache_n_relinquishes_retire));
63228+ atomic_read_unchecked(&fscache_n_relinquishes),
63229+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63230+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63231+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63232
63233 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63234- atomic_read(&fscache_n_attr_changed),
63235- atomic_read(&fscache_n_attr_changed_ok),
63236- atomic_read(&fscache_n_attr_changed_nobufs),
63237- atomic_read(&fscache_n_attr_changed_nomem),
63238- atomic_read(&fscache_n_attr_changed_calls));
63239+ atomic_read_unchecked(&fscache_n_attr_changed),
63240+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63241+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63242+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63243+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63244
63245 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63246- atomic_read(&fscache_n_allocs),
63247- atomic_read(&fscache_n_allocs_ok),
63248- atomic_read(&fscache_n_allocs_wait),
63249- atomic_read(&fscache_n_allocs_nobufs),
63250- atomic_read(&fscache_n_allocs_intr));
63251+ atomic_read_unchecked(&fscache_n_allocs),
63252+ atomic_read_unchecked(&fscache_n_allocs_ok),
63253+ atomic_read_unchecked(&fscache_n_allocs_wait),
63254+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63255+ atomic_read_unchecked(&fscache_n_allocs_intr));
63256 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63257- atomic_read(&fscache_n_alloc_ops),
63258- atomic_read(&fscache_n_alloc_op_waits),
63259- atomic_read(&fscache_n_allocs_object_dead));
63260+ atomic_read_unchecked(&fscache_n_alloc_ops),
63261+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63262+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63263
63264 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63265 " int=%u oom=%u\n",
63266- atomic_read(&fscache_n_retrievals),
63267- atomic_read(&fscache_n_retrievals_ok),
63268- atomic_read(&fscache_n_retrievals_wait),
63269- atomic_read(&fscache_n_retrievals_nodata),
63270- atomic_read(&fscache_n_retrievals_nobufs),
63271- atomic_read(&fscache_n_retrievals_intr),
63272- atomic_read(&fscache_n_retrievals_nomem));
63273+ atomic_read_unchecked(&fscache_n_retrievals),
63274+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63275+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63276+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63277+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63278+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63279+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63280 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63281- atomic_read(&fscache_n_retrieval_ops),
63282- atomic_read(&fscache_n_retrieval_op_waits),
63283- atomic_read(&fscache_n_retrievals_object_dead));
63284+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63285+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63286+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63287
63288 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63289- atomic_read(&fscache_n_stores),
63290- atomic_read(&fscache_n_stores_ok),
63291- atomic_read(&fscache_n_stores_again),
63292- atomic_read(&fscache_n_stores_nobufs),
63293- atomic_read(&fscache_n_stores_oom));
63294+ atomic_read_unchecked(&fscache_n_stores),
63295+ atomic_read_unchecked(&fscache_n_stores_ok),
63296+ atomic_read_unchecked(&fscache_n_stores_again),
63297+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63298+ atomic_read_unchecked(&fscache_n_stores_oom));
63299 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63300- atomic_read(&fscache_n_store_ops),
63301- atomic_read(&fscache_n_store_calls),
63302- atomic_read(&fscache_n_store_pages),
63303- atomic_read(&fscache_n_store_radix_deletes),
63304- atomic_read(&fscache_n_store_pages_over_limit));
63305+ atomic_read_unchecked(&fscache_n_store_ops),
63306+ atomic_read_unchecked(&fscache_n_store_calls),
63307+ atomic_read_unchecked(&fscache_n_store_pages),
63308+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63309+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63310
63311 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63312- atomic_read(&fscache_n_store_vmscan_not_storing),
63313- atomic_read(&fscache_n_store_vmscan_gone),
63314- atomic_read(&fscache_n_store_vmscan_busy),
63315- atomic_read(&fscache_n_store_vmscan_cancelled),
63316- atomic_read(&fscache_n_store_vmscan_wait));
63317+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63318+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63319+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63320+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63321+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63322
63323 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63324- atomic_read(&fscache_n_op_pend),
63325- atomic_read(&fscache_n_op_run),
63326- atomic_read(&fscache_n_op_enqueue),
63327- atomic_read(&fscache_n_op_cancelled),
63328- atomic_read(&fscache_n_op_rejected));
63329+ atomic_read_unchecked(&fscache_n_op_pend),
63330+ atomic_read_unchecked(&fscache_n_op_run),
63331+ atomic_read_unchecked(&fscache_n_op_enqueue),
63332+ atomic_read_unchecked(&fscache_n_op_cancelled),
63333+ atomic_read_unchecked(&fscache_n_op_rejected));
63334 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63335- atomic_read(&fscache_n_op_deferred_release),
63336- atomic_read(&fscache_n_op_release),
63337- atomic_read(&fscache_n_op_gc));
63338+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63339+ atomic_read_unchecked(&fscache_n_op_release),
63340+ atomic_read_unchecked(&fscache_n_op_gc));
63341
63342 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63343 atomic_read(&fscache_n_cop_alloc_object),
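
Note that the fscache_n_cop_* counters at the bottom of the declaration block stay plain atomic_t: unlike the pure statistics above them, they are both incremented and decremented (via fscache_stat_d()), so keeping the REFCOUNT overflow check there remains meaningful. A minimal sketch of the wrappers, assuming the patch's fs/fscache/internal.h mirrors mainline's helpers:

	#ifdef CONFIG_FSCACHE_STATS
	#define fscache_stat(stat)		atomic_inc(stat)
	#define fscache_stat_d(stat)		atomic_dec(stat)
	#define fscache_stat_unchecked(stat)	atomic_inc_unchecked(stat)
	#else
	#define fscache_stat(stat)		do {} while (0)
	#define fscache_stat_d(stat)		do {} while (0)
	#define fscache_stat_unchecked(stat)	do {} while (0)
	#endif
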
63344diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63345index 28d0c7a..04816b7 100644
63346--- a/fs/fuse/cuse.c
63347+++ b/fs/fuse/cuse.c
63348@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63349 INIT_LIST_HEAD(&cuse_conntbl[i]);
63350
63351 /* inherit and extend fuse_dev_operations */
63352- cuse_channel_fops = fuse_dev_operations;
63353- cuse_channel_fops.owner = THIS_MODULE;
63354- cuse_channel_fops.open = cuse_channel_open;
63355- cuse_channel_fops.release = cuse_channel_release;
63356+ pax_open_kernel();
63357+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63358+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63359+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63360+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63361+ pax_close_kernel();
63362
63363 cuse_class = class_create(THIS_MODULE, "cuse");
63364 if (IS_ERR(cuse_class))
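
The cuse_init() hunk exists because cuse_channel_fops is constified elsewhere in this patch: plain member assignment no longer compiles against a const object, hence the *(void **)& casts, and the writes themselves must happen inside a pax_open_kernel()/pax_close_kernel() window. A rough x86-only sketch of that window follows; the real PaX helpers also cover KERNEXEC, other architectures and further corner cases.

	static inline void open_kernel_writes(void)	/* ~ pax_open_kernel() */
	{
		preempt_disable();
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* let ring 0 write RO pages */
	}

	static inline void close_kernel_writes(void)	/* ~ pax_close_kernel() */
	{
		write_cr0(read_cr0() | X86_CR0_WP);
		preempt_enable();
	}
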
63365diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63366index 71c4619..6a9f6d4 100644
63367--- a/fs/fuse/dev.c
63368+++ b/fs/fuse/dev.c
63369@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63370 ret = 0;
63371 pipe_lock(pipe);
63372
63373- if (!pipe->readers) {
63374+ if (!atomic_read(&pipe->readers)) {
63375 send_sig(SIGPIPE, current, 0);
63376 if (!ret)
63377 ret = -EPIPE;
63378@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63379 page_nr++;
63380 ret += buf->len;
63381
63382- if (pipe->files)
63383+ if (atomic_read(&pipe->files))
63384 do_wakeup = 1;
63385 }
63386
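
Here pipe->readers and pipe->files go through atomic_read() because the patch converts struct pipe_inode_info's counters to atomic_t elsewhere; once the field type changes, every reader must use the atomic API, and sites that peek without the pipe lock at least get tear-free reads. A self-contained userspace analogue of the check, assuming the converted type:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct pipe_like {
		atomic_int readers;	/* stands in for pipe->readers */
	};

	/* a writer raises SIGPIPE only after an atomic "no readers" check */
	static bool pipe_has_readers(struct pipe_like *p)
	{
		return atomic_load(&p->readers) != 0;
	}
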
63387diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63388index 08e7b1a..d91c6ee 100644
63389--- a/fs/fuse/dir.c
63390+++ b/fs/fuse/dir.c
63391@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63392 return link;
63393 }
63394
63395-static void free_link(char *link)
63396+static void free_link(const char *link)
63397 {
63398 if (!IS_ERR(link))
63399 free_page((unsigned long) link);
63400diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63401index fd62cae..3494dfa 100644
63402--- a/fs/hostfs/hostfs_kern.c
63403+++ b/fs/hostfs/hostfs_kern.c
63404@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63405
63406 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63407 {
63408- char *s = nd_get_link(nd);
63409+ const char *s = nd_get_link(nd);
63410 if (!IS_ERR(s))
63411 __putname(s);
63412 }
63413diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63414index 5eba47f..d353c22 100644
63415--- a/fs/hugetlbfs/inode.c
63416+++ b/fs/hugetlbfs/inode.c
63417@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63418 struct mm_struct *mm = current->mm;
63419 struct vm_area_struct *vma;
63420 struct hstate *h = hstate_file(file);
63421+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63422 struct vm_unmapped_area_info info;
63423
63424 if (len & ~huge_page_mask(h))
63425@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63426 return addr;
63427 }
63428
63429+#ifdef CONFIG_PAX_RANDMMAP
63430+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63431+#endif
63432+
63433 if (addr) {
63434 addr = ALIGN(addr, huge_page_size(h));
63435 vma = find_vma(mm, addr);
63436- if (TASK_SIZE - len >= addr &&
63437- (!vma || addr + len <= vma->vm_start))
63438+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63439 return addr;
63440 }
63441
63442 info.flags = 0;
63443 info.length = len;
63444 info.low_limit = TASK_UNMAPPED_BASE;
63445+
63446+#ifdef CONFIG_PAX_RANDMMAP
63447+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63448+ info.low_limit += mm->delta_mmap;
63449+#endif
63450+
63451 info.high_limit = TASK_SIZE;
63452 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63453 info.align_offset = 0;
63454@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63455 };
63456 MODULE_ALIAS_FS("hugetlbfs");
63457
63458-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63459+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63460
63461 static int can_do_hugetlb_shm(void)
63462 {
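
The hugetlb_get_unmapped_area() hunks follow the standard PAX_RANDMMAP pattern seen throughout this patch: ignore the caller's address hint when randomization is active, shift the search floor by the per-exec mm->delta_mmap, and replace the bare addr + len <= vma->vm_start test with check_heap_stack_gap(), which also demands a configurable gap below the next VMA. A rough model of that predicate (the real helper lives elsewhere in the patch and additionally special-cases stack-grows-down mappings):

	static bool fits_with_gap(const struct vm_area_struct *vma,
				  unsigned long addr, unsigned long len,
				  unsigned long gap)
	{
		if (!vma)
			return true;		/* nothing above us */
		return addr + len + gap <= vma->vm_start;
	}
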
63463diff --git a/fs/inode.c b/fs/inode.c
63464index aa149e7..46f1f65 100644
63465--- a/fs/inode.c
63466+++ b/fs/inode.c
63467@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
63468 unsigned int *p = &get_cpu_var(last_ino);
63469 unsigned int res = *p;
63470
63471+start:
63472+
63473 #ifdef CONFIG_SMP
63474 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63475- static atomic_t shared_last_ino;
63476- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63477+ static atomic_unchecked_t shared_last_ino;
63478+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63479
63480 res = next - LAST_INO_BATCH;
63481 }
63482 #endif
63483
63484- *p = ++res;
63485+ if (unlikely(!++res))
63486+ goto start; /* never zero */
63487+ *p = res;
63488 put_cpu_var(last_ino);
63489 return res;
63490 }
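
Besides switching the SMP batch counter to the unchecked flavour, the get_next_ino() hunk closes a subtle hole: on wraparound the function could hand out inode number 0, which much of the VFS treats as "no inode". The goto re-runs the batching logic until a non-zero value comes out. Stripped of the per-CPU batching, the guarantee reduces to:

	/* simplified, single-threaded model of the non-zero guarantee */
	static unsigned int next_ino(void)
	{
		static unsigned int last;

		if (!++last)		/* wrapped to 0? */
			++last;		/* 0 is reserved, skip it */
		return last;
	}
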
63491diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63492index 4a6cf28..d3a29d3 100644
63493--- a/fs/jffs2/erase.c
63494+++ b/fs/jffs2/erase.c
63495@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63496 struct jffs2_unknown_node marker = {
63497 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63498 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63499- .totlen = cpu_to_je32(c->cleanmarker_size)
63500+ .totlen = cpu_to_je32(c->cleanmarker_size),
63501+ .hdr_crc = cpu_to_je32(0)
63502 };
63503
63504 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63505diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63506index 09ed551..45684f8 100644
63507--- a/fs/jffs2/wbuf.c
63508+++ b/fs/jffs2/wbuf.c
63509@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63510 {
63511 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63512 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63513- .totlen = constant_cpu_to_je32(8)
63514+ .totlen = constant_cpu_to_je32(8),
63515+ .hdr_crc = constant_cpu_to_je32(0)
63516 };
63517
63518 /*
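
The two jffs2 hunks only make an implicit zero explicit: with C99 designated initializers, members that are not named are zero-initialized anyway, so .hdr_crc was already 0. Spelling it out presumably documents the complete on-media header right at the initializer site. A small standalone example (illustrative field values):

	struct unknown_node {
		unsigned short magic, nodetype;
		unsigned int   totlen, hdr_crc;
	};

	/* C99 6.7.9p21: unnamed members are zero-initialized, so both
	 * initializers below produce identical objects */
	static const struct unknown_node a = { .magic = 0x1985, .totlen = 8 };
	static const struct unknown_node b = { .magic = 0x1985, .totlen = 8,
					       .hdr_crc = 0 };
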
63519diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63520index 16c3a95..e9cb75d 100644
63521--- a/fs/jfs/super.c
63522+++ b/fs/jfs/super.c
63523@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
63524
63525 jfs_inode_cachep =
63526 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63527- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63528+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63529 init_once);
63530 if (jfs_inode_cachep == NULL)
63531 return -ENOMEM;
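
SLAB_USERCOPY is a grsecurity-added cache flag: under PAX_USERCOPY, a copy_to_user()/copy_from_user() spanning a slab object is refused unless the object's cache is explicitly whitelisted. The jfs inode cache presumably needs the whitelist because jfs_inode_info embeds, among other things, the inline fast-symlink buffer that gets copied to userspace. Conceptually the enforcement reduces to a check like:

	/* conceptual sketch of the PAX_USERCOPY cache whitelist test */
	static bool usercopy_allowed(const struct kmem_cache *cachep)
	{
		return cachep->flags & SLAB_USERCOPY;	/* flag added by this patch */
	}
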
63532diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63533index 2d881b3..fe1ac77 100644
63534--- a/fs/kernfs/dir.c
63535+++ b/fs/kernfs/dir.c
63536@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63537 *
63538 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63539 */
63540-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63541+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63542 {
63543 unsigned long hash = init_name_hash();
63544 unsigned int len = strlen(name);
63545@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
63546 ret = scops->mkdir(parent, dentry->d_name.name, mode);
63547
63548 kernfs_put_active(parent);
63549+
63550+ if (!ret) {
63551+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
63552+ ret = PTR_ERR_OR_ZERO(dentry_ret);
63553+ }
63554+
63555 return ret;
63556 }
63557
63558diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63559index ddc9f96..4e450ad 100644
63560--- a/fs/kernfs/file.c
63561+++ b/fs/kernfs/file.c
63562@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63563
63564 struct kernfs_open_node {
63565 atomic_t refcnt;
63566- atomic_t event;
63567+ atomic_unchecked_t event;
63568 wait_queue_head_t poll;
63569 struct list_head files; /* goes through kernfs_open_file.list */
63570 };
63571@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63572 {
63573 struct kernfs_open_file *of = sf->private;
63574
63575- of->event = atomic_read(&of->kn->attr.open->event);
63576+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63577
63578 return of->kn->attr.ops->seq_show(sf, v);
63579 }
63580@@ -271,7 +271,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
63581 {
63582 struct kernfs_open_file *of = kernfs_of(file);
63583 const struct kernfs_ops *ops;
63584- size_t len;
63585+ ssize_t len;
63586 char *buf;
63587
63588 if (of->atomic_write_len) {
63589@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63590 return ret;
63591 }
63592
63593-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63594- void *buf, int len, int write)
63595+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63596+ void *buf, size_t len, int write)
63597 {
63598 struct file *file = vma->vm_file;
63599 struct kernfs_open_file *of = kernfs_of(file);
63600- int ret;
63601+ ssize_t ret;
63602
63603 if (!of->vm_ops)
63604 return -EINVAL;
63605@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63606 return -ENOMEM;
63607
63608 atomic_set(&new_on->refcnt, 0);
63609- atomic_set(&new_on->event, 1);
63610+ atomic_set_unchecked(&new_on->event, 1);
63611 init_waitqueue_head(&new_on->poll);
63612 INIT_LIST_HEAD(&new_on->files);
63613 goto retry;
63614@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
63615
63616 kernfs_put_active(kn);
63617
63618- if (of->event != atomic_read(&on->event))
63619+ if (of->event != atomic_read_unchecked(&on->event))
63620 goto trigger;
63621
63622 return DEFAULT_POLLMASK;
63623@@ -823,7 +823,7 @@ repeat:
63624
63625 on = kn->attr.open;
63626 if (on) {
63627- atomic_inc(&on->event);
63628+ atomic_inc_unchecked(&on->event);
63629 wake_up_interruptible(&on->poll);
63630 }
63631
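
The kernfs_open_node event counter is a classic notification sequence number: each reader snapshots it at seq_show() time, poll() reports the file readable once the published value moves past the snapshot, and kernfs_notify() bumps it and wakes the poll queue. Only the flavour of the atomic changes in these hunks; the protocol itself, in miniature:

	#include <stdatomic.h>

	static atomic_uint event;			/* on->event */

	static unsigned int snapshot(void)		/* seq_show */
	{
		return atomic_load(&event);
	}

	static int data_changed(unsigned int seen)	/* poll */
	{
		return seen != atomic_load(&event);
	}

	static void notify(void)			/* kernfs_notify */
	{
		atomic_fetch_add(&event, 1);
		/* ...then wake_up_interruptible() on the poll queue */
	}
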
63632diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
63633index 8a19889..4c3069a 100644
63634--- a/fs/kernfs/symlink.c
63635+++ b/fs/kernfs/symlink.c
63636@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
63637 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
63638 void *cookie)
63639 {
63640- char *page = nd_get_link(nd);
63641+ const char *page = nd_get_link(nd);
63642 if (!IS_ERR(page))
63643 free_page((unsigned long)page);
63644 }
63645diff --git a/fs/libfs.c b/fs/libfs.c
63646index 005843c..06c4191 100644
63647--- a/fs/libfs.c
63648+++ b/fs/libfs.c
63649@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63650
63651 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
63652 struct dentry *next = list_entry(p, struct dentry, d_child);
63653+ char d_name[sizeof(next->d_iname)];
63654+ const unsigned char *name;
63655+
63656 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
63657 if (!simple_positive(next)) {
63658 spin_unlock(&next->d_lock);
63659@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63660
63661 spin_unlock(&next->d_lock);
63662 spin_unlock(&dentry->d_lock);
63663- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
63664+ name = next->d_name.name;
63665+ if (name == next->d_iname) {
63666+ memcpy(d_name, name, next->d_name.len);
63667+ name = d_name;
63668+ }
63669+ if (!dir_emit(ctx, name, next->d_name.len,
63670 next->d_inode->i_ino, dt_type(next->d_inode)))
63671 return 0;
63672 spin_lock(&dentry->d_lock);
63673@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
63674 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
63675 void *cookie)
63676 {
63677- char *s = nd_get_link(nd);
63678+ const char *s = nd_get_link(nd);
63679 if (!IS_ERR(s))
63680 kfree(s);
63681 }
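
The dcache_readdir() change is a use-after-free fix rather than a hardening tweak: for short names, next->d_name.name points into the dentry's inline d_iname storage, and once next->d_lock is dropped a concurrent rename may overwrite or reallocate that storage while dir_emit() is still copying it to userspace. The hunk snapshots such names into a stack buffer while the pointer is still stable. The general copy-before-unlock idiom, in a self-contained form:

	#include <pthread.h>
	#include <string.h>

	/* snapshot lock-protected bytes before unlocking; the source
	 * buffer may be reused the moment the lock is released */
	static void emit_safely(pthread_mutex_t *lock, const char *shared,
				size_t len, void (*emit)(const char *, size_t))
	{
		char copy[64];			/* >= longest inline name */

		pthread_mutex_lock(lock);
		if (len > sizeof(copy))
			len = sizeof(copy);
		memcpy(copy, shared, len);
		pthread_mutex_unlock(lock);

		emit(copy, len);		/* operates on our private copy */
	}
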
63682diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
63683index acd3947..1f896e2 100644
63684--- a/fs/lockd/clntproc.c
63685+++ b/fs/lockd/clntproc.c
63686@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
63687 /*
63688 * Cookie counter for NLM requests
63689 */
63690-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
63691+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
63692
63693 void nlmclnt_next_cookie(struct nlm_cookie *c)
63694 {
63695- u32 cookie = atomic_inc_return(&nlm_cookie);
63696+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
63697
63698 memcpy(c->data, &cookie, 4);
63699 c->len=4;
63700diff --git a/fs/locks.c b/fs/locks.c
63701index 59e2f90..bd69071 100644
63702--- a/fs/locks.c
63703+++ b/fs/locks.c
63704@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
63705 locks_remove_posix(filp, filp);
63706
63707 if (filp->f_op->flock) {
63708- struct file_lock fl = {
63709+ struct file_lock flock = {
63710 .fl_owner = filp,
63711 .fl_pid = current->tgid,
63712 .fl_file = filp,
63713@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
63714 .fl_type = F_UNLCK,
63715 .fl_end = OFFSET_MAX,
63716 };
63717- filp->f_op->flock(filp, F_SETLKW, &fl);
63718- if (fl.fl_ops && fl.fl_ops->fl_release_private)
63719- fl.fl_ops->fl_release_private(&fl);
63720+ filp->f_op->flock(filp, F_SETLKW, &flock);
63721+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
63722+ flock.fl_ops->fl_release_private(&flock);
63723 }
63724
63725 spin_lock(&inode->i_lock);
63726diff --git a/fs/mount.h b/fs/mount.h
63727index 0ad6f76..a04c146 100644
63728--- a/fs/mount.h
63729+++ b/fs/mount.h
63730@@ -12,7 +12,7 @@ struct mnt_namespace {
63731 u64 seq; /* Sequence number to prevent loops */
63732 wait_queue_head_t poll;
63733 u64 event;
63734-};
63735+} __randomize_layout;
63736
63737 struct mnt_pcp {
63738 int mnt_count;
63739@@ -63,7 +63,7 @@ struct mount {
63740 int mnt_expiry_mark; /* true if marked for expiry */
63741 struct hlist_head mnt_pins;
63742 struct path mnt_ex_mountpoint;
63743-};
63744+} __randomize_layout;
63745
63746 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
63747
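
__randomize_layout is consumed by grsecurity's structure-layout randomization gcc plugin: annotated structs get their member order shuffled at build time from a per-build random seed, so an exploit can no longer rely on fixed offsets inside sensitive kernel objects such as the mount and namespace bookkeeping above. Without the plugin, the attribute expands to nothing. Usage is just an annotation:

	/* illustrative struct; the real targets are mnt_namespace/mount above */
	struct sensitive_state {
		void		*owner;
		unsigned long	flags;
		unsigned int	seq;
	} __randomize_layout;	/* empty #define when the plugin is off */
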
63748diff --git a/fs/namei.c b/fs/namei.c
63749index bc35b02..7ed1f1d 100644
63750--- a/fs/namei.c
63751+++ b/fs/namei.c
63752@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
63753 if (ret != -EACCES)
63754 return ret;
63755
63756+#ifdef CONFIG_GRKERNSEC
63757+ /* we'll block if we have to log due to a denied capability use */
63758+ if (mask & MAY_NOT_BLOCK)
63759+ return -ECHILD;
63760+#endif
63761+
63762 if (S_ISDIR(inode->i_mode)) {
63763 /* DACs are overridable for directories */
63764- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63765- return 0;
63766 if (!(mask & MAY_WRITE))
63767- if (capable_wrt_inode_uidgid(inode,
63768- CAP_DAC_READ_SEARCH))
63769+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63770+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63771 return 0;
63772+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63773+ return 0;
63774 return -EACCES;
63775 }
63776 /*
63777+ * Searching includes executable on directories, else just read.
63778+ */
63779+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63780+ if (mask == MAY_READ)
63781+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63782+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63783+ return 0;
63784+
63785+ /*
63786 * Read/write DACs are always overridable.
63787 * Executable DACs are overridable when there is
63788 * at least one exec bit set.
63789@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
63790 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63791 return 0;
63792
63793- /*
63794- * Searching includes executable on directories, else just read.
63795- */
63796- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63797- if (mask == MAY_READ)
63798- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63799- return 0;
63800-
63801 return -EACCES;
63802 }
63803 EXPORT_SYMBOL(generic_permission);
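
Two things happen in the generic_permission() hunk. First, under CONFIG_GRKERNSEC a denied capability use may have to be logged, which can sleep, so an RCU-walk caller (MAY_NOT_BLOCK) is bounced back to ref-walk with -ECHILD before any logging can trigger. Second, the capability probes are reordered so the narrow, non-logging checks run first and the logged CAP_DAC_OVERRIDE test is only reached as a last resort; routine directory reads then stop generating audit noise. Roughly:

	/* sketch of the reordered directory check (the _nolog variant is
	 * a grsecurity helper introduced elsewhere in this patch) */
	static int dir_override(struct inode *inode, int mask)
	{
		if (!(mask & MAY_WRITE) &&
		    (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
		     capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)))
			return 0;
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
			return 0;	/* may log under GRKERNSEC */
		return -EACCES;
	}
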
63804@@ -497,7 +504,7 @@ struct nameidata {
63805 int last_type;
63806 unsigned depth;
63807 struct file *base;
63808- char *saved_names[MAX_NESTED_LINKS + 1];
63809+ const char *saved_names[MAX_NESTED_LINKS + 1];
63810 };
63811
63812 /*
63813@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
63814 nd->flags |= LOOKUP_JUMPED;
63815 }
63816
63817-void nd_set_link(struct nameidata *nd, char *path)
63818+void nd_set_link(struct nameidata *nd, const char *path)
63819 {
63820 nd->saved_names[nd->depth] = path;
63821 }
63822 EXPORT_SYMBOL(nd_set_link);
63823
63824-char *nd_get_link(struct nameidata *nd)
63825+const char *nd_get_link(const struct nameidata *nd)
63826 {
63827 return nd->saved_names[nd->depth];
63828 }
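
Constifying saved_names[] and nd_get_link() here is what forces the matching const additions in every ->put_link-style helper earlier in the patch (fuse's free_link(), hostfs_put_link(), kernfs_iop_put_link(), kfree_put_link()): the VFS never modifies a symlink body after ->follow_link() returns it, so the qualifier can flow through the whole nameidata plumbing. Freeing needs no cast, since kfree() already takes a const void *:

	/* minimal put_link-style helper under the constified API */
	static void put_link_body(const char *link)
	{
		if (!IS_ERR(link))
			kfree(link);	/* kfree(const void *) is fine */
	}
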
63829@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63830 {
63831 struct dentry *dentry = link->dentry;
63832 int error;
63833- char *s;
63834+ const char *s;
63835
63836 BUG_ON(nd->flags & LOOKUP_RCU);
63837
63838@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63839 if (error)
63840 goto out_put_nd_path;
63841
63842+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
63843+ dentry->d_inode, dentry, nd->path.mnt)) {
63844+ error = -EACCES;
63845+ goto out_put_nd_path;
63846+ }
63847+
63848 nd->last_type = LAST_BIND;
63849 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
63850 error = PTR_ERR(*p);
63851@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
63852 if (res)
63853 break;
63854 res = walk_component(nd, path, LOOKUP_FOLLOW);
63855+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
63856+ res = -EACCES;
63857 put_link(nd, &link, cookie);
63858 } while (res > 0);
63859
63860@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
63861 static inline u64 hash_name(const char *name)
63862 {
63863 unsigned long a, b, adata, bdata, mask, hash, len;
63864- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63865+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63866
63867 hash = a = 0;
63868 len = -sizeof(unsigned long);
63869@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
63870 if (err)
63871 break;
63872 err = lookup_last(nd, &path);
63873+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
63874+ err = -EACCES;
63875 put_link(nd, &link, cookie);
63876 }
63877 }
63878@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
63879 if (!err)
63880 err = complete_walk(nd);
63881
63882+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
63883+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
63884+ path_put(&nd->path);
63885+ err = -ENOENT;
63886+ }
63887+ }
63888+
63889 if (!err && nd->flags & LOOKUP_DIRECTORY) {
63890 if (!d_can_lookup(nd->path.dentry)) {
63891 path_put(&nd->path);
63892@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
63893 retval = path_lookupat(dfd, name->name,
63894 flags | LOOKUP_REVAL, nd);
63895
63896- if (likely(!retval))
63897+ if (likely(!retval)) {
63898 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
63899+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
63900+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
63901+ path_put(&nd->path);
63902+ return -ENOENT;
63903+ }
63904+ }
63905+ }
63906 return retval;
63907 }
63908
63909@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
63910 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
63911 return -EPERM;
63912
63913+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
63914+ return -EPERM;
63915+ if (gr_handle_rawio(inode))
63916+ return -EPERM;
63917+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
63918+ return -EACCES;
63919+
63920 return 0;
63921 }
63922
63923@@ -2826,7 +2864,7 @@ looked_up:
63924 * cleared otherwise prior to returning.
63925 */
63926 static int lookup_open(struct nameidata *nd, struct path *path,
63927- struct file *file,
63928+ struct path *link, struct file *file,
63929 const struct open_flags *op,
63930 bool got_write, int *opened)
63931 {
63932@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
63933 /* Negative dentry, just create the file */
63934 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
63935 umode_t mode = op->mode;
63936+
63937+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
63938+ error = -EACCES;
63939+ goto out_dput;
63940+ }
63941+
63942+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
63943+ error = -EACCES;
63944+ goto out_dput;
63945+ }
63946+
63947 if (!IS_POSIXACL(dir->d_inode))
63948 mode &= ~current_umask();
63949 /*
63950@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
63951 nd->flags & LOOKUP_EXCL);
63952 if (error)
63953 goto out_dput;
63954+ else
63955+ gr_handle_create(dentry, nd->path.mnt);
63956 }
63957 out_no_open:
63958 path->dentry = dentry;
63959@@ -2896,7 +2947,7 @@ out_dput:
63960 /*
63961 * Handle the last step of open()
63962 */
63963-static int do_last(struct nameidata *nd, struct path *path,
63964+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
63965 struct file *file, const struct open_flags *op,
63966 int *opened, struct filename *name)
63967 {
63968@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
63969 if (error)
63970 return error;
63971
63972+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
63973+ error = -ENOENT;
63974+ goto out;
63975+ }
63976+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
63977+ error = -EACCES;
63978+ goto out;
63979+ }
63980+
63981 audit_inode(name, dir, LOOKUP_PARENT);
63982 error = -EISDIR;
63983 /* trailing slashes? */
63984@@ -2965,7 +3025,7 @@ retry_lookup:
63985 */
63986 }
63987 mutex_lock(&dir->d_inode->i_mutex);
63988- error = lookup_open(nd, path, file, op, got_write, opened);
63989+ error = lookup_open(nd, path, link, file, op, got_write, opened);
63990 mutex_unlock(&dir->d_inode->i_mutex);
63991
63992 if (error <= 0) {
63993@@ -2989,11 +3049,28 @@ retry_lookup:
63994 goto finish_open_created;
63995 }
63996
63997+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
63998+ error = -ENOENT;
63999+ goto exit_dput;
64000+ }
64001+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64002+ error = -EACCES;
64003+ goto exit_dput;
64004+ }
64005+
64006 /*
64007 * create/update audit record if it already exists.
64008 */
64009- if (d_is_positive(path->dentry))
64010+ if (d_is_positive(path->dentry)) {
64011+ /* only check if O_CREAT is specified, all other checks need to go
64012+ into may_open */
64013+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64014+ error = -EACCES;
64015+ goto exit_dput;
64016+ }
64017+
64018 audit_inode(name, path->dentry, 0);
64019+ }
64020
64021 /*
64022 * If atomic_open() acquired write access it is dropped now due to
64023@@ -3034,6 +3111,11 @@ finish_lookup:
64024 }
64025 }
64026 BUG_ON(inode != path->dentry->d_inode);
64027+ /* if we're resolving a symlink to another symlink */
64028+ if (link && gr_handle_symlink_owner(link, inode)) {
64029+ error = -EACCES;
64030+ goto out;
64031+ }
64032 return 1;
64033 }
64034
64035@@ -3053,7 +3135,18 @@ finish_open:
64036 path_put(&save_parent);
64037 return error;
64038 }
64039+
64040+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64041+ error = -ENOENT;
64042+ goto out;
64043+ }
64044+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64045+ error = -EACCES;
64046+ goto out;
64047+ }
64048+
64049 audit_inode(name, nd->path.dentry, 0);
64050+
64051 error = -EISDIR;
64052 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64053 goto out;
64054@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64055 if (unlikely(error))
64056 goto out;
64057
64058- error = do_last(nd, &path, file, op, &opened, pathname);
64059+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64060 while (unlikely(error > 0)) { /* trailing symlink */
64061 struct path link = path;
64062 void *cookie;
64063@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64064 error = follow_link(&link, nd, &cookie);
64065 if (unlikely(error))
64066 break;
64067- error = do_last(nd, &path, file, op, &opened, pathname);
64068+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64069 put_link(nd, &link, cookie);
64070 }
64071 out:
64072@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64073 goto unlock;
64074
64075 error = -EEXIST;
64076- if (d_is_positive(dentry))
64077+ if (d_is_positive(dentry)) {
64078+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64079+ error = -ENOENT;
64080 goto fail;
64081-
64082+ }
64083 /*
64084 * Special case - lookup gave negative, but... we had foo/bar/
64085 * From the vfs_mknod() POV we just have a negative dentry -
64086@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64087 }
64088 EXPORT_SYMBOL(user_path_create);
64089
64090+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64091+{
64092+ struct filename *tmp = getname(pathname);
64093+ struct dentry *res;
64094+ if (IS_ERR(tmp))
64095+ return ERR_CAST(tmp);
64096+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64097+ if (IS_ERR(res))
64098+ putname(tmp);
64099+ else
64100+ *to = tmp;
64101+ return res;
64102+}
64103+
64104 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64105 {
64106 int error = may_create(dir, dentry);
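
user_path_create_with_name() above is the patch's own wrapper around kern_path_create(): identical semantics, except the caller also receives the struct filename so the grsecurity hooks in the linkat() hunk below (gr_handle_hardlink(), gr_acl_handle_link()) can evaluate and log the user-supplied pathname. On success, ownership of *to passes to the caller, which must putname() it, as linkat() now does on its exit path. Hypothetical usage (uname stands for a userland pathname pointer):

	struct filename *to = NULL;
	struct path new_path;
	struct dentry *dentry;

	dentry = user_path_create_with_name(AT_FDCWD, uname, &new_path,
					    &to, 0 /* lookup flags */);
	if (!IS_ERR(dentry)) {
		/* ... policy hooks may consult to->name here ... */
		putname(to);
		done_path_create(&new_path, dentry);
	}
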
64107@@ -3446,6 +3555,17 @@ retry:
64108
64109 if (!IS_POSIXACL(path.dentry->d_inode))
64110 mode &= ~current_umask();
64111+
64112+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64113+ error = -EPERM;
64114+ goto out;
64115+ }
64116+
64117+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64118+ error = -EACCES;
64119+ goto out;
64120+ }
64121+
64122 error = security_path_mknod(&path, dentry, mode, dev);
64123 if (error)
64124 goto out;
64125@@ -3461,6 +3581,8 @@ retry:
64126 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64127 break;
64128 }
64129+ if (!error)
64130+ gr_handle_create(dentry, path.mnt);
64131 out:
64132 done_path_create(&path, dentry);
64133 if (retry_estale(error, lookup_flags)) {
64134@@ -3515,9 +3637,16 @@ retry:
64135
64136 if (!IS_POSIXACL(path.dentry->d_inode))
64137 mode &= ~current_umask();
64138+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64139+ error = -EACCES;
64140+ goto out;
64141+ }
64142 error = security_path_mkdir(&path, dentry, mode);
64143 if (!error)
64144 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64145+ if (!error)
64146+ gr_handle_create(dentry, path.mnt);
64147+out:
64148 done_path_create(&path, dentry);
64149 if (retry_estale(error, lookup_flags)) {
64150 lookup_flags |= LOOKUP_REVAL;
64151@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64152 struct filename *name;
64153 struct dentry *dentry;
64154 struct nameidata nd;
64155+ u64 saved_ino = 0;
64156+ dev_t saved_dev = 0;
64157 unsigned int lookup_flags = 0;
64158 retry:
64159 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64160@@ -3633,10 +3764,21 @@ retry:
64161 error = -ENOENT;
64162 goto exit3;
64163 }
64164+
64165+ saved_ino = gr_get_ino_from_dentry(dentry);
64166+ saved_dev = gr_get_dev_from_dentry(dentry);
64167+
64168+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64169+ error = -EACCES;
64170+ goto exit3;
64171+ }
64172+
64173 error = security_path_rmdir(&nd.path, dentry);
64174 if (error)
64175 goto exit3;
64176 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64177+ if (!error && (saved_dev || saved_ino))
64178+ gr_handle_delete(saved_ino, saved_dev);
64179 exit3:
64180 dput(dentry);
64181 exit2:
64182@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64183 struct nameidata nd;
64184 struct inode *inode = NULL;
64185 struct inode *delegated_inode = NULL;
64186+ u64 saved_ino = 0;
64187+ dev_t saved_dev = 0;
64188 unsigned int lookup_flags = 0;
64189 retry:
64190 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64191@@ -3755,10 +3899,22 @@ retry_deleg:
64192 if (d_is_negative(dentry))
64193 goto slashes;
64194 ihold(inode);
64195+
64196+ if (inode->i_nlink <= 1) {
64197+ saved_ino = gr_get_ino_from_dentry(dentry);
64198+ saved_dev = gr_get_dev_from_dentry(dentry);
64199+ }
64200+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64201+ error = -EACCES;
64202+ goto exit2;
64203+ }
64204+
64205 error = security_path_unlink(&nd.path, dentry);
64206 if (error)
64207 goto exit2;
64208 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64209+ if (!error && (saved_ino || saved_dev))
64210+ gr_handle_delete(saved_ino, saved_dev);
64211 exit2:
64212 dput(dentry);
64213 }
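
Both do_rmdir() and do_unlinkat() use the same snapshot-then-log shape: the inode number and device are captured with gr_get_ino_from_dentry()/gr_get_dev_from_dentry() before the vfs_* call (and, for unlink, only when i_nlink <= 1, i.e. when the last link is about to go), because after a successful delete the dentry's inode may no longer be safe to consult. A minimal userspace sketch of the pattern; the names (struct obj, audit_delete, delete_obj) are hypothetical illustrations, not kernel API:

    #include <stdio.h>

    struct obj { unsigned long ino; unsigned long dev; int nlink; };

    static void audit_delete(unsigned long ino, unsigned long dev)
    {
        printf("delete: ino=%lu dev=%lu\n", ino, dev);
    }

    static int delete_obj(struct obj *o, int (*do_delete)(struct obj *))
    {
        unsigned long saved_ino = 0, saved_dev = 0;
        int err;

        if (o->nlink <= 1) {            /* only the last link is a real delete */
            saved_ino = o->ino;         /* snapshot identity while it is valid */
            saved_dev = o->dev;
        }
        err = do_delete(o);             /* o may be gone after this call */
        if (!err && (saved_ino || saved_dev))
            audit_delete(saved_ino, saved_dev);
        return err;
    }

    static int fake_delete(struct obj *o) { (void)o; return 0; }

    int main(void)
    {
        struct obj o = { .ino = 42, .dev = 7, .nlink = 1 };
        return delete_obj(&o, fake_delete);
    }
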
64214@@ -3847,9 +4003,17 @@ retry:
64215 if (IS_ERR(dentry))
64216 goto out_putname;
64217
64218+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64219+ error = -EACCES;
64220+ goto out;
64221+ }
64222+
64223 error = security_path_symlink(&path, dentry, from->name);
64224 if (!error)
64225 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64226+ if (!error)
64227+ gr_handle_create(dentry, path.mnt);
64228+out:
64229 done_path_create(&path, dentry);
64230 if (retry_estale(error, lookup_flags)) {
64231 lookup_flags |= LOOKUP_REVAL;
64232@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64233 struct dentry *new_dentry;
64234 struct path old_path, new_path;
64235 struct inode *delegated_inode = NULL;
64236+ struct filename *to = NULL;
64237 int how = 0;
64238 int error;
64239
64240@@ -3976,7 +4141,7 @@ retry:
64241 if (error)
64242 return error;
64243
64244- new_dentry = user_path_create(newdfd, newname, &new_path,
64245+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64246 (how & LOOKUP_REVAL));
64247 error = PTR_ERR(new_dentry);
64248 if (IS_ERR(new_dentry))
64249@@ -3988,11 +4153,28 @@ retry:
64250 error = may_linkat(&old_path);
64251 if (unlikely(error))
64252 goto out_dput;
64253+
64254+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64255+ old_path.dentry->d_inode,
64256+ old_path.dentry->d_inode->i_mode, to)) {
64257+ error = -EACCES;
64258+ goto out_dput;
64259+ }
64260+
64261+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64262+ old_path.dentry, old_path.mnt, to)) {
64263+ error = -EACCES;
64264+ goto out_dput;
64265+ }
64266+
64267 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64268 if (error)
64269 goto out_dput;
64270 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64271+ if (!error)
64272+ gr_handle_create(new_dentry, new_path.mnt);
64273 out_dput:
64274+ putname(to);
64275 done_path_create(&new_path, new_dentry);
64276 if (delegated_inode) {
64277 error = break_deleg_wait(&delegated_inode);
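
This is where the filename captured by user_path_create_with_name() pays off: gr_handle_hardlink() and gr_acl_handle_link() receive `to` so that denials can be logged with the name the caller actually passed, and putname(to) sits at out_dput so the name is released on every exit through that label.
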
64278@@ -4308,6 +4490,20 @@ retry_deleg:
64279 if (new_dentry == trap)
64280 goto exit5;
64281
64282+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64283+ /* use EXDEV error to cause 'mv' to switch to an alternative
64284+ * method for usability
64285+ */
64286+ error = -EXDEV;
64287+ goto exit5;
64288+ }
64289+
64290+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64291+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64292+ to, flags);
64293+ if (error)
64294+ goto exit5;
64295+
64296 error = security_path_rename(&oldnd.path, old_dentry,
64297 &newnd.path, new_dentry, flags);
64298 if (error)
64299@@ -4315,6 +4511,9 @@ retry_deleg:
64300 error = vfs_rename(old_dir->d_inode, old_dentry,
64301 new_dir->d_inode, new_dentry,
64302 &delegated_inode, flags);
64303+ if (!error)
64304+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64305+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64306 exit5:
64307 dput(new_dentry);
64308 exit4:
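
Returning -EXDEV for a denied cross-chroot rename is the usability trick the comment alludes to: mv(1) and most rename() users already handle EXDEV by falling back to copy-and-unlink, so the denial degrades into a normal cross-device move instead of failing hard. A small, hedged userspace illustration of that fallback (shell quoting and error handling trimmed to the essentials):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        if (argc != 3) {
            fprintf(stderr, "usage: %s OLD NEW\n", argv[0]);
            return 1;
        }
        if (rename(argv[1], argv[2]) == 0)
            return 0;
        if (errno != EXDEV) {
            perror("rename");
            return 1;
        }
        /* the same fallback mv(1) performs: copy, then remove the source */
        char cmd[4096];
        snprintf(cmd, sizeof(cmd), "cp -a -- '%s' '%s' && rm -rf -- '%s'",
                 argv[1], argv[2], argv[1]);
        return system(cmd) ? 1 : 0;
    }
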
64309@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64310
64311 int readlink_copy(char __user *buffer, int buflen, const char *link)
64312 {
64313+ char tmpbuf[64];
64314+ const char *newlink;
64315 int len = PTR_ERR(link);
64316+
64317 if (IS_ERR(link))
64318 goto out;
64319
64320 len = strlen(link);
64321 if (len > (unsigned) buflen)
64322 len = buflen;
64323- if (copy_to_user(buffer, link, len))
64324+
64325+ if (len < sizeof(tmpbuf)) {
64326+ memcpy(tmpbuf, link, len);
64327+ newlink = tmpbuf;
64328+ } else
64329+ newlink = link;
64330+
64331+ if (copy_to_user(buffer, newlink, len))
64332 len = -EFAULT;
64333 out:
64334 return len;
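
The readlink_copy() hunk does not state its intent, so this is a reading rather than a documented rationale: link targets shorter than 64 bytes are snapshotted into a stack buffer before copy_to_user(), plausibly so that the slow, faultable user copy no longer reads from filesystem-owned memory that could be freed or rewritten concurrently; longer targets keep the old in-place copy.
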
64335diff --git a/fs/namespace.c b/fs/namespace.c
64336index cd1e968..e64ff16 100644
64337--- a/fs/namespace.c
64338+++ b/fs/namespace.c
64339@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
64340 if (!(sb->s_flags & MS_RDONLY))
64341 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64342 up_write(&sb->s_umount);
64343+
64344+ gr_log_remount(mnt->mnt_devname, retval);
64345+
64346 return retval;
64347 }
64348
64349@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
64350 }
64351 unlock_mount_hash();
64352 namespace_unlock();
64353+
64354+ gr_log_unmount(mnt->mnt_devname, retval);
64355+
64356 return retval;
64357 }
64358
64359@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
64360 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64361 */
64362
64363-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64364+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64365 {
64366 struct path path;
64367 struct mount *mnt;
64368@@ -1565,7 +1571,7 @@ out:
64369 /*
64370 * The 2.0 compatible umount. No flags.
64371 */
64372-SYSCALL_DEFINE1(oldumount, char __user *, name)
64373+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64374 {
64375 return sys_umount(name, 0);
64376 }
64377@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64378 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64379 MS_STRICTATIME);
64380
64381+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64382+ retval = -EPERM;
64383+ goto dput_out;
64384+ }
64385+
64386+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64387+ retval = -EPERM;
64388+ goto dput_out;
64389+ }
64390+
64391 if (flags & MS_REMOUNT)
64392 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64393 data_page);
64394@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64395 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64396 dev_name, data_page);
64397 dput_out:
64398+ gr_log_mount(dev_name, &path, retval);
64399+
64400 path_put(&path);
64401+
64402 return retval;
64403 }
64404
64405@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64406 * number incrementing at 10Ghz will take 12,427 years to wrap which
64407 * is effectively never, so we can ignore the possibility.
64408 */
64409-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64410+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64411
64412 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64413 {
64414@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64415 return ERR_PTR(ret);
64416 }
64417 new_ns->ns.ops = &mntns_operations;
64418- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64419+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
64420 atomic_set(&new_ns->count, 1);
64421 new_ns->root = NULL;
64422 INIT_LIST_HEAD(&new_ns->list);
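
The atomic64_t to atomic64_unchecked_t switch for mnt_ns_seq is the first of many in this section (nfs_attr_generation_counter, fsnotify_sync_cookie, the ocfs2 alloc_stats fields): under PaX REFCOUNT, ordinary atomic increments trap on overflow to catch reference-count wraps, so counters whose wrap is harmless, such as sequence numbers and statistics, are moved to *_unchecked variants that skip the check. A userspace model of the two behaviors; checked_inc and unchecked_inc are illustrative stand-ins, not the kernel implementation:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static long checked_inc(long *v)        /* models atomic64_add_return(1, ...) */
    {
        if (*v == LONG_MAX)                 /* next ++ would wrap: treat as a bug */
            abort();                        /* PaX would kill the offender here */
        return ++*v;
    }

    static long unchecked_inc(long *v)      /* models atomic64_add_return_unchecked() */
    {
        return ++*v;                        /* wrap is accepted as benign */
    }

    int main(void)
    {
        long seq = LONG_MAX - 1;
        printf("%ld\n", unchecked_inc(&seq));   /* fine: a sequence may wrap */
        printf("%ld\n", checked_inc(&seq));     /* aborts: a refcount wrap is a leak */
        return 0;
    }
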
64423@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64424 return new_ns;
64425 }
64426
64427-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64428+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64429 struct user_namespace *user_ns, struct fs_struct *new_fs)
64430 {
64431 struct mnt_namespace *new_ns;
64432@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64433 }
64434 EXPORT_SYMBOL(mount_subtree);
64435
64436-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64437- char __user *, type, unsigned long, flags, void __user *, data)
64438+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64439+ const char __user *, type, unsigned long, flags, void __user *, data)
64440 {
64441 int ret;
64442 char *kernel_type;
64443@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64444 if (error)
64445 goto out2;
64446
64447+ if (gr_handle_chroot_pivot()) {
64448+ error = -EPERM;
64449+ goto out2;
64450+ }
64451+
64452 get_fs_root(current->fs, &root);
64453 old_mp = lock_mount(&old);
64454 error = PTR_ERR(old_mp);
64455@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
64456 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64457 return -EPERM;
64458
64459- if (fs->users != 1)
64460+ if (atomic_read(&fs->users) != 1)
64461 return -EINVAL;
64462
64463 get_mnt_ns(mnt_ns);
64464diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64465index 02f8d09..a5c25d1 100644
64466--- a/fs/nfs/callback_xdr.c
64467+++ b/fs/nfs/callback_xdr.c
64468@@ -51,7 +51,7 @@ struct callback_op {
64469 callback_decode_arg_t decode_args;
64470 callback_encode_res_t encode_res;
64471 long res_maxsize;
64472-};
64473+} __do_const;
64474
64475 static struct callback_op callback_ops[];
64476
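
__do_const is consumed by the PaX constify gcc plugin that this patch family ships: a tagged struct (callback_op here, nfsd4_operation below) is treated as const so its function-pointer tables land in read-only memory and cannot be retargeted by a kernel write primitive; nfsd4_dec_ops gains a plain const for the same reason. Writing to constified data afterwards requires the pax_open_kernel()/pax_close_kernel() window used in the fs/nls hunks below.
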
64477diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64478index 2211f6b..30d0950 100644
64479--- a/fs/nfs/inode.c
64480+++ b/fs/nfs/inode.c
64481@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64482 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64483 }
64484
64485-static atomic_long_t nfs_attr_generation_counter;
64486+static atomic_long_unchecked_t nfs_attr_generation_counter;
64487
64488 static unsigned long nfs_read_attr_generation_counter(void)
64489 {
64490- return atomic_long_read(&nfs_attr_generation_counter);
64491+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64492 }
64493
64494 unsigned long nfs_inc_attr_generation_counter(void)
64495 {
64496- return atomic_long_inc_return(&nfs_attr_generation_counter);
64497+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64498 }
64499
64500 void nfs_fattr_init(struct nfs_fattr *fattr)
64501diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64502index ac71d13..a2e590a 100644
64503--- a/fs/nfsd/nfs4proc.c
64504+++ b/fs/nfsd/nfs4proc.c
64505@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
64506 nfsd4op_rsize op_rsize_bop;
64507 stateid_getter op_get_currentstateid;
64508 stateid_setter op_set_currentstateid;
64509-};
64510+} __do_const;
64511
64512 static struct nfsd4_operation nfsd4_ops[];
64513
64514diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64515index 15f7b73..00e230b 100644
64516--- a/fs/nfsd/nfs4xdr.c
64517+++ b/fs/nfsd/nfs4xdr.c
64518@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64519
64520 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64521
64522-static nfsd4_dec nfsd4_dec_ops[] = {
64523+static const nfsd4_dec nfsd4_dec_ops[] = {
64524 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64525 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64526 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64527diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64528index 83a9694..6b7f928 100644
64529--- a/fs/nfsd/nfscache.c
64530+++ b/fs/nfsd/nfscache.c
64531@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64532 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64533 u32 hash;
64534 struct nfsd_drc_bucket *b;
64535- int len;
64536+ long len;
64537 size_t bufsize = 0;
64538
64539 if (!rp)
64540@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64541 hash = nfsd_cache_hash(rp->c_xid);
64542 b = &drc_hashtbl[hash];
64543
64544- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64545- len >>= 2;
64546+ if (statp) {
64547+ len = (char*)statp - (char*)resv->iov_base;
64548+ len = resv->iov_len - len;
64549+ len >>= 2;
64550+ }
64551
64552 /* Don't cache excessive amounts of data and XDR failures */
64553- if (!statp || len > (256 >> 2)) {
64554+ if (!statp || len > (256 >> 2) || len < 0) {
64555 nfsd_reply_cache_free(b, rp);
64556 return;
64557 }
64558@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64559 switch (cachetype) {
64560 case RC_REPLSTAT:
64561 if (len != 1)
64562- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64563+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64564 rp->c_replstat = *statp;
64565 break;
64566 case RC_REPLBUFF:
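
The nfscache change closes a signedness hole: len is only computed when statp is non-NULL, it is widened from int to long, and a len < 0 test is added, because if statp points past the cached kvec the subtraction goes negative and the old `len > (256 >> 2)` guard waves it through. A hedged arithmetic sketch (the right shift of a negative value is implementation-defined in C; shown as the kernel code computes it on two's-complement targets):

    #include <stdio.h>

    int main(void)
    {
        char buf[512];
        char *iov_base = buf;
        long  iov_len  = 256;                            /* cached kvec length */
        char *statp    = buf + 400;                      /* reply cursor ran past the kvec */
        long  len      = iov_len - (statp - iov_base);   /* -144 */

        len >>= 2;                                       /* stays negative here */
        printf("old guard (len > 64 only): %s\n",
               len > (256 >> 2) ? "caught" : "missed");  /* missed */
        printf("with added len < 0 check:  %s\n",
               len < 0 ? "caught" : "missed");           /* caught */
        return 0;
    }
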
64567diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64568index 5685c67..73029ef 100644
64569--- a/fs/nfsd/vfs.c
64570+++ b/fs/nfsd/vfs.c
64571@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64572
64573 oldfs = get_fs();
64574 set_fs(KERNEL_DS);
64575- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64576+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64577 set_fs(oldfs);
64578 return nfsd_finish_read(file, count, host_err);
64579 }
64580@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64581
64582 /* Write the data. */
64583 oldfs = get_fs(); set_fs(KERNEL_DS);
64584- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64585+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64586 set_fs(oldfs);
64587 if (host_err < 0)
64588 goto out_nfserr;
64589@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64590 */
64591
64592 oldfs = get_fs(); set_fs(KERNEL_DS);
64593- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64594+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64595 set_fs(oldfs);
64596
64597 if (host_err < 0)
64598diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64599index 52ccd34..7a6b202 100644
64600--- a/fs/nls/nls_base.c
64601+++ b/fs/nls/nls_base.c
64602@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64603
64604 int __register_nls(struct nls_table *nls, struct module *owner)
64605 {
64606- struct nls_table ** tmp = &tables;
64607+ struct nls_table *tmp = tables;
64608
64609 if (nls->next)
64610 return -EBUSY;
64611
64612- nls->owner = owner;
64613+ pax_open_kernel();
64614+ *(void **)&nls->owner = owner;
64615+ pax_close_kernel();
64616 spin_lock(&nls_lock);
64617- while (*tmp) {
64618- if (nls == *tmp) {
64619+ while (tmp) {
64620+ if (nls == tmp) {
64621 spin_unlock(&nls_lock);
64622 return -EBUSY;
64623 }
64624- tmp = &(*tmp)->next;
64625+ tmp = tmp->next;
64626 }
64627- nls->next = tables;
64628+ pax_open_kernel();
64629+ *(struct nls_table **)&nls->next = tables;
64630+ pax_close_kernel();
64631 tables = nls;
64632 spin_unlock(&nls_lock);
64633 return 0;
64634@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64635
64636 int unregister_nls(struct nls_table * nls)
64637 {
64638- struct nls_table ** tmp = &tables;
64639+ struct nls_table * const * tmp = &tables;
64640
64641 spin_lock(&nls_lock);
64642 while (*tmp) {
64643 if (nls == *tmp) {
64644- *tmp = nls->next;
64645+ pax_open_kernel();
64646+ *(struct nls_table **)tmp = nls->next;
64647+ pax_close_kernel();
64648 spin_unlock(&nls_lock);
64649 return 0;
64650 }
64651@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
64652 return -EINVAL;
64653 }
64654
64655-static struct nls_table *find_nls(char *charset)
64656+static struct nls_table *find_nls(const char *charset)
64657 {
64658 struct nls_table *nls;
64659 spin_lock(&nls_lock);
64660@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
64661 return nls;
64662 }
64663
64664-struct nls_table *load_nls(char *charset)
64665+struct nls_table *load_nls(const char *charset)
64666 {
64667 return try_then_request_module(find_nls(charset), "nls_%s", charset);
64668 }
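
The nls rewrite follows from constification: nls_table fields are now written through the pax_open_kernel()/pax_close_kernel() write window, the registration walk uses read-only pointer traversal instead of writable struct nls_table ** links, and find_nls()/load_nls() take const char * since the charset string is never modified. The nls_euc-jp and nls_koi8-ru hunks below repeat the same open/close pattern for the charset2upper/charset2lower fields.
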
64669diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
64670index 162b3f1..6076a7c 100644
64671--- a/fs/nls/nls_euc-jp.c
64672+++ b/fs/nls/nls_euc-jp.c
64673@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
64674 p_nls = load_nls("cp932");
64675
64676 if (p_nls) {
64677- table.charset2upper = p_nls->charset2upper;
64678- table.charset2lower = p_nls->charset2lower;
64679+ pax_open_kernel();
64680+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64681+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64682+ pax_close_kernel();
64683 return register_nls(&table);
64684 }
64685
64686diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
64687index a80a741..7b96e1b 100644
64688--- a/fs/nls/nls_koi8-ru.c
64689+++ b/fs/nls/nls_koi8-ru.c
64690@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
64691 p_nls = load_nls("koi8-u");
64692
64693 if (p_nls) {
64694- table.charset2upper = p_nls->charset2upper;
64695- table.charset2lower = p_nls->charset2lower;
64696+ pax_open_kernel();
64697+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64698+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64699+ pax_close_kernel();
64700 return register_nls(&table);
64701 }
64702
64703diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
64704index bff8567..83281c6 100644
64705--- a/fs/notify/fanotify/fanotify_user.c
64706+++ b/fs/notify/fanotify/fanotify_user.c
64707@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
64708
64709 fd = fanotify_event_metadata.fd;
64710 ret = -EFAULT;
64711- if (copy_to_user(buf, &fanotify_event_metadata,
64712- fanotify_event_metadata.event_len))
64713+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
64714+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
64715 goto out_close_fd;
64716
64717 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
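
The fanotify check bounds a self-described length before copying: if event_len exceeds sizeof(fanotify_event_metadata), copy_to_user() would disclose adjacent kernel stack. A minimal userspace sketch of the bug class and the added bound; struct meta and copy_event are hypothetical, not the fanotify API:

    #include <stdio.h>
    #include <string.h>

    struct meta { unsigned int event_len; char payload[16]; };

    static int copy_event(char *dst, size_t dst_len, const struct meta *m)
    {
        if (m->event_len > sizeof(*m))   /* the added bound: length must fit the struct */
            return -1;
        if (m->event_len > dst_len)      /* and must fit the destination */
            return -1;
        memcpy(dst, m, m->event_len);    /* safe: cannot read past the source object */
        return 0;
    }

    int main(void)
    {
        struct meta m = { .event_len = 4096 };             /* hostile or corrupted length */
        char out[64];
        printf("%d\n", copy_event(out, sizeof(out), &m));  /* -1: leak averted */
        return 0;
    }
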
64718diff --git a/fs/notify/notification.c b/fs/notify/notification.c
64719index a95d8e0..a91a5fd 100644
64720--- a/fs/notify/notification.c
64721+++ b/fs/notify/notification.c
64722@@ -48,7 +48,7 @@
64723 #include <linux/fsnotify_backend.h>
64724 #include "fsnotify.h"
64725
64726-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64727+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64728
64729 /**
64730 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
64731@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64732 */
64733 u32 fsnotify_get_cookie(void)
64734 {
64735- return atomic_inc_return(&fsnotify_sync_cookie);
64736+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
64737 }
64738 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
64739
64740diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
64741index 9e38daf..5727cae 100644
64742--- a/fs/ntfs/dir.c
64743+++ b/fs/ntfs/dir.c
64744@@ -1310,7 +1310,7 @@ find_next_index_buffer:
64745 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
64746 ~(s64)(ndir->itype.index.block_size - 1)));
64747 /* Bounds checks. */
64748- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64749+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64750 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
64751 "inode 0x%lx or driver bug.", vdir->i_ino);
64752 goto err_out;
64753diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
64754index 643faa4..ef9027e 100644
64755--- a/fs/ntfs/file.c
64756+++ b/fs/ntfs/file.c
64757@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
64758 char *addr;
64759 size_t total = 0;
64760 unsigned len;
64761- int left;
64762+ unsigned left;
64763
64764 do {
64765 len = PAGE_CACHE_SIZE - ofs;
64766diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
64767index 9e1e112..241a52a 100644
64768--- a/fs/ntfs/super.c
64769+++ b/fs/ntfs/super.c
64770@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64771 if (!silent)
64772 ntfs_error(sb, "Primary boot sector is invalid.");
64773 } else if (!silent)
64774- ntfs_error(sb, read_err_str, "primary");
64775+ ntfs_error(sb, read_err_str, "%s", "primary");
64776 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
64777 if (bh_primary)
64778 brelse(bh_primary);
64779@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64780 goto hotfix_primary_boot_sector;
64781 brelse(bh_backup);
64782 } else if (!silent)
64783- ntfs_error(sb, read_err_str, "backup");
64784+ ntfs_error(sb, read_err_str, "%s", "backup");
64785 /* Try to read NT3.51- backup boot sector. */
64786 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
64787 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
64788@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64789 "sector.");
64790 brelse(bh_backup);
64791 } else if (!silent)
64792- ntfs_error(sb, read_err_str, "backup");
64793+ ntfs_error(sb, read_err_str, "%s", "backup");
64794 /* We failed. Cleanup and return. */
64795 if (bh_primary)
64796 brelse(bh_primary);
64797diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
64798index 0440134..d52c93a 100644
64799--- a/fs/ocfs2/localalloc.c
64800+++ b/fs/ocfs2/localalloc.c
64801@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
64802 goto bail;
64803 }
64804
64805- atomic_inc(&osb->alloc_stats.moves);
64806+ atomic_inc_unchecked(&osb->alloc_stats.moves);
64807
64808 bail:
64809 if (handle)
64810diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
64811index 7d6b7d0..5fb529a 100644
64812--- a/fs/ocfs2/ocfs2.h
64813+++ b/fs/ocfs2/ocfs2.h
64814@@ -242,11 +242,11 @@ enum ocfs2_vol_state
64815
64816 struct ocfs2_alloc_stats
64817 {
64818- atomic_t moves;
64819- atomic_t local_data;
64820- atomic_t bitmap_data;
64821- atomic_t bg_allocs;
64822- atomic_t bg_extends;
64823+ atomic_unchecked_t moves;
64824+ atomic_unchecked_t local_data;
64825+ atomic_unchecked_t bitmap_data;
64826+ atomic_unchecked_t bg_allocs;
64827+ atomic_unchecked_t bg_extends;
64828 };
64829
64830 enum ocfs2_local_alloc_state
64831diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
64832index 0cb889a..6a26b24 100644
64833--- a/fs/ocfs2/suballoc.c
64834+++ b/fs/ocfs2/suballoc.c
64835@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
64836 mlog_errno(status);
64837 goto bail;
64838 }
64839- atomic_inc(&osb->alloc_stats.bg_extends);
64840+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
64841
64842 /* You should never ask for this much metadata */
64843 BUG_ON(bits_wanted >
64844@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
64845 mlog_errno(status);
64846 goto bail;
64847 }
64848- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64849+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64850
64851 *suballoc_loc = res.sr_bg_blkno;
64852 *suballoc_bit_start = res.sr_bit_offset;
64853@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
64854 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
64855 res->sr_bits);
64856
64857- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64858+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64859
64860 BUG_ON(res->sr_bits != 1);
64861
64862@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
64863 mlog_errno(status);
64864 goto bail;
64865 }
64866- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64867+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64868
64869 BUG_ON(res.sr_bits != 1);
64870
64871@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64872 cluster_start,
64873 num_clusters);
64874 if (!status)
64875- atomic_inc(&osb->alloc_stats.local_data);
64876+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
64877 } else {
64878 if (min_clusters > (osb->bitmap_cpg - 1)) {
64879 /* The only paths asking for contiguousness
64880@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64881 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
64882 res.sr_bg_blkno,
64883 res.sr_bit_offset);
64884- atomic_inc(&osb->alloc_stats.bitmap_data);
64885+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
64886 *num_clusters = res.sr_bits;
64887 }
64888 }
64889diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
64890index 8372317..ec86e79 100644
64891--- a/fs/ocfs2/super.c
64892+++ b/fs/ocfs2/super.c
64893@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
64894 "%10s => GlobalAllocs: %d LocalAllocs: %d "
64895 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
64896 "Stats",
64897- atomic_read(&osb->alloc_stats.bitmap_data),
64898- atomic_read(&osb->alloc_stats.local_data),
64899- atomic_read(&osb->alloc_stats.bg_allocs),
64900- atomic_read(&osb->alloc_stats.moves),
64901- atomic_read(&osb->alloc_stats.bg_extends));
64902+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
64903+ atomic_read_unchecked(&osb->alloc_stats.local_data),
64904+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
64905+ atomic_read_unchecked(&osb->alloc_stats.moves),
64906+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
64907
64908 out += snprintf(buf + out, len - out,
64909 "%10s => State: %u Descriptor: %llu Size: %u bits "
64910@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
64911
64912 mutex_init(&osb->system_file_mutex);
64913
64914- atomic_set(&osb->alloc_stats.moves, 0);
64915- atomic_set(&osb->alloc_stats.local_data, 0);
64916- atomic_set(&osb->alloc_stats.bitmap_data, 0);
64917- atomic_set(&osb->alloc_stats.bg_allocs, 0);
64918- atomic_set(&osb->alloc_stats.bg_extends, 0);
64919+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
64920+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
64921+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
64922+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
64923+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
64924
64925 /* Copy the blockcheck stats from the superblock probe */
64926 osb->osb_ecc_stats = *stats;
64927diff --git a/fs/open.c b/fs/open.c
64928index 813be03..781941d 100644
64929--- a/fs/open.c
64930+++ b/fs/open.c
64931@@ -32,6 +32,8 @@
64932 #include <linux/dnotify.h>
64933 #include <linux/compat.h>
64934
64935+#define CREATE_TRACE_POINTS
64936+#include <trace/events/fs.h>
64937 #include "internal.h"
64938
64939 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
64940@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
64941 error = locks_verify_truncate(inode, NULL, length);
64942 if (!error)
64943 error = security_path_truncate(path);
64944+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
64945+ error = -EACCES;
64946 if (!error)
64947 error = do_truncate(path->dentry, length, 0, NULL);
64948
64949@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
64950 error = locks_verify_truncate(inode, f.file, length);
64951 if (!error)
64952 error = security_path_truncate(&f.file->f_path);
64953+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
64954+ error = -EACCES;
64955 if (!error)
64956 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
64957 sb_end_write(inode->i_sb);
64958@@ -392,6 +398,9 @@ retry:
64959 if (__mnt_is_readonly(path.mnt))
64960 res = -EROFS;
64961
64962+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
64963+ res = -EACCES;
64964+
64965 out_path_release:
64966 path_put(&path);
64967 if (retry_estale(res, lookup_flags)) {
64968@@ -423,6 +432,8 @@ retry:
64969 if (error)
64970 goto dput_and_out;
64971
64972+ gr_log_chdir(path.dentry, path.mnt);
64973+
64974 set_fs_pwd(current->fs, &path);
64975
64976 dput_and_out:
64977@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
64978 goto out_putf;
64979
64980 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
64981+
64982+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
64983+ error = -EPERM;
64984+
64985+ if (!error)
64986+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
64987+
64988 if (!error)
64989 set_fs_pwd(current->fs, &f.file->f_path);
64990 out_putf:
64991@@ -481,7 +499,13 @@ retry:
64992 if (error)
64993 goto dput_and_out;
64994
64995+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
64996+ goto dput_and_out;
64997+
64998 set_fs_root(current->fs, &path);
64999+
65000+ gr_handle_chroot_chdir(&path);
65001+
65002 error = 0;
65003 dput_and_out:
65004 path_put(&path);
65005@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65006 return error;
65007 retry_deleg:
65008 mutex_lock(&inode->i_mutex);
65009+
65010+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65011+ error = -EACCES;
65012+ goto out_unlock;
65013+ }
65014+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65015+ error = -EACCES;
65016+ goto out_unlock;
65017+ }
65018+
65019 error = security_path_chmod(path, mode);
65020 if (error)
65021 goto out_unlock;
65022@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65023 uid = make_kuid(current_user_ns(), user);
65024 gid = make_kgid(current_user_ns(), group);
65025
65026+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65027+ return -EACCES;
65028+
65029 newattrs.ia_valid = ATTR_CTIME;
65030 if (user != (uid_t) -1) {
65031 if (!uid_valid(uid))
65032@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65033 } else {
65034 fsnotify_open(f);
65035 fd_install(fd, f);
65036+ trace_do_sys_open(tmp->name, flags, mode);
65037 }
65038 }
65039 putname(tmp);
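
fs/open.c picks up a do_sys_open tracepoint here (CREATE_TRACE_POINTS plus trace/events/fs.h, a header that does not appear to be part of the vanilla tree and is presumably added elsewhere in this patch) alongside gr hooks on truncate, access, chdir/fchdir, chroot, chmod and chown; note the ordering is interleaved rather than uniform, with the gr truncate check running after security_path_truncate() but the gr chmod checks running before security_path_chmod().
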
65040diff --git a/fs/pipe.c b/fs/pipe.c
65041index 21981e5..3d5f55c 100644
65042--- a/fs/pipe.c
65043+++ b/fs/pipe.c
65044@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65045
65046 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65047 {
65048- if (pipe->files)
65049+ if (atomic_read(&pipe->files))
65050 mutex_lock_nested(&pipe->mutex, subclass);
65051 }
65052
65053@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65054
65055 void pipe_unlock(struct pipe_inode_info *pipe)
65056 {
65057- if (pipe->files)
65058+ if (atomic_read(&pipe->files))
65059 mutex_unlock(&pipe->mutex);
65060 }
65061 EXPORT_SYMBOL(pipe_unlock);
65062@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65063 }
65064 if (bufs) /* More to do? */
65065 continue;
65066- if (!pipe->writers)
65067+ if (!atomic_read(&pipe->writers))
65068 break;
65069- if (!pipe->waiting_writers) {
65070+ if (!atomic_read(&pipe->waiting_writers)) {
65071 /* syscall merging: Usually we must not sleep
65072 * if O_NONBLOCK is set, or if we got some data.
65073 * But if a writer sleeps in kernel space, then
65074@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65075
65076 __pipe_lock(pipe);
65077
65078- if (!pipe->readers) {
65079+ if (!atomic_read(&pipe->readers)) {
65080 send_sig(SIGPIPE, current, 0);
65081 ret = -EPIPE;
65082 goto out;
65083@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65084 for (;;) {
65085 int bufs;
65086
65087- if (!pipe->readers) {
65088+ if (!atomic_read(&pipe->readers)) {
65089 send_sig(SIGPIPE, current, 0);
65090 if (!ret)
65091 ret = -EPIPE;
65092@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65093 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65094 do_wakeup = 0;
65095 }
65096- pipe->waiting_writers++;
65097+ atomic_inc(&pipe->waiting_writers);
65098 pipe_wait(pipe);
65099- pipe->waiting_writers--;
65100+ atomic_dec(&pipe->waiting_writers);
65101 }
65102 out:
65103 __pipe_unlock(pipe);
65104@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65105 mask = 0;
65106 if (filp->f_mode & FMODE_READ) {
65107 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65108- if (!pipe->writers && filp->f_version != pipe->w_counter)
65109+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65110 mask |= POLLHUP;
65111 }
65112
65113@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65114 * Most Unices do not set POLLERR for FIFOs but on Linux they
65115 * behave exactly like pipes for poll().
65116 */
65117- if (!pipe->readers)
65118+ if (!atomic_read(&pipe->readers))
65119 mask |= POLLERR;
65120 }
65121
65122@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65123 int kill = 0;
65124
65125 spin_lock(&inode->i_lock);
65126- if (!--pipe->files) {
65127+ if (atomic_dec_and_test(&pipe->files)) {
65128 inode->i_pipe = NULL;
65129 kill = 1;
65130 }
65131@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65132
65133 __pipe_lock(pipe);
65134 if (file->f_mode & FMODE_READ)
65135- pipe->readers--;
65136+ atomic_dec(&pipe->readers);
65137 if (file->f_mode & FMODE_WRITE)
65138- pipe->writers--;
65139+ atomic_dec(&pipe->writers);
65140
65141- if (pipe->readers || pipe->writers) {
65142+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65143 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65144 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65145 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65146@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65147 kfree(pipe);
65148 }
65149
65150-static struct vfsmount *pipe_mnt __read_mostly;
65151+struct vfsmount *pipe_mnt __read_mostly;
65152
65153 /*
65154 * pipefs_dname() is called from d_path().
65155@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65156 goto fail_iput;
65157
65158 inode->i_pipe = pipe;
65159- pipe->files = 2;
65160- pipe->readers = pipe->writers = 1;
65161+ atomic_set(&pipe->files, 2);
65162+ atomic_set(&pipe->readers, 1);
65163+ atomic_set(&pipe->writers, 1);
65164 inode->i_fop = &pipefifo_fops;
65165
65166 /*
65167@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65168 spin_lock(&inode->i_lock);
65169 if (inode->i_pipe) {
65170 pipe = inode->i_pipe;
65171- pipe->files++;
65172+ atomic_inc(&pipe->files);
65173 spin_unlock(&inode->i_lock);
65174 } else {
65175 spin_unlock(&inode->i_lock);
65176 pipe = alloc_pipe_info();
65177 if (!pipe)
65178 return -ENOMEM;
65179- pipe->files = 1;
65180+ atomic_set(&pipe->files, 1);
65181 spin_lock(&inode->i_lock);
65182 if (unlikely(inode->i_pipe)) {
65183- inode->i_pipe->files++;
65184+ atomic_inc(&inode->i_pipe->files);
65185 spin_unlock(&inode->i_lock);
65186 free_pipe_info(pipe);
65187 pipe = inode->i_pipe;
65188@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65189 * opened, even when there is no process writing the FIFO.
65190 */
65191 pipe->r_counter++;
65192- if (pipe->readers++ == 0)
65193+ if (atomic_inc_return(&pipe->readers) == 1)
65194 wake_up_partner(pipe);
65195
65196- if (!is_pipe && !pipe->writers) {
65197+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65198 if ((filp->f_flags & O_NONBLOCK)) {
65199 /* suppress POLLHUP until we have
65200 * seen a writer */
65201@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65202 * errno=ENXIO when there is no process reading the FIFO.
65203 */
65204 ret = -ENXIO;
65205- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65206+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65207 goto err;
65208
65209 pipe->w_counter++;
65210- if (!pipe->writers++)
65211+ if (atomic_inc_return(&pipe->writers) == 1)
65212 wake_up_partner(pipe);
65213
65214- if (!is_pipe && !pipe->readers) {
65215+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65216 if (wait_for_partner(pipe, &pipe->r_counter))
65217 goto err_wr;
65218 }
65219@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65220 * the process can at least talk to itself.
65221 */
65222
65223- pipe->readers++;
65224- pipe->writers++;
65225+ atomic_inc(&pipe->readers);
65226+ atomic_inc(&pipe->writers);
65227 pipe->r_counter++;
65228 pipe->w_counter++;
65229- if (pipe->readers == 1 || pipe->writers == 1)
65230+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65231 wake_up_partner(pipe);
65232 break;
65233
65234@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65235 return 0;
65236
65237 err_rd:
65238- if (!--pipe->readers)
65239+ if (atomic_dec_and_test(&pipe->readers))
65240 wake_up_interruptible(&pipe->wait);
65241 ret = -ERESTARTSYS;
65242 goto err;
65243
65244 err_wr:
65245- if (!--pipe->writers)
65246+ if (atomic_dec_and_test(&pipe->writers))
65247 wake_up_interruptible(&pipe->wait);
65248 ret = -ERESTARTSYS;
65249 goto err;
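
The pipe conversion turns readers/writers/files/waiting_writers into atomic_t so they can be tested without the pipe mutex, and the idioms translate mechanically: `pipe->readers++ == 0` (test the old value) becomes `atomic_inc_return(...) == 1` (test the new value), and `!--pipe->writers` becomes atomic_dec_and_test(). A tiny userspace check that the two forms agree:

    #include <assert.h>

    int main(void)
    {
        int plain = 0, model = 0;

        /* pipe->readers++ == 0  vs  atomic_inc_return(&pipe->readers) == 1 */
        int was_zero = (plain++ == 0);
        int is_first = (++model == 1);
        assert(was_zero == is_first && plain == model);

        /* !--pipe->writers  vs  atomic_dec_and_test(&pipe->writers) */
        int none_left = !--plain;
        int dec_test  = (--model == 0);
        assert(none_left == dec_test);
        return 0;
    }
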
65250diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65251index 0855f77..6787d50 100644
65252--- a/fs/posix_acl.c
65253+++ b/fs/posix_acl.c
65254@@ -20,6 +20,7 @@
65255 #include <linux/xattr.h>
65256 #include <linux/export.h>
65257 #include <linux/user_namespace.h>
65258+#include <linux/grsecurity.h>
65259
65260 struct posix_acl **acl_by_type(struct inode *inode, int type)
65261 {
65262@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65263 }
65264 }
65265 if (mode_p)
65266- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65267+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65268 return not_equiv;
65269 }
65270 EXPORT_SYMBOL(posix_acl_equiv_mode);
65271@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65272 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65273 }
65274
65275- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65276+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65277 return not_equiv;
65278 }
65279
65280@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65281 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65282 int err = -ENOMEM;
65283 if (clone) {
65284+ *mode_p &= ~gr_acl_umask();
65285+
65286 err = posix_acl_create_masq(clone, mode_p);
65287 if (err < 0) {
65288 posix_acl_release(clone);
65289@@ -659,11 +662,12 @@ struct posix_acl *
65290 posix_acl_from_xattr(struct user_namespace *user_ns,
65291 const void *value, size_t size)
65292 {
65293- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65294- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65295+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65296+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65297 int count;
65298 struct posix_acl *acl;
65299 struct posix_acl_entry *acl_e;
65300+ umode_t umask = gr_acl_umask();
65301
65302 if (!value)
65303 return NULL;
65304@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65305
65306 switch(acl_e->e_tag) {
65307 case ACL_USER_OBJ:
65308+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65309+ break;
65310 case ACL_GROUP_OBJ:
65311 case ACL_MASK:
65312+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65313+ break;
65314 case ACL_OTHER:
65315+ acl_e->e_perm &= ~(umask & S_IRWXO);
65316 break;
65317
65318 case ACL_USER:
65319+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65320 acl_e->e_uid =
65321 make_kuid(user_ns,
65322 le32_to_cpu(entry->e_id));
65323@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65324 goto fail;
65325 break;
65326 case ACL_GROUP:
65327+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65328 acl_e->e_gid =
65329 make_kgid(user_ns,
65330 le32_to_cpu(entry->e_id));
65331diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65332index 2183fcf..3c32a98 100644
65333--- a/fs/proc/Kconfig
65334+++ b/fs/proc/Kconfig
65335@@ -30,7 +30,7 @@ config PROC_FS
65336
65337 config PROC_KCORE
65338 bool "/proc/kcore support" if !ARM
65339- depends on PROC_FS && MMU
65340+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65341 help
65342 Provides a virtual ELF core file of the live kernel. This can
65343 be read with gdb and other ELF tools. No modifications can be
65344@@ -38,8 +38,8 @@ config PROC_KCORE
65345
65346 config PROC_VMCORE
65347 bool "/proc/vmcore support"
65348- depends on PROC_FS && CRASH_DUMP
65349- default y
65350+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65351+ default n
65352 help
65353 Exports the dump image of crashed kernel in ELF format.
65354
65355@@ -63,8 +63,8 @@ config PROC_SYSCTL
65356 limited in memory.
65357
65358 config PROC_PAGE_MONITOR
65359- default y
65360- depends on PROC_FS && MMU
65361+ default n
65362+ depends on PROC_FS && MMU && !GRKERNSEC
65363 bool "Enable /proc page monitoring" if EXPERT
65364 help
65365 Various /proc files exist to monitor process memory utilization:
65366diff --git a/fs/proc/array.c b/fs/proc/array.c
65367index bd117d0..e6872d7 100644
65368--- a/fs/proc/array.c
65369+++ b/fs/proc/array.c
65370@@ -60,6 +60,7 @@
65371 #include <linux/tty.h>
65372 #include <linux/string.h>
65373 #include <linux/mman.h>
65374+#include <linux/grsecurity.h>
65375 #include <linux/proc_fs.h>
65376 #include <linux/ioport.h>
65377 #include <linux/uaccess.h>
65378@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65379 seq_putc(m, '\n');
65380 }
65381
65382+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65383+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65384+{
65385+ if (p->mm)
65386+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65387+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65388+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65389+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65390+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65391+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65392+ else
65393+ seq_printf(m, "PaX:\t-----\n");
65394+}
65395+#endif
65396+
65397 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65398 struct pid *pid, struct task_struct *task)
65399 {
65400@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65401 task_cpus_allowed(m, task);
65402 cpuset_task_status_allowed(m, task);
65403 task_context_switch_counts(m, task);
65404+
65405+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65406+ task_pax(m, task);
65407+#endif
65408+
65409+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65410+ task_grsec_rbac(m, task);
65411+#endif
65412+
65413 return 0;
65414 }
65415
65416+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65417+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65418+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65419+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65420+#endif
65421+
65422 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65423 struct pid *pid, struct task_struct *task, int whole)
65424 {
65425@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65426 char tcomm[sizeof(task->comm)];
65427 unsigned long flags;
65428
65429+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65430+ if (current->exec_id != m->exec_id) {
65431+ gr_log_badprocpid("stat");
65432+ return 0;
65433+ }
65434+#endif
65435+
65436 state = *get_task_state(task);
65437 vsize = eip = esp = 0;
65438 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65439@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65440 gtime = task_gtime(task);
65441 }
65442
65443+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65444+ if (PAX_RAND_FLAGS(mm)) {
65445+ eip = 0;
65446+ esp = 0;
65447+ wchan = 0;
65448+ }
65449+#endif
65450+#ifdef CONFIG_GRKERNSEC_HIDESYM
65451+ wchan = 0;
64452+ eip = 0;
64453+ esp = 0;
65454+#endif
65455+
65456 /* scale priority and nice values from timeslices to -20..20 */
65457 /* to make it look like a "normal" Unix priority/nice value */
65458 priority = task_prio(task);
65459@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65460 seq_put_decimal_ull(m, ' ', vsize);
65461 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65462 seq_put_decimal_ull(m, ' ', rsslim);
65463+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65464+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65465+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65466+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65467+#else
65468 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65469 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65470 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65471+#endif
65472 seq_put_decimal_ull(m, ' ', esp);
65473 seq_put_decimal_ull(m, ' ', eip);
65474 /* The signal information here is obsolete.
65475@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65476 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65477 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65478
65479- if (mm && permitted) {
65480+ if (mm && permitted
65481+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65482+ && !PAX_RAND_FLAGS(mm)
65483+#endif
65484+ ) {
65485 seq_put_decimal_ull(m, ' ', mm->start_data);
65486 seq_put_decimal_ull(m, ' ', mm->end_data);
65487 seq_put_decimal_ull(m, ' ', mm->start_brk);
65488@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65489 struct pid *pid, struct task_struct *task)
65490 {
65491 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65492- struct mm_struct *mm = get_task_mm(task);
65493+ struct mm_struct *mm;
65494
65495+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65496+ if (current->exec_id != m->exec_id) {
65497+ gr_log_badprocpid("statm");
65498+ return 0;
65499+ }
65500+#endif
65501+ mm = get_task_mm(task);
65502 if (mm) {
65503 size = task_statm(mm, &shared, &text, &data, &resident);
65504 mmput(mm);
65505@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65506 return 0;
65507 }
65508
65509+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65510+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65511+{
65512+ unsigned long flags;
65513+ u32 curr_ip = 0;
65514+
65515+ if (lock_task_sighand(task, &flags)) {
65516+ curr_ip = task->signal->curr_ip;
65517+ unlock_task_sighand(task, &flags);
65518+ }
65519+ return seq_printf(m, "%pI4\n", &curr_ip);
65520+}
65521+#endif
65522+
65523 #ifdef CONFIG_CHECKPOINT_RESTORE
65524 static struct pid *
65525 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
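
The /proc hardening in array.c follows two threads. PAX_RAND_FLAGS(mm) is true when inspecting another process whose layout is randomized (RANDMMAP or SEGMEXEC), and do_task_stat() then zeroes esp/eip/wchan and substitutes the constant 1/0 placeholders for the code and stack fields, so ASLR offsets cannot be read back through /proc/<pid>/stat. Separately, the current->exec_id != m->exec_id guard rejects seq_file handles opened before an execve of a different image, closing the stale-descriptor disclosure route; both guards recur in fs/proc/base.c below for mem, environ and related files.
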
65526diff --git a/fs/proc/base.c b/fs/proc/base.c
65527index 3f3d7ae..68de109 100644
65528--- a/fs/proc/base.c
65529+++ b/fs/proc/base.c
65530@@ -113,6 +113,14 @@ struct pid_entry {
65531 union proc_op op;
65532 };
65533
65534+struct getdents_callback {
65535+ struct linux_dirent __user * current_dir;
65536+ struct linux_dirent __user * previous;
65537+ struct file * file;
65538+ int count;
65539+ int error;
65540+};
65541+
65542 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65543 .name = (NAME), \
65544 .len = sizeof(NAME) - 1, \
65545@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65546 return 0;
65547 }
65548
65549+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65550+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65551+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65552+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65553+#endif
65554+
65555 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65556 struct pid *pid, struct task_struct *task)
65557 {
65558 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65559 if (mm && !IS_ERR(mm)) {
65560 unsigned int nwords = 0;
65561+
65562+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65563+ /* allow if we're currently ptracing this task */
65564+ if (PAX_RAND_FLAGS(mm) &&
65565+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65566+ mmput(mm);
65567+ return 0;
65568+ }
65569+#endif
65570+
65571 do {
65572 nwords += 2;
65573 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65574@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65575 }
65576
65577
65578-#ifdef CONFIG_KALLSYMS
65579+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65580 /*
65581 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65582 * Returns the resolved symbol. If that fails, simply return the address.
65583@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65584 mutex_unlock(&task->signal->cred_guard_mutex);
65585 }
65586
65587-#ifdef CONFIG_STACKTRACE
65588+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65589
65590 #define MAX_STACK_TRACE_DEPTH 64
65591
65592@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65593 return 0;
65594 }
65595
65596-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65597+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65598 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65599 struct pid *pid, struct task_struct *task)
65600 {
65601@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65602 /************************************************************************/
65603
65604 /* permission checks */
65605-static int proc_fd_access_allowed(struct inode *inode)
65606+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65607 {
65608 struct task_struct *task;
65609 int allowed = 0;
65610@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65611 */
65612 task = get_proc_task(inode);
65613 if (task) {
65614- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65615+ if (log)
65616+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65617+ else
65618+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65619 put_task_struct(task);
65620 }
65621 return allowed;
65622@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
65623 struct task_struct *task,
65624 int hide_pid_min)
65625 {
65626+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65627+ return false;
65628+
65629+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65630+ rcu_read_lock();
65631+ {
65632+ const struct cred *tmpcred = current_cred();
65633+ const struct cred *cred = __task_cred(task);
65634+
65635+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
65636+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65637+ || in_group_p(grsec_proc_gid)
65638+#endif
65639+ ) {
65640+ rcu_read_unlock();
65641+ return true;
65642+ }
65643+ }
65644+ rcu_read_unlock();
65645+
65646+ if (!pid->hide_pid)
65647+ return false;
65648+#endif
65649+
65650 if (pid->hide_pid < hide_pid_min)
65651 return true;
65652 if (in_group_p(pid->pid_gid))
65653 return true;
65654+
65655 return ptrace_may_access(task, PTRACE_MODE_READ);
65656 }
65657
65658@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
65659 put_task_struct(task);
65660
65661 if (!has_perms) {
65662+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65663+ {
65664+#else
65665 if (pid->hide_pid == 2) {
65666+#endif
65667 /*
65668 * Let's make getdents(), stat(), and open()
65669 * consistent with each other. If a process
65670@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
65671
65672 if (task) {
65673 mm = mm_access(task, mode);
65674+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
65675+ mmput(mm);
65676+ mm = ERR_PTR(-EPERM);
65677+ }
65678 put_task_struct(task);
65679
65680 if (!IS_ERR_OR_NULL(mm)) {
65681@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
65682 return PTR_ERR(mm);
65683
65684 file->private_data = mm;
65685+
65686+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65687+ file->f_version = current->exec_id;
65688+#endif
65689+
65690 return 0;
65691 }
65692
65693@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65694 ssize_t copied;
65695 char *page;
65696
65697+#ifdef CONFIG_GRKERNSEC
65698+ if (write)
65699+ return -EPERM;
65700+#endif
65701+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65702+ if (file->f_version != current->exec_id) {
65703+ gr_log_badprocpid("mem");
65704+ return 0;
65705+ }
65706+#endif
65707+
65708 if (!mm)
65709 return 0;
65710
65711@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65712 goto free;
65713
65714 while (count > 0) {
65715- int this_len = min_t(int, count, PAGE_SIZE);
65716+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
65717
65718 if (write && copy_from_user(page, buf, this_len)) {
65719 copied = -EFAULT;
65720@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65721 if (!mm)
65722 return 0;
65723
65724+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65725+ if (file->f_version != current->exec_id) {
65726+ gr_log_badprocpid("environ");
65727+ return 0;
65728+ }
65729+#endif
65730+
65731 page = (char *)__get_free_page(GFP_TEMPORARY);
65732 if (!page)
65733 return -ENOMEM;
65734@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65735 goto free;
65736 while (count > 0) {
65737 size_t this_len, max_len;
65738- int retval;
65739+ ssize_t retval;
65740
65741 if (src >= (mm->env_end - mm->env_start))
65742 break;
65743@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
65744 int error = -EACCES;
65745
65746 /* Are we allowed to snoop on the tasks file descriptors? */
65747- if (!proc_fd_access_allowed(inode))
65748+ if (!proc_fd_access_allowed(inode, 0))
65749 goto out;
65750
65751 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65752@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
65753 struct path path;
65754
65755 /* Are we allowed to snoop on the tasks file descriptors? */
65756- if (!proc_fd_access_allowed(inode))
65757- goto out;
64758+ /* logging this is needed for grsec's learning mode to work properly
64759+ on chromium, but we don't want to flood the logs from 'ps', which does
64760+ a readlink on /proc/fd/2 of every task in the listing, nor do we want
64761+ 'ps' to be taught CAP_SYS_PTRACE, as it's not necessary for its basic
64762+ functionality */
65763+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
65764+ if (!proc_fd_access_allowed(inode,0))
65765+ goto out;
65766+ } else {
65767+ if (!proc_fd_access_allowed(inode,1))
65768+ goto out;
65769+ }
65770
65771 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65772 if (error)
65773@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
65774 rcu_read_lock();
65775 cred = __task_cred(task);
65776 inode->i_uid = cred->euid;
65777+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65778+ inode->i_gid = grsec_proc_gid;
65779+#else
65780 inode->i_gid = cred->egid;
65781+#endif
65782 rcu_read_unlock();
65783 }
65784 security_task_to_inode(task, inode);
65785@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
65786 return -ENOENT;
65787 }
65788 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65789+#ifdef CONFIG_GRKERNSEC_PROC_USER
65790+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65791+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65792+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65793+#endif
65794 task_dumpable(task)) {
65795 cred = __task_cred(task);
65796 stat->uid = cred->euid;
65797+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65798+ stat->gid = grsec_proc_gid;
65799+#else
65800 stat->gid = cred->egid;
65801+#endif
65802 }
65803 }
65804 rcu_read_unlock();
65805@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
65806
65807 if (task) {
65808 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65809+#ifdef CONFIG_GRKERNSEC_PROC_USER
65810+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65811+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65812+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65813+#endif
65814 task_dumpable(task)) {
65815 rcu_read_lock();
65816 cred = __task_cred(task);
65817 inode->i_uid = cred->euid;
65818+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65819+ inode->i_gid = grsec_proc_gid;
65820+#else
65821 inode->i_gid = cred->egid;
65822+#endif
65823 rcu_read_unlock();
65824 } else {
65825 inode->i_uid = GLOBAL_ROOT_UID;
65826@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
65827 if (!task)
65828 goto out_no_task;
65829
65830+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65831+ goto out;
65832+
65833 /*
65834 * Yes, it does not scale. And it should not. Don't add
65835 * new entries into /proc/<tgid>/ without very good reasons.
65836@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
65837 if (!task)
65838 return -ENOENT;
65839
65840+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65841+ goto out;
65842+
65843 if (!dir_emit_dots(file, ctx))
65844 goto out;
65845
65846@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
65847 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
65848 #endif
65849 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65850-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65851+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65852 ONE("syscall", S_IRUSR, proc_pid_syscall),
65853 #endif
65854 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65855@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
65856 #ifdef CONFIG_SECURITY
65857 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65858 #endif
65859-#ifdef CONFIG_KALLSYMS
65860+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65861 ONE("wchan", S_IRUGO, proc_pid_wchan),
65862 #endif
65863-#ifdef CONFIG_STACKTRACE
65864+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65865 ONE("stack", S_IRUSR, proc_pid_stack),
65866 #endif
65867 #ifdef CONFIG_SCHEDSTATS
65868@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
65869 #ifdef CONFIG_HARDWALL
65870 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
65871 #endif
65872+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65873+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
65874+#endif
65875 #ifdef CONFIG_USER_NS
65876 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
65877 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
65878@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
65879 if (!inode)
65880 goto out;
65881
65882+#ifdef CONFIG_GRKERNSEC_PROC_USER
65883+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
65884+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65885+ inode->i_gid = grsec_proc_gid;
65886+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
65887+#else
65888 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
65889+#endif
65890 inode->i_op = &proc_tgid_base_inode_operations;
65891 inode->i_fop = &proc_tgid_base_operations;
65892 inode->i_flags|=S_IMMUTABLE;
65893@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
65894 if (!task)
65895 goto out;
65896
65897+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65898+ goto out_put_task;
65899+
65900 result = proc_pid_instantiate(dir, dentry, task, NULL);
65901+out_put_task:
65902 put_task_struct(task);
65903 out:
65904 return ERR_PTR(result);
65905@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
65906 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
65907 #endif
65908 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65909-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65910+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65911 ONE("syscall", S_IRUSR, proc_pid_syscall),
65912 #endif
65913 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65914@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
65915 #ifdef CONFIG_SECURITY
65916 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65917 #endif
65918-#ifdef CONFIG_KALLSYMS
65919+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65920 ONE("wchan", S_IRUGO, proc_pid_wchan),
65921 #endif
65922-#ifdef CONFIG_STACKTRACE
65923+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65924 ONE("stack", S_IRUSR, proc_pid_stack),
65925 #endif
65926 #ifdef CONFIG_SCHEDSTATS
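
The fs/proc/base.c hunks above all enforce one ownership policy on per-task /proc inodes: under GRKERNSEC_PROC_USER a task's /proc/<pid> directory is 0500 and root-only, under GRKERNSEC_PROC_USERGROUP it is 0550 with the group forced to grsec_proc_gid, and with neither option the stock 0555/egid behaviour is kept; the gr_pid_is_chrooted()/gr_check_hidden_task() checks additionally make foreign tasks unresolvable. A user-space probe to confirm which mode a running kernel is in (illustrative only, not part of the patch):

/*
 * Quick check of which /proc/<pid> policy is active. A minimal sketch;
 * grsec_proc_gid is whatever group CONFIG_GRKERNSEC_PROC_GID was set to,
 * which this program cannot resolve by name.
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/proc/1", &st) != 0) {
		perror("stat /proc/1");
		return 1;
	}
	/* 0500              -> GRKERNSEC_PROC_USER
	 * 0550, gid = grsec_proc_gid -> GRKERNSEC_PROC_USERGROUP
	 * 0555              -> neither option enabled */
	printf("mode=%04o uid=%u gid=%u\n",
	       st.st_mode & 07777, (unsigned)st.st_uid, (unsigned)st.st_gid);
	return 0;
}
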
65927diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
65928index cbd82df..c0407d2 100644
65929--- a/fs/proc/cmdline.c
65930+++ b/fs/proc/cmdline.c
65931@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
65932
65933 static int __init proc_cmdline_init(void)
65934 {
65935+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65936+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
65937+#else
65938 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
65939+#endif
65940 return 0;
65941 }
65942 fs_initcall(proc_cmdline_init);
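
proc_create_grsec() is supplied by the grsecurity core elsewhere in this patch; judging from how it replaces proc_create() under GRKERNSEC_PROC_ADD here and in devices.c and interrupts.c below, it plausibly creates the entry with the same restricted mode/group policy applied to per-task directories above. A hypothetical sketch under that assumption, not the patch's actual implementation:

/* Hypothetical sketch of a proc_create_grsec()-style wrapper: same
 * signature as proc_create(), but the resulting entry is readable only
 * by root (and, under GRKERNSEC_PROC_USERGROUP, by grsec_proc_gid).
 * The real helper lives in the grsecurity core, not in this hunk. */
static struct proc_dir_entry *proc_create_grsec_sketch(const char *name,
	umode_t mode, struct proc_dir_entry *parent,
	const struct file_operations *fops)
{
	struct proc_dir_entry *pde;

#if defined(CONFIG_GRKERNSEC_PROC_USER)
	mode = S_IRUSR;
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	mode = S_IRUSR | S_IRGRP;
#endif
	pde = proc_create(name, mode, parent, fops);
#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
	if (pde)
		proc_set_user(pde, GLOBAL_ROOT_UID, grsec_proc_gid);
#endif
	return pde;
}
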
65943diff --git a/fs/proc/devices.c b/fs/proc/devices.c
65944index 50493ed..248166b 100644
65945--- a/fs/proc/devices.c
65946+++ b/fs/proc/devices.c
65947@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
65948
65949 static int __init proc_devices_init(void)
65950 {
65951+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65952+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
65953+#else
65954 proc_create("devices", 0, NULL, &proc_devinfo_operations);
65955+#endif
65956 return 0;
65957 }
65958 fs_initcall(proc_devices_init);
65959diff --git a/fs/proc/fd.c b/fs/proc/fd.c
65960index 8e5ad83..1f07a8c 100644
65961--- a/fs/proc/fd.c
65962+++ b/fs/proc/fd.c
65963@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
65964 if (!task)
65965 return -ENOENT;
65966
65967- files = get_files_struct(task);
65968+ if (!gr_acl_handle_procpidmem(task))
65969+ files = get_files_struct(task);
65970 put_task_struct(task);
65971
65972 if (files) {
65973@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
65974 */
65975 int proc_fd_permission(struct inode *inode, int mask)
65976 {
65977+ struct task_struct *task;
65978 int rv = generic_permission(inode, mask);
65979- if (rv == 0)
65980- return 0;
65981+
65982 if (task_tgid(current) == proc_pid(inode))
65983 rv = 0;
65984+
65985+ task = get_proc_task(inode);
65986+ if (task == NULL)
65987+ return rv;
65988+
65989+ if (gr_acl_handle_procpidmem(task))
65990+ rv = -EACCES;
65991+
65992+ put_task_struct(task);
65993+
65994 return rv;
65995 }
65996
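
Note the behavioural change in proc_fd_permission(): upstream returned success as soon as generic_permission() passed, whereas the patched version computes the DAC result, applies the own-task shortcut, and only then lets an RBAC denial override the outcome. Condensed, mirroring the hunk above:

/*
 * Patched proc_fd_permission() decision order:
 *   1. rv = generic_permission(inode, mask);   ordinary DAC check
 *   2. own /proc/<pid>/fd is always allowed:   rv = 0
 *   3. gr_acl_handle_procpidmem() nonzero:     rv = -EACCES
 * The RBAC check is a veto over steps 1-2, not a fallback after them.
 */
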
65997diff --git a/fs/proc/generic.c b/fs/proc/generic.c
65998index b502bba..849e216 100644
65999--- a/fs/proc/generic.c
66000+++ b/fs/proc/generic.c
66001@@ -22,6 +22,7 @@
66002 #include <linux/bitops.h>
66003 #include <linux/spinlock.h>
66004 #include <linux/completion.h>
66005+#include <linux/grsecurity.h>
66006 #include <asm/uaccess.h>
66007
66008 #include "internal.h"
66009@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66010 return proc_lookup_de(PDE(dir), dir, dentry);
66011 }
66012
66013+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66014+ unsigned int flags)
66015+{
66016+ if (gr_proc_is_restricted())
66017+ return ERR_PTR(-EACCES);
66018+
66019+ return proc_lookup_de(PDE(dir), dir, dentry);
66020+}
66021+
66022 /*
66023 * This returns non-zero if at EOF, so that the /proc
66024 * root directory can use this and check if it should
66025@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66026 return proc_readdir_de(PDE(inode), file, ctx);
66027 }
66028
66029+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66030+{
66031+ struct inode *inode = file_inode(file);
66032+
66033+ if (gr_proc_is_restricted())
66034+ return -EACCES;
66035+
66036+ return proc_readdir_de(PDE(inode), file, ctx);
66037+}
66038+
66039 /*
66040 * These are the generic /proc directory operations. They
66041 * use the in-memory "struct proc_dir_entry" tree to parse
66042@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
66043 .iterate = proc_readdir,
66044 };
66045
66046+static const struct file_operations proc_dir_restricted_operations = {
66047+ .llseek = generic_file_llseek,
66048+ .read = generic_read_dir,
66049+ .iterate = proc_readdir_restrict,
66050+};
66051+
66052 /*
66053 * proc directories can do almost nothing..
66054 */
66055@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66056 .setattr = proc_notify_change,
66057 };
66058
66059+static const struct inode_operations proc_dir_restricted_inode_operations = {
66060+ .lookup = proc_lookup_restrict,
66061+ .getattr = proc_getattr,
66062+ .setattr = proc_notify_change,
66063+};
66064+
66065 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66066 {
66067 int ret;
66068@@ -339,8 +371,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66069 return ret;
66070
66071 if (S_ISDIR(dp->mode)) {
66072- dp->proc_fops = &proc_dir_operations;
66073- dp->proc_iops = &proc_dir_inode_operations;
66074+ if (dp->restricted) {
66075+ dp->proc_fops = &proc_dir_restricted_operations;
66076+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66077+ } else {
66078+ dp->proc_fops = &proc_dir_operations;
66079+ dp->proc_iops = &proc_dir_inode_operations;
66080+ }
66081 dir->nlink++;
66082 } else if (S_ISLNK(dp->mode)) {
66083 dp->proc_iops = &proc_link_inode_operations;
66084@@ -453,6 +490,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66085 }
66086 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66087
66088+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66089+ struct proc_dir_entry *parent, void *data)
66090+{
66091+ struct proc_dir_entry *ent;
66092+
66093+ if (mode == 0)
66094+ mode = S_IRUGO | S_IXUGO;
66095+
66096+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66097+ if (ent) {
66098+ ent->data = data;
66099+ ent->restricted = 1;
66100+ if (proc_register(parent, ent) < 0) {
66101+ kfree(ent);
66102+ ent = NULL;
66103+ }
66104+ }
66105+ return ent;
66106+}
66107+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66108+
66109 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66110 struct proc_dir_entry *parent)
66111 {
66112@@ -467,6 +525,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66113 }
66114 EXPORT_SYMBOL(proc_mkdir);
66115
66116+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66117+ struct proc_dir_entry *parent)
66118+{
66119+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66120+}
66121+EXPORT_SYMBOL(proc_mkdir_restrict);
66122+
66123 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66124 struct proc_dir_entry *parent,
66125 const struct file_operations *proc_fops,
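
The new proc_mkdir_restrict()/proc_mkdir_data_restrict() helpers exported above are used later in the patch for /proc/net-style directories: the directory's lookup and readdir go through the _restrict operations, which fail with -EACCES whenever gr_proc_is_restricted() says so. Usage mirrors plain proc_mkdir(); a minimal sketch (the "example" names are illustrative):

/* Minimal usage sketch of the restricted-directory API from this hunk. */
#include <linux/proc_fs.h>

static struct proc_dir_entry *example_dir;

static int __init example_init(void)
{
	example_dir = proc_mkdir_restrict("example", NULL);
	if (!example_dir)
		return -ENOMEM;
	/* entries created under example_dir are reached through the
	 * restricted lookup/readdir of the parent */
	return 0;
}
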
66126diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66127index 3b0f838..a0e0f63e 100644
66128--- a/fs/proc/inode.c
66129+++ b/fs/proc/inode.c
66130@@ -24,11 +24,17 @@
66131 #include <linux/mount.h>
66132 #include <linux/magic.h>
66133 #include <linux/namei.h>
66134+#include <linux/grsecurity.h>
66135
66136 #include <asm/uaccess.h>
66137
66138 #include "internal.h"
66139
66140+#ifdef CONFIG_PROC_SYSCTL
66141+extern const struct inode_operations proc_sys_inode_operations;
66142+extern const struct inode_operations proc_sys_dir_operations;
66143+#endif
66144+
66145 static void proc_evict_inode(struct inode *inode)
66146 {
66147 struct proc_dir_entry *de;
66148@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
66149 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66150 sysctl_head_put(head);
66151 }
66152+
66153+#ifdef CONFIG_PROC_SYSCTL
66154+ if (inode->i_op == &proc_sys_inode_operations ||
66155+ inode->i_op == &proc_sys_dir_operations)
66156+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66157+#endif
66158+
66159 }
66160
66161 static struct kmem_cache * proc_inode_cachep;
66162@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66163 if (de->mode) {
66164 inode->i_mode = de->mode;
66165 inode->i_uid = de->uid;
66166+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66167+ inode->i_gid = grsec_proc_gid;
66168+#else
66169 inode->i_gid = de->gid;
66170+#endif
66171 }
66172 if (de->size)
66173 inode->i_size = de->size;
66174diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66175index c835b94..c9e01a3 100644
66176--- a/fs/proc/internal.h
66177+++ b/fs/proc/internal.h
66178@@ -47,9 +47,10 @@ struct proc_dir_entry {
66179 struct completion *pde_unload_completion;
66180 struct list_head pde_openers; /* who did ->open, but not ->release */
66181 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66182+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66183 u8 namelen;
66184 char name[];
66185-};
66186+} __randomize_layout;
66187
66188 union proc_op {
66189 int (*proc_get_link)(struct dentry *, struct path *);
66190@@ -67,7 +68,7 @@ struct proc_inode {
66191 struct ctl_table *sysctl_entry;
66192 const struct proc_ns_operations *ns_ops;
66193 struct inode vfs_inode;
66194-};
66195+} __randomize_layout;
66196
66197 /*
66198 * General functions
66199@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66200 struct pid *, struct task_struct *);
66201 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66202 struct pid *, struct task_struct *);
66203+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66204+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66205+ struct pid *, struct task_struct *);
66206+#endif
66207
66208 /*
66209 * base.c
66210@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66211 * generic.c
66212 */
66213 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66214+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66215 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66216 struct dentry *);
66217 extern int proc_readdir(struct file *, struct dir_context *);
66218+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66219 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66220
66221 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
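
__randomize_layout is the RANDSTRUCT plugin's marker: at build time the plugin shuffles the field order of annotated structs (proc_dir_entry and proc_inode here), so an exploit can no longer hard-code member offsets. When the plugin is absent the macro expands to nothing. The idiom in a freestanding form:

/* Sketch of the annotation idiom: a harmless no-op without the plugin. */
#ifndef __randomize_layout
#define __randomize_layout	/* RANDSTRUCT plugin absent: no-op */
#endif

struct example_private {
	void *cookie;
	unsigned long flags;
	char name[16];
} __randomize_layout;	/* field order shuffled per-build when enabled */
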
66222diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66223index a352d57..cb94a5c 100644
66224--- a/fs/proc/interrupts.c
66225+++ b/fs/proc/interrupts.c
66226@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66227
66228 static int __init proc_interrupts_init(void)
66229 {
66230+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66231+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66232+#else
66233 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66234+#endif
66235 return 0;
66236 }
66237 fs_initcall(proc_interrupts_init);
66238diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66239index 91a4e64..cb007c0 100644
66240--- a/fs/proc/kcore.c
66241+++ b/fs/proc/kcore.c
66242@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66243 * the addresses in the elf_phdr on our list.
66244 */
66245 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66246- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66247+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66248+ if (tsz > buflen)
66249 tsz = buflen;
66250-
66251+
66252 while (buflen) {
66253 struct kcore_list *m;
66254
66255@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66256 kfree(elf_buf);
66257 } else {
66258 if (kern_addr_valid(start)) {
66259- unsigned long n;
66260+ char *elf_buf;
66261+ mm_segment_t oldfs;
66262
66263- n = copy_to_user(buffer, (char *)start, tsz);
66264- /*
66265- * We cannot distinguish between fault on source
66266- * and fault on destination. When this happens
66267- * we clear too and hope it will trigger the
66268- * EFAULT again.
66269- */
66270- if (n) {
66271- if (clear_user(buffer + tsz - n,
66272- n))
66273+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66274+ if (!elf_buf)
66275+ return -ENOMEM;
66276+ oldfs = get_fs();
66277+ set_fs(KERNEL_DS);
66278+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66279+ set_fs(oldfs);
66280+ if (copy_to_user(buffer, elf_buf, tsz)) {
66281+ kfree(elf_buf);
66282 return -EFAULT;
66283+ }
66284 }
66285+ set_fs(oldfs);
66286+ kfree(elf_buf);
66287 } else {
66288 if (clear_user(buffer, tsz))
66289 return -EFAULT;
66290@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66291
66292 static int open_kcore(struct inode *inode, struct file *filp)
66293 {
66294+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66295+ return -EPERM;
66296+#endif
66297 if (!capable(CAP_SYS_RAWIO))
66298 return -EPERM;
66299 if (kcore_need_update)
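
The rewritten kern_addr_valid() branch replaces a direct copy_to_user() from the raw kernel address with a two-step bounce copy: __copy_from_user() under set_fs(KERNEL_DS) pulls the data into a kmalloc'd buffer first, so a fault on the kernel source can be told apart from a fault on the user destination; the old code admitted it could not distinguish the two and just cleared the user buffer and hoped for a repeated EFAULT. The pattern in isolation (get_fs()/set_fs() being the pre-4.x API this kernel uses):

/* Bounce-copy pattern from the hunk above, in isolation.
 * 'src' is a kernel virtual address, 'ubuf' a user pointer. */
static ssize_t bounce_copy(char __user *ubuf, unsigned long src, size_t len)
{
	mm_segment_t oldfs;
	char *tmp = kmalloc(len, GFP_KERNEL);
	ssize_t ret = 0;

	if (!tmp)
		return -ENOMEM;

	oldfs = get_fs();
	set_fs(KERNEL_DS);	/* let __copy_from_user read kernel memory */
	if (!__copy_from_user(tmp, (const void __user *)src, len)) {
		set_fs(oldfs);
		if (copy_to_user(ubuf, tmp, len))
			ret = -EFAULT;	/* fault was on the destination */
	} else {
		set_fs(oldfs);	/* fault was on the kernel source: skip */
	}
	kfree(tmp);
	return ret;
}
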
66300diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66301index d3ebf2e..6ad42d1 100644
66302--- a/fs/proc/meminfo.c
66303+++ b/fs/proc/meminfo.c
66304@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66305 vmi.used >> 10,
66306 vmi.largest_chunk >> 10
66307 #ifdef CONFIG_MEMORY_FAILURE
66308- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66309+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66310 #endif
66311 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66312 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66313diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66314index d4a3574..b421ce9 100644
66315--- a/fs/proc/nommu.c
66316+++ b/fs/proc/nommu.c
66317@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66318
66319 if (file) {
66320 seq_pad(m, ' ');
66321- seq_path(m, &file->f_path, "");
66322+ seq_path(m, &file->f_path, "\n\\");
66323 }
66324
66325 seq_putc(m, '\n');
66326diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66327index 1bde894..22ac7eb 100644
66328--- a/fs/proc/proc_net.c
66329+++ b/fs/proc/proc_net.c
66330@@ -23,9 +23,27 @@
66331 #include <linux/nsproxy.h>
66332 #include <net/net_namespace.h>
66333 #include <linux/seq_file.h>
66334+#include <linux/grsecurity.h>
66335
66336 #include "internal.h"
66337
66338+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66339+static struct seq_operations *ipv6_seq_ops_addr;
66340+
66341+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66342+{
66343+ ipv6_seq_ops_addr = addr;
66344+}
66345+
66346+void unregister_ipv6_seq_ops_addr(void)
66347+{
66348+ ipv6_seq_ops_addr = NULL;
66349+}
66350+
66351+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66352+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66353+#endif
66354+
66355 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66356 {
66357 return pde->parent->data;
66358@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66359 return maybe_get_net(PDE_NET(PDE(inode)));
66360 }
66361
66362+extern const struct seq_operations dev_seq_ops;
66363+
66364 int seq_open_net(struct inode *ino, struct file *f,
66365 const struct seq_operations *ops, int size)
66366 {
66367@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66368
66369 BUG_ON(size < sizeof(*p));
66370
66371+	/* only permit access to /proc/net/dev (and, with IPv6, its address list) */
66372+ if (
66373+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66374+ ops != ipv6_seq_ops_addr &&
66375+#endif
66376+ ops != &dev_seq_ops && gr_proc_is_restricted())
66377+ return -EACCES;
66378+
66379 net = get_proc_net(ino);
66380 if (net == NULL)
66381 return -ENXIO;
66382@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66383 int err;
66384 struct net *net;
66385
66386+ if (gr_proc_is_restricted())
66387+ return -EACCES;
66388+
66389 err = -ENXIO;
66390 net = get_proc_net(inode);
66391 if (net == NULL)
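
With the GRKERNSEC_PROC restrictions active, seq_open_net() and single_open_net() now fail with -EACCES for everything under /proc/net except the device list (dev_seq_ops) and, when IPv6 is built, its address file. From user space the effect looks like this (illustrative probe, not part of the patch):

#include <stdio.h>
#include <errno.h>

/* Probe which /proc/net files an unprivileged user may still open. */
static void probe(const char *path)
{
	FILE *f = fopen(path, "r");

	if (f) {
		printf("%-20s open OK\n", path);
		fclose(f);
	} else {
		printf("%-20s %s\n", path,
		       errno == EACCES ? "EACCES (restricted)" : "failed");
	}
}

int main(void)
{
	probe("/proc/net/dev");	/* expected to stay readable */
	probe("/proc/net/tcp");	/* expected EACCES when restricted */
	return 0;
}
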
66392diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66393index f92d5dd..26398ac 100644
66394--- a/fs/proc/proc_sysctl.c
66395+++ b/fs/proc/proc_sysctl.c
66396@@ -11,13 +11,21 @@
66397 #include <linux/namei.h>
66398 #include <linux/mm.h>
66399 #include <linux/module.h>
66400+#include <linux/nsproxy.h>
66401+#ifdef CONFIG_GRKERNSEC
66402+#include <net/net_namespace.h>
66403+#endif
66404 #include "internal.h"
66405
66406+extern int gr_handle_chroot_sysctl(const int op);
66407+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66408+ const int op);
66409+
66410 static const struct dentry_operations proc_sys_dentry_operations;
66411 static const struct file_operations proc_sys_file_operations;
66412-static const struct inode_operations proc_sys_inode_operations;
66413+const struct inode_operations proc_sys_inode_operations;
66414 static const struct file_operations proc_sys_dir_file_operations;
66415-static const struct inode_operations proc_sys_dir_operations;
66416+const struct inode_operations proc_sys_dir_operations;
66417
66418 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66419 {
66420@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66421
66422 err = NULL;
66423 d_set_d_op(dentry, &proc_sys_dentry_operations);
66424+
66425+ gr_handle_proc_create(dentry, inode);
66426+
66427 d_add(dentry, inode);
66428
66429 out:
66430@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66431 struct inode *inode = file_inode(filp);
66432 struct ctl_table_header *head = grab_header(inode);
66433 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66434+ int op = write ? MAY_WRITE : MAY_READ;
66435 ssize_t error;
66436 size_t res;
66437
66438@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66439 * and won't be until we finish.
66440 */
66441 error = -EPERM;
66442- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66443+ if (sysctl_perm(head, table, op))
66444 goto out;
66445
66446 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66447@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66448 if (!table->proc_handler)
66449 goto out;
66450
66451+#ifdef CONFIG_GRKERNSEC
66452+ error = -EPERM;
66453+ if (gr_handle_chroot_sysctl(op))
66454+ goto out;
66455+ dget(filp->f_path.dentry);
66456+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66457+ dput(filp->f_path.dentry);
66458+ goto out;
66459+ }
66460+ dput(filp->f_path.dentry);
66461+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66462+ goto out;
66463+ if (write) {
66464+ if (current->nsproxy->net_ns != table->extra2) {
66465+ if (!capable(CAP_SYS_ADMIN))
66466+ goto out;
66467+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66468+ goto out;
66469+ }
66470+#endif
66471+
66472 /* careful: calling conventions are nasty here */
66473 res = count;
66474 error = table->proc_handler(table, write, buf, &res, ppos);
66475@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66476 return false;
66477 } else {
66478 d_set_d_op(child, &proc_sys_dentry_operations);
66479+
66480+ gr_handle_proc_create(child, inode);
66481+
66482 d_add(child, inode);
66483 }
66484 } else {
66485@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66486 if ((*pos)++ < ctx->pos)
66487 return true;
66488
66489+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66490+ return 0;
66491+
66492 if (unlikely(S_ISLNK(table->mode)))
66493 res = proc_sys_link_fill_cache(file, ctx, head, table);
66494 else
66495@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66496 if (IS_ERR(head))
66497 return PTR_ERR(head);
66498
66499+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66500+ return -ENOENT;
66501+
66502 generic_fillattr(inode, stat);
66503 if (table)
66504 stat->mode = (stat->mode & S_IFMT) | table->mode;
66505@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66506 .llseek = generic_file_llseek,
66507 };
66508
66509-static const struct inode_operations proc_sys_inode_operations = {
66510+const struct inode_operations proc_sys_inode_operations = {
66511 .permission = proc_sys_permission,
66512 .setattr = proc_sys_setattr,
66513 .getattr = proc_sys_getattr,
66514 };
66515
66516-static const struct inode_operations proc_sys_dir_operations = {
66517+const struct inode_operations proc_sys_dir_operations = {
66518 .lookup = proc_sys_lookup,
66519 .permission = proc_sys_permission,
66520 .setattr = proc_sys_setattr,
66521@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66522 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66523 const char *name, int namelen)
66524 {
66525- struct ctl_table *table;
66526+ ctl_table_no_const *table;
66527 struct ctl_dir *new;
66528 struct ctl_node *node;
66529 char *new_name;
66530@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66531 return NULL;
66532
66533 node = (struct ctl_node *)(new + 1);
66534- table = (struct ctl_table *)(node + 1);
66535+ table = (ctl_table_no_const *)(node + 1);
66536 new_name = (char *)(table + 2);
66537 memcpy(new_name, name, namelen);
66538 new_name[namelen] = '\0';
66539@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66540 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66541 struct ctl_table_root *link_root)
66542 {
66543- struct ctl_table *link_table, *entry, *link;
66544+ ctl_table_no_const *link_table, *link;
66545+ struct ctl_table *entry;
66546 struct ctl_table_header *links;
66547 struct ctl_node *node;
66548 char *link_name;
66549@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66550 return NULL;
66551
66552 node = (struct ctl_node *)(links + 1);
66553- link_table = (struct ctl_table *)(node + nr_entries);
66554+ link_table = (ctl_table_no_const *)(node + nr_entries);
66555 link_name = (char *)&link_table[nr_entries + 1];
66556
66557 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66558@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66559 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66560 struct ctl_table *table)
66561 {
66562- struct ctl_table *ctl_table_arg = NULL;
66563- struct ctl_table *entry, *files;
66564+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66565+ struct ctl_table *entry;
66566 int nr_files = 0;
66567 int nr_dirs = 0;
66568 int err = -ENOMEM;
66569@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66570 nr_files++;
66571 }
66572
66573- files = table;
66574 /* If there are mixed files and directories we need a new table */
66575 if (nr_dirs && nr_files) {
66576- struct ctl_table *new;
66577+ ctl_table_no_const *new;
66578 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66579 GFP_KERNEL);
66580 if (!files)
66581@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66582 /* Register everything except a directory full of subdirectories */
66583 if (nr_files || !nr_dirs) {
66584 struct ctl_table_header *header;
66585- header = __register_sysctl_table(set, path, files);
66586+ header = __register_sysctl_table(set, path, files ? files : table);
66587 if (!header) {
66588 kfree(ctl_table_arg);
66589 goto out;
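
ctl_table_no_const is the constify plugin's escape hatch: the plugin makes structs like ctl_table read-only after initialization, so the few paths that genuinely build tables at run time (new_dir(), new_links(), register_leaf_sysctl_tables() above) construct through the mutable _no_const twin and only then publish through the const-qualified interfaces. The same construct-mutable, publish-const idiom in plain C:

/* The construct-mutable, publish-const idiom, outside the kernel. */
#include <stdio.h>
#include <string.h>

struct entry { char name[16]; int value; };

static const struct entry *build(void)
{
	/* mutable while being filled in ... */
	static struct entry table[2];

	strcpy(table[0].name, "alpha");
	table[0].value = 1;
	/* ... handed out only through a const-qualified pointer */
	return table;
}

int main(void)
{
	const struct entry *e = build();

	printf("%s = %d\n", e->name, e->value);
	return 0;
}
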
66590diff --git a/fs/proc/root.c b/fs/proc/root.c
66591index e74ac9f..35e89f4 100644
66592--- a/fs/proc/root.c
66593+++ b/fs/proc/root.c
66594@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66595 proc_mkdir("openprom", NULL);
66596 #endif
66597 proc_tty_init();
66598+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66599+#ifdef CONFIG_GRKERNSEC_PROC_USER
66600+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66601+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66602+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66603+#endif
66604+#else
66605 proc_mkdir("bus", NULL);
66606+#endif
66607 proc_sys_init();
66608 }
66609
66610diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66611index 510413eb..34d9a8c 100644
66612--- a/fs/proc/stat.c
66613+++ b/fs/proc/stat.c
66614@@ -11,6 +11,7 @@
66615 #include <linux/irqnr.h>
66616 #include <linux/cputime.h>
66617 #include <linux/tick.h>
66618+#include <linux/grsecurity.h>
66619
66620 #ifndef arch_irq_stat_cpu
66621 #define arch_irq_stat_cpu(cpu) 0
66622@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
66623 u64 sum_softirq = 0;
66624 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
66625 struct timespec boottime;
66626+ int unrestricted = 1;
66627+
66628+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66629+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66630+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
66631+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66632+ && !in_group_p(grsec_proc_gid)
66633+#endif
66634+ )
66635+ unrestricted = 0;
66636+#endif
66637+#endif
66638
66639 user = nice = system = idle = iowait =
66640 irq = softirq = steal = 0;
66641@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
66642 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66643 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66644 idle += get_idle_time(i);
66645- iowait += get_iowait_time(i);
66646- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66647- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66648- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66649- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66650- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66651- sum += kstat_cpu_irqs_sum(i);
66652- sum += arch_irq_stat_cpu(i);
66653+ if (unrestricted) {
66654+ iowait += get_iowait_time(i);
66655+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66656+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66657+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66658+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66659+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66660+ sum += kstat_cpu_irqs_sum(i);
66661+ sum += arch_irq_stat_cpu(i);
66662+ for (j = 0; j < NR_SOFTIRQS; j++) {
66663+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66664
66665- for (j = 0; j < NR_SOFTIRQS; j++) {
66666- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66667-
66668- per_softirq_sums[j] += softirq_stat;
66669- sum_softirq += softirq_stat;
66670+ per_softirq_sums[j] += softirq_stat;
66671+ sum_softirq += softirq_stat;
66672+ }
66673 }
66674 }
66675- sum += arch_irq_stat();
66676+ if (unrestricted)
66677+ sum += arch_irq_stat();
66678
66679 seq_puts(p, "cpu ");
66680 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66681@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
66682 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66683 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66684 idle = get_idle_time(i);
66685- iowait = get_iowait_time(i);
66686- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66687- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66688- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66689- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66690- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66691+ if (unrestricted) {
66692+ iowait = get_iowait_time(i);
66693+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66694+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66695+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66696+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66697+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66698+ }
66699 seq_printf(p, "cpu%d", i);
66700 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66701 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
66702@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
66703
66704 /* sum again ? it could be updated? */
66705 for_each_irq_nr(j)
66706- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
66707+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
66708
66709 seq_printf(p,
66710 "\nctxt %llu\n"
66711@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
66712 "processes %lu\n"
66713 "procs_running %lu\n"
66714 "procs_blocked %lu\n",
66715- nr_context_switches(),
66716+ unrestricted ? nr_context_switches() : 0ULL,
66717 (unsigned long)jif,
66718- total_forks,
66719- nr_running(),
66720- nr_iowait());
66721+ unrestricted ? total_forks : 0UL,
66722+ unrestricted ? nr_running() : 0UL,
66723+ unrestricted ? nr_iowait() : 0UL);
66724
66725 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
66726
66727diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
66728index f86e549..3a88fcd 100644
66729--- a/fs/proc/task_mmu.c
66730+++ b/fs/proc/task_mmu.c
66731@@ -13,12 +13,19 @@
66732 #include <linux/swap.h>
66733 #include <linux/swapops.h>
66734 #include <linux/mmu_notifier.h>
66735+#include <linux/grsecurity.h>
66736
66737 #include <asm/elf.h>
66738 #include <asm/uaccess.h>
66739 #include <asm/tlbflush.h>
66740 #include "internal.h"
66741
66742+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66743+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66744+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66745+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66746+#endif
66747+
66748 void task_mem(struct seq_file *m, struct mm_struct *mm)
66749 {
66750 unsigned long data, text, lib, swap;
66751@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66752 "VmExe:\t%8lu kB\n"
66753 "VmLib:\t%8lu kB\n"
66754 "VmPTE:\t%8lu kB\n"
66755- "VmSwap:\t%8lu kB\n",
66756- hiwater_vm << (PAGE_SHIFT-10),
66757+ "VmSwap:\t%8lu kB\n"
66758+
66759+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66760+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
66761+#endif
66762+
66763+ ,hiwater_vm << (PAGE_SHIFT-10),
66764 total_vm << (PAGE_SHIFT-10),
66765 mm->locked_vm << (PAGE_SHIFT-10),
66766 mm->pinned_vm << (PAGE_SHIFT-10),
66767@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66768 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
66769 (PTRS_PER_PTE * sizeof(pte_t) *
66770 atomic_long_read(&mm->nr_ptes)) >> 10,
66771- swap << (PAGE_SHIFT-10));
66772+ swap << (PAGE_SHIFT-10)
66773+
66774+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66775+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66776+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
66777+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
66778+#else
66779+ , mm->context.user_cs_base
66780+ , mm->context.user_cs_limit
66781+#endif
66782+#endif
66783+
66784+ );
66785 }
66786
66787 unsigned long task_vsize(struct mm_struct *mm)
66788@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66789 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
66790 }
66791
66792- /* We don't show the stack guard page in /proc/maps */
66793+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66794+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
66795+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
66796+#else
66797 start = vma->vm_start;
66798- if (stack_guard_page_start(vma, start))
66799- start += PAGE_SIZE;
66800 end = vma->vm_end;
66801- if (stack_guard_page_end(vma, end))
66802- end -= PAGE_SIZE;
66803+#endif
66804
66805 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
66806 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
66807@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66808 flags & VM_WRITE ? 'w' : '-',
66809 flags & VM_EXEC ? 'x' : '-',
66810 flags & VM_MAYSHARE ? 's' : 'p',
66811+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66812+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
66813+#else
66814 pgoff,
66815+#endif
66816 MAJOR(dev), MINOR(dev), ino);
66817
66818 /*
66819@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66820 */
66821 if (file) {
66822 seq_pad(m, ' ');
66823- seq_path(m, &file->f_path, "\n");
66824+ seq_path(m, &file->f_path, "\n\\");
66825 goto done;
66826 }
66827
66828@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66829 * Thread stack in /proc/PID/task/TID/maps or
66830 * the main process stack.
66831 */
66832- if (!is_pid || (vma->vm_start <= mm->start_stack &&
66833- vma->vm_end >= mm->start_stack)) {
66834+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
66835+ (vma->vm_start <= mm->start_stack &&
66836+ vma->vm_end >= mm->start_stack)) {
66837 name = "[stack]";
66838 } else {
66839 /* Thread stack in /proc/PID/maps */
66840@@ -359,6 +388,12 @@ done:
66841
66842 static int show_map(struct seq_file *m, void *v, int is_pid)
66843 {
66844+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66845+ if (current->exec_id != m->exec_id) {
66846+ gr_log_badprocpid("maps");
66847+ return 0;
66848+ }
66849+#endif
66850 show_map_vma(m, v, is_pid);
66851 m_cache_vma(m, v);
66852 return 0;
66853@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66854 .private = &mss,
66855 };
66856
66857+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66858+ if (current->exec_id != m->exec_id) {
66859+ gr_log_badprocpid("smaps");
66860+ return 0;
66861+ }
66862+#endif
66863 memset(&mss, 0, sizeof mss);
66864- mss.vma = vma;
66865- /* mmap_sem is held in m_start */
66866- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66867- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66868-
66869+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66870+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
66871+#endif
66872+ mss.vma = vma;
66873+ /* mmap_sem is held in m_start */
66874+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66875+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66876+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66877+ }
66878+#endif
66879 show_map_vma(m, vma, is_pid);
66880
66881 seq_printf(m,
66882@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66883 "KernelPageSize: %8lu kB\n"
66884 "MMUPageSize: %8lu kB\n"
66885 "Locked: %8lu kB\n",
66886+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66887+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
66888+#else
66889 (vma->vm_end - vma->vm_start) >> 10,
66890+#endif
66891 mss.resident >> 10,
66892 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
66893 mss.shared_clean >> 10,
66894@@ -1489,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66895 char buffer[64];
66896 int nid;
66897
66898+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66899+ if (current->exec_id != m->exec_id) {
66900+ gr_log_badprocpid("numa_maps");
66901+ return 0;
66902+ }
66903+#endif
66904+
66905 if (!mm)
66906 return 0;
66907
66908@@ -1510,11 +1567,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66909 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
66910 }
66911
66912+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66913+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
66914+#else
66915 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
66916+#endif
66917
66918 if (file) {
66919 seq_puts(m, " file=");
66920- seq_path(m, &file->f_path, "\n\t= ");
66921+ seq_path(m, &file->f_path, "\n\t\\= ");
66922 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
66923 seq_puts(m, " heap");
66924 } else {
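
The seq_path() changes above pass "\n\\" instead of "\n" as the escape set, so backslashes in a mapped file's path are octal-escaped along with newlines. A path may contain almost any byte; if backslash itself is not escaped, a filename containing the literal four bytes '\' '0' '1' '2' renders in /proc/<pid>/maps exactly like an escaped newline would, letting an attacker spoof parsers of the file. A small demonstration of the ambiguity being closed (illustrative):

/* Creates and maps a file whose name contains a literal backslash
 * sequence, then shows how it appears in /proc/self/maps. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const char *name = "/tmp/foo\\012bar";	/* literal '\','0','1','2' */
	int fd = open(name, O_CREAT | O_RDWR, 0600);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	if (mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0) == MAP_FAILED)
		return 1;
	/* without '\\' in the escape set, this line is indistinguishable
	 * from a mapping of a file named "foo<newline>bar" */
	system("grep foo /proc/self/maps");
	return 0;
}
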
66925diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
66926index 599ec2e..f1413ae 100644
66927--- a/fs/proc/task_nommu.c
66928+++ b/fs/proc/task_nommu.c
66929@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66930 else
66931 bytes += kobjsize(mm);
66932
66933- if (current->fs && current->fs->users > 1)
66934+ if (current->fs && atomic_read(&current->fs->users) > 1)
66935 sbytes += kobjsize(current->fs);
66936 else
66937 bytes += kobjsize(current->fs);
66938@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
66939
66940 if (file) {
66941 seq_pad(m, ' ');
66942- seq_path(m, &file->f_path, "");
66943+ seq_path(m, &file->f_path, "\n\\");
66944 } else if (mm) {
66945 pid_t tid = pid_of_stack(priv, vma, is_pid);
66946
66947diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
66948index a90d6d35..d08047c 100644
66949--- a/fs/proc/vmcore.c
66950+++ b/fs/proc/vmcore.c
66951@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
66952 nr_bytes = count;
66953
66954 /* If pfn is not ram, return zeros for sparse dump files */
66955- if (pfn_is_ram(pfn) == 0)
66956- memset(buf, 0, nr_bytes);
66957- else {
66958+ if (pfn_is_ram(pfn) == 0) {
66959+ if (userbuf) {
66960+ if (clear_user((char __force_user *)buf, nr_bytes))
66961+ return -EFAULT;
66962+ } else
66963+ memset(buf, 0, nr_bytes);
66964+ } else {
66965 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
66966 offset, userbuf);
66967 if (tmp < 0)
66968@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
66969 static int copy_to(void *target, void *src, size_t size, int userbuf)
66970 {
66971 if (userbuf) {
66972- if (copy_to_user((char __user *) target, src, size))
66973+ if (copy_to_user((char __force_user *) target, src, size))
66974 return -EFAULT;
66975 } else {
66976 memcpy(target, src, size);
66977@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
66978 if (*fpos < m->offset + m->size) {
66979 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
66980 start = m->paddr + *fpos - m->offset;
66981- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
66982+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
66983 if (tmp < 0)
66984 return tmp;
66985 buflen -= tsz;
66986@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
66987 static ssize_t read_vmcore(struct file *file, char __user *buffer,
66988 size_t buflen, loff_t *fpos)
66989 {
66990- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
66991+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
66992 }
66993
66994 /*
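
__force_user and __force_kernel are PaX-side refinements of sparse's __force: they mark the spots where read_from_oldmem() and __read_vmcore() legitimately cast between the kernel and __user address spaces, keeping the static checker meaningful everywhere else. The underlying sparse machinery, reduced to a sketch:

/* How sparse address-space annotations work, in miniature. */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

/* Without __force, sparse flags this cast as an address-space leak: */
static inline const char __user *as_user_ptr(const char *kbuf)
{
	return (__force const char __user *)kbuf;
}
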
66995diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
66996index d3fb2b6..43a8140 100644
66997--- a/fs/qnx6/qnx6.h
66998+++ b/fs/qnx6/qnx6.h
66999@@ -74,7 +74,7 @@ enum {
67000 BYTESEX_BE,
67001 };
67002
67003-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67004+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67005 {
67006 if (sbi->s_bytesex == BYTESEX_LE)
67007 return le64_to_cpu((__force __le64)n);
67008@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67009 return (__force __fs64)cpu_to_be64(n);
67010 }
67011
67012-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67013+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67014 {
67015 if (sbi->s_bytesex == BYTESEX_LE)
67016 return le32_to_cpu((__force __le32)n);
67017diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67018index bb2869f..d34ada8 100644
67019--- a/fs/quota/netlink.c
67020+++ b/fs/quota/netlink.c
67021@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67022 void quota_send_warning(struct kqid qid, dev_t dev,
67023 const char warntype)
67024 {
67025- static atomic_t seq;
67026+ static atomic_unchecked_t seq;
67027 struct sk_buff *skb;
67028 void *msg_head;
67029 int ret;
67030@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67031 "VFS: Not enough memory to send quota warning.\n");
67032 return;
67033 }
67034- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67035+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67036 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67037 if (!msg_head) {
67038 printk(KERN_ERR
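
The atomic_t/atomic_unchecked_t split here is the PAX_REFCOUNT pattern that recurs throughout this patch (the reiserfs generation counter and the pipe counters in splice.c below use it too): plain atomic_t increments trap on overflow to stop reference-count wraparound exploits, while atomic_unchecked_t keeps the old wrapping semantics for pure sequence counters like this netlink seq, where wraparound is harmless. The checked-increment idea, illustrated outside the kernel:

/* User-space illustration of a trapping refcount increment, the
 * behaviour PAX_REFCOUNT gives plain atomic_t. Not kernel code. */
#include <stdio.h>
#include <limits.h>

static int checked_inc(int *v)
{
	int out;

	if (__builtin_add_overflow(*v, 1, &out))
		return -1;	/* would wrap: refuse, like the PaX trap */
	*v = out;
	return 0;
}

int main(void)
{
	int refs = INT_MAX;

	if (checked_inc(&refs) < 0)
		puts("overflow caught instead of wrapping");
	return 0;
}
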
67039diff --git a/fs/read_write.c b/fs/read_write.c
67040index c0805c93..d39f2eb 100644
67041--- a/fs/read_write.c
67042+++ b/fs/read_write.c
67043@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67044
67045 old_fs = get_fs();
67046 set_fs(get_ds());
67047- p = (__force const char __user *)buf;
67048+ p = (const char __force_user *)buf;
67049 if (count > MAX_RW_COUNT)
67050 count = MAX_RW_COUNT;
67051 if (file->f_op->write)
67052diff --git a/fs/readdir.c b/fs/readdir.c
67053index ced6791..936687b 100644
67054--- a/fs/readdir.c
67055+++ b/fs/readdir.c
67056@@ -18,6 +18,7 @@
67057 #include <linux/security.h>
67058 #include <linux/syscalls.h>
67059 #include <linux/unistd.h>
67060+#include <linux/namei.h>
67061
67062 #include <asm/uaccess.h>
67063
67064@@ -71,6 +72,7 @@ struct old_linux_dirent {
67065 struct readdir_callback {
67066 struct dir_context ctx;
67067 struct old_linux_dirent __user * dirent;
67068+ struct file * file;
67069 int result;
67070 };
67071
67072@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67073 buf->result = -EOVERFLOW;
67074 return -EOVERFLOW;
67075 }
67076+
67077+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67078+ return 0;
67079+
67080 buf->result++;
67081 dirent = buf->dirent;
67082 if (!access_ok(VERIFY_WRITE, dirent,
67083@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67084 if (!f.file)
67085 return -EBADF;
67086
67087+ buf.file = f.file;
67088 error = iterate_dir(f.file, &buf.ctx);
67089 if (buf.result)
67090 error = buf.result;
67091@@ -145,6 +152,7 @@ struct getdents_callback {
67092 struct dir_context ctx;
67093 struct linux_dirent __user * current_dir;
67094 struct linux_dirent __user * previous;
67095+ struct file * file;
67096 int count;
67097 int error;
67098 };
67099@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67100 buf->error = -EOVERFLOW;
67101 return -EOVERFLOW;
67102 }
67103+
67104+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67105+ return 0;
67106+
67107 dirent = buf->previous;
67108 if (dirent) {
67109 if (__put_user(offset, &dirent->d_off))
67110@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67111 if (!f.file)
67112 return -EBADF;
67113
67114+ buf.file = f.file;
67115 error = iterate_dir(f.file, &buf.ctx);
67116 if (error >= 0)
67117 error = buf.error;
67118@@ -230,6 +243,7 @@ struct getdents_callback64 {
67119 struct dir_context ctx;
67120 struct linux_dirent64 __user * current_dir;
67121 struct linux_dirent64 __user * previous;
67122+ struct file *file;
67123 int count;
67124 int error;
67125 };
67126@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67127 buf->error = -EINVAL; /* only used if we fail.. */
67128 if (reclen > buf->count)
67129 return -EINVAL;
67130+
67131+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67132+ return 0;
67133+
67134 dirent = buf->previous;
67135 if (dirent) {
67136 if (__put_user(offset, &dirent->d_off))
67137@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67138 if (!f.file)
67139 return -EBADF;
67140
67141+ buf.file = f.file;
67142 error = iterate_dir(f.file, &buf.ctx);
67143 if (error >= 0)
67144 error = buf.error;
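
All three getdents flavours now pass the struct file down to their fill callbacks so gr_acl_handle_filldir() can be consulted per entry; returning 0 from the callback simply skips that name, so a hidden entry never reaches the user buffer at all — no error and no gap. Seen from user space, listing stays ordinary getdents with some records silently absent (illustrative reader below):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

struct linux_dirent64 {
	unsigned long long d_ino;
	long long          d_off;
	unsigned short     d_reclen;
	unsigned char      d_type;
	char               d_name[];
};

int main(void)
{
	char buf[4096];
	int fd = open("/proc", O_RDONLY | O_DIRECTORY);
	long n;

	if (fd < 0)
		return 1;
	while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
		long off = 0;

		while (off < n) {
			struct linux_dirent64 *d = (void *)(buf + off);

			puts(d->d_name);	/* hidden PIDs never appear */
			off += d->d_reclen;
		}
	}
	return 0;
}
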
67145diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67146index 9c02d96..6562c10 100644
67147--- a/fs/reiserfs/do_balan.c
67148+++ b/fs/reiserfs/do_balan.c
67149@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67150 return;
67151 }
67152
67153- atomic_inc(&fs_generation(tb->tb_sb));
67154+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67155 do_balance_starts(tb);
67156
67157 /*
67158diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67159index aca73dd..e3c558d 100644
67160--- a/fs/reiserfs/item_ops.c
67161+++ b/fs/reiserfs/item_ops.c
67162@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67163 }
67164
67165 static struct item_operations errcatch_ops = {
67166- errcatch_bytes_number,
67167- errcatch_decrement_key,
67168- errcatch_is_left_mergeable,
67169- errcatch_print_item,
67170- errcatch_check_item,
67171+ .bytes_number = errcatch_bytes_number,
67172+ .decrement_key = errcatch_decrement_key,
67173+ .is_left_mergeable = errcatch_is_left_mergeable,
67174+ .print_item = errcatch_print_item,
67175+ .check_item = errcatch_check_item,
67176
67177- errcatch_create_vi,
67178- errcatch_check_left,
67179- errcatch_check_right,
67180- errcatch_part_size,
67181- errcatch_unit_num,
67182- errcatch_print_vi
67183+ .create_vi = errcatch_create_vi,
67184+ .check_left = errcatch_check_left,
67185+ .check_right = errcatch_check_right,
67186+ .part_size = errcatch_part_size,
67187+ .unit_num = errcatch_unit_num,
67188+ .print_vi = errcatch_print_vi
67189 };
67190
67191 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
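
This hunk is purely a readability and robustness conversion: errcatch_ops moves from positional C89 initialization, where any reordering of struct item_operations silently misassigns the callbacks, to C99 designated initializers, which bind by field name. The difference in miniature:

/* Positional vs. designated initialization of an ops table. */
struct ops {
	int  (*open)(void);
	void (*close)(void);
};

static int  my_open(void)  { return 0; }
static void my_close(void) { }

/* positional: breaks silently if 'struct ops' fields are reordered */
static struct ops positional = { my_open, my_close };

/* designated: binds by name, order-independent */
static struct ops designated = {
	.open  = my_open,
	.close = my_close,
};
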
67192diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67193index 621b9f3..af527fd 100644
67194--- a/fs/reiserfs/procfs.c
67195+++ b/fs/reiserfs/procfs.c
67196@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67197 "SMALL_TAILS " : "NO_TAILS ",
67198 replay_only(sb) ? "REPLAY_ONLY " : "",
67199 convert_reiserfs(sb) ? "CONV " : "",
67200- atomic_read(&r->s_generation_counter),
67201+ atomic_read_unchecked(&r->s_generation_counter),
67202 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67203 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67204 SF(s_good_search_by_key_reada), SF(s_bmaps),
67205diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67206index bb79cdd..fcf49ef 100644
67207--- a/fs/reiserfs/reiserfs.h
67208+++ b/fs/reiserfs/reiserfs.h
67209@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67210 /* Comment? -Hans */
67211 wait_queue_head_t s_wait;
67212 /* increased by one every time the tree gets re-balanced */
67213- atomic_t s_generation_counter;
67214+ atomic_unchecked_t s_generation_counter;
67215
67216 /* File system properties. Currently holds on-disk FS format */
67217 unsigned long s_properties;
67218@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67219 #define REISERFS_USER_MEM 1 /* user memory mode */
67220
67221 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67222-#define get_generation(s) atomic_read (&fs_generation(s))
67223+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67224 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67225 #define __fs_changed(gen,s) (gen != get_generation (s))
67226 #define fs_changed(gen,s) \
67227diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67228index 71fbbe3..eff29ba 100644
67229--- a/fs/reiserfs/super.c
67230+++ b/fs/reiserfs/super.c
67231@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67232 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67233 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67234 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67235+#ifdef CONFIG_REISERFS_FS_XATTR
67236+ /* turn on user xattrs by default */
67237+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67238+#endif
67239 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67240 sbi->s_alloc_options.preallocmin = 0;
67241 /* Preallocate by 16 blocks (17-1) at once */
67242diff --git a/fs/select.c b/fs/select.c
67243index 467bb1c..cf9d65a 100644
67244--- a/fs/select.c
67245+++ b/fs/select.c
67246@@ -20,6 +20,7 @@
67247 #include <linux/export.h>
67248 #include <linux/slab.h>
67249 #include <linux/poll.h>
67250+#include <linux/security.h>
67251 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67252 #include <linux/file.h>
67253 #include <linux/fdtable.h>
67254@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67255 struct poll_list *walk = head;
67256 unsigned long todo = nfds;
67257
67258+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67259 if (nfds > rlimit(RLIMIT_NOFILE))
67260 return -EINVAL;
67261
67262diff --git a/fs/seq_file.c b/fs/seq_file.c
67263index dbf3a59..daf023f 100644
67264--- a/fs/seq_file.c
67265+++ b/fs/seq_file.c
67266@@ -12,6 +12,8 @@
67267 #include <linux/slab.h>
67268 #include <linux/cred.h>
67269 #include <linux/mm.h>
67270+#include <linux/sched.h>
67271+#include <linux/grsecurity.h>
67272
67273 #include <asm/uaccess.h>
67274 #include <asm/page.h>
67275@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67276
67277 static void *seq_buf_alloc(unsigned long size)
67278 {
67279- void *buf;
67280-
67281- /*
67282- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67283- * it's better to fall back to vmalloc() than to kill things.
67284- */
67285- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67286- if (!buf && size > PAGE_SIZE)
67287- buf = vmalloc(size);
67288- return buf;
67289+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67290 }
67291
67292 /**
67293@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67294 #ifdef CONFIG_USER_NS
67295 p->user_ns = file->f_cred->user_ns;
67296 #endif
67297+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67298+ p->exec_id = current->exec_id;
67299+#endif
67300
67301 /*
67302 * Wrappers around seq_open(e.g. swaps_open) need to be
67303@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67304 }
67305 EXPORT_SYMBOL(seq_open);
67306
67307+
67308+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67309+{
67310+ if (gr_proc_is_restricted())
67311+ return -EACCES;
67312+
67313+ return seq_open(file, op);
67314+}
67315+EXPORT_SYMBOL(seq_open_restrict);
67316+
67317 static int traverse(struct seq_file *m, loff_t offset)
67318 {
67319 loff_t pos = 0, index;
67320@@ -158,7 +164,7 @@ Eoverflow:
67321 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67322 {
67323 struct seq_file *m = file->private_data;
67324- size_t copied = 0;
67325+ ssize_t copied = 0;
67326 loff_t pos;
67327 size_t n;
67328 void *p;
67329@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
67330 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67331 void *data)
67332 {
67333- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67334+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67335 int res = -ENOMEM;
67336
67337 if (op) {
67338@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67339 }
67340 EXPORT_SYMBOL(single_open_size);
67341
67342+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67343+ void *data)
67344+{
67345+ if (gr_proc_is_restricted())
67346+ return -EACCES;
67347+
67348+ return single_open(file, show, data);
67349+}
67350+EXPORT_SYMBOL(single_open_restrict);
67351+
67352+
67353 int single_release(struct inode *inode, struct file *file)
67354 {
67355 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
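
seq_open() and single_open() grow _restrict variants that refuse to open when gr_proc_is_restricted(), and seq_open() also records the opener's exec_id so the show routines patched in task_mmu.c can detect a seq_file inherited across an execve (the gr_log_badprocpid() checks). Hooking a proc file up to the restricted variant looks the same as the stock API; a minimal sketch (the "example" names are illustrative):

/* Minimal sketch: a proc file using the restricted single_open variant
 * added by this hunk. */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "visible only when not restricted\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open_restrict(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
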
67356diff --git a/fs/splice.c b/fs/splice.c
67357index 75c6058..770d40c 100644
67358--- a/fs/splice.c
67359+++ b/fs/splice.c
67360@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67361 pipe_lock(pipe);
67362
67363 for (;;) {
67364- if (!pipe->readers) {
67365+ if (!atomic_read(&pipe->readers)) {
67366 send_sig(SIGPIPE, current, 0);
67367 if (!ret)
67368 ret = -EPIPE;
67369@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67370 page_nr++;
67371 ret += buf->len;
67372
67373- if (pipe->files)
67374+ if (atomic_read(&pipe->files))
67375 do_wakeup = 1;
67376
67377 if (!--spd->nr_pages)
67378@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67379 do_wakeup = 0;
67380 }
67381
67382- pipe->waiting_writers++;
67383+ atomic_inc(&pipe->waiting_writers);
67384 pipe_wait(pipe);
67385- pipe->waiting_writers--;
67386+ atomic_dec(&pipe->waiting_writers);
67387 }
67388
67389 pipe_unlock(pipe);
67390@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67391 old_fs = get_fs();
67392 set_fs(get_ds());
67393 /* The cast to a user pointer is valid due to the set_fs() */
67394- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67395+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67396 set_fs(old_fs);
67397
67398 return res;
67399@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67400 old_fs = get_fs();
67401 set_fs(get_ds());
67402 /* The cast to a user pointer is valid due to the set_fs() */
67403- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67404+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67405 set_fs(old_fs);
67406
67407 return res;
67408@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67409 goto err;
67410
67411 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67412- vec[i].iov_base = (void __user *) page_address(page);
67413+ vec[i].iov_base = (void __force_user *) page_address(page);
67414 vec[i].iov_len = this_len;
67415 spd.pages[i] = page;
67416 spd.nr_pages++;
67417@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67418 ops->release(pipe, buf);
67419 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67420 pipe->nrbufs--;
67421- if (pipe->files)
67422+ if (atomic_read(&pipe->files))
67423 sd->need_wakeup = true;
67424 }
67425
67426@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67427 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67428 {
67429 while (!pipe->nrbufs) {
67430- if (!pipe->writers)
67431+ if (!atomic_read(&pipe->writers))
67432 return 0;
67433
67434- if (!pipe->waiting_writers && sd->num_spliced)
67435+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67436 return 0;
67437
67438 if (sd->flags & SPLICE_F_NONBLOCK)
67439@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67440 ops->release(pipe, buf);
67441 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67442 pipe->nrbufs--;
67443- if (pipe->files)
67444+ if (atomic_read(&pipe->files))
67445 sd.need_wakeup = true;
67446 } else {
67447 buf->offset += ret;
67448@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67449 * out of the pipe right after the splice_to_pipe(). So set
67450 * PIPE_READERS appropriately.
67451 */
67452- pipe->readers = 1;
67453+ atomic_set(&pipe->readers, 1);
67454
67455 current->splice_pipe = pipe;
67456 }
67457@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67458
67459 partial[buffers].offset = off;
67460 partial[buffers].len = plen;
67461+ partial[buffers].private = 0;
67462
67463 off = 0;
67464 len -= plen;
67465@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67466 ret = -ERESTARTSYS;
67467 break;
67468 }
67469- if (!pipe->writers)
67470+ if (!atomic_read(&pipe->writers))
67471 break;
67472- if (!pipe->waiting_writers) {
67473+ if (!atomic_read(&pipe->waiting_writers)) {
67474 if (flags & SPLICE_F_NONBLOCK) {
67475 ret = -EAGAIN;
67476 break;
67477@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67478 pipe_lock(pipe);
67479
67480 while (pipe->nrbufs >= pipe->buffers) {
67481- if (!pipe->readers) {
67482+ if (!atomic_read(&pipe->readers)) {
67483 send_sig(SIGPIPE, current, 0);
67484 ret = -EPIPE;
67485 break;
67486@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67487 ret = -ERESTARTSYS;
67488 break;
67489 }
67490- pipe->waiting_writers++;
67491+ atomic_inc(&pipe->waiting_writers);
67492 pipe_wait(pipe);
67493- pipe->waiting_writers--;
67494+ atomic_dec(&pipe->waiting_writers);
67495 }
67496
67497 pipe_unlock(pipe);
67498@@ -1818,14 +1819,14 @@ retry:
67499 pipe_double_lock(ipipe, opipe);
67500
67501 do {
67502- if (!opipe->readers) {
67503+ if (!atomic_read(&opipe->readers)) {
67504 send_sig(SIGPIPE, current, 0);
67505 if (!ret)
67506 ret = -EPIPE;
67507 break;
67508 }
67509
67510- if (!ipipe->nrbufs && !ipipe->writers)
67511+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67512 break;
67513
67514 /*
67515@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67516 pipe_double_lock(ipipe, opipe);
67517
67518 do {
67519- if (!opipe->readers) {
67520+ if (!atomic_read(&opipe->readers)) {
67521 send_sig(SIGPIPE, current, 0);
67522 if (!ret)
67523 ret = -EPIPE;
67524@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67525 * return EAGAIN if we have the potential of some data in the
67526 * future, otherwise just return 0
67527 */
67528- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67529+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67530 ret = -EAGAIN;
67531
67532 pipe_unlock(ipipe);
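Every plain read, increment, and decrement of pipe->readers, pipe->writers, pipe->files, and pipe->waiting_writers in this file becomes an atomic_* accessor; the matching field changes to struct pipe_inode_info are made elsewhere in this patch. The point of the conversion is that atomic_t operations are covered by PaX's REFCOUNT overflow instrumentation, so a wrapping counter is detected instead of silently corrupting pipe state. A reduced sketch of the pattern (the struct is cut down for illustration; the field names are the real ones):

    #include <linux/atomic.h>

    struct pipe_counters {
        atomic_t readers;          /* was: unsigned int readers; */
        atomic_t waiting_writers;  /* was: unsigned int waiting_writers; */
    };

    static void writer_wait(struct pipe_counters *c)
    {
        /* A bare "waiting_writers++" becomes an atomic RMW that the
         * REFCOUNT instrumentation can check for overflow. */
        atomic_inc(&c->waiting_writers);
        /* ... sleep in pipe_wait() ... */
        atomic_dec(&c->waiting_writers);
    }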
67533diff --git a/fs/stat.c b/fs/stat.c
67534index ae0c3ce..9ee641c 100644
67535--- a/fs/stat.c
67536+++ b/fs/stat.c
67537@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67538 stat->gid = inode->i_gid;
67539 stat->rdev = inode->i_rdev;
67540 stat->size = i_size_read(inode);
67541- stat->atime = inode->i_atime;
67542- stat->mtime = inode->i_mtime;
67543+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67544+ stat->atime = inode->i_ctime;
67545+ stat->mtime = inode->i_ctime;
67546+ } else {
67547+ stat->atime = inode->i_atime;
67548+ stat->mtime = inode->i_mtime;
67549+ }
67550 stat->ctime = inode->i_ctime;
67551 stat->blksize = (1 << inode->i_blkbits);
67552 stat->blocks = inode->i_blocks;
67553@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67554 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67555 {
67556 struct inode *inode = path->dentry->d_inode;
67557+ int retval;
67558
67559- if (inode->i_op->getattr)
67560- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67561+ if (inode->i_op->getattr) {
67562+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67563+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67564+ stat->atime = stat->ctime;
67565+ stat->mtime = stat->ctime;
67566+ }
67567+ return retval;
67568+ }
67569
67570 generic_fillattr(inode, stat);
67571 return 0;
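Both fs/stat.c hunks implement the GRKERNSEC_DEVICE_SIDECHANNEL option described in the Kconfig added later in this patch: when a task without CAP_MKNOD stats a device node, the reported access and modify times are pinned to the create time, so watching timestamps on a device like /dev/ptmx no longer reveals activity timing. is_sidechannel_device() is defined elsewhere in the patch; a plausible reduction of the predicate (illustrative only -- the real helper is compiled out when the option is disabled):

    #include <linux/fs.h>
    #include <linux/stat.h>

    /* Roughly: "is this a world-accessible character or block device
     * node that an unprivileged observer could time-analyze?" */
    static inline bool is_sidechannel_device(const struct inode *inode)
    {
        umode_t mode = inode->i_mode;

        return (S_ISCHR(mode) || S_ISBLK(mode)) &&
               (mode & (S_IROTH | S_IWOTH));
    }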
67572diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67573index 0b45ff4..847de5b 100644
67574--- a/fs/sysfs/dir.c
67575+++ b/fs/sysfs/dir.c
67576@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67577 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67578 {
67579 struct kernfs_node *parent, *kn;
67580+ const char *name;
67581+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67582+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67583+ const char *parent_name;
67584+#endif
67585
67586 BUG_ON(!kobj);
67587
67588+ name = kobject_name(kobj);
67589+
67590 if (kobj->parent)
67591 parent = kobj->parent->sd;
67592 else
67593@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67594 if (!parent)
67595 return -ENOENT;
67596
67597- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67598- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67599+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67600+ parent_name = parent->name;
67601+ mode = S_IRWXU;
67602+
67603+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67604+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67605+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67606+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67607+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67608+#endif
67609+
67610+ kn = kernfs_create_dir_ns(parent, name,
67611+ mode, kobj, ns);
67612 if (IS_ERR(kn)) {
67613 if (PTR_ERR(kn) == -EEXIST)
67614- sysfs_warn_dup(parent, kobject_name(kobj));
67615+ sysfs_warn_dup(parent, name);
67616 return PTR_ERR(kn);
67617 }
67618
67619diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67620index 69d4889..a810bd4 100644
67621--- a/fs/sysv/sysv.h
67622+++ b/fs/sysv/sysv.h
67623@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67624 #endif
67625 }
67626
67627-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67628+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67629 {
67630 if (sbi->s_bytesex == BYTESEX_PDP)
67631 return PDP_swab((__force __u32)n);
67632diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
67633index fb08b0c..65fcc7e 100644
67634--- a/fs/ubifs/io.c
67635+++ b/fs/ubifs/io.c
67636@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
67637 return err;
67638 }
67639
67640-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67641+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67642 {
67643 int err;
67644
67645diff --git a/fs/udf/misc.c b/fs/udf/misc.c
67646index c175b4d..8f36a16 100644
67647--- a/fs/udf/misc.c
67648+++ b/fs/udf/misc.c
67649@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
67650
67651 u8 udf_tag_checksum(const struct tag *t)
67652 {
67653- u8 *data = (u8 *)t;
67654+ const u8 *data = (const u8 *)t;
67655 u8 checksum = 0;
67656 int i;
67657 for (i = 0; i < sizeof(struct tag); ++i)
67658diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
67659index 8d974c4..b82f6ec 100644
67660--- a/fs/ufs/swab.h
67661+++ b/fs/ufs/swab.h
67662@@ -22,7 +22,7 @@ enum {
67663 BYTESEX_BE
67664 };
67665
67666-static inline u64
67667+static inline u64 __intentional_overflow(-1)
67668 fs64_to_cpu(struct super_block *sbp, __fs64 n)
67669 {
67670 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67671@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
67672 return (__force __fs64)cpu_to_be64(n);
67673 }
67674
67675-static inline u32
67676+static inline u32 __intentional_overflow(-1)
67677 fs32_to_cpu(struct super_block *sbp, __fs32 n)
67678 {
67679 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67680diff --git a/fs/utimes.c b/fs/utimes.c
67681index aa138d6..5f3a811 100644
67682--- a/fs/utimes.c
67683+++ b/fs/utimes.c
67684@@ -1,6 +1,7 @@
67685 #include <linux/compiler.h>
67686 #include <linux/file.h>
67687 #include <linux/fs.h>
67688+#include <linux/security.h>
67689 #include <linux/linkage.h>
67690 #include <linux/mount.h>
67691 #include <linux/namei.h>
67692@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
67693 }
67694 }
67695 retry_deleg:
67696+
67697+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
67698+ error = -EACCES;
67699+ goto mnt_drop_write_and_out;
67700+ }
67701+
67702 mutex_lock(&inode->i_mutex);
67703 error = notify_change(path->dentry, &newattrs, &delegated_inode);
67704 mutex_unlock(&inode->i_mutex);
67705diff --git a/fs/xattr.c b/fs/xattr.c
67706index 4ef6985..a6cd6567 100644
67707--- a/fs/xattr.c
67708+++ b/fs/xattr.c
67709@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
67710 return rc;
67711 }
67712
67713+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
67714+ssize_t
67715+pax_getxattr(struct dentry *dentry, void *value, size_t size)
67716+{
67717+ struct inode *inode = dentry->d_inode;
67718+ ssize_t error;
67719+
67720+ error = inode_permission(inode, MAY_EXEC);
67721+ if (error)
67722+ return error;
67723+
67724+ if (inode->i_op->getxattr)
67725+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
67726+ else
67727+ error = -EOPNOTSUPP;
67728+
67729+ return error;
67730+}
67731+EXPORT_SYMBOL(pax_getxattr);
67732+#endif
67733+
67734 ssize_t
67735 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
67736 {
67737@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
67738 * Extended attribute SET operations
67739 */
67740 static long
67741-setxattr(struct dentry *d, const char __user *name, const void __user *value,
67742+setxattr(struct path *path, const char __user *name, const void __user *value,
67743 size_t size, int flags)
67744 {
67745 int error;
67746@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
67747 posix_acl_fix_xattr_from_user(kvalue, size);
67748 }
67749
67750- error = vfs_setxattr(d, kname, kvalue, size, flags);
67751+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
67752+ error = -EACCES;
67753+ goto out;
67754+ }
67755+
67756+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
67757 out:
67758 if (vvalue)
67759 vfree(vvalue);
67760@@ -376,7 +402,7 @@ retry:
67761 return error;
67762 error = mnt_want_write(path.mnt);
67763 if (!error) {
67764- error = setxattr(path.dentry, name, value, size, flags);
67765+ error = setxattr(&path, name, value, size, flags);
67766 mnt_drop_write(path.mnt);
67767 }
67768 path_put(&path);
67769@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
67770 audit_file(f.file);
67771 error = mnt_want_write_file(f.file);
67772 if (!error) {
67773- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
67774+ error = setxattr(&f.file->f_path, name, value, size, flags);
67775 mnt_drop_write_file(f.file);
67776 }
67777 fdput(f);
67778@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
67779 * Extended attribute REMOVE operations
67780 */
67781 static long
67782-removexattr(struct dentry *d, const char __user *name)
67783+removexattr(struct path *path, const char __user *name)
67784 {
67785 int error;
67786 char kname[XATTR_NAME_MAX + 1];
67787@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
67788 if (error < 0)
67789 return error;
67790
67791- return vfs_removexattr(d, kname);
67792+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
67793+ return -EACCES;
67794+
67795+ return vfs_removexattr(path->dentry, kname);
67796 }
67797
67798 static int path_removexattr(const char __user *pathname,
67799@@ -623,7 +652,7 @@ retry:
67800 return error;
67801 error = mnt_want_write(path.mnt);
67802 if (!error) {
67803- error = removexattr(path.dentry, name);
67804+ error = removexattr(&path, name);
67805 mnt_drop_write(path.mnt);
67806 }
67807 path_put(&path);
67808@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
67809 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
67810 {
67811 struct fd f = fdget(fd);
67812+ struct path *path;
67813 int error = -EBADF;
67814
67815 if (!f.file)
67816 return error;
67817+ path = &f.file->f_path;
67818 audit_file(f.file);
67819 error = mnt_want_write_file(f.file);
67820 if (!error) {
67821- error = removexattr(f.file->f_path.dentry, name);
67822+ error = removexattr(path, name);
67823 mnt_drop_write_file(f.file);
67824 }
67825 fdput(f);
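The setxattr()/removexattr() helpers are reworked to take a struct path instead of a bare dentry because the grsecurity hooks need the vfsmount alongside the dentry to evaluate the object's full filesystem location. Judging from the call sites above, the hook declarations have roughly the following shape (a sketch; the actual prototypes live in the grsecurity headers added by this patch and may differ in return type and const qualifiers):

    #include <linux/path.h>

    /* Both checks receive the <dentry, vfsmount> pair so the RBAC
     * engine can match the object against subject rules by pathname;
     * a zero return means the access is denied. */
    int gr_acl_handle_setxattr(struct dentry *dentry, struct vfsmount *mnt);
    int gr_acl_handle_removexattr(struct dentry *dentry, struct vfsmount *mnt);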
67826diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
67827index 4e20fe7..6d1a55a 100644
67828--- a/fs/xfs/libxfs/xfs_bmap.c
67829+++ b/fs/xfs/libxfs/xfs_bmap.c
67830@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
67831
67832 #else
67833 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
67834-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
67835+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
67836 #endif /* DEBUG */
67837
67838 /*
67839diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
67840index 098cd78..724d3f8 100644
67841--- a/fs/xfs/xfs_dir2_readdir.c
67842+++ b/fs/xfs/xfs_dir2_readdir.c
67843@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
67844 ino = dp->d_ops->sf_get_ino(sfp, sfep);
67845 filetype = dp->d_ops->sf_get_ftype(sfep);
67846 ctx->pos = off & 0x7fffffff;
67847- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67848+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
67849+ char name[sfep->namelen];
67850+ memcpy(name, sfep->name, sfep->namelen);
67851+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
67852+ return 0;
67853+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67854 xfs_dir3_get_dtype(dp->i_mount, filetype)))
67855 return 0;
67856 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
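When the directory is small enough that its shortform entries live in the inode's inline data (if_u1.if_data == if_u2.if_inline_data), the entry name is bounced through an on-stack buffer before dir_emit() exposes it to user space. The likely motivation is the PAX_USERCOPY hardening enabled elsewhere in this patch: copying to userland from the middle of an inode slab object would trip the usercopy slab checks, while a copy from an exact-size stack object is always permitted. The pattern in isolation (an illustrative helper, not the patch's code):

    #include <linux/fs.h>
    #include <linux/string.h>

    /* Bounce a slab-embedded name through the stack so user-copy
     * hardening sees a stack object of known size. */
    static bool emit_name_bounced(struct dir_context *ctx, const char *src,
                                  int namelen, u64 ino, unsigned int type)
    {
        char name[256];    /* shortform names fit in one byte of length */

        memcpy(name, src, namelen);
        return dir_emit(ctx, name, namelen, ino, type);
    }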
67857diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
67858index a183198..6b52f52 100644
67859--- a/fs/xfs/xfs_ioctl.c
67860+++ b/fs/xfs/xfs_ioctl.c
67861@@ -119,7 +119,7 @@ xfs_find_handle(
67862 }
67863
67864 error = -EFAULT;
67865- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
67866+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
67867 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
67868 goto out_put;
67869
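The added hsize > sizeof handle test bounds the copy_to_user() by the size of the on-stack kernel object rather than trusting a length derived from the user-supplied request, turning a potential disclosure of adjacent stack memory into an error return. The defensive shape in general (illustrative):

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    /* Never let a caller-influenced length exceed the kernel object:
     * reject before copy_to_user() rather than clamping silently. */
    static long copy_bounded_to_user(void __user *dst, const void *src,
                                     size_t len, size_t objsize)
    {
        if (len > objsize)
            return -EFAULT;
        if (copy_to_user(dst, src, len))
            return -EFAULT;
        return 0;
    }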
67870diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
67871index c31d2c2..6ec8f62 100644
67872--- a/fs/xfs/xfs_linux.h
67873+++ b/fs/xfs/xfs_linux.h
67874@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
67875 * of the compiler which do not like us using do_div in the middle
67876 * of large functions.
67877 */
67878-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67879+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67880 {
67881 __u32 mod;
67882
67883@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
67884 return 0;
67885 }
67886 #else
67887-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67888+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67889 {
67890 __u32 mod;
67891
67892diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
67893new file mode 100644
67894index 0000000..31f8fe4
67895--- /dev/null
67896+++ b/grsecurity/Kconfig
67897@@ -0,0 +1,1182 @@
67898+#
67899+# grsecurity configuration
67900+#
67901+menu "Memory Protections"
67902+depends on GRKERNSEC
67903+
67904+config GRKERNSEC_KMEM
67905+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
67906+ default y if GRKERNSEC_CONFIG_AUTO
67907+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
67908+ help
67909+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
67910+ be written to or read from to modify or leak the contents of the running
67911+ kernel. /dev/port will also not be allowed to be opened, writing to
67912+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
67913+ If you have module support disabled, enabling this will close up several
67914+ ways that are currently used to insert malicious code into the running
67915+ kernel.
67916+
67917+ Even with this feature enabled, we still highly recommend that
67918+ you use the RBAC system, as it is still possible for an attacker to
67919+ modify the running kernel through other more obscure methods.
67920+
67921+ It is highly recommended that you say Y here if you meet all the
67922+ conditions above.
67923+
67924+config GRKERNSEC_VM86
67925+ bool "Restrict VM86 mode"
67926+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
67927+ depends on X86_32
67928+
67929+ help
67930+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
67931+ make use of a special execution mode on 32bit x86 processors called
67932+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
67933+ video cards and will still work with this option enabled. The purpose
67934+ of the option is to prevent exploitation of emulation errors in
67935+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
67936+ Nearly all users should be able to enable this option.
67937+
67938+config GRKERNSEC_IO
67939+ bool "Disable privileged I/O"
67940+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
67941+ depends on X86
67942+ select RTC_CLASS
67943+ select RTC_INTF_DEV
67944+ select RTC_DRV_CMOS
67945+
67946+ help
67947+ If you say Y here, all ioperm and iopl calls will return an error.
67948+ Ioperm and iopl can be used to modify the running kernel.
67949+ Unfortunately, some programs need this access to operate properly,
67950+ the most notable of which are XFree86 and hwclock. The hwclock case
67951+ can be remedied by having RTC support in the kernel, so real-time
67952+ clock support is enabled if this option is enabled, to ensure
67953+ that hwclock operates correctly. If hwclock still does not work,
67954+ either update udev or symlink /dev/rtc to /dev/rtc0.
67955+
67956+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
67957+ you may not be able to boot into a graphical environment with this
67958+ option enabled. In this case, you should use the RBAC system instead.
67959+
67960+config GRKERNSEC_BPF_HARDEN
67961+ bool "Harden BPF interpreter"
67962+ default y if GRKERNSEC_CONFIG_AUTO
67963+ help
67964+ Unlike previous versions of grsecurity that hardened both the BPF
67965+ interpreted code against corruption at rest as well as the JIT code
67966+ against JIT-spray attacks and attacker-controlled immediate values
67967+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
67968+ and will ensure the interpreted code is read-only at rest. This feature
67969+ may be removed at a later time when eBPF stabilizes to entirely revert
67970+ back to the more secure pre-3.16 BPF interpreter/JIT.
67971+
67972+ If you're using KERNEXEC, it's recommended that you enable this option
67973+ to supplement the hardening of the kernel.
67974+
67975+config GRKERNSEC_PERF_HARDEN
67976+ bool "Disable unprivileged PERF_EVENTS usage by default"
67977+ default y if GRKERNSEC_CONFIG_AUTO
67978+ depends on PERF_EVENTS
67979+ help
67980+ If you say Y here, the range of acceptable values for the
67981+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
67982+ default to a new value: 3. When the sysctl is set to this value, no
67983+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
67984+
67985+ Though PERF_EVENTS can be used legitimately for performance monitoring
67986+ and low-level application profiling, it is forced on regardless of
67987+ configuration, has been at fault for several vulnerabilities, and
67988+ creates new opportunities for side channels and other information leaks.
67989+
67990+ This feature puts PERF_EVENTS into a secure default state and permits
67991+ the administrator to change out of it temporarily if unprivileged
67992+ application profiling is needed.
67993+
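Concretely, the stock kernel accepts -1 through 2 for this sysctl; the new value 3 refuses perf_event_open() to unprivileged tasks entirely. A profiler wrapper can check the setting before attempting to use perf; a small illustrative userspace check:

    #include <stdio.h>

    /* Report whether unprivileged perf_event_open() is expected to
     * succeed, based on /proc/sys/kernel/perf_event_paranoid. */
    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level;

        if (!f)
            return 1;
        if (fscanf(f, "%d", &level) != 1) {
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("perf_event_paranoid = %d (%s)\n", level,
               level >= 3 ? "unprivileged perf disabled"
                          : "unprivileged perf may be allowed");
        return 0;
    }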
67994+config GRKERNSEC_RAND_THREADSTACK
67995+ bool "Insert random gaps between thread stacks"
67996+ default y if GRKERNSEC_CONFIG_AUTO
67997+ depends on PAX_RANDMMAP && !PPC
67998+ help
67999+ If you say Y here, a random-sized gap will be enforced between allocated
68000+ thread stacks. Glibc's NPTL and other threading libraries that
68001+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68002+ The implementation currently provides 8 bits of entropy for the gap.
68003+
68004+ Many distributions do not compile threaded remote services with the
68005+ -fstack-check argument to GCC, causing the variable-sized stack-based
68006+ allocator, alloca(), to not probe the stack on allocation. This
68007+ permits an unbounded alloca() to skip over any guard page and potentially
68008+ modify another thread's stack reliably. An enforced random gap
68009+ reduces the reliability of such an attack and increases the chance
68010+ that such a read/write to another thread's stack instead lands in
68011+ an unmapped area, causing a crash and triggering grsecurity's
68012+ anti-bruteforcing logic.
68013+
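The kernel-side hook keys off MAP_STACK: when a threading library requests a stack mapping, the address search is padded by a random amount (8 bits of entropy, per the text above). Nothing changes for userspace except the resulting addresses; the request looks like an ordinary NPTL-style allocation, e.g.:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <stdio.h>

    /* Allocate a thread-stack-style mapping the way NPTL does; with
     * GRKERNSEC_RAND_THREADSTACK the kernel inserts a random gap
     * before the range it picks. */
    int main(void)
    {
        size_t sz = 8 * 1024 * 1024;
        void *stk = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

        if (stk == MAP_FAILED)
            return 1;
        printf("stack mapping at %p\n", stk);
        munmap(stk, sz);
        return 0;
    }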
68014+config GRKERNSEC_PROC_MEMMAP
68015+ bool "Harden ASLR against information leaks and entropy reduction"
68016+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68017+ depends on PAX_NOEXEC || PAX_ASLR
68018+ help
68019+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68020+ give no information about the addresses of its mappings if
68021+ PaX features that rely on random addresses are enabled on the task.
68022+ In addition to sanitizing this information and disabling other
68023+ dangerous sources of information, this option causes reads of sensitive
68024+ /proc/<pid> entries to be denied when the file descriptor was opened in
68025+ a different task than the one performing the read. Such attempts are logged.
68026+ This option also limits argv/env strings for suid/sgid binaries
68027+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68028+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68029+ binaries to prevent alternative mmap layouts from being abused.
68030+
68031+ If you use PaX it is essential that you say Y here as it closes up
68032+ several holes that make full ASLR useless locally.
68033+
68034+
68035+config GRKERNSEC_KSTACKOVERFLOW
68036+ bool "Prevent kernel stack overflows"
68037+ default y if GRKERNSEC_CONFIG_AUTO
68038+ depends on !IA64 && 64BIT
68039+ help
68040+ If you say Y here, the kernel's process stacks will be allocated
68041+ with vmalloc instead of the kernel's default allocator. This
68042+ introduces guard pages that in combination with the alloca checking
68043+ of the STACKLEAK feature prevents all forms of kernel process stack
68044+ overflow abuse. Note that this is different from kernel stack
68045+ buffer overflows.
68046+
68047+config GRKERNSEC_BRUTE
68048+ bool "Deter exploit bruteforcing"
68049+ default y if GRKERNSEC_CONFIG_AUTO
68050+ help
68051+ If you say Y here, attempts to bruteforce exploits against forking
68052+ daemons such as apache or sshd, as well as against suid/sgid binaries
68053+ will be deterred. When a child of a forking daemon is killed by PaX
68054+ or crashes due to an illegal instruction or other suspicious signal,
68055+ the parent process will be delayed 30 seconds upon every subsequent
68056+ fork until the administrator is able to assess the situation and
68057+ restart the daemon.
68058+ In the suid/sgid case, the attempt is logged, the user has all their
68059+ existing instances of the suid/sgid binary terminated and will
68060+ be unable to execute any suid/sgid binaries for 15 minutes.
68061+
68062+ It is recommended that you also enable signal logging in the auditing
68063+ section so that logs are generated when a process triggers a suspicious
68064+ signal.
68065+ If the sysctl option is enabled, a sysctl option with name
68066+ "deter_bruteforce" is created.
68067+
68068+config GRKERNSEC_MODHARDEN
68069+ bool "Harden module auto-loading"
68070+ default y if GRKERNSEC_CONFIG_AUTO
68071+ depends on MODULES
68072+ help
68073+ If you say Y here, module auto-loading in response to use of some
68074+ feature implemented by an unloaded module will be restricted to
68075+ root users. Enabling this option helps defend against attacks
68076+ by unprivileged users who abuse the auto-loading behavior to
68077+ cause a vulnerable module to load that is then exploited.
68078+
68079+ If this option prevents a legitimate use of auto-loading for a
68080+ non-root user, the administrator can execute modprobe manually
68081+ with the exact name of the module mentioned in the alert log.
68082+ Alternatively, the administrator can add the module to the list
68083+ of modules loaded at boot by modifying init scripts.
68084+
68085+ Modification of init scripts will most likely be needed on
68086+ Ubuntu servers with encrypted home directory support enabled,
68087+ as the first non-root user logging in will cause the ecb(aes),
68088+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68089+
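The auto-loading being restricted is the request_module() path: many syscalls transparently load a module to satisfy a request, such as creating a socket in an address family whose protocol support is modular. An illustrative userspace trigger (with MODHARDEN enabled, the implied module load is permitted only for root):

    #include <sys/socket.h>
    #include <stdio.h>

    int main(void)
    {
        /* If AF_APPLETALK support is built as a module and not yet
         * loaded, this socket() call makes the kernel invoke
         * request_module("net-pf-5"); MODHARDEN restricts that
         * auto-load to root. */
        int fd = socket(AF_APPLETALK, SOCK_DGRAM, 0);

        printf("socket() %s\n", fd >= 0 ? "succeeded" : "failed");
        return 0;
    }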
68090+config GRKERNSEC_HIDESYM
68091+ bool "Hide kernel symbols"
68092+ default y if GRKERNSEC_CONFIG_AUTO
68093+ select PAX_USERCOPY_SLABS
68094+ help
68095+ If you say Y here, getting information on loaded modules, and
68096+ displaying all kernel symbols through a syscall will be restricted
68097+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68098+ /proc/kallsyms will be restricted to the root user. The RBAC
68099+ system can hide that entry even from root.
68100+
68101+ This option also prevents leaking of kernel addresses through
68102+ several /proc entries.
68103+
68104+ Note that this option is only effective provided the following
68105+ conditions are met:
68106+ 1) The kernel using grsecurity is not precompiled by some distribution
68107+ 2) You have also enabled GRKERNSEC_DMESG
68108+ 3) You are using the RBAC system and hiding other files such as your
68109+ kernel image and System.map. Alternatively, enabling this option
68110+ causes the permissions on /boot, /lib/modules, and the kernel
68111+ source directory to change at compile time to prevent
68112+ reading by non-root users.
68113+ If the above conditions are met, this option will aid in providing a
68114+ useful protection against local kernel exploitation of overflows
68115+ and arbitrary read/write vulnerabilities.
68116+
68117+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68118+ in addition to this feature.
68119+
68120+config GRKERNSEC_RANDSTRUCT
68121+ bool "Randomize layout of sensitive kernel structures"
68122+ default y if GRKERNSEC_CONFIG_AUTO
68123+ select GRKERNSEC_HIDESYM
68124+ select MODVERSIONS if MODULES
68125+ help
68126+ If you say Y here, the layouts of a number of sensitive kernel
68127+ structures (task, fs, cred, etc) and all structures composed entirely
68128+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68129+ This can introduce the requirement of an additional infoleak
68130+ vulnerability for exploits targeting these structure types.
68131+
68132+ Enabling this feature will introduce some performance impact, slightly
68133+ increase memory usage, and prevent the use of forensic tools like
68134+ Volatility against the system (unless the randomization seed can be
68135+ recovered from a kernel source tree left uncleaned after installation).
68136+
68137+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68138+ It remains after a make clean to allow for external modules to be compiled
68139+ with the existing seed and will be removed by a make mrproper or
68140+ make distclean.
68141+
68142+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68143+ to install the supporting headers explicitly in addition to the normal
68144+ gcc package.
68145+
68146+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68147+ bool "Use cacheline-aware structure randomization"
68148+ depends on GRKERNSEC_RANDSTRUCT
68149+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68150+ help
68151+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68152+ at restricting randomization to cacheline-sized groups of elements. It
68153+ will further not randomize bitfields in structures. This reduces the
68154+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68155+
68156+config GRKERNSEC_KERN_LOCKOUT
68157+ bool "Active kernel exploit response"
68158+ default y if GRKERNSEC_CONFIG_AUTO
68159+ depends on X86 || ARM || PPC || SPARC
68160+ help
68161+ If you say Y here, when a PaX alert is triggered due to suspicious
68162+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68163+ or an OOPS occurs due to bad memory accesses, instead of just
68164+ terminating the offending process (and potentially allowing
68165+ a subsequent exploit from the same user), we will take one of two
68166+ actions:
68167+ If the user was root, we will panic the system
68168+ If the user was non-root, we will log the attempt, terminate
68169+ all processes owned by the user, then prevent them from creating
68170+ any new processes until the system is restarted
68171+ This deters repeated kernel exploitation/bruteforcing attempts
68172+ and is useful for later forensics.
68173+
68174+config GRKERNSEC_OLD_ARM_USERLAND
68175+ bool "Old ARM userland compatibility"
68176+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68177+ help
68178+ If you say Y here, stubs of executable code to perform such operations
68179+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68180+ table. This is unfortunately needed for old ARM userland meant to run
68181+ across a wide range of processors. Without this option enabled,
68182+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68183+ which is enough for Linaro userlands or other userlands designed for v6
68184+ and newer ARM CPUs. It's recommended that you try without this option enabled
68185+ first, and only enable it if your userland does not boot (it will likely fail
68186+ at init time).
68187+
68188+endmenu
68189+menu "Role Based Access Control Options"
68190+depends on GRKERNSEC
68191+
68192+config GRKERNSEC_RBAC_DEBUG
68193+ bool
68194+
68195+config GRKERNSEC_NO_RBAC
68196+ bool "Disable RBAC system"
68197+ help
68198+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68199+ preventing the RBAC system from being enabled. You should only say Y
68200+ here if you have no intention of using the RBAC system, so as to prevent
68201+ an attacker with root access from misusing the RBAC system to hide files
68202+ and processes when loadable module support and /dev/[k]mem have been
68203+ locked down.
68204+
68205+config GRKERNSEC_ACL_HIDEKERN
68206+ bool "Hide kernel processes"
68207+ help
68208+ If you say Y here, all kernel threads will be hidden to all
68209+ processes but those whose subject has the "view hidden processes"
68210+ flag.
68211+
68212+config GRKERNSEC_ACL_MAXTRIES
68213+ int "Maximum tries before password lockout"
68214+ default 3
68215+ help
68216+ This option enforces the maximum number of times a user can attempt
68217+ to authorize themselves with the grsecurity RBAC system before being
68218+ denied the ability to attempt authorization again for a specified time.
68219+ The lower the number, the harder it will be to brute-force a password.
68220+
68221+config GRKERNSEC_ACL_TIMEOUT
68222+ int "Time to wait after max password tries, in seconds"
68223+ default 30
68224+ help
68225+ This option specifies the time the user must wait after attempting to
68226+ authorize to the RBAC system with the maximum number of invalid
68227+ passwords. The higher the number, the harder it will be to brute-force
68228+ a password.
68229+
68230+endmenu
68231+menu "Filesystem Protections"
68232+depends on GRKERNSEC
68233+
68234+config GRKERNSEC_PROC
68235+ bool "Proc restrictions"
68236+ default y if GRKERNSEC_CONFIG_AUTO
68237+ help
68238+ If you say Y here, the permissions of the /proc filesystem
68239+ will be altered to enhance system security and privacy. You MUST
68240+ choose either a user only restriction or a user and group restriction.
68241+ Depending upon the option you choose, you can either restrict users to
68242+ see only the processes they themselves run, or choose a group that can
68243+ view all processes and files normally restricted to root if you choose
68244+ the user and group restriction. NOTE: If you're running identd or
68245+ ntpd as a non-root user, you will have to run it as the group you
68246+ specify here.
68247+
68248+config GRKERNSEC_PROC_USER
68249+ bool "Restrict /proc to user only"
68250+ depends on GRKERNSEC_PROC
68251+ help
68252+ If you say Y here, non-root users will only be able to view their own
68253+ processes, and will be restricted from viewing network-related
68254+ information and kernel symbol and module information.
68255+
68256+config GRKERNSEC_PROC_USERGROUP
68257+ bool "Allow special group"
68258+ default y if GRKERNSEC_CONFIG_AUTO
68259+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68260+ help
68261+ If you say Y here, you will be able to select a group that will be
68262+ able to view all processes and network-related information. If you've
68263+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68264+ remain hidden. This option is useful if you want to run identd as
68265+ a non-root user. The group you select may also be chosen at boot time
68266+ via "grsec_proc_gid=" on the kernel commandline.
68267+
68268+config GRKERNSEC_PROC_GID
68269+ int "GID for special group"
68270+ depends on GRKERNSEC_PROC_USERGROUP
68271+ default 1001
68272+
68273+config GRKERNSEC_PROC_ADD
68274+ bool "Additional restrictions"
68275+ default y if GRKERNSEC_CONFIG_AUTO
68276+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68277+ help
68278+ If you say Y here, additional restrictions will be placed on
68279+ /proc that keep normal users from viewing device information and
68280+ slabinfo information that could be useful for exploits.
68281+
68282+config GRKERNSEC_LINK
68283+ bool "Linking restrictions"
68284+ default y if GRKERNSEC_CONFIG_AUTO
68285+ help
68286+ If you say Y here, /tmp race exploits will be prevented, since users
68287+ will no longer be able to follow symlinks owned by other users in
68288+ world-writable +t directories (e.g. /tmp), unless the owner of the
68289+ symlink is the owner of the directory. Users will also not be
68290+ able to hardlink to files they do not own. If the sysctl option is
68291+ enabled, a sysctl option with name "linking_restrictions" is created.
68292+
68293+config GRKERNSEC_SYMLINKOWN
68294+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68295+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68296+ help
68297+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68298+ that prevents it from being used as a security feature. As Apache
68299+ verifies the symlink by performing a stat() against the target of
68300+ the symlink before it is followed, an attacker can setup a symlink
68301+ to point to a same-owned file, then replace the symlink with one
68302+ that targets another user's file just after Apache "validates" the
68303+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68304+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68305+ will be in place for the group you specify. If the sysctl option
68306+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68307+ created.
68308+
68309+config GRKERNSEC_SYMLINKOWN_GID
68310+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68311+ depends on GRKERNSEC_SYMLINKOWN
68312+ default 1006
68313+ help
68314+ Setting this GID determines what group kernel-enforced
68315+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68316+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68317+
68318+config GRKERNSEC_FIFO
68319+ bool "FIFO restrictions"
68320+ default y if GRKERNSEC_CONFIG_AUTO
68321+ help
68322+ If you say Y here, users will not be able to write to FIFOs they don't
68323+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68324+ the FIFO is also the owner of the directory it's held in. If the sysctl
68325+ option is enabled, a sysctl option with name "fifo_restrictions" is
68326+ created.
68327+
68328+config GRKERNSEC_SYSFS_RESTRICT
68329+ bool "Sysfs/debugfs restriction"
68330+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68331+ depends on SYSFS
68332+ help
68333+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68334+ any filesystem normally mounted under it (e.g. debugfs) will be
68335+ mostly accessible only by root. These filesystems generally provide access
68336+ to hardware and debug information that isn't appropriate for unprivileged
68337+ users of the system. Sysfs and debugfs have also become a large source
68338+ of new vulnerabilities, ranging from infoleaks to local compromise.
68339+ There has been very little oversight with an eye toward security involved
68340+ in adding new exporters of information to these filesystems, so their
68341+ use is discouraged.
68342+ For reasons of compatibility, a few directories have been whitelisted
68343+ for access by non-root users:
68344+ /sys/fs/selinux
68345+ /sys/fs/fuse
68346+ /sys/devices/system/cpu
68347+
68348+config GRKERNSEC_ROFS
68349+ bool "Runtime read-only mount protection"
68350+ depends on SYSCTL
68351+ help
68352+ If you say Y here, a sysctl option with name "romount_protect" will
68353+ be created. By setting this option to 1 at runtime, filesystems
68354+ will be protected in the following ways:
68355+ * No new writable mounts will be allowed
68356+ * Existing read-only mounts won't be able to be remounted read/write
68357+ * Write operations will be denied on all block devices
68358+ This option acts independently of grsec_lock: once it is set to 1,
68359+ it cannot be turned off. Therefore, please be mindful of the resulting
68360+ behavior if this option is enabled in an init script on a read-only
68361+ filesystem.
68362+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68363+ and GRKERNSEC_IO should be enabled and module loading disabled via
68364+ config or at runtime.
68365+ This feature is mainly intended for secure embedded systems.
68366+
68367+
68368+config GRKERNSEC_DEVICE_SIDECHANNEL
68369+ bool "Eliminate stat/notify-based device sidechannels"
68370+ default y if GRKERNSEC_CONFIG_AUTO
68371+ help
68372+ If you say Y here, timing analyses on block or character
68373+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68374+ will be thwarted for unprivileged users. If a process without
68375+ CAP_MKNOD stats such a device, the last access and last modify times
68376+ will match the device's create time. No access or modify events
68377+ will be triggered through inotify/dnotify/fanotify for such devices.
68378+ This feature will prevent attacks that may at a minimum
68379+ allow an attacker to determine the administrator's password length.
68380+
68381+config GRKERNSEC_CHROOT
68382+ bool "Chroot jail restrictions"
68383+ default y if GRKERNSEC_CONFIG_AUTO
68384+ help
68385+ If you say Y here, you will be able to choose several options that will
68386+ make breaking out of a chrooted jail much more difficult. If you
68387+ encounter no software incompatibilities with the following options, it
68388+ is recommended that you enable each one.
68389+
68390+ Note that the chroot restrictions are not intended to apply to "chroots"
68391+ to directories that are simple bind mounts of the global root filesystem.
68392+ For several other reasons, a user shouldn't expect any significant
68393+ security by performing such a chroot.
68394+
68395+config GRKERNSEC_CHROOT_MOUNT
68396+ bool "Deny mounts"
68397+ default y if GRKERNSEC_CONFIG_AUTO
68398+ depends on GRKERNSEC_CHROOT
68399+ help
68400+ If you say Y here, processes inside a chroot will not be able to
68401+ mount or remount filesystems. If the sysctl option is enabled, a
68402+ sysctl option with name "chroot_deny_mount" is created.
68403+
68404+config GRKERNSEC_CHROOT_DOUBLE
68405+ bool "Deny double-chroots"
68406+ default y if GRKERNSEC_CONFIG_AUTO
68407+ depends on GRKERNSEC_CHROOT
68408+ help
68409+ If you say Y here, processes inside a chroot will not be able to chroot
68410+ again outside the chroot. This is a widely used method of breaking
68411+ out of a chroot jail and should not be allowed. If the sysctl
68412+ option is enabled, a sysctl option with name
68413+ "chroot_deny_chroot" is created.
68414+
68415+config GRKERNSEC_CHROOT_PIVOT
68416+ bool "Deny pivot_root in chroot"
68417+ default y if GRKERNSEC_CONFIG_AUTO
68418+ depends on GRKERNSEC_CHROOT
68419+ help
68420+ If you say Y here, processes inside a chroot will not be able to use
68421+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68422+ works similarly to chroot in that it changes the root filesystem. This
68423+ function could be misused in a chrooted process to attempt to break out
68424+ of the chroot, and therefore should not be allowed. If the sysctl
68425+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68426+ created.
68427+
68428+config GRKERNSEC_CHROOT_CHDIR
68429+ bool "Enforce chdir(\"/\") on all chroots"
68430+ default y if GRKERNSEC_CONFIG_AUTO
68431+ depends on GRKERNSEC_CHROOT
68432+ help
68433+ If you say Y here, the current working directory of all newly-chrooted
68434+ applications will be set to the root directory of the chroot.
68435+ The man page on chroot(2) states:
68436+ Note that this call does not change the current working
68437+ directory, so that `.' can be outside the tree rooted at
68438+ `/'. In particular, the super-user can escape from a
68439+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68440+
68441+ It is recommended that you say Y here, since it's not known to break
68442+ any software. If the sysctl option is enabled, a sysctl option with
68443+ name "chroot_enforce_chdir" is created.
68444+
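The option enforces in the kernel the same discipline careful programs already follow in userspace: move the working directory inside the jail immediately after the chroot() call so no directory reference remains outside it. The canonical pattern (standard C; the jail path is a placeholder and running it requires CAP_SYS_CHROOT):

    #include <unistd.h>
    #include <stdio.h>

    /* Correct jail setup: chdir("/") after chroot() so the CWD cannot
     * serve as a foothold outside the jail; CHROOT_CHDIR performs the
     * equivalent of this chdir for every chroot. */
    static int enter_jail(const char *path)
    {
        if (chroot(path) != 0) {
            perror("chroot");
            return -1;
        }
        if (chdir("/") != 0) {
            perror("chdir");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return enter_jail("/var/empty") ? 1 : 0;
    }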
68445+config GRKERNSEC_CHROOT_CHMOD
68446+ bool "Deny (f)chmod +s"
68447+ default y if GRKERNSEC_CONFIG_AUTO
68448+ depends on GRKERNSEC_CHROOT
68449+ help
68450+ If you say Y here, processes inside a chroot will not be able to chmod
68451+ or fchmod files to make them have suid or sgid bits. This protects
68452+ against another published method of breaking a chroot. If the sysctl
68453+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68454+ created.
68455+
68456+config GRKERNSEC_CHROOT_FCHDIR
68457+ bool "Deny fchdir and fhandle out of chroot"
68458+ default y if GRKERNSEC_CONFIG_AUTO
68459+ depends on GRKERNSEC_CHROOT
68460+ help
68461+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68462+ to a file descriptor of the chrooting process that points to a directory
68463+ outside the filesystem will be stopped. Additionally, this option prevents
68464+ use of the recently-created syscall for opening files by a guessable "file
68465+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68466+ with name "chroot_deny_fchdir" is created.
68467+
68468+config GRKERNSEC_CHROOT_MKNOD
68469+ bool "Deny mknod"
68470+ default y if GRKERNSEC_CONFIG_AUTO
68471+ depends on GRKERNSEC_CHROOT
68472+ help
68473+ If you say Y here, processes inside a chroot will not be allowed to
68474+ mknod. The problem with using mknod inside a chroot is that it
68475+ would allow an attacker to create a device entry that is the same
68476+ as one on the physical root of your system, which could range from
68477+ anything from the console device to a device for your harddrive (which
68478+ they could then use to wipe the drive or steal data). It is recommended
68479+ that you say Y here, unless you run into software incompatibilities.
68480+ If the sysctl option is enabled, a sysctl option with name
68481+ "chroot_deny_mknod" is created.
68482+
68483+config GRKERNSEC_CHROOT_SHMAT
68484+ bool "Deny shmat() out of chroot"
68485+ default y if GRKERNSEC_CONFIG_AUTO
68486+ depends on GRKERNSEC_CHROOT
68487+ help
68488+ If you say Y here, processes inside a chroot will not be able to attach
68489+ to shared memory segments that were created outside of the chroot jail.
68490+ It is recommended that you say Y here. If the sysctl option is enabled,
68491+ a sysctl option with name "chroot_deny_shmat" is created.
68492+
68493+config GRKERNSEC_CHROOT_UNIX
68494+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68495+ default y if GRKERNSEC_CONFIG_AUTO
68496+ depends on GRKERNSEC_CHROOT
68497+ help
68498+ If you say Y here, processes inside a chroot will not be able to
68499+ connect to abstract (meaning not belonging to a filesystem) Unix
68500+ domain sockets that were bound outside of a chroot. It is recommended
68501+ that you say Y here. If the sysctl option is enabled, a sysctl option
68502+ with name "chroot_deny_unix" is created.
68503+
68504+config GRKERNSEC_CHROOT_FINDTASK
68505+ bool "Protect outside processes"
68506+ default y if GRKERNSEC_CONFIG_AUTO
68507+ depends on GRKERNSEC_CHROOT
68508+ help
68509+ If you say Y here, processes inside a chroot will not be able to
68510+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68511+ getsid, or view any process outside of the chroot. If the sysctl
68512+ option is enabled, a sysctl option with name "chroot_findtask" is
68513+ created.
68514+
68515+config GRKERNSEC_CHROOT_NICE
68516+ bool "Restrict priority changes"
68517+ default y if GRKERNSEC_CONFIG_AUTO
68518+ depends on GRKERNSEC_CHROOT
68519+ help
68520+ If you say Y here, processes inside a chroot will not be able to raise
68521+ the priority of processes in the chroot, or alter the priority of
68522+ processes outside the chroot. This provides more security than simply
68523+ removing CAP_SYS_NICE from the process' capability set. If the
68524+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68525+ is created.
68526+
68527+config GRKERNSEC_CHROOT_SYSCTL
68528+ bool "Deny sysctl writes"
68529+ default y if GRKERNSEC_CONFIG_AUTO
68530+ depends on GRKERNSEC_CHROOT
68531+ help
68532+ If you say Y here, an attacker in a chroot will not be able to
68533+ write to sysctl entries, either by sysctl(2) or through a /proc
68534+ interface. It is strongly recommended that you say Y here. If the
68535+ sysctl option is enabled, a sysctl option with name
68536+ "chroot_deny_sysctl" is created.
68537+
68538+config GRKERNSEC_CHROOT_RENAME
68539+ bool "Deny bad renames"
68540+ default y if GRKERNSEC_CONFIG_AUTO
68541+ depends on GRKERNSEC_CHROOT
68542+ help
68543+ If you say Y here, an attacker in a chroot will not be able to
68544+ abuse the ability to create double chroots to break out of the
68545+ chroot by exploiting a race condition between a rename of a directory
68546+ within a chroot against an open of a symlink with relative path
68547+ components. This feature will likewise prevent an accomplice outside
68548+ a chroot from enabling a user inside the chroot to break out and make
68549+ use of their credentials on the global filesystem. Enabling this
68550+ feature is essential to prevent root users from breaking out of a
68551+ chroot. If the sysctl option is enabled, a sysctl option with name
68552+ "chroot_deny_bad_rename" is created.
68553+
68554+config GRKERNSEC_CHROOT_CAPS
68555+ bool "Capability restrictions"
68556+ default y if GRKERNSEC_CONFIG_AUTO
68557+ depends on GRKERNSEC_CHROOT
68558+ help
68559+ If you say Y here, the capabilities on all processes within a
68560+ chroot jail will be lowered to stop module insertion, raw i/o,
68561+ system and net admin tasks, rebooting the system, modifying immutable
68562+ files, modifying IPC owned by another, and changing the system time.
68563+ This is left as an option because it can break some apps. Disable this
68564+ if your chrooted apps are having problems performing those kinds of
68565+ tasks. If the sysctl option is enabled, a sysctl option with
68566+ name "chroot_caps" is created.
68567+
68568+config GRKERNSEC_CHROOT_INITRD
68569+ bool "Exempt initrd tasks from restrictions"
68570+ default y if GRKERNSEC_CONFIG_AUTO
68571+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68572+ help
68573+ If you say Y here, tasks started prior to init will be exempted from
68574+ grsecurity's chroot restrictions. This option is mainly meant to
68575+ resolve Plymouth's performing privileged operations unnecessarily
68576+ in a chroot.
68577+
68578+endmenu
68579+menu "Kernel Auditing"
68580+depends on GRKERNSEC
68581+
68582+config GRKERNSEC_AUDIT_GROUP
68583+ bool "Single group for auditing"
68584+ help
68585+ If you say Y here, the exec and chdir logging features will only operate
68586+ on a group you specify. This option is recommended if you only want to
68587+ watch certain users instead of having a large amount of logs from the
68588+ entire system. If the sysctl option is enabled, a sysctl option with
68589+ name "audit_group" is created.
68590+
68591+config GRKERNSEC_AUDIT_GID
68592+ int "GID for auditing"
68593+ depends on GRKERNSEC_AUDIT_GROUP
68594+ default 1007
68595+
68596+config GRKERNSEC_EXECLOG
68597+ bool "Exec logging"
68598+ help
68599+ If you say Y here, all execve() calls will be logged (since the
68600+ other exec*() calls are frontends to execve(), all execution
68601+ will be logged). Useful for shell-servers that like to keep track
68602+ of their users. If the sysctl option is enabled, a sysctl option with
68603+ name "exec_logging" is created.
68604+ WARNING: This option when enabled will produce a LOT of logs, especially
68605+ on an active system.
68606+
68607+config GRKERNSEC_RESLOG
68608+ bool "Resource logging"
68609+ default y if GRKERNSEC_CONFIG_AUTO
68610+ help
68611+ If you say Y here, all attempts to overstep resource limits will
68612+ be logged with the resource name, the requested size, and the current
68613+ limit. It is highly recommended that you say Y here. If the sysctl
68614+ option is enabled, a sysctl option with name "resource_logging" is
68615+ created. If the RBAC system is enabled, the sysctl value is ignored.
68616+
68617+config GRKERNSEC_CHROOT_EXECLOG
68618+ bool "Log execs within chroot"
68619+ help
68620+ If you say Y here, all executions inside a chroot jail will be logged
68621+ to syslog. This can cause a large amount of logs if certain
68622+ applications (eg. djb's daemontools) are installed on the system, and
68623+ is therefore left as an option. If the sysctl option is enabled, a
68624+ sysctl option with name "chroot_execlog" is created.
68625+
68626+config GRKERNSEC_AUDIT_PTRACE
68627+ bool "Ptrace logging"
68628+ help
68629+ If you say Y here, all attempts to attach to a process via ptrace
68630+ will be logged. If the sysctl option is enabled, a sysctl option
68631+ with name "audit_ptrace" is created.
68632+
68633+config GRKERNSEC_AUDIT_CHDIR
68634+ bool "Chdir logging"
68635+ help
68636+ If you say Y here, all chdir() calls will be logged. If the sysctl
68637+ option is enabled, a sysctl option with name "audit_chdir" is created.
68638+
68639+config GRKERNSEC_AUDIT_MOUNT
68640+ bool "(Un)Mount logging"
68641+ help
68642+ If you say Y here, all mounts and unmounts will be logged. If the
68643+ sysctl option is enabled, a sysctl option with name "audit_mount" is
68644+ created.
68645+
68646+config GRKERNSEC_SIGNAL
68647+ bool "Signal logging"
68648+ default y if GRKERNSEC_CONFIG_AUTO
68649+ help
68650+ If you say Y here, certain important signals will be logged, such as
68651+ SIGSEGV, which will as a result inform you when an error in a program
68652+ occurred, which in some cases could mean a possible exploit attempt.
68653+ If the sysctl option is enabled, a sysctl option with name
68654+ "signal_logging" is created.
68655+
68656+config GRKERNSEC_FORKFAIL
68657+ bool "Fork failure logging"
68658+ help
68659+ If you say Y here, all failed fork() attempts will be logged.
68660+ This could suggest a fork bomb, or someone attempting to overstep
68661+ their process limit. If the sysctl option is enabled, a sysctl option
68662+ with name "forkfail_logging" is created.
68663+
68664+config GRKERNSEC_TIME
68665+ bool "Time change logging"
68666+ default y if GRKERNSEC_CONFIG_AUTO
68667+ help
68668+ If you say Y here, any changes of the system clock will be logged.
68669+ If the sysctl option is enabled, a sysctl option with name
68670+ "timechange_logging" is created.
68671+
68672+config GRKERNSEC_PROC_IPADDR
68673+ bool "/proc/<pid>/ipaddr support"
68674+ default y if GRKERNSEC_CONFIG_AUTO
68675+ help
68676+ If you say Y here, a new entry will be added to each /proc/<pid>
68677+ directory that contains the IP address of the person using the task.
68678+ The IP is carried across local TCP and AF_UNIX stream sockets.
68679+ This information can be useful for IDS/IPSes to perform remote response
68680+ to a local attack. The entry is readable by only the owner of the
68681+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
68682+ the RBAC system), and thus does not create privacy concerns.
68683+
68684+config GRKERNSEC_RWXMAP_LOG
68685+ bool 'Denied RWX mmap/mprotect logging'
68686+ default y if GRKERNSEC_CONFIG_AUTO
68687+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
68688+ help
68689+ If you say Y here, calls to mmap() and mprotect() with explicit
68690+ usage of PROT_WRITE and PROT_EXEC together will be logged when
68691+ denied by the PAX_MPROTECT feature. This feature will also
68692+ log other problematic scenarios that can occur when PAX_MPROTECT
68693+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
68694+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
68695+ is created.
68696+
68697+endmenu
68698+
68699+menu "Executable Protections"
68700+depends on GRKERNSEC
68701+
68702+config GRKERNSEC_DMESG
68703+ bool "Dmesg(8) restriction"
68704+ default y if GRKERNSEC_CONFIG_AUTO
68705+ help
68706+ If you say Y here, non-root users will not be able to use dmesg(8)
68707+ to view the contents of the kernel's circular log buffer.
68708+ The kernel's log buffer often contains kernel addresses and other
68709+ identifying information useful to an attacker in fingerprinting a
68710+ system for a targeted exploit.
68711+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
68712+ created.
68713+
68714+config GRKERNSEC_HARDEN_PTRACE
68715+ bool "Deter ptrace-based process snooping"
68716+ default y if GRKERNSEC_CONFIG_AUTO
68717+ help
68718+ If you say Y here, TTY sniffers and other malicious monitoring
68719+ programs implemented through ptrace will be defeated. If you
68720+ have been using the RBAC system, this option has already been
68721+ enabled for several years for all users, with the ability to make
68722+ fine-grained exceptions.
68723+
68724+ This option only affects the ability of non-root users to ptrace
68725+ processes that are not a descendant of the ptracing process.
68726+ This means that strace ./binary and gdb ./binary will still work,
68727+ but attaching to arbitrary processes will not. If the sysctl
68728+ option is enabled, a sysctl option with name "harden_ptrace" is
68729+ created.
68730+
68731+config GRKERNSEC_PTRACE_READEXEC
68732+ bool "Require read access to ptrace sensitive binaries"
68733+ default y if GRKERNSEC_CONFIG_AUTO
68734+ help
68735+ If you say Y here, unprivileged users will not be able to ptrace unreadable
68736+ binaries. This option is useful in environments that
68737+ remove the read bits (e.g. file mode 4711) from suid binaries to
68738+ prevent infoleaking of their contents. This option adds
68739+	  consistency to the use of that file mode, as without it an unprivileged
68740+	  user could still read the binary's contents by running it under ptrace.
68741+
68742+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
68743+ is created.
68744+
68745+config GRKERNSEC_SETXID
68746+ bool "Enforce consistent multithreaded privileges"
68747+ default y if GRKERNSEC_CONFIG_AUTO
68748+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
68749+ help
68750+ If you say Y here, a change from a root uid to a non-root uid
68751+ in a multithreaded application will cause the resulting uids,
68752+ gids, supplementary groups, and capabilities in that thread
68753+ to be propagated to the other threads of the process. In most
68754+ cases this is unnecessary, as glibc will emulate this behavior
68755+ on behalf of the application. Other libcs do not act in the
68756+ same way, allowing the other threads of the process to continue
68757+ running with root privileges. If the sysctl option is enabled,
68758+ a sysctl option with name "consistent_setxid" is created.
68759+
68760+config GRKERNSEC_HARDEN_IPC
68761+ bool "Disallow access to overly-permissive IPC objects"
68762+ default y if GRKERNSEC_CONFIG_AUTO
68763+ depends on SYSVIPC
68764+ help
68765+ If you say Y here, access to overly-permissive IPC objects (shared
68766+	  memory, message queues, and semaphores) will be denied to processes
68767+	  meeting either of the following criteria, beyond normal permission checks:
68768+ 1) If the IPC object is world-accessible and the euid doesn't match
68769+ that of the creator or current uid for the IPC object
68770+ 2) If the IPC object is group-accessible and the egid doesn't
68771+ match that of the creator or current gid for the IPC object
68772+ It's a common error to grant too much permission to these objects,
68773+ with impact ranging from denial of service and information leaking to
68774+	  privilege escalation.  This feature was developed in response to
68775+	  research by Tim Brown, who found hundreds of such insecure usages:
68776+	  http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
68777+	  Processes with
68778+ CAP_IPC_OWNER are still permitted to access these IPC objects.
68779+ If the sysctl option is enabled, a sysctl option with name
68780+ "harden_ipc" is created.
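
	The two criteria restate as a predicate over the ipc_perm fields visible
	from userspace. A sketch only: it treats any world/group read or write
	bit as "accessible" (an assumption; the help text does not pin down the
	exact bits) and leaves out the CAP_IPC_OWNER exemption noted above:

	    #include <stdio.h>
	    #include <sys/ipc.h>
	    #include <sys/shm.h>
	    #include <unistd.h>

	    /* Returns 1 when the object would be denied under the criteria above. */
	    static int harden_ipc_would_deny(const struct ipc_perm *p)
	    {
	            uid_t euid = geteuid();
	            gid_t egid = getegid();

	            /* 1) world-accessible, euid matches neither creator nor owner */
	            if ((p->mode & 0006) && euid != p->cuid && euid != p->uid)
	                    return 1;
	            /* 2) group-accessible, egid matches neither creator nor owner */
	            if ((p->mode & 0060) && egid != p->cgid && egid != p->gid)
	                    return 1;
	            return 0; /* normal permission checks still apply */
	    }

	    int main(void)
	    {
	            struct shmid_ds ds;
	            int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);

	            if (id == -1 || shmctl(id, IPC_STAT, &ds) == -1)
	                    return 1;
	            /* the creator always passes: its euid matches cuid */
	            printf("denied for creator? %d\n",
	                   harden_ipc_would_deny(&ds.shm_perm));
	            shmctl(id, IPC_RMID, NULL);
	            return 0;
	    }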
68781+
68782+config GRKERNSEC_TPE
68783+ bool "Trusted Path Execution (TPE)"
68784+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68785+ help
68786+ If you say Y here, you will be able to choose a gid to add to the
68787+ supplementary groups of users you want to mark as "untrusted."
68788+ These users will not be able to execute any files that are not in
68789+ root-owned directories writable only by root. If the sysctl option
68790+ is enabled, a sysctl option with name "tpe" is created.
68791+
68792+config GRKERNSEC_TPE_ALL
68793+ bool "Partially restrict all non-root users"
68794+ depends on GRKERNSEC_TPE
68795+ help
68796+ If you say Y here, all non-root users will be covered under
68797+ a weaker TPE restriction. This is separate from, and in addition to,
68798+ the main TPE options that you have selected elsewhere. Thus, if a
68799+ "trusted" GID is chosen, this restriction applies to even that GID.
68800+ Under this restriction, all non-root users will only be allowed to
68801+ execute files in directories they own that are not group or
68802+ world-writable, or in directories owned by root and writable only by
68803+ root. If the sysctl option is enabled, a sysctl option with name
68804+ "tpe_restrict_all" is created.
68805+
68806+config GRKERNSEC_TPE_INVERT
68807+ bool "Invert GID option"
68808+ depends on GRKERNSEC_TPE
68809+ help
68810+ If you say Y here, the group you specify in the TPE configuration will
68811+ decide what group TPE restrictions will be *disabled* for. This
68812+ option is useful if you want TPE restrictions to be applied to most
68813+ users on the system. If the sysctl option is enabled, a sysctl option
68814+ with name "tpe_invert" is created. Unlike other sysctl options, this
68815+ entry will default to on for backward-compatibility.
68816+
68817+config GRKERNSEC_TPE_GID
68818+ int
68819+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
68820+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
68821+
68822+config GRKERNSEC_TPE_UNTRUSTED_GID
68823+ int "GID for TPE-untrusted users"
68824+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
68825+ default 1005
68826+ help
68827+ Setting this GID determines what group TPE restrictions will be
68828+ *enabled* for. If the sysctl option is enabled, a sysctl option
68829+ with name "tpe_gid" is created.
68830+
68831+config GRKERNSEC_TPE_TRUSTED_GID
68832+ int "GID for TPE-trusted users"
68833+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
68834+ default 1005
68835+ help
68836+ Setting this GID determines what group TPE restrictions will be
68837+ *disabled* for. If the sysctl option is enabled, a sysctl option
68838+ with name "tpe_gid" is created.
68839+
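
	A rough userspace restatement of the base TPE rule from the entries
	above: a file is executable for a TPE-restricted user only if its
	containing directory is root-owned and writable by root alone. This is
	illustrative only; it ignores the separate GRKERNSEC_TPE_ALL rule (which
	also permits directories the user owns, under the same writability
	constraint) and any corner cases the kernel handles:

	    #include <libgen.h>
	    #include <stdio.h>
	    #include <string.h>
	    #include <sys/stat.h>

	    static int tpe_would_allow(const char *path)
	    {
	            char dirbuf[4096];
	            struct stat st;

	            strncpy(dirbuf, path, sizeof(dirbuf) - 1);
	            dirbuf[sizeof(dirbuf) - 1] = '\0';
	            if (stat(dirname(dirbuf), &st) != 0)
	                    return 0;
	            /* root-owned directory, writable only by root */
	            return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
	    }

	    int main(int argc, char **argv)
	    {
	            if (argc != 2)
	                    return 1;
	            printf("%s: %s\n", argv[1],
	                   tpe_would_allow(argv[1]) ? "allowed" : "denied for TPE users");
	            return 0;
	    }
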
68840+endmenu
68841+menu "Network Protections"
68842+depends on GRKERNSEC
68843+
68844+config GRKERNSEC_BLACKHOLE
68845+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
68846+ default y if GRKERNSEC_CONFIG_AUTO
68847+ depends on NET
68848+ help
68849+ If you say Y here, neither TCP resets nor ICMP
68850+ destination-unreachable packets will be sent in response to packets
68851+ sent to ports for which no associated listening process exists.
68852+ It will also prevent the sending of ICMP protocol unreachable packets
68853+ in response to packets with unknown protocols.
68854+	  This feature supports both IPv4 and IPv6 and exempts the
68855+ loopback interface from blackholing. Enabling this feature
68856+ makes a host more resilient to DoS attacks and reduces network
68857+ visibility against scanners.
68858+
68859+ The blackhole feature as-implemented is equivalent to the FreeBSD
68860+ blackhole feature, as it prevents RST responses to all packets, not
68861+ just SYNs. Under most application behavior this causes no
68862+ problems, but applications (like haproxy) may not close certain
68863+ connections in a way that cleanly terminates them on the remote
68864+ end, leaving the remote host in LAST_ACK state. Because of this
68865+ side-effect and to prevent intentional LAST_ACK DoSes, this
68866+ feature also adds automatic mitigation against such attacks.
68867+ The mitigation drastically reduces the amount of time a socket
68868+ can spend in LAST_ACK state. If you're using haproxy and not
68869+ all servers it connects to have this option enabled, consider
68870+ disabling this feature on the haproxy host.
68871+
68872+ If the sysctl option is enabled, two sysctl options with names
68873+ "ip_blackhole" and "lastack_retries" will be created.
68874+ While "ip_blackhole" takes the standard zero/non-zero on/off
68875+ toggle, "lastack_retries" uses the same kinds of values as
68876+ "tcp_retries1" and "tcp_retries2". The default value of 4
68877+ prevents a socket from lasting more than 45 seconds in LAST_ACK
68878+ state.
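
	The 45-second figure is consistent with TCP's exponential retransmission
	backoff if one assumes the classic 3-second initial RTO (an assumption,
	not something the help text states): 3 + 6 + 12 + 24 = 45.

	    #include <stdio.h>

	    int main(void)
	    {
	            int rto = 3, total = 0, i; /* assumed 3s initial RTO */

	            for (i = 0; i < 4; i++) { /* lastack_retries default of 4 */
	                    total += rto;     /* 3 + 6 + 12 + 24 */
	                    rto *= 2;
	            }
	            printf("worst-case LAST_ACK lifetime: %d seconds\n", total);
	            return 0;
	    }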
68879+
68880+config GRKERNSEC_NO_SIMULT_CONNECT
68881+ bool "Disable TCP Simultaneous Connect"
68882+ default y if GRKERNSEC_CONFIG_AUTO
68883+ depends on NET
68884+ help
68885+ If you say Y here, a feature by Willy Tarreau will be enabled that
68886+ removes a weakness in Linux's strict implementation of TCP that
68887+ allows two clients to connect to each other without either entering
68888+ a listening state. The weakness allows an attacker to easily prevent
68889+ a client from connecting to a known server provided the source port
68890+ for the connection is guessed correctly.
68891+
68892+ As the weakness could be used to prevent an antivirus or IPS from
68893+ fetching updates, or prevent an SSL gateway from fetching a CRL,
68894+ it should be eliminated by enabling this option. Though Linux is
68895+	  one of the few operating systems supporting simultaneous connect, it
68896+ has no legitimate use in practice and is rarely supported by firewalls.
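
	For context, a TCP simultaneous connect results when both endpoints run
	roughly the following at the same time, each binding a known source port
	and connecting to the other's; with this option enabled the handshake
	should not complete. A sketch with placeholder addresses and ports:

	    /* One endpoint of a TCP simultaneous open: bind a fixed source port
	     * and connect to the peer, which runs the mirror image concurrently;
	     * no listen() on either side. PEER_IP and the ports are placeholders,
	     * and error checking is trimmed for brevity. */
	    #include <arpa/inet.h>
	    #include <netinet/in.h>
	    #include <stdio.h>
	    #include <sys/socket.h>
	    #include <unistd.h>

	    #define PEER_IP    "192.0.2.1" /* documentation address; replace */
	    #define LOCAL_PORT 40001       /* the peer uses the mirrored pair */
	    #define PEER_PORT  40002

	    int main(void)
	    {
	            struct sockaddr_in local = { 0 }, peer = { 0 };
	            int one = 1, s = socket(AF_INET, SOCK_STREAM, 0);

	            setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	            local.sin_family = AF_INET;
	            local.sin_port = htons(LOCAL_PORT);
	            bind(s, (struct sockaddr *)&local, sizeof(local));

	            peer.sin_family = AF_INET;
	            peer.sin_port = htons(PEER_PORT);
	            inet_pton(AF_INET, PEER_IP, &peer.sin_addr);
	            if (connect(s, (struct sockaddr *)&peer, sizeof(peer)) == 0)
	                    puts("simultaneous open completed");
	            else
	                    perror("connect"); /* expected with this option on */
	            close(s);
	            return 0;
	    }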
68897+
68898+config GRKERNSEC_SOCKET
68899+ bool "Socket restrictions"
68900+ depends on NET
68901+ help
68902+ If you say Y here, you will be able to choose from several options.
68903+ If you assign a GID on your system and add it to the supplementary
68904+ groups of users you want to restrict socket access to, this patch
68905+ will perform up to three things, based on the option(s) you choose.
68906+
68907+config GRKERNSEC_SOCKET_ALL
68908+ bool "Deny any sockets to group"
68909+ depends on GRKERNSEC_SOCKET
68910+ help
68911+	  If you say Y here, you will be able to choose a GID whose users will
68912+ be unable to connect to other hosts from your machine or run server
68913+ applications from your machine. If the sysctl option is enabled, a
68914+ sysctl option with name "socket_all" is created.
68915+
68916+config GRKERNSEC_SOCKET_ALL_GID
68917+ int "GID to deny all sockets for"
68918+ depends on GRKERNSEC_SOCKET_ALL
68919+ default 1004
68920+ help
68921+ Here you can choose the GID to disable socket access for. Remember to
68922+ add the users you want socket access disabled for to the GID
68923+ specified here. If the sysctl option is enabled, a sysctl option
68924+ with name "socket_all_gid" is created.
68925+
68926+config GRKERNSEC_SOCKET_CLIENT
68927+ bool "Deny client sockets to group"
68928+ depends on GRKERNSEC_SOCKET
68929+ help
68930+	  If you say Y here, you will be able to choose a GID whose users will
68931+ be unable to connect to other hosts from your machine, but will be
68932+ able to run servers. If this option is enabled, all users in the group
68933+ you specify will have to use passive mode when initiating ftp transfers
68934+ from the shell on your machine. If the sysctl option is enabled, a
68935+ sysctl option with name "socket_client" is created.
68936+
68937+config GRKERNSEC_SOCKET_CLIENT_GID
68938+ int "GID to deny client sockets for"
68939+ depends on GRKERNSEC_SOCKET_CLIENT
68940+ default 1003
68941+ help
68942+ Here you can choose the GID to disable client socket access for.
68943+ Remember to add the users you want client socket access disabled for to
68944+ the GID specified here. If the sysctl option is enabled, a sysctl
68945+ option with name "socket_client_gid" is created.
68946+
68947+config GRKERNSEC_SOCKET_SERVER
68948+ bool "Deny server sockets to group"
68949+ depends on GRKERNSEC_SOCKET
68950+ help
68951+	  If you say Y here, you will be able to choose a GID whose users will
68952+ be unable to run server applications from your machine. If the sysctl
68953+ option is enabled, a sysctl option with name "socket_server" is created.
68954+
68955+config GRKERNSEC_SOCKET_SERVER_GID
68956+ int "GID to deny server sockets for"
68957+ depends on GRKERNSEC_SOCKET_SERVER
68958+ default 1002
68959+ help
68960+ Here you can choose the GID to disable server socket access for.
68961+ Remember to add the users you want server socket access disabled for to
68962+ the GID specified here. If the sysctl option is enabled, a sysctl
68963+ option with name "socket_server_gid" is created.
68964+
68965+endmenu
68966+
68967+menu "Physical Protections"
68968+depends on GRKERNSEC
68969+
68970+config GRKERNSEC_DENYUSB
68971+ bool "Deny new USB connections after toggle"
68972+ default y if GRKERNSEC_CONFIG_AUTO
68973+ depends on SYSCTL && USB_SUPPORT
68974+ help
68975+ If you say Y here, a new sysctl option with name "deny_new_usb"
68976+ will be created. Setting its value to 1 will prevent any new
68977+ USB devices from being recognized by the OS. Any attempted USB
68978+ device insertion will be logged. This option is intended to be
68979+ used against custom USB devices designed to exploit vulnerabilities
68980+ in various USB device drivers.
68981+
68982+ For greatest effectiveness, this sysctl should be set after any
68983+ relevant init scripts. This option is safe to enable in distros
68984+ as each user can choose whether or not to toggle the sysctl.
68985+
68986+config GRKERNSEC_DENYUSB_FORCE
68987+ bool "Reject all USB devices not connected at boot"
68988+ select USB
68989+ depends on GRKERNSEC_DENYUSB
68990+ help
68991+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
68992+ that doesn't involve a sysctl entry. This option should only be
68993+ enabled if you're sure you want to deny all new USB connections
68994+ at runtime and don't want to modify init scripts. This should not
68995+ be enabled by distros. It forces the core USB code to be built
68996+ into the kernel image so that all devices connected at boot time
68997+ can be recognized and new USB device connections can be prevented
68998+ prior to init running.
68999+
69000+endmenu
69001+
69002+menu "Sysctl Support"
69003+depends on GRKERNSEC && SYSCTL
69004+
69005+config GRKERNSEC_SYSCTL
69006+ bool "Sysctl support"
69007+ default y if GRKERNSEC_CONFIG_AUTO
69008+ help
69009+ If you say Y here, you will be able to change the options that
69010+ grsecurity runs with at bootup, without having to recompile your
69011+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69012+ to enable (1) or disable (0) various features. All the sysctl entries
69013+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69014+ All features enabled in the kernel configuration are disabled at boot
69015+ if you do not say Y to the "Turn on features by default" option.
69016+ All options should be set at startup, and the grsec_lock entry should
69017+ be set to a non-zero value after all the options are set.
69018+ *THIS IS EXTREMELY IMPORTANT*
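
	Concretely, the startup procedure described above amounts to writing the
	desired entries and then setting grsec_lock last. A sketch, using the
	"dmesg" entry from this menu as the example:

	    #include <stdio.h>

	    static int write_sysctl(const char *path, const char *val)
	    {
	            FILE *f = fopen(path, "w");

	            if (!f)
	                    return -1;
	            fputs(val, f);
	            return fclose(f);
	    }

	    int main(void)
	    {
	            /* ... set every grsecurity entry you care about first ... */
	            write_sysctl("/proc/sys/kernel/grsecurity/dmesg", "1");
	            /* then lock the tree, per the warning above */
	            return write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
	    }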
69019+
69020+config GRKERNSEC_SYSCTL_DISTRO
69021+ bool "Extra sysctl support for distro makers (READ HELP)"
69022+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69023+ help
69024+ If you say Y here, additional sysctl options will be created
69025+ for features that affect processes running as root. Therefore,
69026+ it is critical when using this option that the grsec_lock entry be
69027+	  enabled after boot.  Only distros that ship prebuilt kernel packages
69028+	  with this option enabled and that can ensure grsec_lock is set
69029+	  after boot should use this option.
69030+ *Failure to set grsec_lock after boot makes all grsec features
69031+ this option covers useless*
69032+
69033+ Currently this option creates the following sysctl entries:
69034+ "Disable Privileged I/O": "disable_priv_io"
69035+
69036+config GRKERNSEC_SYSCTL_ON
69037+ bool "Turn on features by default"
69038+ default y if GRKERNSEC_CONFIG_AUTO
69039+ depends on GRKERNSEC_SYSCTL
69040+ help
69041+	  If you say Y here, the features enabled in your kernel configuration
69042+	  will start out enabled at boot time, rather than starting out
69043+	  disabled.  It is recommended you say Y here unless
69044+ there is some reason you would want all sysctl-tunable features to
69045+ be disabled by default. As mentioned elsewhere, it is important
69046+ to enable the grsec_lock entry once you have finished modifying
69047+ the sysctl entries.
69048+
69049+endmenu
69050+menu "Logging Options"
69051+depends on GRKERNSEC
69052+
69053+config GRKERNSEC_FLOODTIME
69054+ int "Seconds in between log messages (minimum)"
69055+ default 10
69056+ help
69057+	  This option allows you to enforce a minimum number of seconds between
69058+	  grsecurity log messages.  The default should be suitable for most
69059+	  people; if you choose to change it, pick a value small enough
69060+ to allow informative logs to be produced, but large enough to
69061+ prevent flooding.
69062+
69063+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69064+ any rate limiting on grsecurity log messages.
69065+
69066+config GRKERNSEC_FLOODBURST
69067+ int "Number of messages in a burst (maximum)"
69068+ default 6
69069+ help
69070+ This option allows you to choose the maximum number of messages allowed
69071+ within the flood time interval you chose in a separate option. The
69072+	  default should be suitable for most people; however, if you find that
69073+ many of your logs are being interpreted as flooding, you may want to
69074+ raise this value.
69075+
69076+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69077+ any rate limiting on grsecurity log messages.
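
	The two options combine into a standard rate limit: at most FLOODBURST
	messages per FLOODTIME-second window (with the defaults, 6 messages per
	10 seconds). A sketch of that predicate, assuming a simple fixed-window
	scheme rather than the kernel's exact implementation:

	    #include <stdbool.h>
	    #include <stdio.h>
	    #include <time.h>

	    #define FLOODTIME  10 /* seconds, the default above */
	    #define FLOODBURST  6 /* messages, the default above */

	    /* Allow at most FLOODBURST messages per FLOODTIME-second window. */
	    static bool grsec_log_allowed(void)
	    {
	            static time_t window_start;
	            static int count;
	            time_t now = time(NULL);

	            if (now - window_start >= FLOODTIME) {
	                    window_start = now; /* new window: reset the budget */
	                    count = 0;
	            }
	            return count++ < FLOODBURST;
	    }

	    int main(void)
	    {
	            int i, allowed = 0;

	            for (i = 0; i < 10; i++)
	                    allowed += grsec_log_allowed();
	            printf("%d of 10 messages allowed\n", allowed); /* prints 6 */
	            return 0;
	    }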
69078+
69079+endmenu
69080diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69081new file mode 100644
69082index 0000000..30ababb
69083--- /dev/null
69084+++ b/grsecurity/Makefile
69085@@ -0,0 +1,54 @@
69086+# grsecurity – access control and security hardening for Linux
69087+# All code in this directory and various hooks located throughout the Linux kernel are
69088+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69089+# http://www.grsecurity.net spender@grsecurity.net
69090+#
69091+# This program is free software; you can redistribute it and/or
69092+# modify it under the terms of the GNU General Public License version 2
69093+# as published by the Free Software Foundation.
69094+#
69095+# This program is distributed in the hope that it will be useful,
69096+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69097+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69098+# GNU General Public License for more details.
69099+#
69100+# You should have received a copy of the GNU General Public License
69101+# along with this program; if not, write to the Free Software
69102+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69103+
69104+KBUILD_CFLAGS += -Werror
69105+
69106+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69107+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69108+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69109+ grsec_usb.o grsec_ipc.o grsec_proc.o
69110+
69111+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69112+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69113+ gracl_learn.o grsec_log.o gracl_policy.o
69114+ifdef CONFIG_COMPAT
69115+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69116+endif
69117+
69118+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69119+
69120+ifdef CONFIG_NET
69121+obj-y += grsec_sock.o
69122+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69123+endif
69124+
69125+ifndef CONFIG_GRKERNSEC
69126+obj-y += grsec_disabled.o
69127+endif
69128+
69129+ifdef CONFIG_GRKERNSEC_HIDESYM
69130+extra-y := grsec_hidesym.o
69131+$(obj)/grsec_hidesym.o:
69132+ @-chmod -f 500 /boot
69133+ @-chmod -f 500 /lib/modules
69134+ @-chmod -f 500 /lib64/modules
69135+ @-chmod -f 500 /lib32/modules
69136+ @-chmod -f 700 .
69137+ @-chmod -f 700 $(objtree)
69138+ @echo ' grsec: protected kernel image paths'
69139+endif
69140diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69141new file mode 100644
69142index 0000000..6c1e154
69143--- /dev/null
69144+++ b/grsecurity/gracl.c
69145@@ -0,0 +1,2749 @@
69146+#include <linux/kernel.h>
69147+#include <linux/module.h>
69148+#include <linux/sched.h>
69149+#include <linux/mm.h>
69150+#include <linux/file.h>
69151+#include <linux/fs.h>
69152+#include <linux/namei.h>
69153+#include <linux/mount.h>
69154+#include <linux/tty.h>
69155+#include <linux/proc_fs.h>
69156+#include <linux/lglock.h>
69157+#include <linux/slab.h>
69158+#include <linux/vmalloc.h>
69159+#include <linux/types.h>
69160+#include <linux/sysctl.h>
69161+#include <linux/netdevice.h>
69162+#include <linux/ptrace.h>
69163+#include <linux/gracl.h>
69164+#include <linux/gralloc.h>
69165+#include <linux/security.h>
69166+#include <linux/grinternal.h>
69167+#include <linux/pid_namespace.h>
69168+#include <linux/stop_machine.h>
69169+#include <linux/fdtable.h>
69170+#include <linux/percpu.h>
69172+#include <linux/hugetlb.h>
69173+#include <linux/posix-timers.h>
69174+#include <linux/prefetch.h>
69175+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69176+#include <linux/magic.h>
69177+#include <linux/pagemap.h>
69178+#include "../fs/btrfs/async-thread.h"
69179+#include "../fs/btrfs/ctree.h"
69180+#include "../fs/btrfs/btrfs_inode.h"
69181+#endif
69182+#include "../fs/mount.h"
69183+
69184+#include <asm/uaccess.h>
69185+#include <asm/errno.h>
69186+#include <asm/mman.h>
69187+
69188+#define FOR_EACH_ROLE_START(role) \
69189+ role = running_polstate.role_list; \
69190+ while (role) {
69191+
69192+#define FOR_EACH_ROLE_END(role) \
69193+ role = role->prev; \
69194+ }
69195+
69196+extern struct path gr_real_root;
69197+
69198+static struct gr_policy_state running_polstate;
69199+struct gr_policy_state *polstate = &running_polstate;
69200+extern struct gr_alloc_state *current_alloc_state;
69201+
69202+extern char *gr_shared_page[4];
69203+DEFINE_RWLOCK(gr_inode_lock);
69204+
69205+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69206+
69207+#ifdef CONFIG_NET
69208+extern struct vfsmount *sock_mnt;
69209+#endif
69210+
69211+extern struct vfsmount *pipe_mnt;
69212+extern struct vfsmount *shm_mnt;
69213+
69214+#ifdef CONFIG_HUGETLBFS
69215+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69216+#endif
69217+
69218+extern u16 acl_sp_role_value;
69219+extern struct acl_object_label *fakefs_obj_rw;
69220+extern struct acl_object_label *fakefs_obj_rwx;
69221+
69222+int gr_acl_is_enabled(void)
69223+{
69224+ return (gr_status & GR_READY);
69225+}
69226+
69227+void gr_enable_rbac_system(void)
69228+{
69229+ pax_open_kernel();
69230+ gr_status |= GR_READY;
69231+ pax_close_kernel();
69232+}
69233+
69234+int gr_rbac_disable(void *unused)
69235+{
69236+ pax_open_kernel();
69237+ gr_status &= ~GR_READY;
69238+ pax_close_kernel();
69239+
69240+ return 0;
69241+}
69242+
69243+static inline dev_t __get_dev(const struct dentry *dentry)
69244+{
69245+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69246+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69247+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69248+ else
69249+#endif
69250+ return dentry->d_sb->s_dev;
69251+}
69252+
69253+static inline u64 __get_ino(const struct dentry *dentry)
69254+{
69255+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69256+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69257+ return btrfs_ino(dentry->d_inode);
69258+ else
69259+#endif
69260+ return dentry->d_inode->i_ino;
69261+}
69262+
69263+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69264+{
69265+ return __get_dev(dentry);
69266+}
69267+
69268+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69269+{
69270+ return __get_ino(dentry);
69271+}
69272+
69273+static char gr_task_roletype_to_char(struct task_struct *task)
69274+{
69275+ switch (task->role->roletype &
69276+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69277+ GR_ROLE_SPECIAL)) {
69278+ case GR_ROLE_DEFAULT:
69279+ return 'D';
69280+ case GR_ROLE_USER:
69281+ return 'U';
69282+ case GR_ROLE_GROUP:
69283+ return 'G';
69284+ case GR_ROLE_SPECIAL:
69285+ return 'S';
69286+ }
69287+
69288+ return 'X';
69289+}
69290+
69291+char gr_roletype_to_char(void)
69292+{
69293+ return gr_task_roletype_to_char(current);
69294+}
69295+
69296+__inline__ int
69297+gr_acl_tpe_check(void)
69298+{
69299+ if (unlikely(!(gr_status & GR_READY)))
69300+ return 0;
69301+ if (current->role->roletype & GR_ROLE_TPE)
69302+ return 1;
69303+ else
69304+ return 0;
69305+}
69306+
69307+int
69308+gr_handle_rawio(const struct inode *inode)
69309+{
69310+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69311+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69312+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69313+ !capable(CAP_SYS_RAWIO))
69314+ return 1;
69315+#endif
69316+ return 0;
69317+}
69318+
69319+int
69320+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69321+{
69322+ if (likely(lena != lenb))
69323+ return 0;
69324+
69325+ return !memcmp(a, b, lena);
69326+}
69327+
69328+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69329+{
69330+ *buflen -= namelen;
69331+ if (*buflen < 0)
69332+ return -ENAMETOOLONG;
69333+ *buffer -= namelen;
69334+ memcpy(*buffer, str, namelen);
69335+ return 0;
69336+}
69337+
69338+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69339+{
69340+ return prepend(buffer, buflen, name->name, name->len);
69341+}
69342+
69343+static int prepend_path(const struct path *path, struct path *root,
69344+ char **buffer, int *buflen)
69345+{
69346+ struct dentry *dentry = path->dentry;
69347+ struct vfsmount *vfsmnt = path->mnt;
69348+ struct mount *mnt = real_mount(vfsmnt);
69349+ bool slash = false;
69350+ int error = 0;
69351+
69352+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69353+ struct dentry * parent;
69354+
69355+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69356+ /* Global root? */
69357+ if (!mnt_has_parent(mnt)) {
69358+ goto out;
69359+ }
69360+ dentry = mnt->mnt_mountpoint;
69361+ mnt = mnt->mnt_parent;
69362+ vfsmnt = &mnt->mnt;
69363+ continue;
69364+ }
69365+ parent = dentry->d_parent;
69366+ prefetch(parent);
69367+ spin_lock(&dentry->d_lock);
69368+ error = prepend_name(buffer, buflen, &dentry->d_name);
69369+ spin_unlock(&dentry->d_lock);
69370+ if (!error)
69371+ error = prepend(buffer, buflen, "/", 1);
69372+ if (error)
69373+ break;
69374+
69375+ slash = true;
69376+ dentry = parent;
69377+ }
69378+
69379+out:
69380+ if (!error && !slash)
69381+ error = prepend(buffer, buflen, "/", 1);
69382+
69383+ return error;
69384+}
69385+
69386+/* this must be called with mount_lock and rename_lock held */
69387+
69388+static char *__our_d_path(const struct path *path, struct path *root,
69389+ char *buf, int buflen)
69390+{
69391+ char *res = buf + buflen;
69392+ int error;
69393+
69394+ prepend(&res, &buflen, "\0", 1);
69395+ error = prepend_path(path, root, &res, &buflen);
69396+ if (error)
69397+ return ERR_PTR(error);
69398+
69399+ return res;
69400+}
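
	The helpers above build the string backwards: the cursor starts at
	buf + buflen and every component is prepended, so the finished path ends
	up right-justified in the buffer. A standalone sketch of the same
	technique:

	    #include <stdio.h>
	    #include <string.h>

	    static int prepend(char **buffer, int *buflen, const char *str, int namelen)
	    {
	            *buflen -= namelen;
	            if (*buflen < 0)
	                    return -1; /* -ENAMETOOLONG in the code above */
	            *buffer -= namelen;
	            memcpy(*buffer, str, namelen);
	            return 0;
	    }

	    int main(void)
	    {
	            char buf[16];
	            int buflen = sizeof(buf);
	            char *res = buf + buflen;

	            prepend(&res, &buflen, "\0", 1); /* terminator goes in first */
	            prepend(&res, &buflen, "b", 1);
	            prepend(&res, &buflen, "/", 1);
	            prepend(&res, &buflen, "a", 1);
	            prepend(&res, &buflen, "/", 1);
	            printf("%s\n", res); /* prints /a/b */
	            return 0;
	    }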
69401+
69402+static char *
69403+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69404+{
69405+ char *retval;
69406+
69407+ retval = __our_d_path(path, root, buf, buflen);
69408+ if (unlikely(IS_ERR(retval)))
69409+ retval = strcpy(buf, "<path too long>");
69410+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69411+ retval[1] = '\0';
69412+
69413+ return retval;
69414+}
69415+
69416+static char *
69417+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69418+ char *buf, int buflen)
69419+{
69420+ struct path path;
69421+ char *res;
69422+
69423+ path.dentry = (struct dentry *)dentry;
69424+ path.mnt = (struct vfsmount *)vfsmnt;
69425+
69426+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69427+ by the RBAC system */
69428+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69429+
69430+ return res;
69431+}
69432+
69433+static char *
69434+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69435+ char *buf, int buflen)
69436+{
69437+ char *res;
69438+ struct path path;
69439+ struct path root;
69440+ struct task_struct *reaper = init_pid_ns.child_reaper;
69441+
69442+ path.dentry = (struct dentry *)dentry;
69443+ path.mnt = (struct vfsmount *)vfsmnt;
69444+
69445+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69446+ get_fs_root(reaper->fs, &root);
69447+
69448+ read_seqlock_excl(&mount_lock);
69449+ write_seqlock(&rename_lock);
69450+ res = gen_full_path(&path, &root, buf, buflen);
69451+ write_sequnlock(&rename_lock);
69452+ read_sequnlock_excl(&mount_lock);
69453+
69454+ path_put(&root);
69455+ return res;
69456+}
69457+
69458+char *
69459+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69460+{
69461+ char *ret;
69462+ read_seqlock_excl(&mount_lock);
69463+ write_seqlock(&rename_lock);
69464+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69465+ PAGE_SIZE);
69466+ write_sequnlock(&rename_lock);
69467+ read_sequnlock_excl(&mount_lock);
69468+ return ret;
69469+}
69470+
69471+static char *
69472+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69473+{
69474+ char *ret;
69475+ char *buf;
69476+ int buflen;
69477+
69478+ read_seqlock_excl(&mount_lock);
69479+ write_seqlock(&rename_lock);
69480+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69481+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69482+ buflen = (int)(ret - buf);
69483+ if (buflen >= 5)
69484+ prepend(&ret, &buflen, "/proc", 5);
69485+ else
69486+ ret = strcpy(buf, "<path too long>");
69487+ write_sequnlock(&rename_lock);
69488+ read_sequnlock_excl(&mount_lock);
69489+ return ret;
69490+}
69491+
69492+char *
69493+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69494+{
69495+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69496+ PAGE_SIZE);
69497+}
69498+
69499+char *
69500+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69501+{
69502+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69503+ PAGE_SIZE);
69504+}
69505+
69506+char *
69507+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69508+{
69509+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69510+ PAGE_SIZE);
69511+}
69512+
69513+char *
69514+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69515+{
69516+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69517+ PAGE_SIZE);
69518+}
69519+
69520+char *
69521+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69522+{
69523+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69524+ PAGE_SIZE);
69525+}
69526+
69527+__inline__ __u32
69528+to_gr_audit(const __u32 reqmode)
69529+{
69530+ /* masks off auditable permission flags, then shifts them to create
69531+ auditing flags, and adds the special case of append auditing if
69532+ we're requesting write */
69533+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69534+}
69535+
69536+struct acl_role_label *
69537+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69538+ const gid_t gid)
69539+{
69540+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69541+ struct acl_role_label *match;
69542+ struct role_allowed_ip *ipp;
69543+ unsigned int x;
69544+ u32 curr_ip = task->signal->saved_ip;
69545+
69546+ match = state->acl_role_set.r_hash[index];
69547+
69548+ while (match) {
69549+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69550+ for (x = 0; x < match->domain_child_num; x++) {
69551+ if (match->domain_children[x] == uid)
69552+ goto found;
69553+ }
69554+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69555+ break;
69556+ match = match->next;
69557+ }
69558+found:
69559+ if (match == NULL) {
69560+ try_group:
69561+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69562+ match = state->acl_role_set.r_hash[index];
69563+
69564+ while (match) {
69565+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69566+ for (x = 0; x < match->domain_child_num; x++) {
69567+ if (match->domain_children[x] == gid)
69568+ goto found2;
69569+ }
69570+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69571+ break;
69572+ match = match->next;
69573+ }
69574+found2:
69575+ if (match == NULL)
69576+ match = state->default_role;
69577+ if (match->allowed_ips == NULL)
69578+ return match;
69579+ else {
69580+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69581+ if (likely
69582+ ((ntohl(curr_ip) & ipp->netmask) ==
69583+ (ntohl(ipp->addr) & ipp->netmask)))
69584+ return match;
69585+ }
69586+ match = state->default_role;
69587+ }
69588+ } else if (match->allowed_ips == NULL) {
69589+ return match;
69590+ } else {
69591+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69592+ if (likely
69593+ ((ntohl(curr_ip) & ipp->netmask) ==
69594+ (ntohl(ipp->addr) & ipp->netmask)))
69595+ return match;
69596+ }
69597+ goto try_group;
69598+ }
69599+
69600+ return match;
69601+}
69602+
69603+static struct acl_role_label *
69604+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69605+ const gid_t gid)
69606+{
69607+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69608+}
69609+
69610+struct acl_subject_label *
69611+lookup_acl_subj_label(const u64 ino, const dev_t dev,
69612+ const struct acl_role_label *role)
69613+{
69614+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69615+ struct acl_subject_label *match;
69616+
69617+ match = role->subj_hash[index];
69618+
69619+ while (match && (match->inode != ino || match->device != dev ||
69620+ (match->mode & GR_DELETED))) {
69621+ match = match->next;
69622+ }
69623+
69624+ if (match && !(match->mode & GR_DELETED))
69625+ return match;
69626+ else
69627+ return NULL;
69628+}
69629+
69630+struct acl_subject_label *
69631+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
69632+ const struct acl_role_label *role)
69633+{
69634+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69635+ struct acl_subject_label *match;
69636+
69637+ match = role->subj_hash[index];
69638+
69639+ while (match && (match->inode != ino || match->device != dev ||
69640+ !(match->mode & GR_DELETED))) {
69641+ match = match->next;
69642+ }
69643+
69644+ if (match && (match->mode & GR_DELETED))
69645+ return match;
69646+ else
69647+ return NULL;
69648+}
69649+
69650+static struct acl_object_label *
69651+lookup_acl_obj_label(const u64 ino, const dev_t dev,
69652+ const struct acl_subject_label *subj)
69653+{
69654+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69655+ struct acl_object_label *match;
69656+
69657+ match = subj->obj_hash[index];
69658+
69659+ while (match && (match->inode != ino || match->device != dev ||
69660+ (match->mode & GR_DELETED))) {
69661+ match = match->next;
69662+ }
69663+
69664+ if (match && !(match->mode & GR_DELETED))
69665+ return match;
69666+ else
69667+ return NULL;
69668+}
69669+
69670+static struct acl_object_label *
69671+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
69672+ const struct acl_subject_label *subj)
69673+{
69674+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69675+ struct acl_object_label *match;
69676+
69677+ match = subj->obj_hash[index];
69678+
69679+ while (match && (match->inode != ino || match->device != dev ||
69680+ !(match->mode & GR_DELETED))) {
69681+ match = match->next;
69682+ }
69683+
69684+ if (match && (match->mode & GR_DELETED))
69685+ return match;
69686+
69687+ match = subj->obj_hash[index];
69688+
69689+ while (match && (match->inode != ino || match->device != dev ||
69690+ (match->mode & GR_DELETED))) {
69691+ match = match->next;
69692+ }
69693+
69694+ if (match && !(match->mode & GR_DELETED))
69695+ return match;
69696+ else
69697+ return NULL;
69698+}
69699+
69700+struct name_entry *
69701+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
69702+{
69703+ unsigned int len = strlen(name);
69704+ unsigned int key = full_name_hash(name, len);
69705+ unsigned int index = key % state->name_set.n_size;
69706+ struct name_entry *match;
69707+
69708+ match = state->name_set.n_hash[index];
69709+
69710+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
69711+ match = match->next;
69712+
69713+ return match;
69714+}
69715+
69716+static struct name_entry *
69717+lookup_name_entry(const char *name)
69718+{
69719+ return __lookup_name_entry(&running_polstate, name);
69720+}
69721+
69722+static struct name_entry *
69723+lookup_name_entry_create(const char *name)
69724+{
69725+ unsigned int len = strlen(name);
69726+ unsigned int key = full_name_hash(name, len);
69727+ unsigned int index = key % running_polstate.name_set.n_size;
69728+ struct name_entry *match;
69729+
69730+ match = running_polstate.name_set.n_hash[index];
69731+
69732+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69733+ !match->deleted))
69734+ match = match->next;
69735+
69736+ if (match && match->deleted)
69737+ return match;
69738+
69739+ match = running_polstate.name_set.n_hash[index];
69740+
69741+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69742+ match->deleted))
69743+ match = match->next;
69744+
69745+ if (match && !match->deleted)
69746+ return match;
69747+ else
69748+ return NULL;
69749+}
69750+
69751+static struct inodev_entry *
69752+lookup_inodev_entry(const u64 ino, const dev_t dev)
69753+{
69754+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
69755+ struct inodev_entry *match;
69756+
69757+ match = running_polstate.inodev_set.i_hash[index];
69758+
69759+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
69760+ match = match->next;
69761+
69762+ return match;
69763+}
69764+
69765+void
69766+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
69767+{
69768+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
69769+ state->inodev_set.i_size);
69770+ struct inodev_entry **curr;
69771+
69772+ entry->prev = NULL;
69773+
69774+ curr = &state->inodev_set.i_hash[index];
69775+ if (*curr != NULL)
69776+ (*curr)->prev = entry;
69777+
69778+ entry->next = *curr;
69779+ *curr = entry;
69780+
69781+ return;
69782+}
69783+
69784+static void
69785+insert_inodev_entry(struct inodev_entry *entry)
69786+{
69787+ __insert_inodev_entry(&running_polstate, entry);
69788+}
69789+
69790+void
69791+insert_acl_obj_label(struct acl_object_label *obj,
69792+ struct acl_subject_label *subj)
69793+{
69794+ unsigned int index =
69795+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
69796+ struct acl_object_label **curr;
69797+
69798+ obj->prev = NULL;
69799+
69800+ curr = &subj->obj_hash[index];
69801+ if (*curr != NULL)
69802+ (*curr)->prev = obj;
69803+
69804+ obj->next = *curr;
69805+ *curr = obj;
69806+
69807+ return;
69808+}
69809+
69810+void
69811+insert_acl_subj_label(struct acl_subject_label *obj,
69812+ struct acl_role_label *role)
69813+{
69814+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
69815+ struct acl_subject_label **curr;
69816+
69817+ obj->prev = NULL;
69818+
69819+ curr = &role->subj_hash[index];
69820+ if (*curr != NULL)
69821+ (*curr)->prev = obj;
69822+
69823+ obj->next = *curr;
69824+ *curr = obj;
69825+
69826+ return;
69827+}
69828+
69829+/* derived from glibc fnmatch() 0: match, 1: no match*/
69830+
69831+static int
69832+glob_match(const char *p, const char *n)
69833+{
69834+ char c;
69835+
69836+ while ((c = *p++) != '\0') {
69837+ switch (c) {
69838+ case '?':
69839+ if (*n == '\0')
69840+ return 1;
69841+ else if (*n == '/')
69842+ return 1;
69843+ break;
69844+ case '\\':
69845+ if (*n != c)
69846+ return 1;
69847+ break;
69848+ case '*':
69849+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
69850+ if (*n == '/')
69851+ return 1;
69852+ else if (c == '?') {
69853+ if (*n == '\0')
69854+ return 1;
69855+ else
69856+ ++n;
69857+ }
69858+ }
69859+ if (c == '\0') {
69860+ return 0;
69861+ } else {
69862+ const char *endp;
69863+
69864+ if ((endp = strchr(n, '/')) == NULL)
69865+ endp = n + strlen(n);
69866+
69867+ if (c == '[') {
69868+ for (--p; n < endp; ++n)
69869+ if (!glob_match(p, n))
69870+ return 0;
69871+ } else if (c == '/') {
69872+ while (*n != '\0' && *n != '/')
69873+ ++n;
69874+ if (*n == '/' && !glob_match(p, n + 1))
69875+ return 0;
69876+ } else {
69877+ for (--p; n < endp; ++n)
69878+ if (*n == c && !glob_match(p, n))
69879+ return 0;
69880+ }
69881+
69882+ return 1;
69883+ }
69884+ case '[':
69885+ {
69886+ int not;
69887+ char cold;
69888+
69889+ if (*n == '\0' || *n == '/')
69890+ return 1;
69891+
69892+ not = (*p == '!' || *p == '^');
69893+ if (not)
69894+ ++p;
69895+
69896+ c = *p++;
69897+ for (;;) {
69898+ unsigned char fn = (unsigned char)*n;
69899+
69900+ if (c == '\0')
69901+ return 1;
69902+ else {
69903+ if (c == fn)
69904+ goto matched;
69905+ cold = c;
69906+ c = *p++;
69907+
69908+ if (c == '-' && *p != ']') {
69909+ unsigned char cend = *p++;
69910+
69911+ if (cend == '\0')
69912+ return 1;
69913+
69914+ if (cold <= fn && fn <= cend)
69915+ goto matched;
69916+
69917+ c = *p++;
69918+ }
69919+ }
69920+
69921+ if (c == ']')
69922+ break;
69923+ }
69924+ if (!not)
69925+ return 1;
69926+ break;
69927+ matched:
69928+ while (c != ']') {
69929+ if (c == '\0')
69930+ return 1;
69931+
69932+ c = *p++;
69933+ }
69934+ if (not)
69935+ return 1;
69936+ }
69937+ break;
69938+ default:
69939+ if (c != *n)
69940+ return 1;
69941+ }
69942+
69943+ ++n;
69944+ }
69945+
69946+ if (*n == '\0')
69947+ return 0;
69948+
69949+ if (*n == '/')
69950+ return 0;
69951+
69952+ return 1;
69953+}
69954+
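	For intuition, glob_match() returns 0 on match and 1 on mismatch (note
	the inverted sense relative to a boolean). A hypothetical harness; the
	function above must be pasted into the same file for it to build:

	    #include <stdio.h>
	    #include <string.h>

	    static int glob_match(const char *p, const char *n); /* as above */

	    int main(void)
	    {
	            /* '*' matches a single path component: prints 0 (match) */
	            printf("%d\n", glob_match("/home/*/bin", "/home/user/bin"));
	            /* suffix differs: prints 1 (no match) */
	            printf("%d\n", glob_match("*.txt", "file.log"));
	            return 0;
	    }
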
69955+static struct acl_object_label *
69956+chk_glob_label(struct acl_object_label *globbed,
69957+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
69958+{
69959+ struct acl_object_label *tmp;
69960+
69961+ if (*path == NULL)
69962+ *path = gr_to_filename_nolock(dentry, mnt);
69963+
69964+ tmp = globbed;
69965+
69966+ while (tmp) {
69967+ if (!glob_match(tmp->filename, *path))
69968+ return tmp;
69969+ tmp = tmp->next;
69970+ }
69971+
69972+ return NULL;
69973+}
69974+
69975+static struct acl_object_label *
69976+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
69977+ const u64 curr_ino, const dev_t curr_dev,
69978+ const struct acl_subject_label *subj, char **path, const int checkglob)
69979+{
69980+ struct acl_subject_label *tmpsubj;
69981+ struct acl_object_label *retval;
69982+ struct acl_object_label *retval2;
69983+
69984+ tmpsubj = (struct acl_subject_label *) subj;
69985+ read_lock(&gr_inode_lock);
69986+ do {
69987+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
69988+ if (retval) {
69989+ if (checkglob && retval->globbed) {
69990+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
69991+ if (retval2)
69992+ retval = retval2;
69993+ }
69994+ break;
69995+ }
69996+ } while ((tmpsubj = tmpsubj->parent_subject));
69997+ read_unlock(&gr_inode_lock);
69998+
69999+ return retval;
70000+}
70001+
70002+static __inline__ struct acl_object_label *
70003+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70004+ struct dentry *curr_dentry,
70005+ const struct acl_subject_label *subj, char **path, const int checkglob)
70006+{
70007+ int newglob = checkglob;
70008+ u64 inode;
70009+ dev_t device;
70010+
70011+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70012+ as we don't want a / * rule to match instead of the / object
70013+ don't do this for create lookups that call this function though, since they're looking up
70014+ on the parent and thus need globbing checks on all paths
70015+ */
70016+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70017+ newglob = GR_NO_GLOB;
70018+
70019+ spin_lock(&curr_dentry->d_lock);
70020+ inode = __get_ino(curr_dentry);
70021+ device = __get_dev(curr_dentry);
70022+ spin_unlock(&curr_dentry->d_lock);
70023+
70024+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70025+}
70026+
70027+#ifdef CONFIG_HUGETLBFS
70028+static inline bool
70029+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70030+{
70031+ int i;
70032+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70033+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70034+ return true;
70035+ }
70036+
70037+ return false;
70038+}
70039+#endif
70040+
70041+static struct acl_object_label *
70042+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70043+ const struct acl_subject_label *subj, char *path, const int checkglob)
70044+{
70045+ struct dentry *dentry = (struct dentry *) l_dentry;
70046+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70047+ struct mount *real_mnt = real_mount(mnt);
70048+ struct acl_object_label *retval;
70049+ struct dentry *parent;
70050+
70051+ read_seqlock_excl(&mount_lock);
70052+ write_seqlock(&rename_lock);
70053+
70054+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70055+#ifdef CONFIG_NET
70056+ mnt == sock_mnt ||
70057+#endif
70058+#ifdef CONFIG_HUGETLBFS
70059+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70060+#endif
70061+ /* ignore Eric Biederman */
70062+ IS_PRIVATE(l_dentry->d_inode))) {
70063+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70064+ goto out;
70065+ }
70066+
70067+ for (;;) {
70068+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70069+ break;
70070+
70071+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70072+ if (!mnt_has_parent(real_mnt))
70073+ break;
70074+
70075+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70076+ if (retval != NULL)
70077+ goto out;
70078+
70079+ dentry = real_mnt->mnt_mountpoint;
70080+ real_mnt = real_mnt->mnt_parent;
70081+ mnt = &real_mnt->mnt;
70082+ continue;
70083+ }
70084+
70085+ parent = dentry->d_parent;
70086+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70087+ if (retval != NULL)
70088+ goto out;
70089+
70090+ dentry = parent;
70091+ }
70092+
70093+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70094+
70095+ /* gr_real_root is pinned so we don't have to hold a reference */
70096+ if (retval == NULL)
70097+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70098+out:
70099+ write_sequnlock(&rename_lock);
70100+ read_sequnlock_excl(&mount_lock);
70101+
70102+ BUG_ON(retval == NULL);
70103+
70104+ return retval;
70105+}
70106+
70107+static __inline__ struct acl_object_label *
70108+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70109+ const struct acl_subject_label *subj)
70110+{
70111+ char *path = NULL;
70112+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70113+}
70114+
70115+static __inline__ struct acl_object_label *
70116+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70117+ const struct acl_subject_label *subj)
70118+{
70119+ char *path = NULL;
70120+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70121+}
70122+
70123+static __inline__ struct acl_object_label *
70124+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70125+ const struct acl_subject_label *subj, char *path)
70126+{
70127+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70128+}
70129+
70130+struct acl_subject_label *
70131+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70132+ const struct acl_role_label *role)
70133+{
70134+ struct dentry *dentry = (struct dentry *) l_dentry;
70135+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70136+ struct mount *real_mnt = real_mount(mnt);
70137+ struct acl_subject_label *retval;
70138+ struct dentry *parent;
70139+
70140+ read_seqlock_excl(&mount_lock);
70141+ write_seqlock(&rename_lock);
70142+
70143+ for (;;) {
70144+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70145+ break;
70146+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70147+ if (!mnt_has_parent(real_mnt))
70148+ break;
70149+
70150+ spin_lock(&dentry->d_lock);
70151+ read_lock(&gr_inode_lock);
70152+ retval =
70153+ lookup_acl_subj_label(__get_ino(dentry),
70154+ __get_dev(dentry), role);
70155+ read_unlock(&gr_inode_lock);
70156+ spin_unlock(&dentry->d_lock);
70157+ if (retval != NULL)
70158+ goto out;
70159+
70160+ dentry = real_mnt->mnt_mountpoint;
70161+ real_mnt = real_mnt->mnt_parent;
70162+ mnt = &real_mnt->mnt;
70163+ continue;
70164+ }
70165+
70166+ spin_lock(&dentry->d_lock);
70167+ read_lock(&gr_inode_lock);
70168+ retval = lookup_acl_subj_label(__get_ino(dentry),
70169+ __get_dev(dentry), role);
70170+ read_unlock(&gr_inode_lock);
70171+ parent = dentry->d_parent;
70172+ spin_unlock(&dentry->d_lock);
70173+
70174+ if (retval != NULL)
70175+ goto out;
70176+
70177+ dentry = parent;
70178+ }
70179+
70180+ spin_lock(&dentry->d_lock);
70181+ read_lock(&gr_inode_lock);
70182+ retval = lookup_acl_subj_label(__get_ino(dentry),
70183+ __get_dev(dentry), role);
70184+ read_unlock(&gr_inode_lock);
70185+ spin_unlock(&dentry->d_lock);
70186+
70187+ if (unlikely(retval == NULL)) {
70188+ /* gr_real_root is pinned, we don't need to hold a reference */
70189+ read_lock(&gr_inode_lock);
70190+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70191+ __get_dev(gr_real_root.dentry), role);
70192+ read_unlock(&gr_inode_lock);
70193+ }
70194+out:
70195+ write_sequnlock(&rename_lock);
70196+ read_sequnlock_excl(&mount_lock);
70197+
70198+ BUG_ON(retval == NULL);
70199+
70200+ return retval;
70201+}
70202+
70203+void
70204+assign_special_role(const char *rolename)
70205+{
70206+ struct acl_object_label *obj;
70207+ struct acl_role_label *r;
70208+ struct acl_role_label *assigned = NULL;
70209+ struct task_struct *tsk;
70210+ struct file *filp;
70211+
70212+ FOR_EACH_ROLE_START(r)
70213+ if (!strcmp(rolename, r->rolename) &&
70214+ (r->roletype & GR_ROLE_SPECIAL)) {
70215+ assigned = r;
70216+ break;
70217+ }
70218+ FOR_EACH_ROLE_END(r)
70219+
70220+ if (!assigned)
70221+ return;
70222+
70223+ read_lock(&tasklist_lock);
70224+ read_lock(&grsec_exec_file_lock);
70225+
70226+ tsk = current->real_parent;
70227+ if (tsk == NULL)
70228+ goto out_unlock;
70229+
70230+ filp = tsk->exec_file;
70231+ if (filp == NULL)
70232+ goto out_unlock;
70233+
70234+ tsk->is_writable = 0;
70235+ tsk->inherited = 0;
70236+
70237+ tsk->acl_sp_role = 1;
70238+ tsk->acl_role_id = ++acl_sp_role_value;
70239+ tsk->role = assigned;
70240+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70241+
70242+ /* ignore additional mmap checks for processes that are writable
70243+ by the default ACL */
70244+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70245+ if (unlikely(obj->mode & GR_WRITE))
70246+ tsk->is_writable = 1;
70247+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70248+ if (unlikely(obj->mode & GR_WRITE))
70249+ tsk->is_writable = 1;
70250+
70251+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70252+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70253+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70254+#endif
70255+
70256+out_unlock:
70257+ read_unlock(&grsec_exec_file_lock);
70258+ read_unlock(&tasklist_lock);
70259+ return;
70260+}
70261+
70262+
70263+static void
70264+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70265+{
70266+ struct task_struct *task = current;
70267+ const struct cred *cred = current_cred();
70268+
70269+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70270+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70271+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70272+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70273+
70274+ return;
70275+}
70276+
70277+static void
70278+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70279+{
70280+ struct task_struct *task = current;
70281+ const struct cred *cred = current_cred();
70282+
70283+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70284+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70285+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70286+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70287+
70288+ return;
70289+}
70290+
70291+static void
70292+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70293+{
70294+ struct task_struct *task = current;
70295+ const struct cred *cred = current_cred();
70296+
70297+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70298+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70299+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70300+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70301+
70302+ return;
70303+}
70304+
70305+static void
70306+gr_set_proc_res(struct task_struct *task)
70307+{
70308+ struct acl_subject_label *proc;
70309+ unsigned short i;
70310+
70311+ proc = task->acl;
70312+
70313+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70314+ return;
70315+
70316+ for (i = 0; i < RLIM_NLIMITS; i++) {
70317+ unsigned long rlim_cur, rlim_max;
70318+
70319+ if (!(proc->resmask & (1U << i)))
70320+ continue;
70321+
70322+ rlim_cur = proc->res[i].rlim_cur;
70323+ rlim_max = proc->res[i].rlim_max;
70324+
70325+ if (i == RLIMIT_NOFILE) {
70326+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70327+ if (rlim_cur > saved_sysctl_nr_open)
70328+ rlim_cur = saved_sysctl_nr_open;
70329+ if (rlim_max > saved_sysctl_nr_open)
70330+ rlim_max = saved_sysctl_nr_open;
70331+ }
70332+
70333+ task->signal->rlim[i].rlim_cur = rlim_cur;
70334+ task->signal->rlim[i].rlim_max = rlim_max;
70335+
70336+ if (i == RLIMIT_CPU)
70337+ update_rlimit_cpu(task, rlim_cur);
70338+ }
70339+
70340+ return;
70341+}
70342+
70343+/* both of the below must be called with
70344+ rcu_read_lock();
70345+ read_lock(&tasklist_lock);
70346+ read_lock(&grsec_exec_file_lock);
70347+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
70348+*/
70349+
70350+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70351+{
70352+ char *tmpname;
70353+ struct acl_subject_label *tmpsubj;
70354+ struct file *filp;
70355+ struct name_entry *nmatch;
70356+
70357+ filp = task->exec_file;
70358+ if (filp == NULL)
70359+ return NULL;
70360+
70361+ /* the following is to apply the correct subject
70362+ on binaries running when the RBAC system
70363+ is enabled, when the binaries have been
70364+ replaced or deleted since their execution
70365+ -----
70366+ when the RBAC system starts, the inode/dev
70367+ from exec_file will be one the RBAC system
70368+ is unaware of. It only knows the inode/dev
70369+ of the present file on disk, or the absence
70370+ of it.
70371+ */
70372+
70373+ if (filename)
70374+ nmatch = __lookup_name_entry(state, filename);
70375+ else {
70376+ preempt_disable();
70377+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70378+
70379+ nmatch = __lookup_name_entry(state, tmpname);
70380+ preempt_enable();
70381+ }
70382+ tmpsubj = NULL;
70383+ if (nmatch) {
70384+ if (nmatch->deleted)
70385+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70386+ else
70387+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70388+ }
70389+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70390+ then we fall back to a normal lookup based on the binary's ino/dev
70391+ */
70392+ if (tmpsubj == NULL && fallback)
70393+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70394+
70395+ return tmpsubj;
70396+}
70397+
70398+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
70399+{
70400+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
70401+}
70402+
70403+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70404+{
70405+ struct acl_object_label *obj;
70406+ struct file *filp;
70407+
70408+ filp = task->exec_file;
70409+
70410+ task->acl = subj;
70411+ task->is_writable = 0;
70412+ /* ignore additional mmap checks for processes that are writable
70413+ by the default ACL */
70414+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70415+ if (unlikely(obj->mode & GR_WRITE))
70416+ task->is_writable = 1;
70417+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70418+ if (unlikely(obj->mode & GR_WRITE))
70419+ task->is_writable = 1;
70420+
70421+ gr_set_proc_res(task);
70422+
70423+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70424+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70425+#endif
70426+}
70427+
70428+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70429+{
70430+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70431+}
70432+
70433+__u32
70434+gr_search_file(const struct dentry * dentry, const __u32 mode,
70435+ const struct vfsmount * mnt)
70436+{
70437+ __u32 retval = mode;
70438+ struct acl_subject_label *curracl;
70439+ struct acl_object_label *currobj;
70440+
70441+ if (unlikely(!(gr_status & GR_READY)))
70442+ return (mode & ~GR_AUDITS);
70443+
70444+ curracl = current->acl;
70445+
70446+ currobj = chk_obj_label(dentry, mnt, curracl);
70447+ retval = currobj->mode & mode;
70448+
70449+ /* if we're opening a specified transfer file for writing
70450+ (e.g. /dev/initctl), then transfer our role to init
70451+ */
70452+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70453+ current->role->roletype & GR_ROLE_PERSIST)) {
70454+ struct task_struct *task = init_pid_ns.child_reaper;
70455+
70456+ if (task->role != current->role) {
70457+ struct acl_subject_label *subj;
70458+
70459+ task->acl_sp_role = 0;
70460+ task->acl_role_id = current->acl_role_id;
70461+ task->role = current->role;
70462+ rcu_read_lock();
70463+ read_lock(&grsec_exec_file_lock);
70464+ subj = gr_get_subject_for_task(task, NULL, 1);
70465+ gr_apply_subject_to_task(task, subj);
70466+ read_unlock(&grsec_exec_file_lock);
70467+ rcu_read_unlock();
70468+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70469+ }
70470+ }
70471+
70472+ if (unlikely
70473+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70474+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70475+ __u32 new_mode = mode;
70476+
70477+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70478+
70479+ retval = new_mode;
70480+
70481+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70482+ new_mode |= GR_INHERIT;
70483+
70484+ if (!(mode & GR_NOLEARN))
70485+ gr_log_learn(dentry, mnt, new_mode);
70486+ }
70487+
70488+ return retval;
70489+}
70490+
70491+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70492+ const struct dentry *parent,
70493+ const struct vfsmount *mnt)
70494+{
70495+ struct name_entry *match;
70496+ struct acl_object_label *matchpo;
70497+ struct acl_subject_label *curracl;
70498+ char *path;
70499+
70500+ if (unlikely(!(gr_status & GR_READY)))
70501+ return NULL;
70502+
70503+ preempt_disable();
70504+ path = gr_to_filename_rbac(new_dentry, mnt);
70505+ match = lookup_name_entry_create(path);
70506+
70507+ curracl = current->acl;
70508+
70509+ if (match) {
70510+ read_lock(&gr_inode_lock);
70511+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70512+ read_unlock(&gr_inode_lock);
70513+
70514+ if (matchpo) {
70515+ preempt_enable();
70516+ return matchpo;
70517+ }
70518+ }
70519+
70520+ // lookup parent
70521+
70522+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70523+
70524+ preempt_enable();
70525+ return matchpo;
70526+}
70527+
70528+__u32
70529+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70530+ const struct vfsmount * mnt, const __u32 mode)
70531+{
70532+ struct acl_object_label *matchpo;
70533+ __u32 retval;
70534+
70535+ if (unlikely(!(gr_status & GR_READY)))
70536+ return (mode & ~GR_AUDITS);
70537+
70538+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70539+
70540+ retval = matchpo->mode & mode;
70541+
70542+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70543+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70544+ __u32 new_mode = mode;
70545+
70546+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70547+
70548+ gr_log_learn(new_dentry, mnt, new_mode);
70549+ return new_mode;
70550+ }
70551+
70552+ return retval;
70553+}
70554+
70555+__u32
70556+gr_check_link(const struct dentry * new_dentry,
70557+ const struct dentry * parent_dentry,
70558+ const struct vfsmount * parent_mnt,
70559+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70560+{
70561+ struct acl_object_label *obj;
70562+ __u32 oldmode, newmode;
70563+ __u32 needmode;
70564+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70565+ GR_DELETE | GR_INHERIT;
70566+
70567+ if (unlikely(!(gr_status & GR_READY)))
70568+ return (GR_CREATE | GR_LINK);
70569+
70570+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70571+ oldmode = obj->mode;
70572+
70573+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70574+ newmode = obj->mode;
70575+
70576+ needmode = newmode & checkmodes;
70577+
70578+ // old name for hardlink must have at least the permissions of the new name
70579+ if ((oldmode & needmode) != needmode)
70580+ goto bad;
70581+
70582+ // if old name had restrictions/auditing, make sure the new name does as well
70583+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70584+
70585+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70586+ if (is_privileged_binary(old_dentry))
70587+ needmode |= GR_SETID;
70588+
70589+ if ((newmode & needmode) != needmode)
70590+ goto bad;
70591+
70592+ // enforce minimum permissions
70593+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70594+ return newmode;
70595+bad:
70596+ needmode = oldmode;
70597+ if (is_privileged_binary(old_dentry))
70598+ needmode |= GR_SETID;
70599+
70600+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70601+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70602+ return (GR_CREATE | GR_LINK);
70603+ } else if (newmode & GR_SUPPRESS)
70604+ return GR_SUPPRESS;
70605+ else
70606+ return 0;
70607+}
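/*
 * A standalone sketch of the subset test gr_check_link() relies on: a link
 * is allowed only when every required bit (needmode) is already granted,
 * i.e. (have & need) == need.  The EX_* flag values are illustrative, not
 * the real GR_* constants:
 */
#include <stdio.h>

#define EX_READ   0x1
#define EX_WRITE  0x2
#define EX_SETID  0x4

static int subset_ok(unsigned have, unsigned need)
{
	return (have & need) == need;   /* every required bit is set */
}

int main(void)
{
	unsigned oldmode  = EX_READ | EX_WRITE;
	unsigned needmode = EX_READ | EX_SETID;  /* e.g. a suid source adds SETID */

	printf("%s\n", subset_ok(oldmode, needmode) ? "allow" : "deny"); /* deny */
	return 0;
}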
70608+
70609+int
70610+gr_check_hidden_task(const struct task_struct *task)
70611+{
70612+ if (unlikely(!(gr_status & GR_READY)))
70613+ return 0;
70614+
70615+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70616+ return 1;
70617+
70618+ return 0;
70619+}
70620+
70621+int
70622+gr_check_protected_task(const struct task_struct *task)
70623+{
70624+ if (unlikely(!(gr_status & GR_READY) || !task))
70625+ return 0;
70626+
70627+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70628+ task->acl != current->acl)
70629+ return 1;
70630+
70631+ return 0;
70632+}
70633+
70634+int
70635+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70636+{
70637+ struct task_struct *p;
70638+ int ret = 0;
70639+
70640+ if (unlikely(!(gr_status & GR_READY) || !pid))
70641+ return ret;
70642+
70643+ read_lock(&tasklist_lock);
70644+ do_each_pid_task(pid, type, p) {
70645+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70646+ p->acl != current->acl) {
70647+ ret = 1;
70648+ goto out;
70649+ }
70650+ } while_each_pid_task(pid, type, p);
70651+out:
70652+ read_unlock(&tasklist_lock);
70653+
70654+ return ret;
70655+}
70656+
70657+void
70658+gr_copy_label(struct task_struct *tsk)
70659+{
70660+ struct task_struct *p = current;
70661+
70662+ tsk->inherited = p->inherited;
70663+ tsk->acl_sp_role = 0;
70664+ tsk->acl_role_id = p->acl_role_id;
70665+ tsk->acl = p->acl;
70666+ tsk->role = p->role;
70667+ tsk->signal->used_accept = 0;
70668+ tsk->signal->curr_ip = p->signal->curr_ip;
70669+ tsk->signal->saved_ip = p->signal->saved_ip;
70670+ if (p->exec_file)
70671+ get_file(p->exec_file);
70672+ tsk->exec_file = p->exec_file;
70673+ tsk->is_writable = p->is_writable;
70674+ if (unlikely(p->signal->used_accept)) {
70675+ p->signal->curr_ip = 0;
70676+ p->signal->saved_ip = 0;
70677+ }
70678+
70679+ return;
70680+}
70681+
70682+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
70683+
70684+int
70685+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70686+{
70687+ unsigned int i;
70688+ __u16 num;
70689+ uid_t *uidlist;
70690+ uid_t curuid;
70691+ int realok = 0;
70692+ int effectiveok = 0;
70693+ int fsok = 0;
70694+ uid_t globalreal, globaleffective, globalfs;
70695+
70696+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
70697+ struct user_struct *user;
70698+
70699+ if (!uid_valid(real))
70700+ goto skipit;
70701+
70702+ /* find user based on global namespace */
70703+
70704+ globalreal = GR_GLOBAL_UID(real);
70705+
70706+ user = find_user(make_kuid(&init_user_ns, globalreal));
70707+ if (user == NULL)
70708+ goto skipit;
70709+
70710+ if (gr_process_kernel_setuid_ban(user)) {
70711+ /* for find_user */
70712+ free_uid(user);
70713+ return 1;
70714+ }
70715+
70716+ /* for find_user */
70717+ free_uid(user);
70718+
70719+skipit:
70720+#endif
70721+
70722+ if (unlikely(!(gr_status & GR_READY)))
70723+ return 0;
70724+
70725+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70726+ gr_log_learn_uid_change(real, effective, fs);
70727+
70728+ num = current->acl->user_trans_num;
70729+ uidlist = current->acl->user_transitions;
70730+
70731+ if (uidlist == NULL)
70732+ return 0;
70733+
70734+ if (!uid_valid(real)) {
70735+ realok = 1;
70736+ globalreal = (uid_t)-1;
70737+ } else {
70738+ globalreal = GR_GLOBAL_UID(real);
70739+ }
70740+ if (!uid_valid(effective)) {
70741+ effectiveok = 1;
70742+ globaleffective = (uid_t)-1;
70743+ } else {
70744+ globaleffective = GR_GLOBAL_UID(effective);
70745+ }
70746+ if (!uid_valid(fs)) {
70747+ fsok = 1;
70748+ globalfs = (uid_t)-1;
70749+ } else {
70750+ globalfs = GR_GLOBAL_UID(fs);
70751+ }
70752+
70753+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
70754+ for (i = 0; i < num; i++) {
70755+ curuid = uidlist[i];
70756+ if (globalreal == curuid)
70757+ realok = 1;
70758+ if (globaleffective == curuid)
70759+ effectiveok = 1;
70760+ if (globalfs == curuid)
70761+ fsok = 1;
70762+ }
70763+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
70764+ for (i = 0; i < num; i++) {
70765+ curuid = uidlist[i];
70766+ if (globalreal == curuid)
70767+ break;
70768+ if (globaleffective == curuid)
70769+ break;
70770+ if (globalfs == curuid)
70771+ break;
70772+ }
70773+ /* not in deny list */
70774+ if (i == num) {
70775+ realok = 1;
70776+ effectiveok = 1;
70777+ fsok = 1;
70778+ }
70779+ }
70780+
70781+ if (realok && effectiveok && fsok)
70782+ return 0;
70783+ else {
70784+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70785+ return 1;
70786+ }
70787+}
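/*
 * A compressed standalone model of the transition check above: with an
 * ALLOW list every requested id must appear in the list; with a DENY list
 * the request fails if any requested id appears.  The list contents and
 * function name are made up for illustration:
 */
#include <stdio.h>

enum trans_type { ID_ALLOW, ID_DENY };

static int ids_permitted(enum trans_type type, const unsigned *list,
			 unsigned n, const unsigned *req, unsigned nreq)
{
	unsigned i, j;

	for (j = 0; j < nreq; j++) {
		int in_list = 0;

		for (i = 0; i < n; i++)
			if (list[i] == req[j])
				in_list = 1;
		if (type == ID_ALLOW && !in_list)
			return 0;       /* not whitelisted */
		if (type == ID_DENY && in_list)
			return 0;       /* blacklisted */
	}
	return 1;
}

int main(void)
{
	unsigned allow[] = { 1000, 1001 };
	unsigned req[]   = { 1000, 1002, 1000 };  /* real, effective, fs */

	printf("%d\n", ids_permitted(ID_ALLOW, allow, 2, req, 3)); /* 0: 1002 missing */
	return 0;
}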
70788+
70789+int
70790+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70791+{
70792+ unsigned int i;
70793+ __u16 num;
70794+ gid_t *gidlist;
70795+ gid_t curgid;
70796+ int realok = 0;
70797+ int effectiveok = 0;
70798+ int fsok = 0;
70799+ gid_t globalreal, globaleffective, globalfs;
70800+
70801+ if (unlikely(!(gr_status & GR_READY)))
70802+ return 0;
70803+
70804+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70805+ gr_log_learn_gid_change(real, effective, fs);
70806+
70807+ num = current->acl->group_trans_num;
70808+ gidlist = current->acl->group_transitions;
70809+
70810+ if (gidlist == NULL)
70811+ return 0;
70812+
70813+ if (!gid_valid(real)) {
70814+ realok = 1;
70815+ globalreal = (gid_t)-1;
70816+ } else {
70817+ globalreal = GR_GLOBAL_GID(real);
70818+ }
70819+ if (!gid_valid(effective)) {
70820+ effectiveok = 1;
70821+ globaleffective = (gid_t)-1;
70822+ } else {
70823+ globaleffective = GR_GLOBAL_GID(effective);
70824+ }
70825+ if (!gid_valid(fs)) {
70826+ fsok = 1;
70827+ globalfs = (gid_t)-1;
70828+ } else {
70829+ globalfs = GR_GLOBAL_GID(fs);
70830+ }
70831+
70832+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
70833+ for (i = 0; i < num; i++) {
70834+ curgid = gidlist[i];
70835+ if (globalreal == curgid)
70836+ realok = 1;
70837+ if (globaleffective == curgid)
70838+ effectiveok = 1;
70839+ if (globalfs == curgid)
70840+ fsok = 1;
70841+ }
70842+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
70843+ for (i = 0; i < num; i++) {
70844+ curgid = gidlist[i];
70845+ if (globalreal == curgid)
70846+ break;
70847+ if (globaleffective == curgid)
70848+ break;
70849+ if (globalfs == curgid)
70850+ break;
70851+ }
70852+ /* not in deny list */
70853+ if (i == num) {
70854+ realok = 1;
70855+ effectiveok = 1;
70856+ fsok = 1;
70857+ }
70858+ }
70859+
70860+ if (realok && effectiveok && fsok)
70861+ return 0;
70862+ else {
70863+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70864+ return 1;
70865+ }
70866+}
70867+
70868+extern int gr_acl_is_capable(const int cap);
70869+
70870+void
70871+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
70872+{
70873+ struct acl_role_label *role = task->role;
70874+ struct acl_role_label *origrole = role;
70875+ struct acl_subject_label *subj = NULL;
70876+ struct acl_object_label *obj;
70877+ struct file *filp;
70878+ uid_t uid;
70879+ gid_t gid;
70880+
70881+ if (unlikely(!(gr_status & GR_READY)))
70882+ return;
70883+
70884+ uid = GR_GLOBAL_UID(kuid);
70885+ gid = GR_GLOBAL_GID(kgid);
70886+
70887+ filp = task->exec_file;
70888+
70889+ /* kernel process, we'll give them the kernel role */
70890+ if (unlikely(!filp)) {
70891+ task->role = running_polstate.kernel_role;
70892+ task->acl = running_polstate.kernel_role->root_label;
70893+ return;
70894+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
70895+ /* save the current ip at time of role lookup so that the proper
70896+ IP will be learned for role_allowed_ip */
70897+ task->signal->saved_ip = task->signal->curr_ip;
70898+ role = lookup_acl_role_label(task, uid, gid);
70899+ }
70900+
70901+ /* don't change the role if we're not a privileged process */
70902+ if (role && task->role != role &&
70903+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
70904+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
70905+ return;
70906+
70907+ task->role = role;
70908+
70909+ if (task->inherited) {
70910+ /* if we reached our subject through inheritance, then first see
70911+ if there's a subject of the same name in the new role that has
70912+ an object that would result in the same inherited subject
70913+ */
70914+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
70915+ if (subj) {
70916+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
70917+ if (!(obj->mode & GR_INHERIT))
70918+ subj = NULL;
70919+ }
70920+
70921+ }
70922+ if (subj == NULL) {
70923+	/* otherwise, perform the subject lookup
70924+	   in the (possibly new) role; we can reuse this
70925+	   result below in the case where role == task->role
70926+	*/
70927+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
70928+ }
70929+
70930+	/* if we changed uid/gid but resolved to the same role
70931+	   and are using inheritance, don't lose the inherited subject:
70932+	   if the current subject differs from what a normal lookup
70933+	   would produce, we arrived at it via inheritance, so keep
70934+	   that subject
70935+	*/
70936+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
70937+ (subj == task->acl)))
70938+ task->acl = subj;
70939+
70940+ /* leave task->inherited unaffected */
70941+
70942+ task->is_writable = 0;
70943+
70944+ /* ignore additional mmap checks for processes that are writable
70945+ by the default ACL */
70946+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70947+ if (unlikely(obj->mode & GR_WRITE))
70948+ task->is_writable = 1;
70949+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70950+ if (unlikely(obj->mode & GR_WRITE))
70951+ task->is_writable = 1;
70952+
70953+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70954+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70955+#endif
70956+
70957+ gr_set_proc_res(task);
70958+
70959+ return;
70960+}
70961+
70962+int
70963+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
70964+ const int unsafe_flags)
70965+{
70966+ struct task_struct *task = current;
70967+ struct acl_subject_label *newacl;
70968+ struct acl_object_label *obj;
70969+ __u32 retmode;
70970+
70971+ if (unlikely(!(gr_status & GR_READY)))
70972+ return 0;
70973+
70974+ newacl = chk_subj_label(dentry, mnt, task->role);
70975+
70976+	/* special handling for the case where we did an strace -f -p <pid> from an admin role
70977+	   and the traced pid then did an exec
70978+	*/
70979+ rcu_read_lock();
70980+ read_lock(&tasklist_lock);
70981+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
70982+ (task->parent->acl->mode & GR_POVERRIDE))) {
70983+ read_unlock(&tasklist_lock);
70984+ rcu_read_unlock();
70985+ goto skip_check;
70986+ }
70987+ read_unlock(&tasklist_lock);
70988+ rcu_read_unlock();
70989+
70990+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
70991+ !(task->role->roletype & GR_ROLE_GOD) &&
70992+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
70993+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70994+ if (unsafe_flags & LSM_UNSAFE_SHARE)
70995+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
70996+ else
70997+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
70998+ return -EACCES;
70999+ }
71000+
71001+skip_check:
71002+
71003+ obj = chk_obj_label(dentry, mnt, task->acl);
71004+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71005+
71006+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71007+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71008+ if (obj->nested)
71009+ task->acl = obj->nested;
71010+ else
71011+ task->acl = newacl;
71012+ task->inherited = 0;
71013+ } else {
71014+ task->inherited = 1;
71015+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71016+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71017+ }
71018+
71019+ task->is_writable = 0;
71020+
71021+ /* ignore additional mmap checks for processes that are writable
71022+ by the default ACL */
71023+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71024+ if (unlikely(obj->mode & GR_WRITE))
71025+ task->is_writable = 1;
71026+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71027+ if (unlikely(obj->mode & GR_WRITE))
71028+ task->is_writable = 1;
71029+
71030+ gr_set_proc_res(task);
71031+
71032+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71033+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71034+#endif
71035+ return 0;
71036+}
71037+
71038+/* always called with valid inodev ptr */
71039+static void
71040+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71041+{
71042+ struct acl_object_label *matchpo;
71043+ struct acl_subject_label *matchps;
71044+ struct acl_subject_label *subj;
71045+ struct acl_role_label *role;
71046+ unsigned int x;
71047+
71048+ FOR_EACH_ROLE_START(role)
71049+ FOR_EACH_SUBJECT_START(role, subj, x)
71050+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71051+ matchpo->mode |= GR_DELETED;
71052+ FOR_EACH_SUBJECT_END(subj,x)
71053+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71054+ /* nested subjects aren't in the role's subj_hash table */
71055+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71056+ matchpo->mode |= GR_DELETED;
71057+ FOR_EACH_NESTED_SUBJECT_END(subj)
71058+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71059+ matchps->mode |= GR_DELETED;
71060+ FOR_EACH_ROLE_END(role)
71061+
71062+ inodev->nentry->deleted = 1;
71063+
71064+ return;
71065+}
71066+
71067+void
71068+gr_handle_delete(const u64 ino, const dev_t dev)
71069+{
71070+ struct inodev_entry *inodev;
71071+
71072+ if (unlikely(!(gr_status & GR_READY)))
71073+ return;
71074+
71075+ write_lock(&gr_inode_lock);
71076+ inodev = lookup_inodev_entry(ino, dev);
71077+ if (inodev != NULL)
71078+ do_handle_delete(inodev, ino, dev);
71079+ write_unlock(&gr_inode_lock);
71080+
71081+ return;
71082+}
71083+
71084+static void
71085+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71086+ const u64 newinode, const dev_t newdevice,
71087+ struct acl_subject_label *subj)
71088+{
71089+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71090+ struct acl_object_label *match;
71091+
71092+ match = subj->obj_hash[index];
71093+
71094+ while (match && (match->inode != oldinode ||
71095+ match->device != olddevice ||
71096+ !(match->mode & GR_DELETED)))
71097+ match = match->next;
71098+
71099+ if (match && (match->inode == oldinode)
71100+ && (match->device == olddevice)
71101+ && (match->mode & GR_DELETED)) {
71102+ if (match->prev == NULL) {
71103+ subj->obj_hash[index] = match->next;
71104+ if (match->next != NULL)
71105+ match->next->prev = NULL;
71106+ } else {
71107+ match->prev->next = match->next;
71108+ if (match->next != NULL)
71109+ match->next->prev = match->prev;
71110+ }
71111+ match->prev = NULL;
71112+ match->next = NULL;
71113+ match->inode = newinode;
71114+ match->device = newdevice;
71115+ match->mode &= ~GR_DELETED;
71116+
71117+ insert_acl_obj_label(match, subj);
71118+ }
71119+
71120+ return;
71121+}
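/*
 * A standalone sketch of the list surgery used by update_acl_obj_label()
 * above and repeated in the two update helpers below: unlink a node from a
 * doubly linked hash chain (fixing the bucket head when the node is first),
 * after which it can be re-keyed and reinserted.  The node type is a
 * made-up stand-in:
 */
#include <stddef.h>
#include <stdio.h>

struct node { struct node *prev, *next; unsigned long key; };

static void unlink_node(struct node **bucket, struct node *n)
{
	if (n->prev == NULL) {                /* n is the bucket head */
		*bucket = n->next;
		if (n->next)
			n->next->prev = NULL;
	} else {
		n->prev->next = n->next;
		if (n->next)
			n->next->prev = n->prev;
	}
	n->prev = n->next = NULL;             /* ready for reinsertion */
}

int main(void)
{
	struct node a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };
	struct node *bucket = &a;

	a.next = &b; b.prev = &a;
	unlink_node(&bucket, &b);
	printf("head key=%lu, head next=%p\n", bucket->key, (void *)bucket->next);
	return 0;
}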
71122+
71123+static void
71124+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71125+ const u64 newinode, const dev_t newdevice,
71126+ struct acl_role_label *role)
71127+{
71128+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71129+ struct acl_subject_label *match;
71130+
71131+ match = role->subj_hash[index];
71132+
71133+ while (match && (match->inode != oldinode ||
71134+ match->device != olddevice ||
71135+ !(match->mode & GR_DELETED)))
71136+ match = match->next;
71137+
71138+ if (match && (match->inode == oldinode)
71139+ && (match->device == olddevice)
71140+ && (match->mode & GR_DELETED)) {
71141+ if (match->prev == NULL) {
71142+ role->subj_hash[index] = match->next;
71143+ if (match->next != NULL)
71144+ match->next->prev = NULL;
71145+ } else {
71146+ match->prev->next = match->next;
71147+ if (match->next != NULL)
71148+ match->next->prev = match->prev;
71149+ }
71150+ match->prev = NULL;
71151+ match->next = NULL;
71152+ match->inode = newinode;
71153+ match->device = newdevice;
71154+ match->mode &= ~GR_DELETED;
71155+
71156+ insert_acl_subj_label(match, role);
71157+ }
71158+
71159+ return;
71160+}
71161+
71162+static void
71163+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71164+ const u64 newinode, const dev_t newdevice)
71165+{
71166+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71167+ struct inodev_entry *match;
71168+
71169+ match = running_polstate.inodev_set.i_hash[index];
71170+
71171+ while (match && (match->nentry->inode != oldinode ||
71172+ match->nentry->device != olddevice || !match->nentry->deleted))
71173+ match = match->next;
71174+
71175+ if (match && (match->nentry->inode == oldinode)
71176+ && (match->nentry->device == olddevice) &&
71177+ match->nentry->deleted) {
71178+ if (match->prev == NULL) {
71179+ running_polstate.inodev_set.i_hash[index] = match->next;
71180+ if (match->next != NULL)
71181+ match->next->prev = NULL;
71182+ } else {
71183+ match->prev->next = match->next;
71184+ if (match->next != NULL)
71185+ match->next->prev = match->prev;
71186+ }
71187+ match->prev = NULL;
71188+ match->next = NULL;
71189+ match->nentry->inode = newinode;
71190+ match->nentry->device = newdevice;
71191+ match->nentry->deleted = 0;
71192+
71193+ insert_inodev_entry(match);
71194+ }
71195+
71196+ return;
71197+}
71198+
71199+static void
71200+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71201+{
71202+ struct acl_subject_label *subj;
71203+ struct acl_role_label *role;
71204+ unsigned int x;
71205+
71206+ FOR_EACH_ROLE_START(role)
71207+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71208+
71209+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71210+		if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
71211+ subj->inode = ino;
71212+ subj->device = dev;
71213+ }
71214+ /* nested subjects aren't in the role's subj_hash table */
71215+ update_acl_obj_label(matchn->inode, matchn->device,
71216+ ino, dev, subj);
71217+ FOR_EACH_NESTED_SUBJECT_END(subj)
71218+ FOR_EACH_SUBJECT_START(role, subj, x)
71219+ update_acl_obj_label(matchn->inode, matchn->device,
71220+ ino, dev, subj);
71221+ FOR_EACH_SUBJECT_END(subj,x)
71222+ FOR_EACH_ROLE_END(role)
71223+
71224+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71225+
71226+ return;
71227+}
71228+
71229+static void
71230+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71231+ const struct vfsmount *mnt)
71232+{
71233+ u64 ino = __get_ino(dentry);
71234+ dev_t dev = __get_dev(dentry);
71235+
71236+ __do_handle_create(matchn, ino, dev);
71237+
71238+ return;
71239+}
71240+
71241+void
71242+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71243+{
71244+ struct name_entry *matchn;
71245+
71246+ if (unlikely(!(gr_status & GR_READY)))
71247+ return;
71248+
71249+ preempt_disable();
71250+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71251+
71252+ if (unlikely((unsigned long)matchn)) {
71253+ write_lock(&gr_inode_lock);
71254+ do_handle_create(matchn, dentry, mnt);
71255+ write_unlock(&gr_inode_lock);
71256+ }
71257+ preempt_enable();
71258+
71259+ return;
71260+}
71261+
71262+void
71263+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71264+{
71265+ struct name_entry *matchn;
71266+
71267+ if (unlikely(!(gr_status & GR_READY)))
71268+ return;
71269+
71270+ preempt_disable();
71271+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71272+
71273+ if (unlikely((unsigned long)matchn)) {
71274+ write_lock(&gr_inode_lock);
71275+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71276+ write_unlock(&gr_inode_lock);
71277+ }
71278+ preempt_enable();
71279+
71280+ return;
71281+}
71282+
71283+void
71284+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71285+ struct dentry *old_dentry,
71286+ struct dentry *new_dentry,
71287+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71288+{
71289+ struct name_entry *matchn;
71290+ struct name_entry *matchn2 = NULL;
71291+ struct inodev_entry *inodev;
71292+ struct inode *inode = new_dentry->d_inode;
71293+ u64 old_ino = __get_ino(old_dentry);
71294+ dev_t old_dev = __get_dev(old_dentry);
71295+ unsigned int exchange = flags & RENAME_EXCHANGE;
71296+
71297+	/* vfs_rename swaps the name and parent link for old_dentry and
71298+	   new_dentry;
71299+	   at this point, old_dentry has the new name, parent link, and inode
71300+	   for the renamed file;
71301+	   if a file is being replaced by the rename, new_dentry has the inode
71302+	   and name of the replaced file
71303+	*/
71304+
71305+ if (unlikely(!(gr_status & GR_READY)))
71306+ return;
71307+
71308+ preempt_disable();
71309+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71310+
71311+ /* exchange cases:
71312+ a filename exists for the source, but not dest
71313+ do a recreate on source
71314+ a filename exists for the dest, but not source
71315+ do a recreate on dest
71316+ a filename exists for both source and dest
71317+ delete source and dest, then create source and dest
71318+ a filename exists for neither source nor dest
71319+ no updates needed
71320+
71321+ the name entry lookups get us the old inode/dev associated with
71322+ each name, so do the deletes first (if possible) so that when
71323+ we do the create, we pick up on the right entries
71324+ */
71325+
71326+ if (exchange)
71327+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71328+
71329+ /* we wouldn't have to check d_inode if it weren't for
71330+ NFS silly-renaming
71331+ */
71332+
71333+ write_lock(&gr_inode_lock);
71334+ if (unlikely((replace || exchange) && inode)) {
71335+ u64 new_ino = __get_ino(new_dentry);
71336+ dev_t new_dev = __get_dev(new_dentry);
71337+
71338+ inodev = lookup_inodev_entry(new_ino, new_dev);
71339+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71340+ do_handle_delete(inodev, new_ino, new_dev);
71341+ }
71342+
71343+ inodev = lookup_inodev_entry(old_ino, old_dev);
71344+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71345+ do_handle_delete(inodev, old_ino, old_dev);
71346+
71347+ if (unlikely(matchn != NULL))
71348+ do_handle_create(matchn, old_dentry, mnt);
71349+
71350+ if (unlikely(matchn2 != NULL))
71351+ do_handle_create(matchn2, new_dentry, mnt);
71352+
71353+ write_unlock(&gr_inode_lock);
71354+ preempt_enable();
71355+
71356+ return;
71357+}
71358+
71359+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71360+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71361+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71362+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71363+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71364+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71365+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71366+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71367+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71368+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71369+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71370+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71371+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71372+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71373+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71374+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71375+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71376+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71377+};
71378+
71379+void
71380+gr_learn_resource(const struct task_struct *task,
71381+ const int res, const unsigned long wanted, const int gt)
71382+{
71383+ struct acl_subject_label *acl;
71384+ const struct cred *cred;
71385+
71386+ if (unlikely((gr_status & GR_READY) &&
71387+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71388+ goto skip_reslog;
71389+
71390+ gr_log_resource(task, res, wanted, gt);
71391+skip_reslog:
71392+
71393+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71394+ return;
71395+
71396+ acl = task->acl;
71397+
71398+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71399+ !(acl->resmask & (1U << (unsigned short) res))))
71400+ return;
71401+
71402+ if (wanted >= acl->res[res].rlim_cur) {
71403+ unsigned long res_add;
71404+
71405+ res_add = wanted + res_learn_bumps[res];
71406+
71407+ acl->res[res].rlim_cur = res_add;
71408+
71409+ if (wanted > acl->res[res].rlim_max)
71410+ acl->res[res].rlim_max = res_add;
71411+
71412+ /* only log the subject filename, since resource logging is supported for
71413+ single-subject learning only */
71414+ rcu_read_lock();
71415+ cred = __task_cred(task);
71416+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71417+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71418+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71419+ "", (unsigned long) res, &task->signal->saved_ip);
71420+ rcu_read_unlock();
71421+ }
71422+
71423+ return;
71424+}
71425+EXPORT_SYMBOL_GPL(gr_learn_resource);
71426+#endif
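/*
 * A standalone model of the learning bump in gr_learn_resource() above:
 * when observed demand reaches the current soft limit, both limits grow to
 * the demand plus a per-resource headroom so later runs of the workload
 * fit.  The bump value here is arbitrary:
 */
#include <stdio.h>

struct rlim { unsigned long cur, max; };

static void learn_bump(struct rlim *r, unsigned long wanted, unsigned long bump)
{
	if (wanted >= r->cur) {
		unsigned long grown = wanted + bump;

		r->cur = grown;
		if (wanted > r->max)
			r->max = grown;
	}
}

int main(void)
{
	struct rlim r = { 1024, 1024 };

	learn_bump(&r, 5000, 256);                  /* demand exceeded both limits */
	printf("cur=%lu max=%lu\n", r.cur, r.max);  /* 5256 5256 */
	return 0;
}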
71427+
71428+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71429+void
71430+pax_set_initial_flags(struct linux_binprm *bprm)
71431+{
71432+ struct task_struct *task = current;
71433+ struct acl_subject_label *proc;
71434+ unsigned long flags;
71435+
71436+ if (unlikely(!(gr_status & GR_READY)))
71437+ return;
71438+
71439+ flags = pax_get_flags(task);
71440+
71441+ proc = task->acl;
71442+
71443+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71444+ flags &= ~MF_PAX_PAGEEXEC;
71445+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71446+ flags &= ~MF_PAX_SEGMEXEC;
71447+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71448+ flags &= ~MF_PAX_RANDMMAP;
71449+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71450+ flags &= ~MF_PAX_EMUTRAMP;
71451+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71452+ flags &= ~MF_PAX_MPROTECT;
71453+
71454+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71455+ flags |= MF_PAX_PAGEEXEC;
71456+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71457+ flags |= MF_PAX_SEGMEXEC;
71458+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71459+ flags |= MF_PAX_RANDMMAP;
71460+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71461+ flags |= MF_PAX_EMUTRAMP;
71462+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71463+ flags |= MF_PAX_MPROTECT;
71464+
71465+ pax_set_flags(task, flags);
71466+
71467+ return;
71468+}
71469+#endif
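/*
 * A standalone sketch of the override scheme in pax_set_initial_flags()
 * above: per-subject DISABLE bits clear a flag and ENABLE bits set it, with
 * the clears applied first so an explicit enable wins.  The F_* values are
 * illustrative, not the MF_PAX_* constants:
 */
#include <stdio.h>

#define F_A 0x1
#define F_B 0x2

static unsigned apply_policy(unsigned flags, unsigned disable, unsigned enable)
{
	flags &= ~disable;      /* first strip everything the policy disables */
	flags |= enable;        /* then force-on everything it enables */
	return flags;
}

int main(void)
{
	/* inherited A+B; policy disables B and (redundantly) enables A */
	printf("0x%x\n", apply_policy(F_A | F_B, F_B, F_A));  /* 0x1 */
	return 0;
}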
71470+
71471+int
71472+gr_handle_proc_ptrace(struct task_struct *task)
71473+{
71474+ struct file *filp;
71475+ struct task_struct *tmp = task;
71476+ struct task_struct *curtemp = current;
71477+ __u32 retmode;
71478+
71479+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71480+ if (unlikely(!(gr_status & GR_READY)))
71481+ return 0;
71482+#endif
71483+
71484+ read_lock(&tasklist_lock);
71485+ read_lock(&grsec_exec_file_lock);
71486+ filp = task->exec_file;
71487+
71488+ while (task_pid_nr(tmp) > 0) {
71489+ if (tmp == curtemp)
71490+ break;
71491+ tmp = tmp->real_parent;
71492+ }
71493+
71494+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71495+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71496+ read_unlock(&grsec_exec_file_lock);
71497+ read_unlock(&tasklist_lock);
71498+ return 1;
71499+ }
71500+
71501+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71502+ if (!(gr_status & GR_READY)) {
71503+ read_unlock(&grsec_exec_file_lock);
71504+ read_unlock(&tasklist_lock);
71505+ return 0;
71506+ }
71507+#endif
71508+
71509+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71510+ read_unlock(&grsec_exec_file_lock);
71511+ read_unlock(&tasklist_lock);
71512+
71513+ if (retmode & GR_NOPTRACE)
71514+ return 1;
71515+
71516+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71517+ && (current->acl != task->acl || (current->acl != current->role->root_label
71518+ && task_pid_nr(current) != task_pid_nr(task))))
71519+ return 1;
71520+
71521+ return 0;
71522+}
71523+
71524+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71525+{
71526+ if (unlikely(!(gr_status & GR_READY)))
71527+ return;
71528+
71529+ if (!(current->role->roletype & GR_ROLE_GOD))
71530+ return;
71531+
71532+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71533+ p->role->rolename, gr_task_roletype_to_char(p),
71534+ p->acl->filename);
71535+}
71536+
71537+int
71538+gr_handle_ptrace(struct task_struct *task, const long request)
71539+{
71540+ struct task_struct *tmp = task;
71541+ struct task_struct *curtemp = current;
71542+ __u32 retmode;
71543+
71544+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71545+ if (unlikely(!(gr_status & GR_READY)))
71546+ return 0;
71547+#endif
71548+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71549+ read_lock(&tasklist_lock);
71550+ while (task_pid_nr(tmp) > 0) {
71551+ if (tmp == curtemp)
71552+ break;
71553+ tmp = tmp->real_parent;
71554+ }
71555+
71556+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71557+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71558+ read_unlock(&tasklist_lock);
71559+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71560+ return 1;
71561+ }
71562+ read_unlock(&tasklist_lock);
71563+ }
71564+
71565+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71566+ if (!(gr_status & GR_READY))
71567+ return 0;
71568+#endif
71569+
71570+ read_lock(&grsec_exec_file_lock);
71571+ if (unlikely(!task->exec_file)) {
71572+ read_unlock(&grsec_exec_file_lock);
71573+ return 0;
71574+ }
71575+
71576+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71577+ read_unlock(&grsec_exec_file_lock);
71578+
71579+ if (retmode & GR_NOPTRACE) {
71580+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71581+ return 1;
71582+ }
71583+
71584+ if (retmode & GR_PTRACERD) {
71585+ switch (request) {
71586+ case PTRACE_SEIZE:
71587+ case PTRACE_POKETEXT:
71588+ case PTRACE_POKEDATA:
71589+ case PTRACE_POKEUSR:
71590+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71591+ case PTRACE_SETREGS:
71592+ case PTRACE_SETFPREGS:
71593+#endif
71594+#ifdef CONFIG_X86
71595+ case PTRACE_SETFPXREGS:
71596+#endif
71597+#ifdef CONFIG_ALTIVEC
71598+ case PTRACE_SETVRREGS:
71599+#endif
71600+ return 1;
71601+ default:
71602+ return 0;
71603+ }
71604+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71605+ !(current->role->roletype & GR_ROLE_GOD) &&
71606+ (current->acl != task->acl)) {
71607+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71608+ return 1;
71609+ }
71610+
71611+ return 0;
71612+}
71613+
71614+static int is_writable_mmap(const struct file *filp)
71615+{
71616+ struct task_struct *task = current;
71617+ struct acl_object_label *obj, *obj2;
71618+
71619+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71620+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71621+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71622+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71623+ task->role->root_label);
71624+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71625+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71626+ return 1;
71627+ }
71628+ }
71629+ return 0;
71630+}
71631+
71632+int
71633+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71634+{
71635+ __u32 mode;
71636+
71637+ if (unlikely(!file || !(prot & PROT_EXEC)))
71638+ return 1;
71639+
71640+ if (is_writable_mmap(file))
71641+ return 0;
71642+
71643+ mode =
71644+ gr_search_file(file->f_path.dentry,
71645+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71646+ file->f_path.mnt);
71647+
71648+ if (!gr_tpe_allow(file))
71649+ return 0;
71650+
71651+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71652+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71653+ return 0;
71654+ } else if (unlikely(!(mode & GR_EXEC))) {
71655+ return 0;
71656+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71657+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71658+ return 1;
71659+ }
71660+
71661+ return 1;
71662+}
71663+
71664+int
71665+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
71666+{
71667+ __u32 mode;
71668+
71669+ if (unlikely(!file || !(prot & PROT_EXEC)))
71670+ return 1;
71671+
71672+ if (is_writable_mmap(file))
71673+ return 0;
71674+
71675+ mode =
71676+ gr_search_file(file->f_path.dentry,
71677+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71678+ file->f_path.mnt);
71679+
71680+ if (!gr_tpe_allow(file))
71681+ return 0;
71682+
71683+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71684+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71685+ return 0;
71686+ } else if (unlikely(!(mode & GR_EXEC))) {
71687+ return 0;
71688+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71689+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71690+ return 1;
71691+ }
71692+
71693+ return 1;
71694+}
71695+
71696+void
71697+gr_acl_handle_psacct(struct task_struct *task, const long code)
71698+{
71699+ unsigned long runtime, cputime;
71700+ cputime_t utime, stime;
71701+ unsigned int wday, cday;
71702+ __u8 whr, chr;
71703+ __u8 wmin, cmin;
71704+ __u8 wsec, csec;
71705+ struct timespec curtime, starttime;
71706+
71707+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
71708+ !(task->acl->mode & GR_PROCACCT)))
71709+ return;
71710+
71711+ curtime = ns_to_timespec(ktime_get_ns());
71712+ starttime = ns_to_timespec(task->start_time);
71713+ runtime = curtime.tv_sec - starttime.tv_sec;
71714+ wday = runtime / (60 * 60 * 24);
71715+ runtime -= wday * (60 * 60 * 24);
71716+ whr = runtime / (60 * 60);
71717+ runtime -= whr * (60 * 60);
71718+ wmin = runtime / 60;
71719+ runtime -= wmin * 60;
71720+ wsec = runtime;
71721+
71722+ task_cputime(task, &utime, &stime);
71723+ cputime = cputime_to_secs(utime + stime);
71724+ cday = cputime / (60 * 60 * 24);
71725+ cputime -= cday * (60 * 60 * 24);
71726+ chr = cputime / (60 * 60);
71727+ cputime -= chr * (60 * 60);
71728+ cmin = cputime / 60;
71729+ cputime -= cmin * 60;
71730+ csec = cputime;
71731+
71732+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
71733+
71734+ return;
71735+}
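/*
 * A standalone version of the day/hour/minute/second split used above for
 * both the wall-clock and CPU-time figures:
 */
#include <stdio.h>

static void split_time(unsigned long secs, unsigned *d, unsigned *h,
		       unsigned *m, unsigned *s)
{
	*d = secs / (60 * 60 * 24);  secs -= (unsigned long)*d * 60 * 60 * 24;
	*h = secs / (60 * 60);       secs -= (unsigned long)*h * 60 * 60;
	*m = secs / 60;              secs -= (unsigned long)*m * 60;
	*s = secs;
}

int main(void)
{
	unsigned d, h, m, s;

	split_time(90061, &d, &h, &m, &s);
	printf("%ud %uh %um %us\n", d, h, m, s);   /* 1d 1h 1m 1s */
	return 0;
}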
71736+
71737+#ifdef CONFIG_TASKSTATS
71738+int gr_is_taskstats_denied(int pid)
71739+{
71740+ struct task_struct *task;
71741+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71742+ const struct cred *cred;
71743+#endif
71744+ int ret = 0;
71745+
71746+ /* restrict taskstats viewing to un-chrooted root users
71747+ who have the 'view' subject flag if the RBAC system is enabled
71748+ */
71749+
71750+ rcu_read_lock();
71751+ read_lock(&tasklist_lock);
71752+ task = find_task_by_vpid(pid);
71753+ if (task) {
71754+#ifdef CONFIG_GRKERNSEC_CHROOT
71755+ if (proc_is_chrooted(task))
71756+ ret = -EACCES;
71757+#endif
71758+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71759+ cred = __task_cred(task);
71760+#ifdef CONFIG_GRKERNSEC_PROC_USER
71761+ if (gr_is_global_nonroot(cred->uid))
71762+ ret = -EACCES;
71763+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71764+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
71765+ ret = -EACCES;
71766+#endif
71767+#endif
71768+ if (gr_status & GR_READY) {
71769+ if (!(task->acl->mode & GR_VIEW))
71770+ ret = -EACCES;
71771+ }
71772+ } else
71773+ ret = -ENOENT;
71774+
71775+ read_unlock(&tasklist_lock);
71776+ rcu_read_unlock();
71777+
71778+ return ret;
71779+}
71780+#endif
71781+
71782+/* AUXV entries are filled via a descendant of search_binary_handler
71783+ after we've already applied the subject for the target
71784+*/
71785+int gr_acl_enable_at_secure(void)
71786+{
71787+ if (unlikely(!(gr_status & GR_READY)))
71788+ return 0;
71789+
71790+ if (current->acl->mode & GR_ATSECURE)
71791+ return 1;
71792+
71793+ return 0;
71794+}
71795+
71796+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
71797+{
71798+ struct task_struct *task = current;
71799+ struct dentry *dentry = file->f_path.dentry;
71800+ struct vfsmount *mnt = file->f_path.mnt;
71801+ struct acl_object_label *obj, *tmp;
71802+ struct acl_subject_label *subj;
71803+ unsigned int bufsize;
71804+ int is_not_root;
71805+ char *path;
71806+ dev_t dev = __get_dev(dentry);
71807+
71808+ if (unlikely(!(gr_status & GR_READY)))
71809+ return 1;
71810+
71811+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71812+ return 1;
71813+
71814+	/* ignore fs-internal (IS_PRIVATE) inodes ("ignore Eric Biederman") */
71815+ if (IS_PRIVATE(dentry->d_inode))
71816+ return 1;
71817+
71818+ subj = task->acl;
71819+ read_lock(&gr_inode_lock);
71820+ do {
71821+ obj = lookup_acl_obj_label(ino, dev, subj);
71822+ if (obj != NULL) {
71823+ read_unlock(&gr_inode_lock);
71824+ return (obj->mode & GR_FIND) ? 1 : 0;
71825+ }
71826+ } while ((subj = subj->parent_subject));
71827+ read_unlock(&gr_inode_lock);
71828+
71829+	/* this is purely an optimization: we're looking up the object
71830+	   for the directory we're doing a readdir on;
71831+	   if it's possible for any globbed object to match the entry we're
71832+	   filling into the directory, then the object we find here will be
71833+	   an anchor point with attached globbed objects
71834+	*/
71835+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
71836+ if (obj->globbed == NULL)
71837+ return (obj->mode & GR_FIND) ? 1 : 0;
71838+
71839+ is_not_root = ((obj->filename[0] == '/') &&
71840+ (obj->filename[1] == '\0')) ? 0 : 1;
71841+ bufsize = PAGE_SIZE - namelen - is_not_root;
71842+
71843+	/* one unsigned compare catches both bufsize > PAGE_SIZE and bufsize == 0 (0 - 1 wraps) */
71844+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
71845+ return 1;
71846+
71847+ preempt_disable();
71848+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71849+ bufsize);
71850+
71851+ bufsize = strlen(path);
71852+
71853+ /* if base is "/", don't append an additional slash */
71854+ if (is_not_root)
71855+ *(path + bufsize) = '/';
71856+ memcpy(path + bufsize + is_not_root, name, namelen);
71857+ *(path + bufsize + namelen + is_not_root) = '\0';
71858+
71859+ tmp = obj->globbed;
71860+ while (tmp) {
71861+ if (!glob_match(tmp->filename, path)) {
71862+ preempt_enable();
71863+ return (tmp->mode & GR_FIND) ? 1 : 0;
71864+ }
71865+ tmp = tmp->next;
71866+ }
71867+ preempt_enable();
71868+ return (obj->mode & GR_FIND) ? 1 : 0;
71869+}
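/*
 * A standalone sketch of the filldir path assembly above: append the entry
 * name to the directory path, inserting a slash unless the base is "/",
 * then test the result against each glob.  fnmatch(3) stands in for the
 * kernel-side glob_match(); the assumption here is the same 0-means-match
 * convention that the !glob_match() call site above implies.  The globs
 * and paths are invented:
 */
#include <stdio.h>
#include <string.h>
#include <fnmatch.h>

#define BUFSZ 4096

/* Build "<base>/<name>" (no extra slash when base is "/"). */
static void join_path(char *buf, const char *base, const char *name)
{
	size_t blen = strlen(base);
	int is_not_root = !(base[0] == '/' && base[1] == '\0');

	memcpy(buf, base, blen);
	if (is_not_root)
		buf[blen++] = '/';
	strcpy(buf + blen, name);
}

int main(void)
{
	char path[BUFSZ];
	const char *globs[] = { "/tmp/*.log", "/tmp/sock*" };
	unsigned i;

	join_path(path, "/tmp", "sock0");
	for (i = 0; i < 2; i++)
		if (fnmatch(globs[i], path, 0) == 0)       /* 0 == match */
			printf("matched %s\n", globs[i]);
	return 0;
}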
71870+
71871+void gr_put_exec_file(struct task_struct *task)
71872+{
71873+ struct file *filp;
71874+
71875+ write_lock(&grsec_exec_file_lock);
71876+ filp = task->exec_file;
71877+ task->exec_file = NULL;
71878+ write_unlock(&grsec_exec_file_lock);
71879+
71880+ if (filp)
71881+ fput(filp);
71882+
71883+ return;
71884+}
71885+
71886+
71887+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
71888+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
71889+#endif
71890+#ifdef CONFIG_SECURITY
71891+EXPORT_SYMBOL_GPL(gr_check_user_change);
71892+EXPORT_SYMBOL_GPL(gr_check_group_change);
71893+#endif
71894+
71895diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
71896new file mode 100644
71897index 0000000..18ffbbd
71898--- /dev/null
71899+++ b/grsecurity/gracl_alloc.c
71900@@ -0,0 +1,105 @@
71901+#include <linux/kernel.h>
71902+#include <linux/mm.h>
71903+#include <linux/slab.h>
71904+#include <linux/vmalloc.h>
71905+#include <linux/gracl.h>
71906+#include <linux/grsecurity.h>
71907+
71908+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
71909+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
71910+
71911+static __inline__ int
71912+alloc_pop(void)
71913+{
71914+ if (current_alloc_state->alloc_stack_next == 1)
71915+ return 0;
71916+
71917+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
71918+
71919+ current_alloc_state->alloc_stack_next--;
71920+
71921+ return 1;
71922+}
71923+
71924+static __inline__ int
71925+alloc_push(void *buf)
71926+{
71927+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
71928+ return 1;
71929+
71930+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
71931+
71932+ current_alloc_state->alloc_stack_next++;
71933+
71934+ return 0;
71935+}
71936+
71937+void *
71938+acl_alloc(unsigned long len)
71939+{
71940+ void *ret = NULL;
71941+
71942+ if (!len || len > PAGE_SIZE)
71943+ goto out;
71944+
71945+ ret = kmalloc(len, GFP_KERNEL);
71946+
71947+ if (ret) {
71948+ if (alloc_push(ret)) {
71949+ kfree(ret);
71950+ ret = NULL;
71951+ }
71952+ }
71953+
71954+out:
71955+ return ret;
71956+}
71957+
71958+void *
71959+acl_alloc_num(unsigned long num, unsigned long len)
71960+{
71961+ if (!len || (num > (PAGE_SIZE / len)))
71962+ return NULL;
71963+
71964+ return acl_alloc(num * len);
71965+}
71966+
71967+void
71968+acl_free_all(void)
71969+{
71970+ if (!current_alloc_state->alloc_stack)
71971+ return;
71972+
71973+ while (alloc_pop()) ;
71974+
71975+ if (current_alloc_state->alloc_stack) {
71976+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
71977+ kfree(current_alloc_state->alloc_stack);
71978+ else
71979+ vfree(current_alloc_state->alloc_stack);
71980+ }
71981+
71982+ current_alloc_state->alloc_stack = NULL;
71983+ current_alloc_state->alloc_stack_size = 1;
71984+ current_alloc_state->alloc_stack_next = 1;
71985+
71986+ return;
71987+}
71988+
71989+int
71990+acl_alloc_stack_init(unsigned long size)
71991+{
71992+ if ((size * sizeof (void *)) <= PAGE_SIZE)
71993+ current_alloc_state->alloc_stack =
71994+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
71995+ else
71996+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
71997+
71998+ current_alloc_state->alloc_stack_size = size;
71999+ current_alloc_state->alloc_stack_next = 1;
72000+
72001+ if (!current_alloc_state->alloc_stack)
72002+ return 0;
72003+ else
72004+ return 1;
72005+}
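/*
 * A standalone model of the allocator in this file: every allocation is
 * pushed onto a stack so an entire policy can be torn down in one sweep by
 * acl_free_all().  malloc/free stand in for kmalloc/kfree, and the stack
 * is fixed-size here rather than kmalloc/vmalloc-selected by size:
 */
#include <stdio.h>
#include <stdlib.h>

#define STACK_SLOTS 64

static void *stack[STACK_SLOTS];
static unsigned next = 1;              /* 1-based cursor, as in the patch */

static void *tracked_alloc(size_t len)
{
	void *p;

	if (len == 0 || next > STACK_SLOTS)
		return NULL;
	p = malloc(len);
	if (p)
		stack[next++ - 1] = p;  /* push at index next-1 */
	return p;
}

static void free_all(void)
{
	while (next > 1)                /* pop back down to the empty state */
		free(stack[--next - 1]);
}

int main(void)
{
	tracked_alloc(16);
	tracked_alloc(32);
	free_all();
	printf("next=%u\n", next);      /* back to 1 */
	return 0;
}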
72006diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72007new file mode 100644
72008index 0000000..1a94c11
72009--- /dev/null
72010+++ b/grsecurity/gracl_cap.c
72011@@ -0,0 +1,127 @@
72012+#include <linux/kernel.h>
72013+#include <linux/module.h>
72014+#include <linux/sched.h>
72015+#include <linux/gracl.h>
72016+#include <linux/grsecurity.h>
72017+#include <linux/grinternal.h>
72018+
72019+extern const char *captab_log[];
72020+extern int captab_log_entries;
72021+
72022+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72023+{
72024+ struct acl_subject_label *curracl;
72025+
72026+ if (!gr_acl_is_enabled())
72027+ return 1;
72028+
72029+ curracl = task->acl;
72030+
72031+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72032+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72033+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72034+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72035+ gr_to_filename(task->exec_file->f_path.dentry,
72036+ task->exec_file->f_path.mnt) : curracl->filename,
72037+ curracl->filename, 0UL,
72038+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72039+ return 1;
72040+ }
72041+
72042+ return 0;
72043+}
72044+
72045+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72046+{
72047+ struct acl_subject_label *curracl;
72048+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72049+ kernel_cap_t cap_audit = __cap_empty_set;
72050+
72051+ if (!gr_acl_is_enabled())
72052+ return 1;
72053+
72054+ curracl = task->acl;
72055+
72056+ cap_drop = curracl->cap_lower;
72057+ cap_mask = curracl->cap_mask;
72058+ cap_audit = curracl->cap_invert_audit;
72059+
72060+ while ((curracl = curracl->parent_subject)) {
72061+		/* if the cap isn't yet decided by the computed mask but is specified by the
72062+		   current level subject, record that subject's decision: mark the cap as
72063+		   decided in the computed mask and, if that subject lowers it, add it to
72064+		   the dropped set; the first subject in the chain to mention a cap wins
72065+		*/
72066+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72067+ cap_raise(cap_mask, cap);
72068+ if (cap_raised(curracl->cap_lower, cap))
72069+ cap_raise(cap_drop, cap);
72070+ if (cap_raised(curracl->cap_invert_audit, cap))
72071+ cap_raise(cap_audit, cap);
72072+ }
72073+ }
72074+
72075+ if (!cap_raised(cap_drop, cap)) {
72076+ if (cap_raised(cap_audit, cap))
72077+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72078+ return 1;
72079+ }
72080+
72081+	/* in the general case, only learn the capability use if the process
72082+	   actually has the capability; the two uses of gr_learn_cap in sys.c
72083+	   are an exception to this rule, to ensure any role transition
72084+	   involves what the full-learned policy believes of a privileged process
72085+	*/
72086+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72087+ return 1;
72088+
72089+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72090+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72091+
72092+ return 0;
72093+}
72094+
72095+int
72096+gr_acl_is_capable(const int cap)
72097+{
72098+ return gr_task_acl_is_capable(current, current_cred(), cap);
72099+}
72100+
72101+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72102+{
72103+ struct acl_subject_label *curracl;
72104+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72105+
72106+ if (!gr_acl_is_enabled())
72107+ return 1;
72108+
72109+ curracl = task->acl;
72110+
72111+ cap_drop = curracl->cap_lower;
72112+ cap_mask = curracl->cap_mask;
72113+
72114+ while ((curracl = curracl->parent_subject)) {
72115+		/* if the cap isn't yet decided by the computed mask but is specified by the
72116+		   current level subject, record that subject's decision: mark the cap as
72117+		   decided in the computed mask and, if that subject lowers it, add it to
72118+		   the dropped set; the first subject in the chain to mention a cap wins
72119+		*/
72120+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72121+ cap_raise(cap_mask, cap);
72122+ if (cap_raised(curracl->cap_lower, cap))
72123+ cap_raise(cap_drop, cap);
72124+ }
72125+ }
72126+
72127+ if (!cap_raised(cap_drop, cap))
72128+ return 1;
72129+
72130+ return 0;
72131+}
72132+
72133+int
72134+gr_acl_is_capable_nolog(const int cap)
72135+{
72136+ return gr_task_acl_is_capable_nolog(current, cap);
72137+}
72138+
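/*
 * A standalone model of the parent-subject walk in
 * gr_task_acl_is_capable{,_nolog} above: the first subject in the chain to
 * mention a capability (via its mask bit) decides it, and deeper ancestors
 * cannot override that decision.  Plain unsigned bitmasks stand in for
 * kernel_cap_t:
 */
#include <stdio.h>

struct subj {
	unsigned mask;    /* caps this subject says anything about */
	unsigned lower;   /* caps it drops */
	struct subj *parent;
};

static int cap_allowed(const struct subj *s, unsigned capbit)
{
	unsigned mask = s->mask, drop = s->lower;

	for (s = s->parent; s; s = s->parent) {
		if (!(mask & capbit) && (s->mask & capbit)) {
			mask |= capbit;            /* first mention wins */
			if (s->lower & capbit)
				drop |= capbit;
		}
	}
	return !(drop & capbit);
}

int main(void)
{
	struct subj root  = { 0x1, 0x1, NULL };   /* mentions and drops cap 0 */
	struct subj child = { 0x0, 0x0, &root };  /* silent: inherits the drop */

	printf("%s\n", cap_allowed(&child, 0x1) ? "allow" : "deny");  /* deny */
	return 0;
}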
72139diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72140new file mode 100644
72141index 0000000..a43dd06
72142--- /dev/null
72143+++ b/grsecurity/gracl_compat.c
72144@@ -0,0 +1,269 @@
72145+#include <linux/kernel.h>
72146+#include <linux/gracl.h>
72147+#include <linux/compat.h>
72148+#include <linux/gracl_compat.h>
72149+
72150+#include <asm/uaccess.h>
72151+
72152+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72153+{
72154+ struct gr_arg_wrapper_compat uwrapcompat;
72155+
72156+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72157+ return -EFAULT;
72158+
72159+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72160+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72161+ return -EINVAL;
72162+
72163+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72164+ uwrap->version = uwrapcompat.version;
72165+ uwrap->size = sizeof(struct gr_arg);
72166+
72167+ return 0;
72168+}
72169+
72170+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72171+{
72172+ struct gr_arg_compat argcompat;
72173+
72174+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72175+ return -EFAULT;
72176+
72177+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72178+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72179+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72180+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72181+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72182+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72183+
72184+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72185+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72186+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72187+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72188+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72189+ arg->segv_device = argcompat.segv_device;
72190+ arg->segv_inode = argcompat.segv_inode;
72191+ arg->segv_uid = argcompat.segv_uid;
72192+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72193+ arg->mode = argcompat.mode;
72194+
72195+ return 0;
72196+}
72197+
72198+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72199+{
72200+ struct acl_object_label_compat objcompat;
72201+
72202+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72203+ return -EFAULT;
72204+
72205+ obj->filename = compat_ptr(objcompat.filename);
72206+ obj->inode = objcompat.inode;
72207+ obj->device = objcompat.device;
72208+ obj->mode = objcompat.mode;
72209+
72210+ obj->nested = compat_ptr(objcompat.nested);
72211+ obj->globbed = compat_ptr(objcompat.globbed);
72212+
72213+ obj->prev = compat_ptr(objcompat.prev);
72214+ obj->next = compat_ptr(objcompat.next);
72215+
72216+ return 0;
72217+}
72218+
72219+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72220+{
72221+ unsigned int i;
72222+ struct acl_subject_label_compat subjcompat;
72223+
72224+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72225+ return -EFAULT;
72226+
72227+ subj->filename = compat_ptr(subjcompat.filename);
72228+ subj->inode = subjcompat.inode;
72229+ subj->device = subjcompat.device;
72230+ subj->mode = subjcompat.mode;
72231+ subj->cap_mask = subjcompat.cap_mask;
72232+ subj->cap_lower = subjcompat.cap_lower;
72233+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72234+
72235+ for (i = 0; i < GR_NLIMITS; i++) {
72236+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72237+ subj->res[i].rlim_cur = RLIM_INFINITY;
72238+ else
72239+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72240+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72241+ subj->res[i].rlim_max = RLIM_INFINITY;
72242+ else
72243+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72244+ }
72245+ subj->resmask = subjcompat.resmask;
72246+
72247+ subj->user_trans_type = subjcompat.user_trans_type;
72248+ subj->group_trans_type = subjcompat.group_trans_type;
72249+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72250+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72251+ subj->user_trans_num = subjcompat.user_trans_num;
72252+ subj->group_trans_num = subjcompat.group_trans_num;
72253+
72254+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72255+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72256+ subj->ip_type = subjcompat.ip_type;
72257+ subj->ips = compat_ptr(subjcompat.ips);
72258+ subj->ip_num = subjcompat.ip_num;
72259+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72260+
72261+ subj->crashes = subjcompat.crashes;
72262+ subj->expires = subjcompat.expires;
72263+
72264+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72265+ subj->hash = compat_ptr(subjcompat.hash);
72266+ subj->prev = compat_ptr(subjcompat.prev);
72267+ subj->next = compat_ptr(subjcompat.next);
72268+
72269+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72270+ subj->obj_hash_size = subjcompat.obj_hash_size;
72271+ subj->pax_flags = subjcompat.pax_flags;
72272+
72273+ return 0;
72274+}
72275+
72276+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72277+{
72278+ struct acl_role_label_compat rolecompat;
72279+
72280+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72281+ return -EFAULT;
72282+
72283+ role->rolename = compat_ptr(rolecompat.rolename);
72284+ role->uidgid = rolecompat.uidgid;
72285+ role->roletype = rolecompat.roletype;
72286+
72287+ role->auth_attempts = rolecompat.auth_attempts;
72288+ role->expires = rolecompat.expires;
72289+
72290+ role->root_label = compat_ptr(rolecompat.root_label);
72291+ role->hash = compat_ptr(rolecompat.hash);
72292+
72293+ role->prev = compat_ptr(rolecompat.prev);
72294+ role->next = compat_ptr(rolecompat.next);
72295+
72296+ role->transitions = compat_ptr(rolecompat.transitions);
72297+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72298+ role->domain_children = compat_ptr(rolecompat.domain_children);
72299+ role->domain_child_num = rolecompat.domain_child_num;
72300+
72301+ role->umask = rolecompat.umask;
72302+
72303+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72304+ role->subj_hash_size = rolecompat.subj_hash_size;
72305+
72306+ return 0;
72307+}
72308+
72309+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72310+{
72311+ struct role_allowed_ip_compat roleip_compat;
72312+
72313+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72314+ return -EFAULT;
72315+
72316+ roleip->addr = roleip_compat.addr;
72317+ roleip->netmask = roleip_compat.netmask;
72318+
72319+ roleip->prev = compat_ptr(roleip_compat.prev);
72320+ roleip->next = compat_ptr(roleip_compat.next);
72321+
72322+ return 0;
72323+}
72324+
72325+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72326+{
72327+ struct role_transition_compat trans_compat;
72328+
72329+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72330+ return -EFAULT;
72331+
72332+ trans->rolename = compat_ptr(trans_compat.rolename);
72333+
72334+ trans->prev = compat_ptr(trans_compat.prev);
72335+ trans->next = compat_ptr(trans_compat.next);
72336+
72337+	return 0;
72338+}
72340+
72341+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72342+{
72343+ struct gr_hash_struct_compat hash_compat;
72344+
72345+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72346+ return -EFAULT;
72347+
72348+ hash->table = compat_ptr(hash_compat.table);
72349+ hash->nametable = compat_ptr(hash_compat.nametable);
72350+ hash->first = compat_ptr(hash_compat.first);
72351+
72352+ hash->table_size = hash_compat.table_size;
72353+ hash->used_size = hash_compat.used_size;
72354+
72355+ hash->type = hash_compat.type;
72356+
72357+ return 0;
72358+}
72359+
72360+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72361+{
72362+ compat_uptr_t ptrcompat;
72363+
72364+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72365+ return -EFAULT;
72366+
72367+ *(void **)ptr = compat_ptr(ptrcompat);
72368+
72369+ return 0;
72370+}
72371+
72372+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72373+{
72374+ struct acl_ip_label_compat ip_compat;
72375+
72376+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72377+ return -EFAULT;
72378+
72379+ ip->iface = compat_ptr(ip_compat.iface);
72380+ ip->addr = ip_compat.addr;
72381+ ip->netmask = ip_compat.netmask;
72382+ ip->low = ip_compat.low;
72383+ ip->high = ip_compat.high;
72384+ ip->mode = ip_compat.mode;
72385+ ip->type = ip_compat.type;
72386+
72387+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72388+
72389+ ip->prev = compat_ptr(ip_compat.prev);
72390+ ip->next = compat_ptr(ip_compat.next);
72391+
72392+ return 0;
72393+}
72394+
72395+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72396+{
72397+ struct sprole_pw_compat pw_compat;
72398+
72399+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72400+ return -EFAULT;
72401+
72402+ pw->rolename = compat_ptr(pw_compat.rolename);
72403+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72404+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72405+
72406+ return 0;
72407+}
72408+
72409+size_t get_gr_arg_wrapper_size_compat(void)
72410+{
72411+ return sizeof(struct gr_arg_wrapper_compat);
72412+}
72413+
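Each helper above follows the same pattern: copy the packed 32-bit userspace image of the structure, then rebuild the native structure field by field, widening every compat_uptr_t through compat_ptr(). A freestanding sketch of that pattern; the node structures, and memcpy() standing in for copy_from_user(), are illustrative assumptions only.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* toy stand-ins for compat_uptr_t and compat_ptr() */
typedef uint32_t compat_uptr_t;
static void *compat_ptr(compat_uptr_t p) { return (void *)(uintptr_t)p; }

/* hypothetical 32-bit-userspace layout vs. native layout */
struct node_compat { compat_uptr_t name; uint32_t mode; compat_uptr_t next; };
struct node        { char *name;         uint32_t mode; struct node *next; };

/* same shape as copy_acl_object_label_compat(): copy the compat image,
   then translate field by field, widening every pointer */
static int copy_node_compat(struct node *n, const void *userp)
{
	struct node_compat c;

	memcpy(&c, userp, sizeof(c));	/* copy_from_user() in the kernel */
	n->name = compat_ptr(c.name);
	n->mode = c.mode;
	n->next = compat_ptr(c.next);
	return 0;
}

int main(void)
{
	struct node_compat img = { .name = 0, .mode = 0644, .next = 0 };
	struct node n;

	copy_node_compat(&n, &img);
	printf("mode=%o next=%p\n", n.mode, (void *)n.next);
	return 0;
}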
72414diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72415new file mode 100644
72416index 0000000..8ee8e4f
72417--- /dev/null
72418+++ b/grsecurity/gracl_fs.c
72419@@ -0,0 +1,447 @@
72420+#include <linux/kernel.h>
72421+#include <linux/sched.h>
72422+#include <linux/types.h>
72423+#include <linux/fs.h>
72424+#include <linux/file.h>
72425+#include <linux/stat.h>
72426+#include <linux/grsecurity.h>
72427+#include <linux/grinternal.h>
72428+#include <linux/gracl.h>
72429+
72430+umode_t
72431+gr_acl_umask(void)
72432+{
72433+ if (unlikely(!gr_acl_is_enabled()))
72434+ return 0;
72435+
72436+ return current->role->umask;
72437+}
72438+
72439+__u32
72440+gr_acl_handle_hidden_file(const struct dentry * dentry,
72441+ const struct vfsmount * mnt)
72442+{
72443+ __u32 mode;
72444+
72445+ if (unlikely(d_is_negative(dentry)))
72446+ return GR_FIND;
72447+
72448+ mode =
72449+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72450+
72451+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72452+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72453+ return mode;
72454+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72455+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72456+ return 0;
72457+ } else if (unlikely(!(mode & GR_FIND)))
72458+ return 0;
72459+
72460+ return GR_FIND;
72461+}
72462+
72463+__u32
72464+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72465+ int acc_mode)
72466+{
72467+ __u32 reqmode = GR_FIND;
72468+ __u32 mode;
72469+
72470+ if (unlikely(d_is_negative(dentry)))
72471+ return reqmode;
72472+
72473+ if (acc_mode & MAY_APPEND)
72474+ reqmode |= GR_APPEND;
72475+ else if (acc_mode & MAY_WRITE)
72476+ reqmode |= GR_WRITE;
72477+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72478+ reqmode |= GR_READ;
72479+
72480+ mode =
72481+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72482+ mnt);
72483+
72484+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72485+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72486+ reqmode & GR_READ ? " reading" : "",
72487+ reqmode & GR_WRITE ? " writing" : reqmode &
72488+ GR_APPEND ? " appending" : "");
72489+ return reqmode;
72490+ } else
72491+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72492+ {
72493+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72494+ reqmode & GR_READ ? " reading" : "",
72495+ reqmode & GR_WRITE ? " writing" : reqmode &
72496+ GR_APPEND ? " appending" : "");
72497+ return 0;
72498+ } else if (unlikely((mode & reqmode) != reqmode))
72499+ return 0;
72500+
72501+ return reqmode;
72502+}
72503+
72504+__u32
72505+gr_acl_handle_creat(const struct dentry * dentry,
72506+ const struct dentry * p_dentry,
72507+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72508+ const int imode)
72509+{
72510+ __u32 reqmode = GR_WRITE | GR_CREATE;
72511+ __u32 mode;
72512+
72513+ if (acc_mode & MAY_APPEND)
72514+ reqmode |= GR_APPEND;
72515+ // if a directory was required or the directory already exists, then
72516+ // don't count this open as a read
72517+ if ((acc_mode & MAY_READ) &&
72518+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72519+ reqmode |= GR_READ;
72520+ if ((open_flags & O_CREAT) &&
72521+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72522+ reqmode |= GR_SETID;
72523+
72524+ mode =
72525+ gr_check_create(dentry, p_dentry, p_mnt,
72526+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72527+
72528+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72529+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72530+ reqmode & GR_READ ? " reading" : "",
72531+ reqmode & GR_WRITE ? " writing" : reqmode &
72532+ GR_APPEND ? " appending" : "");
72533+ return reqmode;
72534+ } else
72535+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72536+ {
72537+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72538+ reqmode & GR_READ ? " reading" : "",
72539+ reqmode & GR_WRITE ? " writing" : reqmode &
72540+ GR_APPEND ? " appending" : "");
72541+ return 0;
72542+ } else if (unlikely((mode & reqmode) != reqmode))
72543+ return 0;
72544+
72545+ return reqmode;
72546+}
72547+
72548+__u32
72549+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72550+ const int fmode)
72551+{
72552+ __u32 mode, reqmode = GR_FIND;
72553+
72554+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72555+ reqmode |= GR_EXEC;
72556+ if (fmode & S_IWOTH)
72557+ reqmode |= GR_WRITE;
72558+ if (fmode & S_IROTH)
72559+ reqmode |= GR_READ;
72560+
72561+ mode =
72562+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72563+ mnt);
72564+
72565+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72566+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72567+ reqmode & GR_READ ? " reading" : "",
72568+ reqmode & GR_WRITE ? " writing" : "",
72569+ reqmode & GR_EXEC ? " executing" : "");
72570+ return reqmode;
72571+ } else
72572+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72573+ {
72574+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72575+ reqmode & GR_READ ? " reading" : "",
72576+ reqmode & GR_WRITE ? " writing" : "",
72577+ reqmode & GR_EXEC ? " executing" : "");
72578+ return 0;
72579+ } else if (unlikely((mode & reqmode) != reqmode))
72580+ return 0;
72581+
72582+ return reqmode;
72583+}
72584+
72585+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72586+{
72587+ __u32 mode;
72588+
72589+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72590+
72591+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72592+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72593+ return mode;
72594+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72595+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72596+ return 0;
72597+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72598+ return 0;
72599+
72600+ return (reqmode);
72601+}
72602+
72603+__u32
72604+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72605+{
72606+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72607+}
72608+
72609+__u32
72610+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72611+{
72612+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72613+}
72614+
72615+__u32
72616+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72617+{
72618+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72619+}
72620+
72621+__u32
72622+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72623+{
72624+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72625+}
72626+
72627+__u32
72628+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72629+ umode_t *modeptr)
72630+{
72631+ umode_t mode;
72632+
72633+ *modeptr &= ~gr_acl_umask();
72634+ mode = *modeptr;
72635+
72636+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72637+ return 1;
72638+
72639+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72640+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72641+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72642+ GR_CHMOD_ACL_MSG);
72643+ } else {
72644+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72645+ }
72646+}
72647+
72648+__u32
72649+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72650+{
72651+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72652+}
72653+
72654+__u32
72655+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72656+{
72657+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72658+}
72659+
72660+__u32
72661+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
72662+{
72663+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
72664+}
72665+
72666+__u32
72667+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
72668+{
72669+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
72670+}
72671+
72672+__u32
72673+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
72674+{
72675+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
72676+ GR_UNIXCONNECT_ACL_MSG);
72677+}
72678+
72679+/* hardlinks require at minimum create and link permission;
72680+   any additional privilege required depends on the
72681+   privilege of the file being linked to
72682+*/
72683+__u32
72684+gr_acl_handle_link(const struct dentry * new_dentry,
72685+ const struct dentry * parent_dentry,
72686+ const struct vfsmount * parent_mnt,
72687+ const struct dentry * old_dentry,
72688+ const struct vfsmount * old_mnt, const struct filename *to)
72689+{
72690+ __u32 mode;
72691+ __u32 needmode = GR_CREATE | GR_LINK;
72692+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
72693+
72694+ mode =
72695+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
72696+ old_mnt);
72697+
72698+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
72699+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72700+ return mode;
72701+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72702+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72703+ return 0;
72704+ } else if (unlikely((mode & needmode) != needmode))
72705+ return 0;
72706+
72707+ return 1;
72708+}
72709+
72710+__u32
72711+gr_acl_handle_symlink(const struct dentry * new_dentry,
72712+ const struct dentry * parent_dentry,
72713+ const struct vfsmount * parent_mnt, const struct filename *from)
72714+{
72715+ __u32 needmode = GR_WRITE | GR_CREATE;
72716+ __u32 mode;
72717+
72718+ mode =
72719+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
72720+ GR_CREATE | GR_AUDIT_CREATE |
72721+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
72722+
72723+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
72724+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72725+ return mode;
72726+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72727+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72728+ return 0;
72729+ } else if (unlikely((mode & needmode) != needmode))
72730+ return 0;
72731+
72732+ return (GR_WRITE | GR_CREATE);
72733+}
72734+
72735+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
72736+{
72737+ __u32 mode;
72738+
72739+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72740+
72741+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72742+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
72743+ return mode;
72744+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72745+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
72746+ return 0;
72747+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72748+ return 0;
72749+
72750+ return (reqmode);
72751+}
72752+
72753+__u32
72754+gr_acl_handle_mknod(const struct dentry * new_dentry,
72755+ const struct dentry * parent_dentry,
72756+ const struct vfsmount * parent_mnt,
72757+ const int mode)
72758+{
72759+ __u32 reqmode = GR_WRITE | GR_CREATE;
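+	/* a node is setid-privileged if it is setuid, or setgid with the
+	   group-exec bit set (setgid without group-exec denotes mandatory
+	   locking rather than privilege) */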
72760+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72761+ reqmode |= GR_SETID;
72762+
72763+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72764+ reqmode, GR_MKNOD_ACL_MSG);
72765+}
72766+
72767+__u32
72768+gr_acl_handle_mkdir(const struct dentry *new_dentry,
72769+ const struct dentry *parent_dentry,
72770+ const struct vfsmount *parent_mnt)
72771+{
72772+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72773+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
72774+}
72775+
72776+#define RENAME_CHECK_SUCCESS(old, new) \
72777+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
72778+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
72779+
72780+int
72781+gr_acl_handle_rename(struct dentry *new_dentry,
72782+ struct dentry *parent_dentry,
72783+ const struct vfsmount *parent_mnt,
72784+ struct dentry *old_dentry,
72785+ struct inode *old_parent_inode,
72786+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
72787+{
72788+ __u32 comp1, comp2;
72789+ int error = 0;
72790+
72791+ if (unlikely(!gr_acl_is_enabled()))
72792+ return 0;
72793+
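+	/* three cases follow: RENAME_EXCHANGE checks read/write on both ends;
+	   a nonexistent target checks create on the new path and delete on the
+	   old; an existing target additionally checks create and delete on the
+	   new path */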
72794+ if (flags & RENAME_EXCHANGE) {
72795+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72796+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72797+ GR_SUPPRESS, parent_mnt);
72798+ comp2 =
72799+ gr_search_file(old_dentry,
72800+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72801+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72802+ } else if (d_is_negative(new_dentry)) {
72803+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
72804+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
72805+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
72806+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
72807+ GR_DELETE | GR_AUDIT_DELETE |
72808+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72809+ GR_SUPPRESS, old_mnt);
72810+ } else {
72811+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72812+ GR_CREATE | GR_DELETE |
72813+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
72814+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72815+ GR_SUPPRESS, parent_mnt);
72816+ comp2 =
72817+ gr_search_file(old_dentry,
72818+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72819+ GR_DELETE | GR_AUDIT_DELETE |
72820+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72821+ }
72822+
72823+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
72824+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
72825+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72826+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
72827+ && !(comp2 & GR_SUPPRESS)) {
72828+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72829+ error = -EACCES;
72830+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
72831+ error = -EACCES;
72832+
72833+ return error;
72834+}
72835+
72836+void
72837+gr_acl_handle_exit(void)
72838+{
72839+ u16 id;
72840+ char *rolename;
72841+
72842+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
72843+ !(current->role->roletype & GR_ROLE_PERSIST))) {
72844+ id = current->acl_role_id;
72845+ rolename = current->role->rolename;
72846+ gr_set_acls(1);
72847+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
72848+ }
72849+
72850+ gr_put_exec_file(current);
72851+ return;
72852+}
72853+
72854+int
72855+gr_acl_handle_procpidmem(const struct task_struct *task)
72856+{
72857+ if (unlikely(!gr_acl_is_enabled()))
72858+ return 0;
72859+
72860+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
72861+ !(current->acl->mode & GR_POVERRIDE) &&
72862+ !(current->role->roletype & GR_ROLE_GOD))
72863+ return -EACCES;
72864+
72865+ return 0;
72866+}
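Most handlers in the file above funnel through the same three-way decision implemented by generic_fs_handler(): grant and audit, deny and log, or deny silently when the matched object carries the suppress flag. A compilable toy model of that decision; the mode bits and the fixed shift standing in for to_gr_audit() are made up for illustration.

#include <stdio.h>

/* toy mode bits; to_gr_audit() in the real code maps each request bit
   to its audit counterpart, modeled here as a fixed shift */
#define T_READ      0x1
#define T_WRITE     0x2
#define T_SUPPRESS  0x100
#define T_AUDITS    ((T_READ | T_WRITE) << 4)
#define to_audit(m) (((m) & (T_READ | T_WRITE)) << 4)

/* same outcomes as generic_fs_handler(): grant and log, deny and log,
   deny silently under the suppress flag, or grant silently */
static unsigned int handler(unsigned int granted, unsigned int reqmode)
{
	if ((granted & reqmode) == reqmode && (granted & T_AUDITS)) {
		puts("granted (audited)");
		return reqmode;
	} else if ((granted & reqmode) != reqmode && !(granted & T_SUPPRESS)) {
		puts("denied (logged)");
		return 0;
	} else if ((granted & reqmode) != reqmode) {
		return 0;		/* denied silently */
	}
	return reqmode;			/* granted silently */
}

int main(void)
{
	handler(T_READ | to_audit(T_READ), T_READ);	/* granted (audited) */
	handler(T_READ, T_READ | T_WRITE);		/* denied (logged) */
	handler(T_READ | T_SUPPRESS, T_READ | T_WRITE);	/* silent deny */
	return 0;
}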
72867diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
72868new file mode 100644
72869index 0000000..f056b81
72870--- /dev/null
72871+++ b/grsecurity/gracl_ip.c
72872@@ -0,0 +1,386 @@
72873+#include <linux/kernel.h>
72874+#include <asm/uaccess.h>
72875+#include <asm/errno.h>
72876+#include <net/sock.h>
72877+#include <linux/file.h>
72878+#include <linux/fs.h>
72879+#include <linux/net.h>
72880+#include <linux/in.h>
72881+#include <linux/skbuff.h>
72882+#include <linux/ip.h>
72883+#include <linux/udp.h>
72884+#include <linux/types.h>
72885+#include <linux/sched.h>
72886+#include <linux/netdevice.h>
72887+#include <linux/inetdevice.h>
72888+#include <linux/gracl.h>
72889+#include <linux/grsecurity.h>
72890+#include <linux/grinternal.h>
72891+
72892+#define GR_BIND 0x01
72893+#define GR_CONNECT 0x02
72894+#define GR_INVERT 0x04
72895+#define GR_BINDOVERRIDE 0x08
72896+#define GR_CONNECTOVERRIDE 0x10
72897+#define GR_SOCK_FAMILY 0x20
72898+
72899+static const char * gr_protocols[IPPROTO_MAX] = {
72900+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
72901+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
72902+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
72903+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
72904+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
72905+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
72906+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
72907+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
72908+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
72909+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
72910+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
72911+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
72912+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
72913+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
72914+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
72915+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
72916+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
72917+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
72918+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
72919+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
72920+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
72921+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
72922+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
72923+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
72924+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
72925+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
72926+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
72927+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
72928+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
72929+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
72930+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
72931+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
72932+ };
72933+
72934+static const char * gr_socktypes[SOCK_MAX] = {
72935+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
72936+ "unknown:7", "unknown:8", "unknown:9", "packet"
72937+ };
72938+
72939+static const char * gr_sockfamilies[AF_MAX+1] = {
72940+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
72941+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
72942+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
72943+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
72944+ };
72945+
72946+const char *
72947+gr_proto_to_name(unsigned char proto)
72948+{
72949+ return gr_protocols[proto];
72950+}
72951+
72952+const char *
72953+gr_socktype_to_name(unsigned char type)
72954+{
72955+ return gr_socktypes[type];
72956+}
72957+
72958+const char *
72959+gr_sockfamily_to_name(unsigned char family)
72960+{
72961+ return gr_sockfamilies[family];
72962+}
72963+
72964+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
72965+
72966+int
72967+gr_search_socket(const int domain, const int type, const int protocol)
72968+{
72969+ struct acl_subject_label *curr;
72970+ const struct cred *cred = current_cred();
72971+
72972+ if (unlikely(!gr_acl_is_enabled()))
72973+ goto exit;
72974+
72975+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
72976+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
72977+ goto exit; // let the kernel handle it
72978+
72979+ curr = current->acl;
72980+
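+	/* sock_families is a bitset of allowed address families:
+	   word = family / 32, bit = family % 32 */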
72981+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
72982+		/* the family is allowed; if this is PF_INET, allow it only if
72983+		   the extra sock type/protocol checks pass */
72984+ if (domain == PF_INET)
72985+ goto inet_check;
72986+ goto exit;
72987+ } else {
72988+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
72989+ __u32 fakeip = 0;
72990+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
72991+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
72992+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
72993+ gr_to_filename(current->exec_file->f_path.dentry,
72994+ current->exec_file->f_path.mnt) :
72995+ curr->filename, curr->filename,
72996+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
72997+ &current->signal->saved_ip);
72998+ goto exit;
72999+ }
73000+ goto exit_fail;
73001+ }
73002+
73003+inet_check:
73004+ /* the rest of this checking is for IPv4 only */
73005+ if (!curr->ips)
73006+ goto exit;
73007+
73008+ if ((curr->ip_type & (1U << type)) &&
73009+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73010+ goto exit;
73011+
73012+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73013+		/* we don't place ACLs on raw sockets, and sometimes
73014+		   dgram/ip sockets are opened for ioctl rather than
73015+		   bind/connect, so we'll fake a bind learn log */
73016+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73017+ __u32 fakeip = 0;
73018+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73019+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73020+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73021+ gr_to_filename(current->exec_file->f_path.dentry,
73022+ current->exec_file->f_path.mnt) :
73023+ curr->filename, curr->filename,
73024+ &fakeip, 0, type,
73025+ protocol, GR_CONNECT, &current->signal->saved_ip);
73026+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73027+ __u32 fakeip = 0;
73028+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73029+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73030+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73031+ gr_to_filename(current->exec_file->f_path.dentry,
73032+ current->exec_file->f_path.mnt) :
73033+ curr->filename, curr->filename,
73034+ &fakeip, 0, type,
73035+ protocol, GR_BIND, &current->signal->saved_ip);
73036+ }
73037+ /* we'll log when they use connect or bind */
73038+ goto exit;
73039+ }
73040+
73041+exit_fail:
73042+ if (domain == PF_INET)
73043+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73044+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73045+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73046+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73047+ gr_socktype_to_name(type), protocol);
73048+
73049+ return 0;
73050+exit:
73051+ return 1;
73052+}
73053+
73054+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73055+{
73056+ if ((ip->mode & mode) &&
73057+ (ip_port >= ip->low) &&
73058+ (ip_port <= ip->high) &&
73059+ ((ntohl(ip_addr) & our_netmask) ==
73060+ (ntohl(our_addr) & our_netmask))
73061+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73062+ && (ip->type & (1U << type))) {
73063+ if (ip->mode & GR_INVERT)
73064+ return 2; // specifically denied
73065+ else
73066+ return 1; // allowed
73067+ }
73068+
73069+ return 0; // not specifically allowed, may continue parsing
73070+}
73071+
73072+static int
73073+gr_search_connectbind(const int full_mode, struct sock *sk,
73074+ struct sockaddr_in *addr, const int type)
73075+{
73076+ char iface[IFNAMSIZ] = {0};
73077+ struct acl_subject_label *curr;
73078+ struct acl_ip_label *ip;
73079+ struct inet_sock *isk;
73080+ struct net_device *dev;
73081+ struct in_device *idev;
73082+ unsigned long i;
73083+ int ret;
73084+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73085+ __u32 ip_addr = 0;
73086+ __u32 our_addr;
73087+ __u32 our_netmask;
73088+ char *p;
73089+ __u16 ip_port = 0;
73090+ const struct cred *cred = current_cred();
73091+
73092+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73093+ return 0;
73094+
73095+ curr = current->acl;
73096+ isk = inet_sk(sk);
73097+
73098+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
73099+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73100+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73101+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73102+ struct sockaddr_in saddr;
73103+ int err;
73104+
73105+ saddr.sin_family = AF_INET;
73106+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73107+ saddr.sin_port = isk->inet_sport;
73108+
73109+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73110+ if (err)
73111+ return err;
73112+
73113+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73114+ if (err)
73115+ return err;
73116+ }
73117+
73118+ if (!curr->ips)
73119+ return 0;
73120+
73121+ ip_addr = addr->sin_addr.s_addr;
73122+ ip_port = ntohs(addr->sin_port);
73123+
73124+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73125+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73126+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73127+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73128+ gr_to_filename(current->exec_file->f_path.dentry,
73129+ current->exec_file->f_path.mnt) :
73130+ curr->filename, curr->filename,
73131+ &ip_addr, ip_port, type,
73132+ sk->sk_protocol, mode, &current->signal->saved_ip);
73133+ return 0;
73134+ }
73135+
73136+ for (i = 0; i < curr->ip_num; i++) {
73137+ ip = *(curr->ips + i);
73138+ if (ip->iface != NULL) {
73139+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73140+ p = strchr(iface, ':');
73141+ if (p != NULL)
73142+ *p = '\0';
73143+ dev = dev_get_by_name(sock_net(sk), iface);
73144+ if (dev == NULL)
73145+ continue;
73146+ idev = in_dev_get(dev);
73147+ if (idev == NULL) {
73148+ dev_put(dev);
73149+ continue;
73150+ }
73151+ rcu_read_lock();
73152+ for_ifa(idev) {
73153+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73154+ our_addr = ifa->ifa_address;
73155+ our_netmask = 0xffffffff;
73156+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73157+ if (ret == 1) {
73158+ rcu_read_unlock();
73159+ in_dev_put(idev);
73160+ dev_put(dev);
73161+ return 0;
73162+ } else if (ret == 2) {
73163+ rcu_read_unlock();
73164+ in_dev_put(idev);
73165+ dev_put(dev);
73166+ goto denied;
73167+ }
73168+ }
73169+ } endfor_ifa(idev);
73170+ rcu_read_unlock();
73171+ in_dev_put(idev);
73172+ dev_put(dev);
73173+ } else {
73174+ our_addr = ip->addr;
73175+ our_netmask = ip->netmask;
73176+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73177+ if (ret == 1)
73178+ return 0;
73179+ else if (ret == 2)
73180+ goto denied;
73181+ }
73182+ }
73183+
73184+denied:
73185+ if (mode == GR_BIND)
73186+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73187+ else if (mode == GR_CONNECT)
73188+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73189+
73190+ return -EACCES;
73191+}
73192+
73193+int
73194+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73195+{
73196+ /* always allow disconnection of dgram sockets with connect */
73197+ if (addr->sin_family == AF_UNSPEC)
73198+ return 0;
73199+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73200+}
73201+
73202+int
73203+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73204+{
73205+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73206+}
73207+
73208+int gr_search_listen(struct socket *sock)
73209+{
73210+ struct sock *sk = sock->sk;
73211+ struct sockaddr_in addr;
73212+
73213+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73214+ addr.sin_port = inet_sk(sk)->inet_sport;
73215+
73216+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73217+}
73218+
73219+int gr_search_accept(struct socket *sock)
73220+{
73221+ struct sock *sk = sock->sk;
73222+ struct sockaddr_in addr;
73223+
73224+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73225+ addr.sin_port = inet_sk(sk)->inet_sport;
73226+
73227+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73228+}
73229+
73230+int
73231+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73232+{
73233+ if (addr)
73234+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73235+ else {
73236+ struct sockaddr_in sin;
73237+ const struct inet_sock *inet = inet_sk(sk);
73238+
73239+ sin.sin_addr.s_addr = inet->inet_daddr;
73240+ sin.sin_port = inet->inet_dport;
73241+
73242+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73243+ }
73244+}
73245+
73246+int
73247+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73248+{
73249+ struct sockaddr_in sin;
73250+
73251+ if (unlikely(skb->len < sizeof (struct udphdr)))
73252+ return 0; // skip this packet
73253+
73254+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73255+ sin.sin_port = udp_hdr(skb)->source;
73256+
73257+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73258+}
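check_ip_policy() above returns a tri-state: 1 for an explicit allow, 2 for an explicit deny via GR_INVERT, and 0 for no match, in which case the caller keeps scanning and treats exhausting the rule list as a denial. A small self-contained model of that scan; the rule layout and the port-only matching are deliberate simplifications.

#include <stdio.h>

/* toy rule with the same tri-state result as check_ip_policy():
   1 = allowed, 2 = specifically denied (invert), 0 = no match */
struct rule { unsigned low, high; int invert; };

static int check_rule(const struct rule *r, unsigned port)
{
	if (port >= r->low && port <= r->high)
		return r->invert ? 2 : 1;
	return 0;
}

/* same scan shape as gr_search_connectbind(): the first definitive
   answer wins; falling off the end of the list is a denial */
static int search(const struct rule *rules, int n, unsigned port)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = check_rule(&rules[i], port);
		if (ret == 1)
			return 0;	/* allowed */
		if (ret == 2)
			break;		/* specifically denied */
	}
	return -13;			/* -EACCES */
}

int main(void)
{
	struct rule rules[] = {
		{ 22, 22, 1 },		/* invert: deny port 22 outright */
		{ 1024, 65535, 0 },	/* allow the unprivileged range */
	};

	printf("%d\n", search(rules, 2, 22));	/* -13 */
	printf("%d\n", search(rules, 2, 8080));	/* 0 */
	printf("%d\n", search(rules, 2, 80));	/* -13 */
	return 0;
}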
73259diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73260new file mode 100644
73261index 0000000..25f54ef
73262--- /dev/null
73263+++ b/grsecurity/gracl_learn.c
73264@@ -0,0 +1,207 @@
73265+#include <linux/kernel.h>
73266+#include <linux/mm.h>
73267+#include <linux/sched.h>
73268+#include <linux/poll.h>
73269+#include <linux/string.h>
73270+#include <linux/file.h>
73271+#include <linux/types.h>
73272+#include <linux/vmalloc.h>
73273+#include <linux/grinternal.h>
73274+
73275+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73276+ size_t count, loff_t *ppos);
73277+extern int gr_acl_is_enabled(void);
73278+
73279+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73280+static int gr_learn_attached;
73281+
73282+/* use a 512k buffer */
73283+#define LEARN_BUFFER_SIZE (512 * 1024)
73284+
73285+static DEFINE_SPINLOCK(gr_learn_lock);
73286+static DEFINE_MUTEX(gr_learn_user_mutex);
73287+
73288+/* we need to maintain two buffers: the kernel context of grlearn uses a
73289+   mutex around the userspace copying, while the other kernel contexts
73290+   use a spinlock when copying into the buffer, since they cannot sleep
73291+*/
73292+static char *learn_buffer;
73293+static char *learn_buffer_user;
73294+static int learn_buffer_len;
73295+static int learn_buffer_user_len;
73296+
73297+static ssize_t
73298+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73299+{
73300+ DECLARE_WAITQUEUE(wait, current);
73301+ ssize_t retval = 0;
73302+
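+	/* note: a successful break exits the loop below with both the mutex
+	   and the spinlock held; they are dropped after the buffer handoff */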
73303+ add_wait_queue(&learn_wait, &wait);
73304+ set_current_state(TASK_INTERRUPTIBLE);
73305+ do {
73306+ mutex_lock(&gr_learn_user_mutex);
73307+ spin_lock(&gr_learn_lock);
73308+ if (learn_buffer_len)
73309+ break;
73310+ spin_unlock(&gr_learn_lock);
73311+ mutex_unlock(&gr_learn_user_mutex);
73312+ if (file->f_flags & O_NONBLOCK) {
73313+ retval = -EAGAIN;
73314+ goto out;
73315+ }
73316+ if (signal_pending(current)) {
73317+ retval = -ERESTARTSYS;
73318+ goto out;
73319+ }
73320+
73321+ schedule();
73322+ } while (1);
73323+
73324+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73325+ learn_buffer_user_len = learn_buffer_len;
73326+ retval = learn_buffer_len;
73327+ learn_buffer_len = 0;
73328+
73329+ spin_unlock(&gr_learn_lock);
73330+
73331+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73332+ retval = -EFAULT;
73333+
73334+ mutex_unlock(&gr_learn_user_mutex);
73335+out:
73336+ set_current_state(TASK_RUNNING);
73337+ remove_wait_queue(&learn_wait, &wait);
73338+ return retval;
73339+}
73340+
73341+static unsigned int
73342+poll_learn(struct file * file, poll_table * wait)
73343+{
73344+ poll_wait(file, &learn_wait, wait);
73345+
73346+ if (learn_buffer_len)
73347+ return (POLLIN | POLLRDNORM);
73348+
73349+ return 0;
73350+}
73351+
73352+void
73353+gr_clear_learn_entries(void)
73354+{
73355+ char *tmp;
73356+
73357+ mutex_lock(&gr_learn_user_mutex);
73358+ spin_lock(&gr_learn_lock);
73359+ tmp = learn_buffer;
73360+ learn_buffer = NULL;
73361+ spin_unlock(&gr_learn_lock);
73362+ if (tmp)
73363+ vfree(tmp);
73364+ if (learn_buffer_user != NULL) {
73365+ vfree(learn_buffer_user);
73366+ learn_buffer_user = NULL;
73367+ }
73368+ learn_buffer_len = 0;
73369+ mutex_unlock(&gr_learn_user_mutex);
73370+
73371+ return;
73372+}
73373+
73374+void
73375+gr_add_learn_entry(const char *fmt, ...)
73376+{
73377+ va_list args;
73378+ unsigned int len;
73379+
73380+ if (!gr_learn_attached)
73381+ return;
73382+
73383+ spin_lock(&gr_learn_lock);
73384+
73385+ /* leave a gap at the end so we know when it's "full" but don't have to
73386+ compute the exact length of the string we're trying to append
73387+ */
73388+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73389+ spin_unlock(&gr_learn_lock);
73390+ wake_up_interruptible(&learn_wait);
73391+ return;
73392+ }
73393+ if (learn_buffer == NULL) {
73394+ spin_unlock(&gr_learn_lock);
73395+ return;
73396+ }
73397+
73398+ va_start(args, fmt);
73399+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73400+ va_end(args);
73401+
73402+ learn_buffer_len += len + 1;
73403+
73404+ spin_unlock(&gr_learn_lock);
73405+ wake_up_interruptible(&learn_wait);
73406+
73407+ return;
73408+}
73409+
73410+static int
73411+open_learn(struct inode *inode, struct file *file)
73412+{
73413+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73414+ return -EBUSY;
73415+ if (file->f_mode & FMODE_READ) {
73416+ int retval = 0;
73417+ mutex_lock(&gr_learn_user_mutex);
73418+ if (learn_buffer == NULL)
73419+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73420+ if (learn_buffer_user == NULL)
73421+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73422+ if (learn_buffer == NULL) {
73423+ retval = -ENOMEM;
73424+ goto out_error;
73425+ }
73426+ if (learn_buffer_user == NULL) {
73427+ retval = -ENOMEM;
73428+ goto out_error;
73429+ }
73430+ learn_buffer_len = 0;
73431+ learn_buffer_user_len = 0;
73432+ gr_learn_attached = 1;
73433+out_error:
73434+ mutex_unlock(&gr_learn_user_mutex);
73435+ return retval;
73436+ }
73437+ return 0;
73438+}
73439+
73440+static int
73441+close_learn(struct inode *inode, struct file *file)
73442+{
73443+ if (file->f_mode & FMODE_READ) {
73444+ char *tmp = NULL;
73445+ mutex_lock(&gr_learn_user_mutex);
73446+ spin_lock(&gr_learn_lock);
73447+ tmp = learn_buffer;
73448+ learn_buffer = NULL;
73449+ spin_unlock(&gr_learn_lock);
73450+ if (tmp)
73451+ vfree(tmp);
73452+ if (learn_buffer_user != NULL) {
73453+ vfree(learn_buffer_user);
73454+ learn_buffer_user = NULL;
73455+ }
73456+ learn_buffer_len = 0;
73457+ learn_buffer_user_len = 0;
73458+ gr_learn_attached = 0;
73459+ mutex_unlock(&gr_learn_user_mutex);
73460+ }
73461+
73462+ return 0;
73463+}
73464+
73465+const struct file_operations grsec_fops = {
73466+ .read = read_learn,
73467+ .write = write_grsec_handler,
73468+ .open = open_learn,
73469+ .release = close_learn,
73470+ .poll = poll_learn,
73471+};
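The learn device above splits logging across two buffers: producers that cannot sleep append under a spinlock, while the single reader swaps the data out quickly and performs the slow copy to userspace serialized only by a mutex. A compilable userspace analogue using pthreads; the buffer size, the names, and the omitted overflow gap check are simplifications.

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define BUF_SZ 4096
static char buf[BUF_SZ], user_buf[BUF_SZ];
static int buf_len;
static pthread_spinlock_t lock;
static pthread_mutex_t user_mutex = PTHREAD_MUTEX_INITIALIZER;

/* producer side, mirroring gr_add_learn_entry(): append under the
   spinlock only, never sleeping */
static void add_entry(const char *s)
{
	pthread_spin_lock(&lock);
	int n = snprintf(buf + buf_len, sizeof(buf) - buf_len, "%s", s);
	buf_len += n + 1;		/* entries are NUL-separated */
	pthread_spin_unlock(&lock);
}

/* consumer side, mirroring read_learn(): fast handoff under the
   spinlock, slow copy while holding only the mutex */
static int read_entries(void)
{
	pthread_mutex_lock(&user_mutex);
	pthread_spin_lock(&lock);
	int len = buf_len;
	memcpy(user_buf, buf, len);	/* fast copy under the spinlock */
	buf_len = 0;
	pthread_spin_unlock(&lock);
	/* the potentially sleeping copy_to_user() happens here,
	   serialized only by the mutex */
	fwrite(user_buf, 1, len, stdout);
	pthread_mutex_unlock(&user_mutex);
	return len;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	add_entry("role alpha");
	add_entry("role beta");
	read_entries();
	return 0;
}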
73472diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73473new file mode 100644
73474index 0000000..fd26052
73475--- /dev/null
73476+++ b/grsecurity/gracl_policy.c
73477@@ -0,0 +1,1781 @@
73478+#include <linux/kernel.h>
73479+#include <linux/module.h>
73480+#include <linux/sched.h>
73481+#include <linux/mm.h>
73482+#include <linux/file.h>
73483+#include <linux/fs.h>
73484+#include <linux/namei.h>
73485+#include <linux/mount.h>
73486+#include <linux/tty.h>
73487+#include <linux/proc_fs.h>
73488+#include <linux/lglock.h>
73489+#include <linux/slab.h>
73490+#include <linux/vmalloc.h>
73491+#include <linux/types.h>
73492+#include <linux/sysctl.h>
73493+#include <linux/netdevice.h>
73494+#include <linux/ptrace.h>
73495+#include <linux/gracl.h>
73496+#include <linux/gralloc.h>
73497+#include <linux/security.h>
73498+#include <linux/grinternal.h>
73499+#include <linux/pid_namespace.h>
73500+#include <linux/stop_machine.h>
73501+#include <linux/fdtable.h>
73502+#include <linux/percpu.h>
73503+#include <linux/lglock.h>
73504+#include <linux/hugetlb.h>
73505+#include <linux/posix-timers.h>
73506+#include "../fs/mount.h"
73507+
73508+#include <asm/uaccess.h>
73509+#include <asm/errno.h>
73510+#include <asm/mman.h>
73511+
73512+extern struct gr_policy_state *polstate;
73513+
73514+#define FOR_EACH_ROLE_START(role) \
73515+ role = polstate->role_list; \
73516+ while (role) {
73517+
73518+#define FOR_EACH_ROLE_END(role) \
73519+ role = role->prev; \
73520+ }
73521+
73522+struct path gr_real_root;
73523+
73524+extern struct gr_alloc_state *current_alloc_state;
73525+
73526+u16 acl_sp_role_value;
73527+
73528+static DEFINE_MUTEX(gr_dev_mutex);
73529+
73530+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73531+extern void gr_clear_learn_entries(void);
73532+
73533+struct gr_arg *gr_usermode __read_only;
73534+unsigned char *gr_system_salt __read_only;
73535+unsigned char *gr_system_sum __read_only;
73536+
73537+static unsigned int gr_auth_attempts = 0;
73538+static unsigned long gr_auth_expires = 0UL;
73539+
73540+struct acl_object_label *fakefs_obj_rw;
73541+struct acl_object_label *fakefs_obj_rwx;
73542+
73543+extern int gr_init_uidset(void);
73544+extern void gr_free_uidset(void);
73545+extern void gr_remove_uid(uid_t uid);
73546+extern int gr_find_uid(uid_t uid);
73547+
73548+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
73549+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73550+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73551+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73552+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73553+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73554+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73555+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73556+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73557+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73558+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73559+extern void assign_special_role(const char *rolename);
73560+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73561+extern int gr_rbac_disable(void *unused);
73562+extern void gr_enable_rbac_system(void);
73563+
73564+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73565+{
73566+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73567+ return -EFAULT;
73568+
73569+ return 0;
73570+}
73571+
73572+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73573+{
73574+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73575+ return -EFAULT;
73576+
73577+ return 0;
73578+}
73579+
73580+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73581+{
73582+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73583+ return -EFAULT;
73584+
73585+ return 0;
73586+}
73587+
73588+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73589+{
73590+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73591+ return -EFAULT;
73592+
73593+ return 0;
73594+}
73595+
73596+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73597+{
73598+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73599+ return -EFAULT;
73600+
73601+ return 0;
73602+}
73603+
73604+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73605+{
73606+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73607+ return -EFAULT;
73608+
73609+ return 0;
73610+}
73611+
73612+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73613+{
73614+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73615+ return -EFAULT;
73616+
73617+ return 0;
73618+}
73619+
73620+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73621+{
73622+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73623+ return -EFAULT;
73624+
73625+ return 0;
73626+}
73627+
73628+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73629+{
73630+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73631+ return -EFAULT;
73632+
73633+ return 0;
73634+}
73635+
73636+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73637+{
73638+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73639+ return -EFAULT;
73640+
73641+ if ((uwrap->version != GRSECURITY_VERSION) ||
73642+ (uwrap->size != sizeof(struct gr_arg)))
73643+ return -EINVAL;
73644+
73645+ return 0;
73646+}
73647+
73648+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73649+{
73650+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73651+ return -EFAULT;
73652+
73653+ return 0;
73654+}
73655+
73656+static size_t get_gr_arg_wrapper_size_normal(void)
73657+{
73658+ return sizeof(struct gr_arg_wrapper);
73659+}
73660+
73661+#ifdef CONFIG_COMPAT
73662+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
73663+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
73664+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
73665+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
73666+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
73667+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
73668+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
73669+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
73670+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
73671+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
73672+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
73673+extern size_t get_gr_arg_wrapper_size_compat(void);
73674+
73675+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
73676+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
73677+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
73678+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
73679+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
73680+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
73681+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
73682+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
73683+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
73684+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
73685+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
73686+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
73687+
73688+#else
73689+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
73690+#define copy_gr_arg copy_gr_arg_normal
73691+#define copy_gr_hash_struct copy_gr_hash_struct_normal
73692+#define copy_acl_object_label copy_acl_object_label_normal
73693+#define copy_acl_subject_label copy_acl_subject_label_normal
73694+#define copy_acl_role_label copy_acl_role_label_normal
73695+#define copy_acl_ip_label copy_acl_ip_label_normal
73696+#define copy_pointer_from_array copy_pointer_from_array_normal
73697+#define copy_sprole_pw copy_sprole_pw_normal
73698+#define copy_role_transition copy_role_transition_normal
73699+#define copy_role_allowed_ip copy_role_allowed_ip_normal
73700+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
73701+#endif
73702+
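+/* under CONFIG_COMPAT the copy helpers above are function pointers,
+   presumably assigned elsewhere at policy-load time depending on whether
+   the loading task is 32-bit or native; without CONFIG_COMPAT they
+   collapse to the _normal variants at compile time, so the rest of this
+   file calls e.g. copy_gr_arg() either way */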
73703+static struct acl_subject_label *
73704+lookup_subject_map(const struct acl_subject_label *userp)
73705+{
73706+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
73707+ struct subject_map *match;
73708+
73709+ match = polstate->subj_map_set.s_hash[index];
73710+
73711+ while (match && match->user != userp)
73712+ match = match->next;
73713+
73714+ if (match != NULL)
73715+ return match->kernel;
73716+ else
73717+ return NULL;
73718+}
73719+
73720+static void
73721+insert_subj_map_entry(struct subject_map *subjmap)
73722+{
73723+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
73724+ struct subject_map **curr;
73725+
73726+ subjmap->prev = NULL;
73727+
73728+ curr = &polstate->subj_map_set.s_hash[index];
73729+ if (*curr != NULL)
73730+ (*curr)->prev = subjmap;
73731+
73732+ subjmap->next = *curr;
73733+ *curr = subjmap;
73734+
73735+ return;
73736+}
73737+
73738+static void
73739+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
73740+{
73741+ unsigned int index =
73742+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
73743+ struct acl_role_label **curr;
73744+ struct acl_role_label *tmp, *tmp2;
73745+
73746+ curr = &polstate->acl_role_set.r_hash[index];
73747+
73748+ /* simple case, slot is empty, just set it to our role */
73749+ if (*curr == NULL) {
73750+ *curr = role;
73751+ } else {
73752+ /* example:
73753+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
73754+ 2 -> 3
73755+ */
73756+ /* first check to see if we can already be reached via this slot */
73757+ tmp = *curr;
73758+ while (tmp && tmp != role)
73759+ tmp = tmp->next;
73760+ if (tmp == role) {
73761+ /* we don't need to add ourselves to this slot's chain */
73762+ return;
73763+ }
73764+ /* we need to add ourselves to this chain, two cases */
73765+ if (role->next == NULL) {
73766+ /* simple case, append the current chain to our role */
73767+ role->next = *curr;
73768+ *curr = role;
73769+ } else {
73770+ /* 1 -> 2 -> 3 -> 4
73771+ 2 -> 3 -> 4
73772+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
73773+ */
73774+ /* trickier case: walk our role's chain until we find
73775+ the role for the start of the current slot's chain */
73776+ tmp = role;
73777+ tmp2 = *curr;
73778+ while (tmp->next && tmp->next != tmp2)
73779+ tmp = tmp->next;
73780+ if (tmp->next == tmp2) {
73781+ /* from example above, we found 3, so just
73782+ replace this slot's chain with ours */
73783+ *curr = role;
73784+ } else {
73785+ /* we didn't find a subset of our role's chain
73786+ in the current slot's chain, so append their
73787+ chain to ours, and set us as the first role in
73788+ the slot's chain
73789+
73790+			   we could fold this case into the case above,
73791+			   but we keep it explicit for clarity
73792+ */
73793+ tmp->next = tmp2;
73794+ *curr = role;
73795+ }
73796+ }
73797+ }
73798+
73799+ return;
73800+}
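
The slot chains built by __insert_acl_role_label() deliberately share suffixes: several hash slots can point into the same role list at different depths, so linking a role into a slot only ever adjusts head pointers and never duplicates nodes. A small userspace sketch of the shared-suffix idea (hypothetical struct, illustration only):

	#include <stdio.h>

	struct role {
		const char *name;
		struct role *next;
	};

	int main(void)
	{
		struct role r3 = { "3", NULL };
		struct role r2 = { "2", &r3 };
		struct role r1 = { "1", &r2 };

		/* two "slots" reach the same shared tail at different depths */
		struct role *slot_a = &r1;	/* 1 -> 2 -> 3 */
		struct role *slot_b = &r2;	/* 2 -> 3, same nodes, no copies */

		for (struct role *p = slot_a; p; p = p->next)
			printf("a:%s ", p->name);
		for (struct role *p = slot_b; p; p = p->next)
			printf("b:%s ", p->name);
		printf("\n");
		return 0;
	}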
73801+
73802+static void
73803+insert_acl_role_label(struct acl_role_label *role)
73804+{
73805+ int i;
73806+
73807+ if (polstate->role_list == NULL) {
73808+ polstate->role_list = role;
73809+ role->prev = NULL;
73810+ } else {
73811+ role->prev = polstate->role_list;
73812+ polstate->role_list = role;
73813+ }
73814+
73815+ /* used for hash chains */
73816+ role->next = NULL;
73817+
73818+ if (role->roletype & GR_ROLE_DOMAIN) {
73819+ for (i = 0; i < role->domain_child_num; i++)
73820+ __insert_acl_role_label(role, role->domain_children[i]);
73821+ } else
73822+ __insert_acl_role_label(role, role->uidgid);
73823+}
73824+
73825+static int
73826+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
73827+{
73828+ struct name_entry **curr, *nentry;
73829+ struct inodev_entry *ientry;
73830+ unsigned int len = strlen(name);
73831+ unsigned int key = full_name_hash(name, len);
73832+ unsigned int index = key % polstate->name_set.n_size;
73833+
73834+ curr = &polstate->name_set.n_hash[index];
73835+
73836+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
73837+ curr = &((*curr)->next);
73838+
73839+ if (*curr != NULL)
73840+ return 1;
73841+
73842+ nentry = acl_alloc(sizeof (struct name_entry));
73843+ if (nentry == NULL)
73844+ return 0;
73845+ ientry = acl_alloc(sizeof (struct inodev_entry));
73846+ if (ientry == NULL)
73847+ return 0;
73848+ ientry->nentry = nentry;
73849+
73850+ nentry->key = key;
73851+ nentry->name = name;
73852+ nentry->inode = inode;
73853+ nentry->device = device;
73854+ nentry->len = len;
73855+ nentry->deleted = deleted;
73856+
73857+ nentry->prev = NULL;
73858+ curr = &polstate->name_set.n_hash[index];
73859+ if (*curr != NULL)
73860+ (*curr)->prev = nentry;
73861+ nentry->next = *curr;
73862+ *curr = nentry;
73863+
73864+ /* insert us into the table searchable by inode/dev */
73865+ __insert_inodev_entry(polstate, ientry);
73866+
73867+ return 1;
73868+}
73869+
73870+/* allocating chained hash tables, so the optimal size keeps the load factor lambda ~ 1 (about one entry per bucket) */
73871+
73872+static void *
73873+create_table(__u32 * len, int elementsize)
73874+{
73875+ unsigned int table_sizes[] = {
73876+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
73877+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
73878+ 4194301, 8388593, 16777213, 33554393, 67108859
73879+ };
73880+ void *newtable = NULL;
73881+ unsigned int pwr = 0;
73882+
73883+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
73884+ table_sizes[pwr] <= *len)
73885+ pwr++;
73886+
73887+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
73888+ return newtable;
73889+
73890+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
73891+ newtable =
73892+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
73893+ else
73894+ newtable = vmalloc(table_sizes[pwr] * elementsize);
73895+
73896+ *len = table_sizes[pwr];
73897+
73898+ return newtable;
73899+}
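
create_table() sizes each chained table by stepping up a fixed ladder of primes until it finds one larger than the requested element count, which keeps the load factor near one, matching the comment above. A sketch of just the sizing rule, with a shortened prime ladder:

	#include <stddef.h>

	static const unsigned int primes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093
	};

	/* smallest prime in the ladder strictly greater than want; 0 if none fits */
	static unsigned int pick_size(unsigned int want)
	{
		size_t i;

		for (i = 0; i < sizeof(primes) / sizeof(primes[0]); i++)
			if (primes[i] > want)
				return primes[i];
		return 0;
	}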
73900+
73901+static int
73902+init_variables(const struct gr_arg *arg, bool reload)
73903+{
73904+ struct task_struct *reaper = init_pid_ns.child_reaper;
73905+ unsigned int stacksize;
73906+
73907+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
73908+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
73909+ polstate->name_set.n_size = arg->role_db.num_objects;
73910+ polstate->inodev_set.i_size = arg->role_db.num_objects;
73911+
73912+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
73913+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
73914+ return 1;
73915+
73916+ if (!reload) {
73917+ if (!gr_init_uidset())
73918+ return 1;
73919+ }
73920+
73921+ /* set up the stack that holds allocation info */
73922+
73923+ stacksize = arg->role_db.num_pointers + 5;
73924+
73925+ if (!acl_alloc_stack_init(stacksize))
73926+ return 1;
73927+
73928+ if (!reload) {
73929+ /* grab reference for the real root dentry and vfsmount */
73930+ get_fs_root(reaper->fs, &gr_real_root);
73931+
73932+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
73933+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
73934+#endif
73935+
73936+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
73937+ if (fakefs_obj_rw == NULL)
73938+ return 1;
73939+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
73940+
73941+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
73942+ if (fakefs_obj_rwx == NULL)
73943+ return 1;
73944+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
73945+ }
73946+
73947+ polstate->subj_map_set.s_hash =
73948+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
73949+ polstate->acl_role_set.r_hash =
73950+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
73951+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
73952+ polstate->inodev_set.i_hash =
73953+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
73954+
73955+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
73956+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
73957+ return 1;
73958+
73959+ memset(polstate->subj_map_set.s_hash, 0,
73960+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
73961+ memset(polstate->acl_role_set.r_hash, 0,
73962+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
73963+ memset(polstate->name_set.n_hash, 0,
73964+ sizeof (struct name_entry *) * polstate->name_set.n_size);
73965+ memset(polstate->inodev_set.i_hash, 0,
73966+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
73967+
73968+ return 0;
73969+}
73970+
73971+/* free information not needed after startup;
73972+   currently this is just the user->kernel pointer mappings for subjects
73973+*/
73974+
73975+static void
73976+free_init_variables(void)
73977+{
73978+ __u32 i;
73979+
73980+ if (polstate->subj_map_set.s_hash) {
73981+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
73982+ if (polstate->subj_map_set.s_hash[i]) {
73983+ kfree(polstate->subj_map_set.s_hash[i]);
73984+ polstate->subj_map_set.s_hash[i] = NULL;
73985+ }
73986+ }
73987+
73988+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
73989+ PAGE_SIZE)
73990+ kfree(polstate->subj_map_set.s_hash);
73991+ else
73992+ vfree(polstate->subj_map_set.s_hash);
73993+ }
73994+
73995+ return;
73996+}
73997+
73998+static void
73999+free_variables(bool reload)
74000+{
74001+ struct acl_subject_label *s;
74002+ struct acl_role_label *r;
74003+ struct task_struct *task, *task2;
74004+ unsigned int x;
74005+
74006+ if (!reload) {
74007+ gr_clear_learn_entries();
74008+
74009+ read_lock(&tasklist_lock);
74010+ do_each_thread(task2, task) {
74011+ task->acl_sp_role = 0;
74012+ task->acl_role_id = 0;
74013+ task->inherited = 0;
74014+ task->acl = NULL;
74015+ task->role = NULL;
74016+ } while_each_thread(task2, task);
74017+ read_unlock(&tasklist_lock);
74018+
74019+ kfree(fakefs_obj_rw);
74020+ fakefs_obj_rw = NULL;
74021+ kfree(fakefs_obj_rwx);
74022+ fakefs_obj_rwx = NULL;
74023+
74024+ /* release the reference to the real root dentry and vfsmount */
74025+ path_put(&gr_real_root);
74026+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74027+ }
74028+
74029+ /* free all object hash tables */
74030+
74031+ FOR_EACH_ROLE_START(r)
74032+ if (r->subj_hash == NULL)
74033+ goto next_role;
74034+ FOR_EACH_SUBJECT_START(r, s, x)
74035+ if (s->obj_hash == NULL)
74036+ break;
74037+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74038+ kfree(s->obj_hash);
74039+ else
74040+ vfree(s->obj_hash);
74041+ FOR_EACH_SUBJECT_END(s, x)
74042+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74043+ if (s->obj_hash == NULL)
74044+ break;
74045+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74046+ kfree(s->obj_hash);
74047+ else
74048+ vfree(s->obj_hash);
74049+ FOR_EACH_NESTED_SUBJECT_END(s)
74050+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74051+ kfree(r->subj_hash);
74052+ else
74053+ vfree(r->subj_hash);
74054+ r->subj_hash = NULL;
74055+next_role:
74056+ FOR_EACH_ROLE_END(r)
74057+
74058+ acl_free_all();
74059+
74060+ if (polstate->acl_role_set.r_hash) {
74061+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74062+ PAGE_SIZE)
74063+ kfree(polstate->acl_role_set.r_hash);
74064+ else
74065+ vfree(polstate->acl_role_set.r_hash);
74066+ }
74067+ if (polstate->name_set.n_hash) {
74068+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74069+ PAGE_SIZE)
74070+ kfree(polstate->name_set.n_hash);
74071+ else
74072+ vfree(polstate->name_set.n_hash);
74073+ }
74074+
74075+ if (polstate->inodev_set.i_hash) {
74076+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74077+ PAGE_SIZE)
74078+ kfree(polstate->inodev_set.i_hash);
74079+ else
74080+ vfree(polstate->inodev_set.i_hash);
74081+ }
74082+
74083+ if (!reload)
74084+ gr_free_uidset();
74085+
74086+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74087+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74088+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74089+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74090+
74091+ polstate->default_role = NULL;
74092+ polstate->kernel_role = NULL;
74093+ polstate->role_list = NULL;
74094+
74095+ return;
74096+}
74097+
74098+static struct acl_subject_label *
74099+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74100+
74101+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74102+{
74103+ unsigned int len = strnlen_user(*name, maxlen);
74104+ char *tmp;
74105+
74106+ if (!len || len >= maxlen)
74107+ return -EINVAL;
74108+
74109+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74110+ return -ENOMEM;
74111+
74112+ if (copy_from_user(tmp, *name, len))
74113+ return -EFAULT;
74114+
74115+ tmp[len-1] = '\0';
74116+ *name = tmp;
74117+
74118+ return 0;
74119+}
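
alloc_and_copy_string() follows the standard pattern for pulling a NUL-terminated string out of untrusted memory: measure with an explicit upper bound, reject anything without a terminator inside that bound, copy, then force the terminator yourself. A userspace approximation of that order of operations, where strnlen()/memcpy()/malloc() stand in for the kernel's strnlen_user()/copy_from_user()/acl_alloc():

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	static int copy_bounded(char **name, unsigned int maxlen)
	{
		/* strnlen_user() counts the trailing NUL (and returns 0 on a
		   fault), so add one to the userspace strnlen() to mimic it */
		size_t len = strnlen(*name, maxlen) + 1;
		char *tmp;

		if (len >= maxlen)		/* no NUL within bounds */
			return -EINVAL;

		tmp = malloc(len);
		if (tmp == NULL)
			return -ENOMEM;

		memcpy(tmp, *name, len);
		tmp[len - 1] = '\0';		/* never trust the source's terminator */
		*name = tmp;
		return 0;
	}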
74120+
74121+static int
74122+copy_user_glob(struct acl_object_label *obj)
74123+{
74124+ struct acl_object_label *g_tmp, **guser;
74125+ int error;
74126+
74127+ if (obj->globbed == NULL)
74128+ return 0;
74129+
74130+ guser = &obj->globbed;
74131+ while (*guser) {
74132+ g_tmp = (struct acl_object_label *)
74133+ acl_alloc(sizeof (struct acl_object_label));
74134+ if (g_tmp == NULL)
74135+ return -ENOMEM;
74136+
74137+ if (copy_acl_object_label(g_tmp, *guser))
74138+ return -EFAULT;
74139+
74140+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74141+ if (error)
74142+ return error;
74143+
74144+ *guser = g_tmp;
74145+ guser = &(g_tmp->next);
74146+ }
74147+
74148+ return 0;
74149+}
74150+
74151+static int
74152+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74153+ struct acl_role_label *role)
74154+{
74155+ struct acl_object_label *o_tmp;
74156+ int ret;
74157+
74158+ while (userp) {
74159+ if ((o_tmp = (struct acl_object_label *)
74160+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74161+ return -ENOMEM;
74162+
74163+ if (copy_acl_object_label(o_tmp, userp))
74164+ return -EFAULT;
74165+
74166+ userp = o_tmp->prev;
74167+
74168+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74169+ if (ret)
74170+ return ret;
74171+
74172+ insert_acl_obj_label(o_tmp, subj);
74173+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74174+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74175+ return -ENOMEM;
74176+
74177+ ret = copy_user_glob(o_tmp);
74178+ if (ret)
74179+ return ret;
74180+
74181+ if (o_tmp->nested) {
74182+ int already_copied;
74183+
74184+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74185+ if (IS_ERR(o_tmp->nested))
74186+ return PTR_ERR(o_tmp->nested);
74187+
74188+ /* insert into nested subject list if we haven't copied this one yet
74189+ to prevent duplicate entries */
74190+ if (!already_copied) {
74191+ o_tmp->nested->next = role->hash->first;
74192+ role->hash->first = o_tmp->nested;
74193+ }
74194+ }
74195+ }
74196+
74197+ return 0;
74198+}
74199+
74200+static __u32
74201+count_user_subjs(struct acl_subject_label *userp)
74202+{
74203+ struct acl_subject_label s_tmp;
74204+ __u32 num = 0;
74205+
74206+ while (userp) {
74207+ if (copy_acl_subject_label(&s_tmp, userp))
74208+ break;
74209+
74210+ userp = s_tmp.prev;
74211+		userp = s_tmp.prev;
		num++;	/* count this subject, mirroring count_user_objs() below */
74212+
74213+ return num;
74214+}
74215+
74216+static int
74217+copy_user_allowedips(struct acl_role_label *rolep)
74218+{
74219+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74220+
74221+ ruserip = rolep->allowed_ips;
74222+
74223+ while (ruserip) {
74224+ rlast = rtmp;
74225+
74226+ if ((rtmp = (struct role_allowed_ip *)
74227+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74228+ return -ENOMEM;
74229+
74230+ if (copy_role_allowed_ip(rtmp, ruserip))
74231+ return -EFAULT;
74232+
74233+ ruserip = rtmp->prev;
74234+
74235+ if (!rlast) {
74236+ rtmp->prev = NULL;
74237+ rolep->allowed_ips = rtmp;
74238+ } else {
74239+ rlast->next = rtmp;
74240+ rtmp->prev = rlast;
74241+ }
74242+
74243+ if (!ruserip)
74244+ rtmp->next = NULL;
74245+ }
74246+
74247+ return 0;
74248+}
74249+
74250+static int
74251+copy_user_transitions(struct acl_role_label *rolep)
74252+{
74253+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74254+ int error;
74255+
74256+ rusertp = rolep->transitions;
74257+
74258+ while (rusertp) {
74259+ rlast = rtmp;
74260+
74261+ if ((rtmp = (struct role_transition *)
74262+ acl_alloc(sizeof (struct role_transition))) == NULL)
74263+ return -ENOMEM;
74264+
74265+ if (copy_role_transition(rtmp, rusertp))
74266+ return -EFAULT;
74267+
74268+ rusertp = rtmp->prev;
74269+
74270+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74271+ if (error)
74272+ return error;
74273+
74274+ if (!rlast) {
74275+ rtmp->prev = NULL;
74276+ rolep->transitions = rtmp;
74277+ } else {
74278+ rlast->next = rtmp;
74279+ rtmp->prev = rlast;
74280+ }
74281+
74282+ if (!rusertp)
74283+ rtmp->next = NULL;
74284+ }
74285+
74286+ return 0;
74287+}
74288+
74289+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74290+{
74291+ struct acl_object_label o_tmp;
74292+ __u32 num = 0;
74293+
74294+ while (userp) {
74295+ if (copy_acl_object_label(&o_tmp, userp))
74296+ break;
74297+
74298+ userp = o_tmp.prev;
74299+ num++;
74300+ }
74301+
74302+ return num;
74303+}
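
count_user_objs() and the copy_user_* walkers above it traverse linked lists that live entirely in untrusted user memory, so each step copies a whole node by value and then follows the pointer from the local copy, never dereferencing the user pointer directly. Sketched in userspace with a hypothetical node type and memcpy() standing in for the copy_* wrappers:

	#include <string.h>

	struct unode {
		struct unode *prev;	/* list runs backwards via prev, as in the patch */
		int payload;
	};

	static unsigned int count_nodes(const struct unode *userp)
	{
		struct unode tmp;
		unsigned int num = 0;

		while (userp) {
			memcpy(&tmp, userp, sizeof(tmp));	/* kernel: copy-from-user wrapper */
			userp = tmp.prev;			/* advance via the local copy */
			num++;
		}
		return num;
	}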
74304+
74305+static struct acl_subject_label *
74306+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74307+{
74308+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74309+ __u32 num_objs;
74310+ struct acl_ip_label **i_tmp, *i_utmp2;
74311+ struct gr_hash_struct ghash;
74312+ struct subject_map *subjmap;
74313+ unsigned int i_num;
74314+ int err;
74315+
74316+ if (already_copied != NULL)
74317+ *already_copied = 0;
74318+
74319+ s_tmp = lookup_subject_map(userp);
74320+
74321+	/* we've already copied this subject into the kernel; just return
74322+	   the reference to it rather than copying it over again
74323+	*/
74324+ if (s_tmp) {
74325+ if (already_copied != NULL)
74326+ *already_copied = 1;
74327+		return s_tmp;
74328+ }
74329+
74330+ if ((s_tmp = (struct acl_subject_label *)
74331+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74332+ return ERR_PTR(-ENOMEM);
74333+
74334+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74335+ if (subjmap == NULL)
74336+ return ERR_PTR(-ENOMEM);
74337+
74338+ subjmap->user = userp;
74339+ subjmap->kernel = s_tmp;
74340+ insert_subj_map_entry(subjmap);
74341+
74342+ if (copy_acl_subject_label(s_tmp, userp))
74343+ return ERR_PTR(-EFAULT);
74344+
74345+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74346+ if (err)
74347+ return ERR_PTR(err);
74348+
74349+ if (!strcmp(s_tmp->filename, "/"))
74350+ role->root_label = s_tmp;
74351+
74352+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74353+ return ERR_PTR(-EFAULT);
74354+
74355+ /* copy user and group transition tables */
74356+
74357+ if (s_tmp->user_trans_num) {
74358+ uid_t *uidlist;
74359+
74360+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74361+ if (uidlist == NULL)
74362+ return ERR_PTR(-ENOMEM);
74363+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74364+ return ERR_PTR(-EFAULT);
74365+
74366+ s_tmp->user_transitions = uidlist;
74367+ }
74368+
74369+ if (s_tmp->group_trans_num) {
74370+ gid_t *gidlist;
74371+
74372+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74373+ if (gidlist == NULL)
74374+ return ERR_PTR(-ENOMEM);
74375+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74376+ return ERR_PTR(-EFAULT);
74377+
74378+ s_tmp->group_transitions = gidlist;
74379+ }
74380+
74381+ /* set up object hash table */
74382+ num_objs = count_user_objs(ghash.first);
74383+
74384+ s_tmp->obj_hash_size = num_objs;
74385+ s_tmp->obj_hash =
74386+ (struct acl_object_label **)
74387+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74388+
74389+ if (!s_tmp->obj_hash)
74390+ return ERR_PTR(-ENOMEM);
74391+
74392+ memset(s_tmp->obj_hash, 0,
74393+ s_tmp->obj_hash_size *
74394+ sizeof (struct acl_object_label *));
74395+
74396+ /* add in objects */
74397+ err = copy_user_objs(ghash.first, s_tmp, role);
74398+
74399+ if (err)
74400+ return ERR_PTR(err);
74401+
74402+ /* set pointer for parent subject */
74403+ if (s_tmp->parent_subject) {
74404+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74405+
74406+ if (IS_ERR(s_tmp2))
74407+ return s_tmp2;
74408+
74409+ s_tmp->parent_subject = s_tmp2;
74410+ }
74411+
74412+ /* add in ip acls */
74413+
74414+ if (!s_tmp->ip_num) {
74415+ s_tmp->ips = NULL;
74416+ goto insert;
74417+ }
74418+
74419+ i_tmp =
74420+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74421+ sizeof (struct acl_ip_label *));
74422+
74423+ if (!i_tmp)
74424+ return ERR_PTR(-ENOMEM);
74425+
74426+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74427+ *(i_tmp + i_num) =
74428+ (struct acl_ip_label *)
74429+ acl_alloc(sizeof (struct acl_ip_label));
74430+ if (!*(i_tmp + i_num))
74431+ return ERR_PTR(-ENOMEM);
74432+
74433+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74434+ return ERR_PTR(-EFAULT);
74435+
74436+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74437+ return ERR_PTR(-EFAULT);
74438+
74439+ if ((*(i_tmp + i_num))->iface == NULL)
74440+ continue;
74441+
74442+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74443+ if (err)
74444+ return ERR_PTR(err);
74445+ }
74446+
74447+ s_tmp->ips = i_tmp;
74448+
74449+insert:
74450+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74451+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74452+ return ERR_PTR(-ENOMEM);
74453+
74454+ return s_tmp;
74455+}
74456+
74457+static int
74458+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74459+{
74460+ struct acl_subject_label s_pre;
74461+ struct acl_subject_label * ret;
74462+ int err;
74463+
74464+ while (userp) {
74465+ if (copy_acl_subject_label(&s_pre, userp))
74466+ return -EFAULT;
74467+
74468+ ret = do_copy_user_subj(userp, role, NULL);
74469+
74470+ err = PTR_ERR(ret);
74471+ if (IS_ERR(ret))
74472+ return err;
74473+
74474+ insert_acl_subj_label(ret, role);
74475+
74476+ userp = s_pre.prev;
74477+ }
74478+
74479+ return 0;
74480+}
74481+
74482+static int
74483+copy_user_acl(struct gr_arg *arg)
74484+{
74485+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74486+ struct acl_subject_label *subj_list;
74487+ struct sprole_pw *sptmp;
74488+ struct gr_hash_struct *ghash;
74489+ uid_t *domainlist;
74490+ unsigned int r_num;
74491+ int err = 0;
74492+ __u16 i;
74493+ __u32 num_subjs;
74494+
74495+ /* we need a default and kernel role */
74496+ if (arg->role_db.num_roles < 2)
74497+ return -EINVAL;
74498+
74499+ /* copy special role authentication info from userspace */
74500+
74501+ polstate->num_sprole_pws = arg->num_sprole_pws;
74502+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74503+
74504+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74505+ return -ENOMEM;
74506+
74507+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74508+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74509+ if (!sptmp)
74510+ return -ENOMEM;
74511+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74512+ return -EFAULT;
74513+
74514+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74515+ if (err)
74516+ return err;
74517+
74518+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74519+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74520+#endif
74521+
74522+ polstate->acl_special_roles[i] = sptmp;
74523+ }
74524+
74525+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74526+
74527+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74528+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74529+
74530+ if (!r_tmp)
74531+ return -ENOMEM;
74532+
74533+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74534+ return -EFAULT;
74535+
74536+ if (copy_acl_role_label(r_tmp, r_utmp2))
74537+ return -EFAULT;
74538+
74539+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74540+ if (err)
74541+ return err;
74542+
74543+ if (!strcmp(r_tmp->rolename, "default")
74544+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74545+ polstate->default_role = r_tmp;
74546+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74547+ polstate->kernel_role = r_tmp;
74548+ }
74549+
74550+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74551+ return -ENOMEM;
74552+
74553+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74554+ return -EFAULT;
74555+
74556+ r_tmp->hash = ghash;
74557+
74558+ num_subjs = count_user_subjs(r_tmp->hash->first);
74559+
74560+ r_tmp->subj_hash_size = num_subjs;
74561+ r_tmp->subj_hash =
74562+ (struct acl_subject_label **)
74563+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74564+
74565+ if (!r_tmp->subj_hash)
74566+ return -ENOMEM;
74567+
74568+ err = copy_user_allowedips(r_tmp);
74569+ if (err)
74570+ return err;
74571+
74572+ /* copy domain info */
74573+ if (r_tmp->domain_children != NULL) {
74574+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74575+ if (domainlist == NULL)
74576+ return -ENOMEM;
74577+
74578+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74579+ return -EFAULT;
74580+
74581+ r_tmp->domain_children = domainlist;
74582+ }
74583+
74584+ err = copy_user_transitions(r_tmp);
74585+ if (err)
74586+ return err;
74587+
74588+ memset(r_tmp->subj_hash, 0,
74589+ r_tmp->subj_hash_size *
74590+ sizeof (struct acl_subject_label *));
74591+
74592+ /* acquire the list of subjects, then NULL out
74593+ the list prior to parsing the subjects for this role,
74594+ as during this parsing the list is replaced with a list
74595+ of *nested* subjects for the role
74596+ */
74597+ subj_list = r_tmp->hash->first;
74598+
74599+ /* set nested subject list to null */
74600+ r_tmp->hash->first = NULL;
74601+
74602+ err = copy_user_subjs(subj_list, r_tmp);
74603+
74604+ if (err)
74605+ return err;
74606+
74607+ insert_acl_role_label(r_tmp);
74608+ }
74609+
74610+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74611+ return -EINVAL;
74612+
74613+ return err;
74614+}
74615+
74616+static int gracl_reload_apply_policies(void *reload)
74617+{
74618+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74619+ struct task_struct *task, *task2;
74620+ struct acl_role_label *role, *rtmp;
74621+ struct acl_subject_label *subj;
74622+ const struct cred *cred;
74623+ int role_applied;
74624+ int ret = 0;
74625+
74626+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74627+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74628+
74629+ /* first make sure we'll be able to apply the new policy cleanly */
74630+ do_each_thread(task2, task) {
74631+ if (task->exec_file == NULL)
74632+ continue;
74633+ role_applied = 0;
74634+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74635+ /* preserve special roles */
74636+ FOR_EACH_ROLE_START(role)
74637+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74638+ rtmp = task->role;
74639+ task->role = role;
74640+ role_applied = 1;
74641+ break;
74642+ }
74643+ FOR_EACH_ROLE_END(role)
74644+ }
74645+ if (!role_applied) {
74646+ cred = __task_cred(task);
74647+ rtmp = task->role;
74648+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74649+ }
74650+		/* this handles non-nested inherited subjects; nested subjects will still
74651+		   be dropped for now */
74652+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74653+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
74654+ /* change the role back so that we've made no modifications to the policy */
74655+ task->role = rtmp;
74656+
74657+ if (subj == NULL || task->tmpacl == NULL) {
74658+ ret = -EINVAL;
74659+ goto out;
74660+ }
74661+ } while_each_thread(task2, task);
74662+
74663+ /* now actually apply the policy */
74664+
74665+ do_each_thread(task2, task) {
74666+ if (task->exec_file) {
74667+ role_applied = 0;
74668+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74669+ /* preserve special roles */
74670+ FOR_EACH_ROLE_START(role)
74671+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74672+ task->role = role;
74673+ role_applied = 1;
74674+ break;
74675+ }
74676+ FOR_EACH_ROLE_END(role)
74677+ }
74678+ if (!role_applied) {
74679+ cred = __task_cred(task);
74680+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74681+ }
74682+			/* this handles non-nested inherited subjects; nested subjects will still
74683+			   be dropped for now */
74684+ if (!reload_state->oldmode && task->inherited)
74685+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74686+ else {
74687+ /* looked up and tagged to the task previously */
74688+ subj = task->tmpacl;
74689+ }
74690+ /* subj will be non-null */
74691+ __gr_apply_subject_to_task(polstate, task, subj);
74692+ if (reload_state->oldmode) {
74693+ task->acl_role_id = 0;
74694+ task->acl_sp_role = 0;
74695+ task->inherited = 0;
74696+ }
74697+ } else {
74698+ // it's a kernel process
74699+ task->role = polstate->kernel_role;
74700+ task->acl = polstate->kernel_role->root_label;
74701+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
74702+ task->acl->mode &= ~GR_PROCFIND;
74703+#endif
74704+ }
74705+ } while_each_thread(task2, task);
74706+
74707+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
74708+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
74709+
74710+out:
74711+
74712+ return ret;
74713+}
74714+
74715+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
74716+{
74717+ struct gr_reload_state new_reload_state = { };
74718+ int err;
74719+
74720+ new_reload_state.oldpolicy_ptr = polstate;
74721+ new_reload_state.oldalloc_ptr = current_alloc_state;
74722+ new_reload_state.oldmode = oldmode;
74723+
74724+ current_alloc_state = &new_reload_state.newalloc;
74725+ polstate = &new_reload_state.newpolicy;
74726+
74727+	/* everything relevant is now saved off; copy in the new policy */
74728+ if (init_variables(args, true)) {
74729+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74730+ err = -ENOMEM;
74731+ goto error;
74732+ }
74733+
74734+ err = copy_user_acl(args);
74735+ free_init_variables();
74736+ if (err)
74737+ goto error;
74738+	/* the new policy is copied in, with the old policy available via saved_state.
74739+	   First go through applying roles, making sure to preserve special roles,
74740+	   then apply new subjects, making sure to preserve inherited and nested subjects
74741+	   (though currently only inherited subjects will be preserved)
74742+	*/
74743+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
74744+ if (err)
74745+ goto error;
74746+
74747+ /* we've now applied the new policy, so restore the old policy state to free it */
74748+ polstate = &new_reload_state.oldpolicy;
74749+ current_alloc_state = &new_reload_state.oldalloc;
74750+ free_variables(true);
74751+
74752+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
74753+ to running_polstate/current_alloc_state inside stop_machine
74754+ */
74755+ err = 0;
74756+ goto out;
74757+error:
74758+	/* on error loading the new policy, we just keep the previous
74759+	   policy in place
74760+	*/
74761+ free_variables(true);
74762+
74763+ /* doesn't affect runtime, but maintains consistent state */
74764+out:
74765+ polstate = new_reload_state.oldpolicy_ptr;
74766+ current_alloc_state = new_reload_state.oldalloc_ptr;
74767+
74768+ return err;
74769+}
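
gracl_reload() is built around swapping the global polstate/current_alloc_state pointers: save the live pointers, aim the globals at fresh state while the new policy is constructed, and on any failure point them back so the running policy is never left half-swapped. A deliberately simplified sketch of that discipline (names are illustrative; the real function also swaps the states inside stop_machine() on success):

	struct state { int dummy; };

	static struct state *live_state;	/* stands in for polstate */

	static int reload(struct state *fresh, int (*build)(struct state *))
	{
		struct state *old = live_state;	/* save the running state */
		int err;

		live_state = fresh;		/* build against the new state */
		err = build(fresh);
		if (err)
			live_state = old;	/* error: keep the previous policy */
		return err;
	}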
74770+
74771+static int
74772+gracl_init(struct gr_arg *args)
74773+{
74774+ int error = 0;
74775+
74776+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
74777+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
74778+
74779+ if (init_variables(args, false)) {
74780+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74781+ error = -ENOMEM;
74782+ goto out;
74783+ }
74784+
74785+ error = copy_user_acl(args);
74786+ free_init_variables();
74787+ if (error)
74788+ goto out;
74789+
74790+ error = gr_set_acls(0);
74791+ if (error)
74792+ goto out;
74793+
74794+ gr_enable_rbac_system();
74795+
74796+ return 0;
74797+
74798+out:
74799+ free_variables(false);
74800+ return error;
74801+}
74802+
74803+static int
74804+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
74805+ unsigned char **sum)
74806+{
74807+ struct acl_role_label *r;
74808+ struct role_allowed_ip *ipp;
74809+ struct role_transition *trans;
74810+ unsigned int i;
74811+ int found = 0;
74812+ u32 curr_ip = current->signal->curr_ip;
74813+
74814+ current->signal->saved_ip = curr_ip;
74815+
74816+ /* check transition table */
74817+
74818+ for (trans = current->role->transitions; trans; trans = trans->next) {
74819+ if (!strcmp(rolename, trans->rolename)) {
74820+ found = 1;
74821+ break;
74822+ }
74823+ }
74824+
74825+ if (!found)
74826+ return 0;
74827+
74828+	/* handle special roles that do not require authentication,
74829+	   and check the allowed IP list */
74830+
74831+ FOR_EACH_ROLE_START(r)
74832+ if (!strcmp(rolename, r->rolename) &&
74833+ (r->roletype & GR_ROLE_SPECIAL)) {
74834+ found = 0;
74835+ if (r->allowed_ips != NULL) {
74836+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
74837+ if ((ntohl(curr_ip) & ipp->netmask) ==
74838+ (ntohl(ipp->addr) & ipp->netmask))
74839+ found = 1;
74840+ }
74841+ } else
74842+ found = 2;
74843+ if (!found)
74844+ return 0;
74845+
74846+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
74847+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
74848+ *salt = NULL;
74849+ *sum = NULL;
74850+ return 1;
74851+ }
74852+ }
74853+ FOR_EACH_ROLE_END(r)
74854+
74855+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74856+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
74857+ *salt = polstate->acl_special_roles[i]->salt;
74858+ *sum = polstate->acl_special_roles[i]->sum;
74859+ return 1;
74860+ }
74861+ }
74862+
74863+ return 0;
74864+}
74865+
74866+int gr_check_secure_terminal(struct task_struct *task)
74867+{
74868+ struct task_struct *p, *p2, *p3;
74869+ struct files_struct *files;
74870+ struct fdtable *fdt;
74871+ struct file *our_file = NULL, *file;
74872+ int i;
74873+
74874+ if (task->signal->tty == NULL)
74875+ return 1;
74876+
74877+ files = get_files_struct(task);
74878+ if (files != NULL) {
74879+ rcu_read_lock();
74880+ fdt = files_fdtable(files);
74881+ for (i=0; i < fdt->max_fds; i++) {
74882+ file = fcheck_files(files, i);
74883+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
74884+ get_file(file);
74885+ our_file = file;
74886+ }
74887+ }
74888+ rcu_read_unlock();
74889+ put_files_struct(files);
74890+ }
74891+
74892+ if (our_file == NULL)
74893+ return 1;
74894+
74895+ read_lock(&tasklist_lock);
74896+ do_each_thread(p2, p) {
74897+ files = get_files_struct(p);
74898+ if (files == NULL ||
74899+ (p->signal && p->signal->tty == task->signal->tty)) {
74900+ if (files != NULL)
74901+ put_files_struct(files);
74902+ continue;
74903+ }
74904+ rcu_read_lock();
74905+ fdt = files_fdtable(files);
74906+ for (i=0; i < fdt->max_fds; i++) {
74907+ file = fcheck_files(files, i);
74908+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
74909+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
74910+ p3 = task;
74911+ while (task_pid_nr(p3) > 0) {
74912+ if (p3 == p)
74913+ break;
74914+ p3 = p3->real_parent;
74915+ }
74916+ if (p3 == p)
74917+ break;
74918+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
74919+ gr_handle_alertkill(p);
74920+ rcu_read_unlock();
74921+ put_files_struct(files);
74922+ read_unlock(&tasklist_lock);
74923+ fput(our_file);
74924+ return 0;
74925+ }
74926+ }
74927+ rcu_read_unlock();
74928+ put_files_struct(files);
74929+ } while_each_thread(p2, p);
74930+ read_unlock(&tasklist_lock);
74931+
74932+ fput(our_file);
74933+ return 1;
74934+}
74935+
74936+ssize_t
74937+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
74938+{
74939+ struct gr_arg_wrapper uwrap;
74940+ unsigned char *sprole_salt = NULL;
74941+ unsigned char *sprole_sum = NULL;
74942+ int error = 0;
74943+ int error2 = 0;
74944+ size_t req_count = 0;
74945+ unsigned char oldmode = 0;
74946+
74947+ mutex_lock(&gr_dev_mutex);
74948+
74949+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
74950+ error = -EPERM;
74951+ goto out;
74952+ }
74953+
74954+#ifdef CONFIG_COMPAT
74955+ pax_open_kernel();
74956+ if (is_compat_task()) {
74957+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
74958+ copy_gr_arg = &copy_gr_arg_compat;
74959+ copy_acl_object_label = &copy_acl_object_label_compat;
74960+ copy_acl_subject_label = &copy_acl_subject_label_compat;
74961+ copy_acl_role_label = &copy_acl_role_label_compat;
74962+ copy_acl_ip_label = &copy_acl_ip_label_compat;
74963+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
74964+ copy_role_transition = &copy_role_transition_compat;
74965+ copy_sprole_pw = &copy_sprole_pw_compat;
74966+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
74967+ copy_pointer_from_array = &copy_pointer_from_array_compat;
74968+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
74969+ } else {
74970+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
74971+ copy_gr_arg = &copy_gr_arg_normal;
74972+ copy_acl_object_label = &copy_acl_object_label_normal;
74973+ copy_acl_subject_label = &copy_acl_subject_label_normal;
74974+ copy_acl_role_label = &copy_acl_role_label_normal;
74975+ copy_acl_ip_label = &copy_acl_ip_label_normal;
74976+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
74977+ copy_role_transition = &copy_role_transition_normal;
74978+ copy_sprole_pw = &copy_sprole_pw_normal;
74979+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
74980+ copy_pointer_from_array = &copy_pointer_from_array_normal;
74981+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
74982+ }
74983+ pax_close_kernel();
74984+#endif
74985+
74986+ req_count = get_gr_arg_wrapper_size();
74987+
74988+ if (count != req_count) {
74989+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
74990+ error = -EINVAL;
74991+ goto out;
74992+ }
74993+
74994+
74995+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
74996+ gr_auth_expires = 0;
74997+ gr_auth_attempts = 0;
74998+ }
74999+
75000+ error = copy_gr_arg_wrapper(buf, &uwrap);
75001+ if (error)
75002+ goto out;
75003+
75004+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75005+ if (error)
75006+ goto out;
75007+
75008+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75009+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75010+ time_after(gr_auth_expires, get_seconds())) {
75011+ error = -EBUSY;
75012+ goto out;
75013+ }
75014+
75015+	/* if a non-root user is trying to do anything other than use a
75016+	   special role, do not attempt authentication and do not count the
75017+	   attempt towards authentication lockout
75018+	*/
75019+
75020+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75021+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75022+ gr_is_global_nonroot(current_uid())) {
75023+ error = -EPERM;
75024+ goto out;
75025+ }
75026+
75027+ /* ensure pw and special role name are null terminated */
75028+
75029+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75030+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75031+
75032+	/* Okay.
75033+	 * We have enough of the argument structure (we have yet to
75034+	 * copy_from_user the tables themselves).  Copy the tables
75035+	 * only if we need them, i.e. for loading operations. */
75036+
75037+ switch (gr_usermode->mode) {
75038+ case GR_STATUS:
75039+ if (gr_acl_is_enabled()) {
75040+ error = 1;
75041+ if (!gr_check_secure_terminal(current))
75042+ error = 3;
75043+ } else
75044+ error = 2;
75045+ goto out;
75046+ case GR_SHUTDOWN:
75047+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75048+ stop_machine(gr_rbac_disable, NULL, NULL);
75049+ free_variables(false);
75050+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75051+ memset(gr_system_salt, 0, GR_SALT_LEN);
75052+ memset(gr_system_sum, 0, GR_SHA_LEN);
75053+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75054+ } else if (gr_acl_is_enabled()) {
75055+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75056+ error = -EPERM;
75057+ } else {
75058+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75059+ error = -EAGAIN;
75060+ }
75061+ break;
75062+ case GR_ENABLE:
75063+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75064+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75065+ else {
75066+ if (gr_acl_is_enabled())
75067+ error = -EAGAIN;
75068+ else
75069+ error = error2;
75070+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75071+ }
75072+ break;
75073+ case GR_OLDRELOAD:
75074+ oldmode = 1;
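		/* fall through: GR_OLDRELOAD reuses the GR_RELOAD handling below */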
75075+ case GR_RELOAD:
75076+ if (!gr_acl_is_enabled()) {
75077+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75078+ error = -EAGAIN;
75079+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75080+ error2 = gracl_reload(gr_usermode, oldmode);
75081+ if (!error2)
75082+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75083+ else {
75084+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75085+ error = error2;
75086+ }
75087+ } else {
75088+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75089+ error = -EPERM;
75090+ }
75091+ break;
75092+ case GR_SEGVMOD:
75093+ if (unlikely(!gr_acl_is_enabled())) {
75094+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75095+ error = -EAGAIN;
75096+ break;
75097+ }
75098+
75099+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75100+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75101+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75102+ struct acl_subject_label *segvacl;
75103+ segvacl =
75104+ lookup_acl_subj_label(gr_usermode->segv_inode,
75105+ gr_usermode->segv_device,
75106+ current->role);
75107+ if (segvacl) {
75108+ segvacl->crashes = 0;
75109+ segvacl->expires = 0;
75110+ }
75111+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75112+ gr_remove_uid(gr_usermode->segv_uid);
75113+ }
75114+ } else {
75115+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75116+ error = -EPERM;
75117+ }
75118+ break;
75119+ case GR_SPROLE:
75120+ case GR_SPROLEPAM:
75121+ if (unlikely(!gr_acl_is_enabled())) {
75122+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75123+ error = -EAGAIN;
75124+ break;
75125+ }
75126+
75127+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75128+ current->role->expires = 0;
75129+ current->role->auth_attempts = 0;
75130+ }
75131+
75132+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75133+ time_after(current->role->expires, get_seconds())) {
75134+ error = -EBUSY;
75135+ goto out;
75136+ }
75137+
75138+ if (lookup_special_role_auth
75139+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75140+ && ((!sprole_salt && !sprole_sum)
75141+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75142+ char *p = "";
75143+ assign_special_role(gr_usermode->sp_role);
75144+ read_lock(&tasklist_lock);
75145+ if (current->real_parent)
75146+ p = current->real_parent->role->rolename;
75147+ read_unlock(&tasklist_lock);
75148+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75149+ p, acl_sp_role_value);
75150+ } else {
75151+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75152+ error = -EPERM;
75153+ if(!(current->role->auth_attempts++))
75154+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75155+
75156+ goto out;
75157+ }
75158+ break;
75159+ case GR_UNSPROLE:
75160+ if (unlikely(!gr_acl_is_enabled())) {
75161+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75162+ error = -EAGAIN;
75163+ break;
75164+ }
75165+
75166+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75167+ char *p = "";
75168+ int i = 0;
75169+
75170+ read_lock(&tasklist_lock);
75171+ if (current->real_parent) {
75172+ p = current->real_parent->role->rolename;
75173+ i = current->real_parent->acl_role_id;
75174+ }
75175+ read_unlock(&tasklist_lock);
75176+
75177+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75178+ gr_set_acls(1);
75179+ } else {
75180+ error = -EPERM;
75181+ goto out;
75182+ }
75183+ break;
75184+ default:
75185+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75186+ error = -EINVAL;
75187+ break;
75188+ }
75189+
75190+ if (error != -EPERM)
75191+ goto out;
75192+
75193+ if(!(gr_auth_attempts++))
75194+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75195+
75196+ out:
75197+ mutex_unlock(&gr_dev_mutex);
75198+
75199+ if (!error)
75200+ error = req_count;
75201+
75202+ return error;
75203+}
75204+
75205+int
75206+gr_set_acls(const int type)
75207+{
75208+ struct task_struct *task, *task2;
75209+ struct acl_role_label *role = current->role;
75210+ struct acl_subject_label *subj;
75211+ __u16 acl_role_id = current->acl_role_id;
75212+ const struct cred *cred;
75213+ int ret;
75214+
75215+ rcu_read_lock();
75216+ read_lock(&tasklist_lock);
75217+ read_lock(&grsec_exec_file_lock);
75218+ do_each_thread(task2, task) {
75219+		/* check to see if we're called from the exit handler;
75220+		   if so, only replace ACLs that have inherited the admin
75221+		   ACL */
75222+
75223+ if (type && (task->role != role ||
75224+ task->acl_role_id != acl_role_id))
75225+ continue;
75226+
75227+ task->acl_role_id = 0;
75228+ task->acl_sp_role = 0;
75229+ task->inherited = 0;
75230+
75231+ if (task->exec_file) {
75232+ cred = __task_cred(task);
75233+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75234+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75235+ if (subj == NULL) {
75236+ ret = -EINVAL;
75237+ read_unlock(&grsec_exec_file_lock);
75238+ read_unlock(&tasklist_lock);
75239+ rcu_read_unlock();
75240+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75241+ return ret;
75242+ }
75243+ __gr_apply_subject_to_task(polstate, task, subj);
75244+ } else {
75245+ // it's a kernel process
75246+ task->role = polstate->kernel_role;
75247+ task->acl = polstate->kernel_role->root_label;
75248+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75249+ task->acl->mode &= ~GR_PROCFIND;
75250+#endif
75251+ }
75252+ } while_each_thread(task2, task);
75253+ read_unlock(&grsec_exec_file_lock);
75254+ read_unlock(&tasklist_lock);
75255+ rcu_read_unlock();
75256+
75257+ return 0;
75258+}
75259diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75260new file mode 100644
75261index 0000000..39645c9
75262--- /dev/null
75263+++ b/grsecurity/gracl_res.c
75264@@ -0,0 +1,68 @@
75265+#include <linux/kernel.h>
75266+#include <linux/sched.h>
75267+#include <linux/gracl.h>
75268+#include <linux/grinternal.h>
75269+
75270+static const char *restab_log[] = {
75271+ [RLIMIT_CPU] = "RLIMIT_CPU",
75272+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75273+ [RLIMIT_DATA] = "RLIMIT_DATA",
75274+ [RLIMIT_STACK] = "RLIMIT_STACK",
75275+ [RLIMIT_CORE] = "RLIMIT_CORE",
75276+ [RLIMIT_RSS] = "RLIMIT_RSS",
75277+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75278+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75279+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75280+ [RLIMIT_AS] = "RLIMIT_AS",
75281+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75282+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75283+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75284+ [RLIMIT_NICE] = "RLIMIT_NICE",
75285+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75286+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75287+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75288+};
75289+
75290+void
75291+gr_log_resource(const struct task_struct *task,
75292+ const int res, const unsigned long wanted, const int gt)
75293+{
75294+ const struct cred *cred;
75295+ unsigned long rlim;
75296+
75297+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75298+ return;
75299+
75300+ // not yet supported resource
75301+ if (unlikely(!restab_log[res]))
75302+ return;
75303+
75304+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75305+ rlim = task_rlimit_max(task, res);
75306+ else
75307+ rlim = task_rlimit(task, res);
75308+
75309+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75310+ return;
75311+
75312+ rcu_read_lock();
75313+ cred = __task_cred(task);
75314+
75315+ if (res == RLIMIT_NPROC &&
75316+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75317+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75318+ goto out_rcu_unlock;
75319+ else if (res == RLIMIT_MEMLOCK &&
75320+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75321+ goto out_rcu_unlock;
75322+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75323+ goto out_rcu_unlock;
75324+ rcu_read_unlock();
75325+
75326+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75327+
75328+ return;
75329+out_rcu_unlock:
75330+ rcu_read_unlock();
75331+ return;
75332+}
75333diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75334new file mode 100644
75335index 0000000..218b66b
75336--- /dev/null
75337+++ b/grsecurity/gracl_segv.c
75338@@ -0,0 +1,324 @@
75339+#include <linux/kernel.h>
75340+#include <linux/mm.h>
75341+#include <asm/uaccess.h>
75342+#include <asm/errno.h>
75343+#include <asm/mman.h>
75344+#include <net/sock.h>
75345+#include <linux/file.h>
75346+#include <linux/fs.h>
75347+#include <linux/net.h>
75348+#include <linux/in.h>
75349+#include <linux/slab.h>
75350+#include <linux/types.h>
75351+#include <linux/sched.h>
75352+#include <linux/timer.h>
75353+#include <linux/gracl.h>
75354+#include <linux/grsecurity.h>
75355+#include <linux/grinternal.h>
75356+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75357+#include <linux/magic.h>
75358+#include <linux/pagemap.h>
75359+#include "../fs/btrfs/async-thread.h"
75360+#include "../fs/btrfs/ctree.h"
75361+#include "../fs/btrfs/btrfs_inode.h"
75362+#endif
75363+
75364+static struct crash_uid *uid_set;
75365+static unsigned short uid_used;
75366+static DEFINE_SPINLOCK(gr_uid_lock);
75367+extern rwlock_t gr_inode_lock;
75368+extern struct acl_subject_label *
75369+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
75370+ struct acl_role_label *role);
75371+
75372+static inline dev_t __get_dev(const struct dentry *dentry)
75373+{
75374+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75375+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75376+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75377+ else
75378+#endif
75379+ return dentry->d_sb->s_dev;
75380+}
75381+
75382+static inline u64 __get_ino(const struct dentry *dentry)
75383+{
75384+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75385+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75386+ return btrfs_ino(dentry->d_inode);
75387+ else
75388+#endif
75389+ return dentry->d_inode->i_ino;
75390+}
75391+
75392+int
75393+gr_init_uidset(void)
75394+{
75395+ uid_set =
75396+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75397+ uid_used = 0;
75398+
75399+ return uid_set ? 1 : 0;
75400+}
75401+
75402+void
75403+gr_free_uidset(void)
75404+{
75405+ if (uid_set) {
75406+ struct crash_uid *tmpset;
75407+ spin_lock(&gr_uid_lock);
75408+ tmpset = uid_set;
75409+ uid_set = NULL;
75410+ uid_used = 0;
75411+ spin_unlock(&gr_uid_lock);
75412+		kfree(tmpset);
75414+ }
75415+
75416+ return;
75417+}
75418+
75419+int
75420+gr_find_uid(const uid_t uid)
75421+{
75422+ struct crash_uid *tmp = uid_set;
75423+ uid_t buid;
75424+ int low = 0, high = uid_used - 1, mid;
75425+
75426+ while (high >= low) {
75427+ mid = (low + high) >> 1;
75428+ buid = tmp[mid].uid;
75429+ if (buid == uid)
75430+ return mid;
75431+ if (buid > uid)
75432+ high = mid - 1;
75433+ if (buid < uid)
75434+ low = mid + 1;
75435+ }
75436+
75437+ return -1;
75438+}
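
gr_find_uid() is a textbook binary search over the uid_set array, which gr_insertsort() keeps sorted; note that high starts at uid_used - 1 and so must live in a signed type. The same search in standalone userspace form:

	/* returns the index of uid in the sorted array, or -1 if absent */
	static int find_uid(const unsigned int *set, int count, unsigned int uid)
	{
		int low = 0, high = count - 1;

		while (high >= low) {
			int mid = (low + high) / 2;

			if (set[mid] == uid)
				return mid;
			if (set[mid] > uid)
				high = mid - 1;
			else
				low = mid + 1;
		}
		return -1;
	}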
75439+
75440+static __inline__ void
75441+gr_insertsort(void)
75442+{
75443+ unsigned short i, j;
75444+ struct crash_uid index;
75445+
75446+ for (i = 1; i < uid_used; i++) {
75447+ index = uid_set[i];
75448+ j = i;
75449+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75450+ uid_set[j] = uid_set[j - 1];
75451+ j--;
75452+ }
75453+ uid_set[j] = index;
75454+ }
75455+
75456+ return;
75457+}
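
gr_insertsort() is a plain insertion sort, a reasonable fit here: gr_insert_uid() below only ever appends one element to an already-sorted array, which is insertion sort's O(n) best case. Standalone sketch:

	static void insert_sort(unsigned int *a, unsigned int n)
	{
		unsigned int i;

		for (i = 1; i < n; i++) {
			unsigned int v = a[i], j = i;

			while (j > 0 && a[j - 1] > v) {
				a[j] = a[j - 1];	/* shift larger entries right */
				j--;
			}
			a[j] = v;
		}
	}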
75458+
75459+static __inline__ void
75460+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75461+{
75462+ int loc;
75463+ uid_t uid = GR_GLOBAL_UID(kuid);
75464+
75465+ if (uid_used == GR_UIDTABLE_MAX)
75466+ return;
75467+
75468+ loc = gr_find_uid(uid);
75469+
75470+ if (loc >= 0) {
75471+ uid_set[loc].expires = expires;
75472+ return;
75473+ }
75474+
75475+ uid_set[uid_used].uid = uid;
75476+ uid_set[uid_used].expires = expires;
75477+ uid_used++;
75478+
75479+ gr_insertsort();
75480+
75481+ return;
75482+}
75483+
75484+void
75485+gr_remove_uid(const unsigned short loc)
75486+{
75487+ unsigned short i;
75488+
75489+ for (i = loc + 1; i < uid_used; i++)
75490+ uid_set[i - 1] = uid_set[i];
75491+
75492+ uid_used--;
75493+
75494+ return;
75495+}
75496+
75497+int
75498+gr_check_crash_uid(const kuid_t kuid)
75499+{
75500+ int loc;
75501+ int ret = 0;
75502+ uid_t uid;
75503+
75504+ if (unlikely(!gr_acl_is_enabled()))
75505+ return 0;
75506+
75507+ uid = GR_GLOBAL_UID(kuid);
75508+
75509+ spin_lock(&gr_uid_lock);
75510+ loc = gr_find_uid(uid);
75511+
75512+ if (loc < 0)
75513+ goto out_unlock;
75514+
75515+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75516+ gr_remove_uid(loc);
75517+ else
75518+ ret = 1;
75519+
75520+out_unlock:
75521+ spin_unlock(&gr_uid_lock);
75522+ return ret;
75523+}
75524+
75525+static __inline__ int
75526+proc_is_setxid(const struct cred *cred)
75527+{
75528+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75529+ !uid_eq(cred->uid, cred->fsuid))
75530+ return 1;
75531+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75532+ !gid_eq(cred->gid, cred->fsgid))
75533+ return 1;
75534+
75535+ return 0;
75536+}
75537+
75538+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75539+
75540+void
75541+gr_handle_crash(struct task_struct *task, const int sig)
75542+{
75543+ struct acl_subject_label *curr;
75544+ struct task_struct *tsk, *tsk2;
75545+ const struct cred *cred;
75546+ const struct cred *cred2;
75547+
75548+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75549+ return;
75550+
75551+ if (unlikely(!gr_acl_is_enabled()))
75552+ return;
75553+
75554+ curr = task->acl;
75555+
75556+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75557+ return;
75558+
75559+ if (time_before_eq(curr->expires, get_seconds())) {
75560+ curr->expires = 0;
75561+ curr->crashes = 0;
75562+ }
75563+
75564+ curr->crashes++;
75565+
75566+ if (!curr->expires)
75567+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75568+
75569+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75570+ time_after(curr->expires, get_seconds())) {
75571+ rcu_read_lock();
75572+ cred = __task_cred(task);
75573+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75574+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75575+ spin_lock(&gr_uid_lock);
75576+ gr_insert_uid(cred->uid, curr->expires);
75577+ spin_unlock(&gr_uid_lock);
75578+ curr->expires = 0;
75579+ curr->crashes = 0;
75580+ read_lock(&tasklist_lock);
75581+ do_each_thread(tsk2, tsk) {
75582+ cred2 = __task_cred(tsk);
75583+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75584+ gr_fake_force_sig(SIGKILL, tsk);
75585+ } while_each_thread(tsk2, tsk);
75586+ read_unlock(&tasklist_lock);
75587+ } else {
75588+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75589+ read_lock(&tasklist_lock);
75590+ read_lock(&grsec_exec_file_lock);
75591+ do_each_thread(tsk2, tsk) {
75592+ if (likely(tsk != task)) {
75593+ // if this thread has the same subject as the one that triggered
75594+ // RES_CRASH and it's the same binary, kill it
75595+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75596+ gr_fake_force_sig(SIGKILL, tsk);
75597+ }
75598+ } while_each_thread(tsk2, tsk);
75599+ read_unlock(&grsec_exec_file_lock);
75600+ read_unlock(&tasklist_lock);
75601+ }
75602+ rcu_read_unlock();
75603+ }
75604+
75605+ return;
75606+}
75607+
75608+int
75609+gr_check_crash_exec(const struct file *filp)
75610+{
75611+ struct acl_subject_label *curr;
75612+ struct dentry *dentry;
75613+
75614+ if (unlikely(!gr_acl_is_enabled()))
75615+ return 0;
75616+
75617+ read_lock(&gr_inode_lock);
75618+ dentry = filp->f_path.dentry;
75619+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
75620+ current->role);
75621+ read_unlock(&gr_inode_lock);
75622+
75623+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75624+ (!curr->crashes && !curr->expires))
75625+ return 0;
75626+
75627+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75628+ time_after(curr->expires, get_seconds()))
75629+ return 1;
75630+ else if (time_before_eq(curr->expires, get_seconds())) {
75631+ curr->crashes = 0;
75632+ curr->expires = 0;
75633+ }
75634+
75635+ return 0;
75636+}
75637+
75638+void
75639+gr_handle_alertkill(struct task_struct *task)
75640+{
75641+ struct acl_subject_label *curracl;
75642+ __u32 curr_ip;
75643+ struct task_struct *p, *p2;
75644+
75645+ if (unlikely(!gr_acl_is_enabled()))
75646+ return;
75647+
75648+ curracl = task->acl;
75649+ curr_ip = task->signal->curr_ip;
75650+
75651+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75652+ read_lock(&tasklist_lock);
75653+ do_each_thread(p2, p) {
75654+ if (p->signal->curr_ip == curr_ip)
75655+ gr_fake_force_sig(SIGKILL, p);
75656+ } while_each_thread(p2, p);
75657+ read_unlock(&tasklist_lock);
75658+ } else if (curracl->mode & GR_KILLPROC)
75659+ gr_fake_force_sig(SIGKILL, task);
75660+
75661+ return;
75662+}
75663diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75664new file mode 100644
75665index 0000000..6b0c9cc
75666--- /dev/null
75667+++ b/grsecurity/gracl_shm.c
75668@@ -0,0 +1,40 @@
75669+#include <linux/kernel.h>
75670+#include <linux/mm.h>
75671+#include <linux/sched.h>
75672+#include <linux/file.h>
75673+#include <linux/ipc.h>
75674+#include <linux/gracl.h>
75675+#include <linux/grsecurity.h>
75676+#include <linux/grinternal.h>
75677+
75678+int
75679+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
75680+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
75681+{
75682+ struct task_struct *task;
75683+
75684+ if (!gr_acl_is_enabled())
75685+ return 1;
75686+
75687+ rcu_read_lock();
75688+ read_lock(&tasklist_lock);
75689+
75690+ task = find_task_by_vpid(shm_cprid);
75691+
75692+ if (unlikely(!task))
75693+ task = find_task_by_vpid(shm_lapid);
75694+
75695+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
75696+ (task_pid_nr(task) == shm_lapid)) &&
75697+ (task->acl->mode & GR_PROTSHM) &&
75698+ (task->acl != current->acl))) {
75699+ read_unlock(&tasklist_lock);
75700+ rcu_read_unlock();
75701+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
75702+ return 0;
75703+ }
75704+ read_unlock(&tasklist_lock);
75705+ rcu_read_unlock();
75706+
75707+ return 1;
75708+}
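A note on the calling convention of the hook above, since it is easy to invert: gr_handle_shmat() returns 1 to permit the attach and 0 to deny it. A caller on the shmat() path would therefore gate the mapping roughly as sketched below; this is a minimal illustration, and the shp field names stand in for the grsecurity-extended shmid_kernel rather than being quoted from this patch.

	/* illustrative call site in ipc/shm.c-style code (not part of the patch) */
	if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid,
			     shp->shm_createtime, shp->shm_perm.cuid, shmid)) {
		err = -EACCES;
		goto out_unlock;
	}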
75709diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
75710new file mode 100644
75711index 0000000..bc0be01
75712--- /dev/null
75713+++ b/grsecurity/grsec_chdir.c
75714@@ -0,0 +1,19 @@
75715+#include <linux/kernel.h>
75716+#include <linux/sched.h>
75717+#include <linux/fs.h>
75718+#include <linux/file.h>
75719+#include <linux/grsecurity.h>
75720+#include <linux/grinternal.h>
75721+
75722+void
75723+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
75724+{
75725+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75726+ if ((grsec_enable_chdir && grsec_enable_group &&
75727+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
75728+ !grsec_enable_group)) {
75729+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
75730+ }
75731+#endif
75732+ return;
75733+}
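The audit predicate above, written as a disjunction of two conjunctions, reduces to a simpler form; the same pattern recurs later in this patch in gr_handle_exec_args(). Shown only for clarity, an equivalent form would be:

	/* (A && G && in_group) || (A && !G)  ==  A && (!G || in_group) */
	if (grsec_enable_chdir &&
	    (!grsec_enable_group || in_group_p(grsec_audit_gid)))
		gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);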
75734diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
75735new file mode 100644
75736index 0000000..114ea4f
75737--- /dev/null
75738+++ b/grsecurity/grsec_chroot.c
75739@@ -0,0 +1,467 @@
75740+#include <linux/kernel.h>
75741+#include <linux/module.h>
75742+#include <linux/sched.h>
75743+#include <linux/file.h>
75744+#include <linux/fs.h>
75745+#include <linux/mount.h>
75746+#include <linux/types.h>
75747+#include "../fs/mount.h"
75748+#include <linux/grsecurity.h>
75749+#include <linux/grinternal.h>
75750+
75751+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75752+int gr_init_ran;
75753+#endif
75754+
75755+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75756+{
75757+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75758+ struct dentry *tmpd = dentry;
75759+
75760+ read_seqlock_excl(&mount_lock);
75761+ write_seqlock(&rename_lock);
75762+
75763+ while (tmpd != mnt->mnt_root) {
75764+ atomic_inc(&tmpd->chroot_refcnt);
75765+ tmpd = tmpd->d_parent;
75766+ }
75767+ atomic_inc(&tmpd->chroot_refcnt);
75768+
75769+ write_sequnlock(&rename_lock);
75770+ read_sequnlock_excl(&mount_lock);
75771+#endif
75772+}
75773+
75774+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75775+{
75776+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75777+ struct dentry *tmpd = dentry;
75778+
75779+ read_seqlock_excl(&mount_lock);
75780+ write_seqlock(&rename_lock);
75781+
75782+ while (tmpd != mnt->mnt_root) {
75783+ atomic_dec(&tmpd->chroot_refcnt);
75784+ tmpd = tmpd->d_parent;
75785+ }
75786+ atomic_dec(&tmpd->chroot_refcnt);
75787+
75788+ write_sequnlock(&rename_lock);
75789+ read_sequnlock_excl(&mount_lock);
75790+#endif
75791+}
75792+
75793+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75794+static struct dentry *get_closest_chroot(struct dentry *dentry)
75795+{
75796+ write_seqlock(&rename_lock);
75797+ do {
75798+ if (atomic_read(&dentry->chroot_refcnt)) {
75799+ write_sequnlock(&rename_lock);
75800+ return dentry;
75801+ }
75802+ dentry = dentry->d_parent;
75803+ } while (!IS_ROOT(dentry));
75804+ write_sequnlock(&rename_lock);
75805+ return NULL;
75806+}
75807+#endif
75808+
75809+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
75810+ struct dentry *newdentry, struct vfsmount *newmnt)
75811+{
75812+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75813+ struct dentry *chroot;
75814+
75815+ if (unlikely(!grsec_enable_chroot_rename))
75816+ return 0;
75817+
75818+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
75819+ return 0;
75820+
75821+ chroot = get_closest_chroot(olddentry);
75822+
75823+ if (chroot == NULL)
75824+ return 0;
75825+
75826+ if (is_subdir(newdentry, chroot))
75827+ return 0;
75828+
75829+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
75830+
75831+ return 1;
75832+#else
75833+ return 0;
75834+#endif
75835+}
75836+
75837+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
75838+{
75839+#ifdef CONFIG_GRKERNSEC
75840+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
75841+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
75842+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75843+ && gr_init_ran
75844+#endif
75845+ )
75846+ task->gr_is_chrooted = 1;
75847+ else {
75848+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75849+ if (task_pid_nr(task) == 1 && !gr_init_ran)
75850+ gr_init_ran = 1;
75851+#endif
75852+ task->gr_is_chrooted = 0;
75853+ }
75854+
75855+ task->gr_chroot_dentry = path->dentry;
75856+#endif
75857+ return;
75858+}
75859+
75860+void gr_clear_chroot_entries(struct task_struct *task)
75861+{
75862+#ifdef CONFIG_GRKERNSEC
75863+ task->gr_is_chrooted = 0;
75864+ task->gr_chroot_dentry = NULL;
75865+#endif
75866+ return;
75867+}
75868+
75869+int
75870+gr_handle_chroot_unix(const pid_t pid)
75871+{
75872+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
75873+ struct task_struct *p;
75874+
75875+ if (unlikely(!grsec_enable_chroot_unix))
75876+ return 1;
75877+
75878+ if (likely(!proc_is_chrooted(current)))
75879+ return 1;
75880+
75881+ rcu_read_lock();
75882+ read_lock(&tasklist_lock);
75883+ p = find_task_by_vpid_unrestricted(pid);
75884+ if (unlikely(p && !have_same_root(current, p))) {
75885+ read_unlock(&tasklist_lock);
75886+ rcu_read_unlock();
75887+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
75888+ return 0;
75889+ }
75890+ read_unlock(&tasklist_lock);
75891+ rcu_read_unlock();
75892+#endif
75893+ return 1;
75894+}
75895+
75896+int
75897+gr_handle_chroot_nice(void)
75898+{
75899+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75900+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
75901+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
75902+ return -EPERM;
75903+ }
75904+#endif
75905+ return 0;
75906+}
75907+
75908+int
75909+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
75910+{
75911+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75912+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
75913+ && proc_is_chrooted(current)) {
75914+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
75915+ return -EACCES;
75916+ }
75917+#endif
75918+ return 0;
75919+}
75920+
75921+int
75922+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
75923+{
75924+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75925+ struct task_struct *p;
75926+ int ret = 0;
75927+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
75928+ return ret;
75929+
75930+ read_lock(&tasklist_lock);
75931+ do_each_pid_task(pid, type, p) {
75932+ if (!have_same_root(current, p)) {
75933+ ret = 1;
75934+ goto out;
75935+ }
75936+ } while_each_pid_task(pid, type, p);
75937+out:
75938+ read_unlock(&tasklist_lock);
75939+ return ret;
75940+#endif
75941+ return 0;
75942+}
75943+
75944+int
75945+gr_pid_is_chrooted(struct task_struct *p)
75946+{
75947+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75948+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
75949+ return 0;
75950+
75951+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
75952+ !have_same_root(current, p)) {
75953+ return 1;
75954+ }
75955+#endif
75956+ return 0;
75957+}
75958+
75959+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
75960+
75961+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
75962+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
75963+{
75964+ struct path path, currentroot;
75965+ int ret = 0;
75966+
75967+ path.dentry = (struct dentry *)u_dentry;
75968+ path.mnt = (struct vfsmount *)u_mnt;
75969+ get_fs_root(current->fs, &currentroot);
75970+ if (path_is_under(&path, &currentroot))
75971+ ret = 1;
75972+ path_put(&currentroot);
75973+
75974+ return ret;
75975+}
75976+#endif
75977+
75978+int
75979+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
75980+{
75981+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
75982+ if (!grsec_enable_chroot_fchdir)
75983+ return 1;
75984+
75985+ if (!proc_is_chrooted(current))
75986+ return 1;
75987+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
75988+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
75989+ return 0;
75990+ }
75991+#endif
75992+ return 1;
75993+}
75994+
75995+int
75996+gr_chroot_fhandle(void)
75997+{
75998+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
75999+ if (!grsec_enable_chroot_fchdir)
76000+ return 1;
76001+
76002+ if (!proc_is_chrooted(current))
76003+ return 1;
76004+ else {
76005+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76006+ return 0;
76007+ }
76008+#endif
76009+ return 1;
76010+}
76011+
76012+int
76013+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76014+ const u64 shm_createtime)
76015+{
76016+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76017+ struct task_struct *p;
76018+
76019+ if (unlikely(!grsec_enable_chroot_shmat))
76020+ return 1;
76021+
76022+ if (likely(!proc_is_chrooted(current)))
76023+ return 1;
76024+
76025+ rcu_read_lock();
76026+ read_lock(&tasklist_lock);
76027+
76028+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76029+ if (time_before_eq64(p->start_time, shm_createtime)) {
76030+ if (have_same_root(current, p)) {
76031+ goto allow;
76032+ } else {
76033+ read_unlock(&tasklist_lock);
76034+ rcu_read_unlock();
76035+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76036+ return 0;
76037+ }
76038+ }
76039+ /* creator exited, pid reuse, fall through to next check */
76040+ }
76041+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76042+ if (unlikely(!have_same_root(current, p))) {
76043+ read_unlock(&tasklist_lock);
76044+ rcu_read_unlock();
76045+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76046+ return 0;
76047+ }
76048+ }
76049+
76050+allow:
76051+ read_unlock(&tasklist_lock);
76052+ rcu_read_unlock();
76053+#endif
76054+ return 1;
76055+}
76056+
76057+void
76058+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76059+{
76060+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76061+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76062+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76063+#endif
76064+ return;
76065+}
76066+
76067+int
76068+gr_handle_chroot_mknod(const struct dentry *dentry,
76069+ const struct vfsmount *mnt, const int mode)
76070+{
76071+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76072+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76073+ proc_is_chrooted(current)) {
76074+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76075+ return -EPERM;
76076+ }
76077+#endif
76078+ return 0;
76079+}
76080+
76081+int
76082+gr_handle_chroot_mount(const struct dentry *dentry,
76083+ const struct vfsmount *mnt, const char *dev_name)
76084+{
76085+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76086+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76087+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76088+ return -EPERM;
76089+ }
76090+#endif
76091+ return 0;
76092+}
76093+
76094+int
76095+gr_handle_chroot_pivot(void)
76096+{
76097+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76098+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76099+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76100+ return -EPERM;
76101+ }
76102+#endif
76103+ return 0;
76104+}
76105+
76106+int
76107+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76108+{
76109+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76110+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76111+ !gr_is_outside_chroot(dentry, mnt)) {
76112+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76113+ return -EPERM;
76114+ }
76115+#endif
76116+ return 0;
76117+}
76118+
76119+extern const char *captab_log[];
76120+extern int captab_log_entries;
76121+
76122+int
76123+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76124+{
76125+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76126+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76127+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76128+ if (cap_raised(chroot_caps, cap)) {
76129+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76130+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76131+ }
76132+ return 0;
76133+ }
76134+ }
76135+#endif
76136+ return 1;
76137+}
76138+
76139+int
76140+gr_chroot_is_capable(const int cap)
76141+{
76142+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76143+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76144+#endif
76145+ return 1;
76146+}
76147+
76148+int
76149+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76150+{
76151+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76152+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76153+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76154+ if (cap_raised(chroot_caps, cap)) {
76155+ return 0;
76156+ }
76157+ }
76158+#endif
76159+ return 1;
76160+}
76161+
76162+int
76163+gr_chroot_is_capable_nolog(const int cap)
76164+{
76165+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76166+ return gr_task_chroot_is_capable_nolog(current, cap);
76167+#endif
76168+ return 1;
76169+}
76170+
76171+int
76172+gr_handle_chroot_sysctl(const int op)
76173+{
76174+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76175+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76176+ proc_is_chrooted(current))
76177+ return -EACCES;
76178+#endif
76179+ return 0;
76180+}
76181+
76182+void
76183+gr_handle_chroot_chdir(const struct path *path)
76184+{
76185+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76186+ if (grsec_enable_chroot_chdir)
76187+ set_fs_pwd(current->fs, path);
76188+#endif
76189+ return;
76190+}
76191+
76192+int
76193+gr_handle_chroot_chmod(const struct dentry *dentry,
76194+ const struct vfsmount *mnt, const int mode)
76195+{
76196+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76197+ /* allow chmod +s on directories, but not files */
76198+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76199+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76200+ proc_is_chrooted(current)) {
76201+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76202+ return -EPERM;
76203+ }
76204+#endif
76205+ return 0;
76206+}
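For context on gr_chroot_fchdir() above: it closes the classic fchdir()-based chroot breakout, in which a directory fd opened before a nested chroot() is used to step above the new root. A userspace sketch of the technique it denies follows; it is runnable only as root inside a chroot, succeeds on a stock kernel, and under CONFIG_GRKERNSEC_CHROOT_FCHDIR the fchdir() call is the step that fails.

	#include <fcntl.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/", O_RDONLY);	/* fd pinning the jail root */

		mkdir("sub", 0700);
		chroot("sub");			/* push the root below the held fd */
		fchdir(fd);			/* cwd now lies outside the new root */
		for (int i = 0; i < 64; i++)
			chdir("..");		/* walk up to the real filesystem root */
		chroot(".");			/* escape complete on a stock kernel */
		return execl("/bin/sh", "sh", (char *)NULL);
	}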
76207diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76208new file mode 100644
76209index 0000000..946f750
76210--- /dev/null
76211+++ b/grsecurity/grsec_disabled.c
76212@@ -0,0 +1,445 @@
76213+#include <linux/kernel.h>
76214+#include <linux/module.h>
76215+#include <linux/sched.h>
76216+#include <linux/file.h>
76217+#include <linux/fs.h>
76218+#include <linux/kdev_t.h>
76219+#include <linux/net.h>
76220+#include <linux/in.h>
76221+#include <linux/ip.h>
76222+#include <linux/skbuff.h>
76223+#include <linux/sysctl.h>
76224+
76225+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76226+void
76227+pax_set_initial_flags(struct linux_binprm *bprm)
76228+{
76229+ return;
76230+}
76231+#endif
76232+
76233+#ifdef CONFIG_SYSCTL
76234+__u32
76235+gr_handle_sysctl(const struct ctl_table * table, const int op)
76236+{
76237+ return 0;
76238+}
76239+#endif
76240+
76241+#ifdef CONFIG_TASKSTATS
76242+int gr_is_taskstats_denied(int pid)
76243+{
76244+ return 0;
76245+}
76246+#endif
76247+
76248+int
76249+gr_acl_is_enabled(void)
76250+{
76251+ return 0;
76252+}
76253+
76254+int
76255+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76256+{
76257+ return 0;
76258+}
76259+
76260+void
76261+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76262+{
76263+ return;
76264+}
76265+
76266+int
76267+gr_handle_rawio(const struct inode *inode)
76268+{
76269+ return 0;
76270+}
76271+
76272+void
76273+gr_acl_handle_psacct(struct task_struct *task, const long code)
76274+{
76275+ return;
76276+}
76277+
76278+int
76279+gr_handle_ptrace(struct task_struct *task, const long request)
76280+{
76281+ return 0;
76282+}
76283+
76284+int
76285+gr_handle_proc_ptrace(struct task_struct *task)
76286+{
76287+ return 0;
76288+}
76289+
76290+int
76291+gr_set_acls(const int type)
76292+{
76293+ return 0;
76294+}
76295+
76296+int
76297+gr_check_hidden_task(const struct task_struct *tsk)
76298+{
76299+ return 0;
76300+}
76301+
76302+int
76303+gr_check_protected_task(const struct task_struct *task)
76304+{
76305+ return 0;
76306+}
76307+
76308+int
76309+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76310+{
76311+ return 0;
76312+}
76313+
76314+void
76315+gr_copy_label(struct task_struct *tsk)
76316+{
76317+ return;
76318+}
76319+
76320+void
76321+gr_set_pax_flags(struct task_struct *task)
76322+{
76323+ return;
76324+}
76325+
76326+int
76327+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76328+ const int unsafe_share)
76329+{
76330+ return 0;
76331+}
76332+
76333+void
76334+gr_handle_delete(const u64 ino, const dev_t dev)
76335+{
76336+ return;
76337+}
76338+
76339+void
76340+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76341+{
76342+ return;
76343+}
76344+
76345+void
76346+gr_handle_crash(struct task_struct *task, const int sig)
76347+{
76348+ return;
76349+}
76350+
76351+int
76352+gr_check_crash_exec(const struct file *filp)
76353+{
76354+ return 0;
76355+}
76356+
76357+int
76358+gr_check_crash_uid(const kuid_t uid)
76359+{
76360+ return 0;
76361+}
76362+
76363+void
76364+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76365+ struct dentry *old_dentry,
76366+ struct dentry *new_dentry,
76367+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76368+{
76369+ return;
76370+}
76371+
76372+int
76373+gr_search_socket(const int family, const int type, const int protocol)
76374+{
76375+ return 1;
76376+}
76377+
76378+int
76379+gr_search_connectbind(const int mode, const struct socket *sock,
76380+ const struct sockaddr_in *addr)
76381+{
76382+ return 0;
76383+}
76384+
76385+void
76386+gr_handle_alertkill(struct task_struct *task)
76387+{
76388+ return;
76389+}
76390+
76391+__u32
76392+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76393+{
76394+ return 1;
76395+}
76396+
76397+__u32
76398+gr_acl_handle_hidden_file(const struct dentry * dentry,
76399+ const struct vfsmount * mnt)
76400+{
76401+ return 1;
76402+}
76403+
76404+__u32
76405+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76406+ int acc_mode)
76407+{
76408+ return 1;
76409+}
76410+
76411+__u32
76412+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76413+{
76414+ return 1;
76415+}
76416+
76417+__u32
76418+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76419+{
76420+ return 1;
76421+}
76422+
76423+int
76424+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76425+ unsigned int *vm_flags)
76426+{
76427+ return 1;
76428+}
76429+
76430+__u32
76431+gr_acl_handle_truncate(const struct dentry * dentry,
76432+ const struct vfsmount * mnt)
76433+{
76434+ return 1;
76435+}
76436+
76437+__u32
76438+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76439+{
76440+ return 1;
76441+}
76442+
76443+__u32
76444+gr_acl_handle_access(const struct dentry * dentry,
76445+ const struct vfsmount * mnt, const int fmode)
76446+{
76447+ return 1;
76448+}
76449+
76450+__u32
76451+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76452+ umode_t *mode)
76453+{
76454+ return 1;
76455+}
76456+
76457+__u32
76458+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76459+{
76460+ return 1;
76461+}
76462+
76463+__u32
76464+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76465+{
76466+ return 1;
76467+}
76468+
76469+__u32
76470+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76471+{
76472+ return 1;
76473+}
76474+
76475+void
76476+grsecurity_init(void)
76477+{
76478+ return;
76479+}
76480+
76481+umode_t gr_acl_umask(void)
76482+{
76483+ return 0;
76484+}
76485+
76486+__u32
76487+gr_acl_handle_mknod(const struct dentry * new_dentry,
76488+ const struct dentry * parent_dentry,
76489+ const struct vfsmount * parent_mnt,
76490+ const int mode)
76491+{
76492+ return 1;
76493+}
76494+
76495+__u32
76496+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76497+ const struct dentry * parent_dentry,
76498+ const struct vfsmount * parent_mnt)
76499+{
76500+ return 1;
76501+}
76502+
76503+__u32
76504+gr_acl_handle_symlink(const struct dentry * new_dentry,
76505+ const struct dentry * parent_dentry,
76506+ const struct vfsmount * parent_mnt, const struct filename *from)
76507+{
76508+ return 1;
76509+}
76510+
76511+__u32
76512+gr_acl_handle_link(const struct dentry * new_dentry,
76513+ const struct dentry * parent_dentry,
76514+ const struct vfsmount * parent_mnt,
76515+ const struct dentry * old_dentry,
76516+ const struct vfsmount * old_mnt, const struct filename *to)
76517+{
76518+ return 1;
76519+}
76520+
76521+int
76522+gr_acl_handle_rename(const struct dentry *new_dentry,
76523+ const struct dentry *parent_dentry,
76524+ const struct vfsmount *parent_mnt,
76525+ const struct dentry *old_dentry,
76526+ const struct inode *old_parent_inode,
76527+ const struct vfsmount *old_mnt, const struct filename *newname,
76528+ unsigned int flags)
76529+{
76530+ return 0;
76531+}
76532+
76533+int
76534+gr_acl_handle_filldir(const struct file *file, const char *name,
76535+ const int namelen, const u64 ino)
76536+{
76537+ return 1;
76538+}
76539+
76540+int
76541+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76542+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76543+{
76544+ return 1;
76545+}
76546+
76547+int
76548+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76549+{
76550+ return 0;
76551+}
76552+
76553+int
76554+gr_search_accept(const struct socket *sock)
76555+{
76556+ return 0;
76557+}
76558+
76559+int
76560+gr_search_listen(const struct socket *sock)
76561+{
76562+ return 0;
76563+}
76564+
76565+int
76566+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76567+{
76568+ return 0;
76569+}
76570+
76571+__u32
76572+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76573+{
76574+ return 1;
76575+}
76576+
76577+__u32
76578+gr_acl_handle_creat(const struct dentry * dentry,
76579+ const struct dentry * p_dentry,
76580+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76581+ const int imode)
76582+{
76583+ return 1;
76584+}
76585+
76586+void
76587+gr_acl_handle_exit(void)
76588+{
76589+ return;
76590+}
76591+
76592+int
76593+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76594+{
76595+ return 1;
76596+}
76597+
76598+void
76599+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76600+{
76601+ return;
76602+}
76603+
76604+int
76605+gr_acl_handle_procpidmem(const struct task_struct *task)
76606+{
76607+ return 0;
76608+}
76609+
76610+int
76611+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76612+{
76613+ return 0;
76614+}
76615+
76616+int
76617+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76618+{
76619+ return 0;
76620+}
76621+
76622+int
76623+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76624+{
76625+ return 0;
76626+}
76627+
76628+int
76629+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76630+{
76631+ return 0;
76632+}
76633+
76634+int gr_acl_enable_at_secure(void)
76635+{
76636+ return 0;
76637+}
76638+
76639+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76640+{
76641+ return dentry->d_sb->s_dev;
76642+}
76643+
76644+u64 gr_get_ino_from_dentry(struct dentry *dentry)
76645+{
76646+ return dentry->d_inode->i_ino;
76647+}
76648+
76649+void gr_put_exec_file(struct task_struct *task)
76650+{
76651+ return;
76652+}
76653+
76654+#ifdef CONFIG_SECURITY
76655+EXPORT_SYMBOL_GPL(gr_check_user_change);
76656+EXPORT_SYMBOL_GPL(gr_check_group_change);
76657+#endif
76658diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76659new file mode 100644
76660index 0000000..fb7531e
76661--- /dev/null
76662+++ b/grsecurity/grsec_exec.c
76663@@ -0,0 +1,189 @@
76664+#include <linux/kernel.h>
76665+#include <linux/sched.h>
76666+#include <linux/file.h>
76667+#include <linux/binfmts.h>
76668+#include <linux/fs.h>
76669+#include <linux/types.h>
76670+#include <linux/grdefs.h>
76671+#include <linux/grsecurity.h>
76672+#include <linux/grinternal.h>
76673+#include <linux/capability.h>
76674+#include <linux/module.h>
76675+#include <linux/compat.h>
76676+
76677+#include <asm/uaccess.h>
76678+
76679+#ifdef CONFIG_GRKERNSEC_EXECLOG
76680+static char gr_exec_arg_buf[132];
76681+static DEFINE_MUTEX(gr_exec_arg_mutex);
76682+#endif
76683+
76684+struct user_arg_ptr {
76685+#ifdef CONFIG_COMPAT
76686+ bool is_compat;
76687+#endif
76688+ union {
76689+ const char __user *const __user *native;
76690+#ifdef CONFIG_COMPAT
76691+ const compat_uptr_t __user *compat;
76692+#endif
76693+ } ptr;
76694+};
76695+
76696+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76697+
76698+void
76699+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76700+{
76701+#ifdef CONFIG_GRKERNSEC_EXECLOG
76702+ char *grarg = gr_exec_arg_buf;
76703+ unsigned int i, x, execlen = 0;
76704+ char c;
76705+
76706+ if (!((grsec_enable_execlog && grsec_enable_group &&
76707+ in_group_p(grsec_audit_gid))
76708+ || (grsec_enable_execlog && !grsec_enable_group)))
76709+ return;
76710+
76711+ mutex_lock(&gr_exec_arg_mutex);
76712+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76713+
76714+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76715+ const char __user *p;
76716+ unsigned int len;
76717+
76718+ p = get_user_arg_ptr(argv, i);
76719+ if (IS_ERR(p))
76720+ goto log;
76721+
76722+ len = strnlen_user(p, 128 - execlen);
76723+ if (len > 128 - execlen)
76724+ len = 128 - execlen;
76725+ else if (len > 0)
76726+ len--;
76727+ if (copy_from_user(grarg + execlen, p, len))
76728+ goto log;
76729+
76730+ /* rewrite unprintable characters */
76731+ for (x = 0; x < len; x++) {
76732+ c = *(grarg + execlen + x);
76733+ if (c < 32 || c > 126)
76734+ *(grarg + execlen + x) = ' ';
76735+ }
76736+
76737+ execlen += len;
76738+ *(grarg + execlen) = ' ';
76739+ *(grarg + execlen + 1) = '\0';
76740+ execlen++;
76741+ }
76742+
76743+ log:
76744+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76745+ bprm->file->f_path.mnt, grarg);
76746+ mutex_unlock(&gr_exec_arg_mutex);
76747+#endif
76748+ return;
76749+}
76750+
76751+#ifdef CONFIG_GRKERNSEC
76752+extern int gr_acl_is_capable(const int cap);
76753+extern int gr_acl_is_capable_nolog(const int cap);
76754+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76755+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
76756+extern int gr_chroot_is_capable(const int cap);
76757+extern int gr_chroot_is_capable_nolog(const int cap);
76758+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76759+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
76760+#endif
76761+
76762+const char *captab_log[] = {
76763+ "CAP_CHOWN",
76764+ "CAP_DAC_OVERRIDE",
76765+ "CAP_DAC_READ_SEARCH",
76766+ "CAP_FOWNER",
76767+ "CAP_FSETID",
76768+ "CAP_KILL",
76769+ "CAP_SETGID",
76770+ "CAP_SETUID",
76771+ "CAP_SETPCAP",
76772+ "CAP_LINUX_IMMUTABLE",
76773+ "CAP_NET_BIND_SERVICE",
76774+ "CAP_NET_BROADCAST",
76775+ "CAP_NET_ADMIN",
76776+ "CAP_NET_RAW",
76777+ "CAP_IPC_LOCK",
76778+ "CAP_IPC_OWNER",
76779+ "CAP_SYS_MODULE",
76780+ "CAP_SYS_RAWIO",
76781+ "CAP_SYS_CHROOT",
76782+ "CAP_SYS_PTRACE",
76783+ "CAP_SYS_PACCT",
76784+ "CAP_SYS_ADMIN",
76785+ "CAP_SYS_BOOT",
76786+ "CAP_SYS_NICE",
76787+ "CAP_SYS_RESOURCE",
76788+ "CAP_SYS_TIME",
76789+ "CAP_SYS_TTY_CONFIG",
76790+ "CAP_MKNOD",
76791+ "CAP_LEASE",
76792+ "CAP_AUDIT_WRITE",
76793+ "CAP_AUDIT_CONTROL",
76794+ "CAP_SETFCAP",
76795+ "CAP_MAC_OVERRIDE",
76796+ "CAP_MAC_ADMIN",
76797+ "CAP_SYSLOG",
76798+ "CAP_WAKE_ALARM",
76799+ "CAP_BLOCK_SUSPEND",
76800+ "CAP_AUDIT_READ"
76801+};
76802+
76803+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
76804+
76805+int gr_is_capable(const int cap)
76806+{
76807+#ifdef CONFIG_GRKERNSEC
76808+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
76809+ return 1;
76810+ return 0;
76811+#else
76812+ return 1;
76813+#endif
76814+}
76815+
76816+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76817+{
76818+#ifdef CONFIG_GRKERNSEC
76819+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
76820+ return 1;
76821+ return 0;
76822+#else
76823+ return 1;
76824+#endif
76825+}
76826+
76827+int gr_is_capable_nolog(const int cap)
76828+{
76829+#ifdef CONFIG_GRKERNSEC
76830+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
76831+ return 1;
76832+ return 0;
76833+#else
76834+ return 1;
76835+#endif
76836+}
76837+
76838+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
76839+{
76840+#ifdef CONFIG_GRKERNSEC
76841+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
76842+ return 1;
76843+ return 0;
76844+#else
76845+ return 1;
76846+#endif
76847+}
76848+
76849+EXPORT_SYMBOL_GPL(gr_is_capable);
76850+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
76851+EXPORT_SYMBOL_GPL(gr_task_is_capable);
76852+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
76853diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
76854new file mode 100644
76855index 0000000..06cc6ea
76856--- /dev/null
76857+++ b/grsecurity/grsec_fifo.c
76858@@ -0,0 +1,24 @@
76859+#include <linux/kernel.h>
76860+#include <linux/sched.h>
76861+#include <linux/fs.h>
76862+#include <linux/file.h>
76863+#include <linux/grinternal.h>
76864+
76865+int
76866+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
76867+ const struct dentry *dir, const int flag, const int acc_mode)
76868+{
76869+#ifdef CONFIG_GRKERNSEC_FIFO
76870+ const struct cred *cred = current_cred();
76871+
76872+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
76873+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
76874+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
76875+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
76876+ if (!inode_permission(dentry->d_inode, acc_mode))
76877+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
76878+ return -EACCES;
76879+ }
76880+#endif
76881+ return 0;
76882+}
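The FIFO check above targets the classic spoofing setup in sticky world-writable directories: an attacker pre-creates a FIFO under a well-known name and waits for a more privileged process to open() it. A userspace sketch of the scenario, with path and uids illustrative:

	#include <fcntl.h>
	#include <sys/stat.h>

	static int victim_open(void)
	{
		/* an attacker (uid 1000) previously ran, in the sticky dir /tmp:
		 *     mkfifo("/tmp/victim.lock", 0666);
		 * when a process with a different fsuid later opens that name... */
		return open("/tmp/victim.lock", O_WRONLY);
		/* ...CONFIG_GRKERNSEC_FIFO fails the open with -EACCES, because
		 * the FIFO sits in a +t directory and is owned by neither the
		 * opener nor the directory owner */
	}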
76883diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
76884new file mode 100644
76885index 0000000..8ca18bf
76886--- /dev/null
76887+++ b/grsecurity/grsec_fork.c
76888@@ -0,0 +1,23 @@
76889+#include <linux/kernel.h>
76890+#include <linux/sched.h>
76891+#include <linux/grsecurity.h>
76892+#include <linux/grinternal.h>
76893+#include <linux/errno.h>
76894+
76895+void
76896+gr_log_forkfail(const int retval)
76897+{
76898+#ifdef CONFIG_GRKERNSEC_FORKFAIL
76899+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
76900+ switch (retval) {
76901+ case -EAGAIN:
76902+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
76903+ break;
76904+ case -ENOMEM:
76905+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
76906+ break;
76907+ }
76908+ }
76909+#endif
76910+ return;
76911+}
76912diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
76913new file mode 100644
76914index 0000000..4ed9e7d
76915--- /dev/null
76916+++ b/grsecurity/grsec_init.c
76917@@ -0,0 +1,290 @@
76918+#include <linux/kernel.h>
76919+#include <linux/sched.h>
76920+#include <linux/mm.h>
76921+#include <linux/gracl.h>
76922+#include <linux/slab.h>
76923+#include <linux/vmalloc.h>
76924+#include <linux/percpu.h>
76925+#include <linux/module.h>
76926+
76927+int grsec_enable_ptrace_readexec;
76928+int grsec_enable_setxid;
76929+int grsec_enable_symlinkown;
76930+kgid_t grsec_symlinkown_gid;
76931+int grsec_enable_brute;
76932+int grsec_enable_link;
76933+int grsec_enable_dmesg;
76934+int grsec_enable_harden_ptrace;
76935+int grsec_enable_harden_ipc;
76936+int grsec_enable_fifo;
76937+int grsec_enable_execlog;
76938+int grsec_enable_signal;
76939+int grsec_enable_forkfail;
76940+int grsec_enable_audit_ptrace;
76941+int grsec_enable_time;
76942+int grsec_enable_group;
76943+kgid_t grsec_audit_gid;
76944+int grsec_enable_chdir;
76945+int grsec_enable_mount;
76946+int grsec_enable_rofs;
76947+int grsec_deny_new_usb;
76948+int grsec_enable_chroot_findtask;
76949+int grsec_enable_chroot_mount;
76950+int grsec_enable_chroot_shmat;
76951+int grsec_enable_chroot_fchdir;
76952+int grsec_enable_chroot_double;
76953+int grsec_enable_chroot_pivot;
76954+int grsec_enable_chroot_chdir;
76955+int grsec_enable_chroot_chmod;
76956+int grsec_enable_chroot_mknod;
76957+int grsec_enable_chroot_nice;
76958+int grsec_enable_chroot_execlog;
76959+int grsec_enable_chroot_caps;
76960+int grsec_enable_chroot_rename;
76961+int grsec_enable_chroot_sysctl;
76962+int grsec_enable_chroot_unix;
76963+int grsec_enable_tpe;
76964+kgid_t grsec_tpe_gid;
76965+int grsec_enable_blackhole;
76966+#ifdef CONFIG_IPV6_MODULE
76967+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
76968+#endif
76969+int grsec_lastack_retries;
76970+int grsec_enable_tpe_all;
76971+int grsec_enable_tpe_invert;
76972+int grsec_enable_socket_all;
76973+kgid_t grsec_socket_all_gid;
76974+int grsec_enable_socket_client;
76975+kgid_t grsec_socket_client_gid;
76976+int grsec_enable_socket_server;
76977+kgid_t grsec_socket_server_gid;
76978+int grsec_resource_logging;
76979+int grsec_disable_privio;
76980+int grsec_enable_log_rwxmaps;
76981+int grsec_lock;
76982+
76983+DEFINE_SPINLOCK(grsec_alert_lock);
76984+unsigned long grsec_alert_wtime = 0;
76985+unsigned long grsec_alert_fyet = 0;
76986+
76987+DEFINE_SPINLOCK(grsec_audit_lock);
76988+
76989+DEFINE_RWLOCK(grsec_exec_file_lock);
76990+
76991+char *gr_shared_page[4];
76992+
76993+char *gr_alert_log_fmt;
76994+char *gr_audit_log_fmt;
76995+char *gr_alert_log_buf;
76996+char *gr_audit_log_buf;
76997+
76998+extern struct gr_arg *gr_usermode;
76999+extern unsigned char *gr_system_salt;
77000+extern unsigned char *gr_system_sum;
77001+
77002+void __init
77003+grsecurity_init(void)
77004+{
77005+ int j;
77006+ /* create the per-cpu shared pages */
77007+
77008+#ifdef CONFIG_X86
77009+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77010+#endif
77011+
77012+ for (j = 0; j < 4; j++) {
77013+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77014+ if (gr_shared_page[j] == NULL) {
77015+ panic("Unable to allocate grsecurity shared page");
77016+ return;
77017+ }
77018+ }
77019+
77020+ /* allocate log buffers */
77021+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77022+ if (!gr_alert_log_fmt) {
77023+ panic("Unable to allocate grsecurity alert log format buffer");
77024+ return;
77025+ }
77026+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77027+ if (!gr_audit_log_fmt) {
77028+ panic("Unable to allocate grsecurity audit log format buffer");
77029+ return;
77030+ }
77031+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77032+ if (!gr_alert_log_buf) {
77033+ panic("Unable to allocate grsecurity alert log buffer");
77034+ return;
77035+ }
77036+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77037+ if (!gr_audit_log_buf) {
77038+ panic("Unable to allocate grsecurity audit log buffer");
77039+ return;
77040+ }
77041+
77042+ /* allocate memory for authentication structure */
77043+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77044+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77045+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77046+
77047+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77048+ panic("Unable to allocate grsecurity authentication structure");
77049+ return;
77050+ }
77051+
77052+#ifdef CONFIG_GRKERNSEC_IO
77053+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77054+ grsec_disable_privio = 1;
77055+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77056+ grsec_disable_privio = 1;
77057+#else
77058+ grsec_disable_privio = 0;
77059+#endif
77060+#endif
77061+
77062+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77063+ /* for backward compatibility, tpe_invert always defaults to on if
77064+ enabled in the kernel
77065+ */
77066+ grsec_enable_tpe_invert = 1;
77067+#endif
77068+
77069+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77070+#ifndef CONFIG_GRKERNSEC_SYSCTL
77071+ grsec_lock = 1;
77072+#endif
77073+
77074+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77075+ grsec_enable_log_rwxmaps = 1;
77076+#endif
77077+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77078+ grsec_enable_group = 1;
77079+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77080+#endif
77081+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77082+ grsec_enable_ptrace_readexec = 1;
77083+#endif
77084+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77085+ grsec_enable_chdir = 1;
77086+#endif
77087+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77088+ grsec_enable_harden_ptrace = 1;
77089+#endif
77090+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77091+ grsec_enable_harden_ipc = 1;
77092+#endif
77093+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77094+ grsec_enable_mount = 1;
77095+#endif
77096+#ifdef CONFIG_GRKERNSEC_LINK
77097+ grsec_enable_link = 1;
77098+#endif
77099+#ifdef CONFIG_GRKERNSEC_BRUTE
77100+ grsec_enable_brute = 1;
77101+#endif
77102+#ifdef CONFIG_GRKERNSEC_DMESG
77103+ grsec_enable_dmesg = 1;
77104+#endif
77105+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77106+ grsec_enable_blackhole = 1;
77107+ grsec_lastack_retries = 4;
77108+#endif
77109+#ifdef CONFIG_GRKERNSEC_FIFO
77110+ grsec_enable_fifo = 1;
77111+#endif
77112+#ifdef CONFIG_GRKERNSEC_EXECLOG
77113+ grsec_enable_execlog = 1;
77114+#endif
77115+#ifdef CONFIG_GRKERNSEC_SETXID
77116+ grsec_enable_setxid = 1;
77117+#endif
77118+#ifdef CONFIG_GRKERNSEC_SIGNAL
77119+ grsec_enable_signal = 1;
77120+#endif
77121+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77122+ grsec_enable_forkfail = 1;
77123+#endif
77124+#ifdef CONFIG_GRKERNSEC_TIME
77125+ grsec_enable_time = 1;
77126+#endif
77127+#ifdef CONFIG_GRKERNSEC_RESLOG
77128+ grsec_resource_logging = 1;
77129+#endif
77130+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77131+ grsec_enable_chroot_findtask = 1;
77132+#endif
77133+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77134+ grsec_enable_chroot_unix = 1;
77135+#endif
77136+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77137+ grsec_enable_chroot_mount = 1;
77138+#endif
77139+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77140+ grsec_enable_chroot_fchdir = 1;
77141+#endif
77142+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77143+ grsec_enable_chroot_shmat = 1;
77144+#endif
77145+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77146+ grsec_enable_audit_ptrace = 1;
77147+#endif
77148+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77149+ grsec_enable_chroot_double = 1;
77150+#endif
77151+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77152+ grsec_enable_chroot_pivot = 1;
77153+#endif
77154+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77155+ grsec_enable_chroot_chdir = 1;
77156+#endif
77157+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77158+ grsec_enable_chroot_chmod = 1;
77159+#endif
77160+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77161+ grsec_enable_chroot_mknod = 1;
77162+#endif
77163+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77164+ grsec_enable_chroot_nice = 1;
77165+#endif
77166+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77167+ grsec_enable_chroot_execlog = 1;
77168+#endif
77169+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77170+ grsec_enable_chroot_caps = 1;
77171+#endif
77172+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77173+ grsec_enable_chroot_rename = 1;
77174+#endif
77175+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77176+ grsec_enable_chroot_sysctl = 1;
77177+#endif
77178+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77179+ grsec_enable_symlinkown = 1;
77180+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77181+#endif
77182+#ifdef CONFIG_GRKERNSEC_TPE
77183+ grsec_enable_tpe = 1;
77184+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77185+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77186+ grsec_enable_tpe_all = 1;
77187+#endif
77188+#endif
77189+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77190+ grsec_enable_socket_all = 1;
77191+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77192+#endif
77193+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77194+ grsec_enable_socket_client = 1;
77195+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77196+#endif
77197+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77198+ grsec_enable_socket_server = 1;
77199+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77200+#endif
77201+#endif
77202+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77203+ grsec_deny_new_usb = 1;
77204+#endif
77205+
77206+ return;
77207+}
77208diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77209new file mode 100644
77210index 0000000..1773300
77211--- /dev/null
77212+++ b/grsecurity/grsec_ipc.c
77213@@ -0,0 +1,48 @@
77214+#include <linux/kernel.h>
77215+#include <linux/mm.h>
77216+#include <linux/sched.h>
77217+#include <linux/file.h>
77218+#include <linux/ipc.h>
77219+#include <linux/ipc_namespace.h>
77220+#include <linux/grsecurity.h>
77221+#include <linux/grinternal.h>
77222+
77223+int
77224+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77225+{
77226+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77227+ int write;
77228+ int orig_granted_mode;
77229+ kuid_t euid;
77230+ kgid_t egid;
77231+
77232+ if (!grsec_enable_harden_ipc)
77233+ return 1;
77234+
77235+ euid = current_euid();
77236+ egid = current_egid();
77237+
77238+ write = requested_mode & 00002;
77239+ orig_granted_mode = ipcp->mode;
77240+
77241+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77242+ orig_granted_mode >>= 6;
77243+ else {
77244+ /* if likely wrong permissions, lock to user */
77245+ if (orig_granted_mode & 0007)
77246+ orig_granted_mode = 0;
77247+	/* otherwise do an egid-only check */
77248+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77249+ orig_granted_mode >>= 3;
77250+ /* otherwise, no access */
77251+ else
77252+ orig_granted_mode = 0;
77253+ }
77254+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77255+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77256+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77257+ return 0;
77258+ }
77259+#endif
77260+ return 1;
77261+}
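A worked example of the mode arithmetic above, since the double mask is subtle: suppose ipcp->mode is 0666 and the caller is neither the owner nor in the owning group. The stock ipcperms() check grants the access via the "other" bits, but the hardened recomputation locks orig_granted_mode to 0, so the grant is flagged. The values below are illustrative:

	int requested_mode    = 00004;	/* caller asked to read */
	int granted_mode      = 00004;	/* stock check said yes via "other" */
	int orig_granted_mode = 0;	/* 0666 has "other" bits set, so the
					   code above locked it to the owner */

	/* (requested_mode & ~granted_mode & 0007) == 0 and
	 * (requested_mode & ~orig_granted_mode & 0007) == 00004, so absent
	 * CAP_IPC_OWNER the access is logged and gr_ipc_permitted()
	 * returns 0 (deny). */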
77262diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77263new file mode 100644
77264index 0000000..5e05e20
77265--- /dev/null
77266+++ b/grsecurity/grsec_link.c
77267@@ -0,0 +1,58 @@
77268+#include <linux/kernel.h>
77269+#include <linux/sched.h>
77270+#include <linux/fs.h>
77271+#include <linux/file.h>
77272+#include <linux/grinternal.h>
77273+
77274+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77275+{
77276+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77277+ const struct inode *link_inode = link->dentry->d_inode;
77278+
77279+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77280+ /* ignore root-owned links, e.g. /proc/self */
77281+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77282+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77283+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77284+ return 1;
77285+ }
77286+#endif
77287+ return 0;
77288+}
77289+
77290+int
77291+gr_handle_follow_link(const struct inode *parent,
77292+ const struct inode *inode,
77293+ const struct dentry *dentry, const struct vfsmount *mnt)
77294+{
77295+#ifdef CONFIG_GRKERNSEC_LINK
77296+ const struct cred *cred = current_cred();
77297+
77298+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77299+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77300+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77301+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77302+ return -EACCES;
77303+ }
77304+#endif
77305+ return 0;
77306+}
77307+
77308+int
77309+gr_handle_hardlink(const struct dentry *dentry,
77310+ const struct vfsmount *mnt,
77311+ struct inode *inode, const int mode, const struct filename *to)
77312+{
77313+#ifdef CONFIG_GRKERNSEC_LINK
77314+ const struct cred *cred = current_cred();
77315+
77316+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77317+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77318+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77319+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77320+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77321+ return -EPERM;
77322+ }
77323+#endif
77324+ return 0;
77325+}
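gr_handle_hardlink() above breaks the long-standing hardlink attack in world-writable directories, where an unprivileged user links a privileged or unreadable file under an attacker-chosen name to abuse it later. A sketch of the pattern that now fails, with paths illustrative:

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* unprivileged user hardlinks a setuid binary into /tmp */
		if (link("/usr/bin/passwd", "/tmp/passwd") < 0)
			perror("link");	/* -EPERM under CONFIG_GRKERNSEC_LINK:
					 * the caller's fsuid does not own the
					 * inode, the target is a privileged
					 * binary, and the caller lacks
					 * CAP_FOWNER */
		return 0;
	}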
77326diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77327new file mode 100644
77328index 0000000..dbe0a6b
77329--- /dev/null
77330+++ b/grsecurity/grsec_log.c
77331@@ -0,0 +1,341 @@
77332+#include <linux/kernel.h>
77333+#include <linux/sched.h>
77334+#include <linux/file.h>
77335+#include <linux/tty.h>
77336+#include <linux/fs.h>
77337+#include <linux/mm.h>
77338+#include <linux/grinternal.h>
77339+
77340+#ifdef CONFIG_TREE_PREEMPT_RCU
77341+#define DISABLE_PREEMPT() preempt_disable()
77342+#define ENABLE_PREEMPT() preempt_enable()
77343+#else
77344+#define DISABLE_PREEMPT()
77345+#define ENABLE_PREEMPT()
77346+#endif
77347+
77348+#define BEGIN_LOCKS(x) \
77349+ DISABLE_PREEMPT(); \
77350+ rcu_read_lock(); \
77351+ read_lock(&tasklist_lock); \
77352+ read_lock(&grsec_exec_file_lock); \
77353+ if (x != GR_DO_AUDIT) \
77354+ spin_lock(&grsec_alert_lock); \
77355+ else \
77356+ spin_lock(&grsec_audit_lock)
77357+
77358+#define END_LOCKS(x) \
77359+ if (x != GR_DO_AUDIT) \
77360+ spin_unlock(&grsec_alert_lock); \
77361+ else \
77362+ spin_unlock(&grsec_audit_lock); \
77363+ read_unlock(&grsec_exec_file_lock); \
77364+ read_unlock(&tasklist_lock); \
77365+ rcu_read_unlock(); \
77366+ ENABLE_PREEMPT(); \
77367+ if (x == GR_DONT_AUDIT) \
77368+ gr_handle_alertkill(current)
77369+
77370+enum {
77371+ FLOODING,
77372+ NO_FLOODING
77373+};
77374+
77375+extern char *gr_alert_log_fmt;
77376+extern char *gr_audit_log_fmt;
77377+extern char *gr_alert_log_buf;
77378+extern char *gr_audit_log_buf;
77379+
77380+static int gr_log_start(int audit)
77381+{
77382+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77383+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77384+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77385+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77386+ unsigned long curr_secs = get_seconds();
77387+
77388+ if (audit == GR_DO_AUDIT)
77389+ goto set_fmt;
77390+
77391+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77392+ grsec_alert_wtime = curr_secs;
77393+ grsec_alert_fyet = 0;
77394+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77395+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77396+ grsec_alert_fyet++;
77397+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77398+ grsec_alert_wtime = curr_secs;
77399+ grsec_alert_fyet++;
77400+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77401+ return FLOODING;
77402+ }
77403+ else return FLOODING;
77404+
77405+set_fmt:
77406+#endif
77407+ memset(buf, 0, PAGE_SIZE);
77408+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77409+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77410+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77411+ } else if (current->signal->curr_ip) {
77412+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77413+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77414+ } else if (gr_acl_is_enabled()) {
77415+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77416+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77417+ } else {
77418+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77419+ strcpy(buf, fmt);
77420+ }
77421+
77422+ return NO_FLOODING;
77423+}
77424+
77425+static void gr_log_middle(int audit, const char *msg, va_list ap)
77426+ __attribute__ ((format (printf, 2, 0)));
77427+
77428+static void gr_log_middle(int audit, const char *msg, va_list ap)
77429+{
77430+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77431+ unsigned int len = strlen(buf);
77432+
77433+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77434+
77435+ return;
77436+}
77437+
77438+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77439+ __attribute__ ((format (printf, 2, 3)));
77440+
77441+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77442+{
77443+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77444+ unsigned int len = strlen(buf);
77445+ va_list ap;
77446+
77447+ va_start(ap, msg);
77448+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77449+ va_end(ap);
77450+
77451+ return;
77452+}
77453+
77454+static void gr_log_end(int audit, int append_default)
77455+{
77456+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77457+ if (append_default) {
77458+ struct task_struct *task = current;
77459+ struct task_struct *parent = task->real_parent;
77460+ const struct cred *cred = __task_cred(task);
77461+ const struct cred *pcred = __task_cred(parent);
77462+ unsigned int len = strlen(buf);
77463+
77464+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77465+ }
77466+
77467+ printk("%s\n", buf);
77468+
77469+ return;
77470+}
77471+
77472+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77473+{
77474+ int logtype;
77475+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77476+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77477+ void *voidptr = NULL;
77478+ int num1 = 0, num2 = 0;
77479+ unsigned long ulong1 = 0, ulong2 = 0;
77480+ struct dentry *dentry = NULL;
77481+ struct vfsmount *mnt = NULL;
77482+ struct file *file = NULL;
77483+ struct task_struct *task = NULL;
77484+ struct vm_area_struct *vma = NULL;
77485+ const struct cred *cred, *pcred;
77486+ va_list ap;
77487+
77488+ BEGIN_LOCKS(audit);
77489+ logtype = gr_log_start(audit);
77490+ if (logtype == FLOODING) {
77491+ END_LOCKS(audit);
77492+ return;
77493+ }
77494+ va_start(ap, argtypes);
77495+ switch (argtypes) {
77496+ case GR_TTYSNIFF:
77497+ task = va_arg(ap, struct task_struct *);
77498+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77499+ break;
77500+ case GR_SYSCTL_HIDDEN:
77501+ str1 = va_arg(ap, char *);
77502+ gr_log_middle_varargs(audit, msg, result, str1);
77503+ break;
77504+ case GR_RBAC:
77505+ dentry = va_arg(ap, struct dentry *);
77506+ mnt = va_arg(ap, struct vfsmount *);
77507+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77508+ break;
77509+ case GR_RBAC_STR:
77510+ dentry = va_arg(ap, struct dentry *);
77511+ mnt = va_arg(ap, struct vfsmount *);
77512+ str1 = va_arg(ap, char *);
77513+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77514+ break;
77515+ case GR_STR_RBAC:
77516+ str1 = va_arg(ap, char *);
77517+ dentry = va_arg(ap, struct dentry *);
77518+ mnt = va_arg(ap, struct vfsmount *);
77519+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77520+ break;
77521+ case GR_RBAC_MODE2:
77522+ dentry = va_arg(ap, struct dentry *);
77523+ mnt = va_arg(ap, struct vfsmount *);
77524+ str1 = va_arg(ap, char *);
77525+ str2 = va_arg(ap, char *);
77526+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77527+ break;
77528+ case GR_RBAC_MODE3:
77529+ dentry = va_arg(ap, struct dentry *);
77530+ mnt = va_arg(ap, struct vfsmount *);
77531+ str1 = va_arg(ap, char *);
77532+ str2 = va_arg(ap, char *);
77533+ str3 = va_arg(ap, char *);
77534+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77535+ break;
77536+ case GR_FILENAME:
77537+ dentry = va_arg(ap, struct dentry *);
77538+ mnt = va_arg(ap, struct vfsmount *);
77539+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77540+ break;
77541+ case GR_STR_FILENAME:
77542+ str1 = va_arg(ap, char *);
77543+ dentry = va_arg(ap, struct dentry *);
77544+ mnt = va_arg(ap, struct vfsmount *);
77545+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77546+ break;
77547+ case GR_FILENAME_STR:
77548+ dentry = va_arg(ap, struct dentry *);
77549+ mnt = va_arg(ap, struct vfsmount *);
77550+ str1 = va_arg(ap, char *);
77551+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77552+ break;
77553+ case GR_FILENAME_TWO_INT:
77554+ dentry = va_arg(ap, struct dentry *);
77555+ mnt = va_arg(ap, struct vfsmount *);
77556+ num1 = va_arg(ap, int);
77557+ num2 = va_arg(ap, int);
77558+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77559+ break;
77560+ case GR_FILENAME_TWO_INT_STR:
77561+ dentry = va_arg(ap, struct dentry *);
77562+ mnt = va_arg(ap, struct vfsmount *);
77563+ num1 = va_arg(ap, int);
77564+ num2 = va_arg(ap, int);
77565+ str1 = va_arg(ap, char *);
77566+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77567+ break;
77568+ case GR_TEXTREL:
77569+ file = va_arg(ap, struct file *);
77570+ ulong1 = va_arg(ap, unsigned long);
77571+ ulong2 = va_arg(ap, unsigned long);
77572+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77573+ break;
77574+ case GR_PTRACE:
77575+ task = va_arg(ap, struct task_struct *);
77576+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77577+ break;
77578+ case GR_RESOURCE:
77579+ task = va_arg(ap, struct task_struct *);
77580+ cred = __task_cred(task);
77581+ pcred = __task_cred(task->real_parent);
77582+ ulong1 = va_arg(ap, unsigned long);
77583+ str1 = va_arg(ap, char *);
77584+ ulong2 = va_arg(ap, unsigned long);
77585+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77586+ break;
77587+ case GR_CAP:
77588+ task = va_arg(ap, struct task_struct *);
77589+ cred = __task_cred(task);
77590+ pcred = __task_cred(task->real_parent);
77591+ str1 = va_arg(ap, char *);
77592+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77593+ break;
77594+ case GR_SIG:
77595+ str1 = va_arg(ap, char *);
77596+ voidptr = va_arg(ap, void *);
77597+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77598+ break;
77599+ case GR_SIG2:
77600+ task = va_arg(ap, struct task_struct *);
77601+ cred = __task_cred(task);
77602+ pcred = __task_cred(task->real_parent);
77603+ num1 = va_arg(ap, int);
77604+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77605+ break;
77606+ case GR_CRASH1:
77607+ task = va_arg(ap, struct task_struct *);
77608+ cred = __task_cred(task);
77609+ pcred = __task_cred(task->real_parent);
77610+ ulong1 = va_arg(ap, unsigned long);
77611+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77612+ break;
77613+ case GR_CRASH2:
77614+ task = va_arg(ap, struct task_struct *);
77615+ cred = __task_cred(task);
77616+ pcred = __task_cred(task->real_parent);
77617+ ulong1 = va_arg(ap, unsigned long);
77618+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77619+ break;
77620+ case GR_RWXMAP:
77621+ file = va_arg(ap, struct file *);
77622+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77623+ break;
77624+ case GR_RWXMAPVMA:
77625+ vma = va_arg(ap, struct vm_area_struct *);
77626+ if (vma->vm_file)
77627+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77628+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77629+ str1 = "<stack>";
77630+ else if (vma->vm_start <= current->mm->brk &&
77631+ vma->vm_end >= current->mm->start_brk)
77632+ str1 = "<heap>";
77633+ else
77634+ str1 = "<anonymous mapping>";
77635+ gr_log_middle_varargs(audit, msg, str1);
77636+ break;
77637+ case GR_PSACCT:
77638+ {
77639+ unsigned int wday, cday;
77640+ __u8 whr, chr;
77641+ __u8 wmin, cmin;
77642+ __u8 wsec, csec;
77643+ char cur_tty[64] = { 0 };
77644+ char parent_tty[64] = { 0 };
77645+
77646+ task = va_arg(ap, struct task_struct *);
77647+ wday = va_arg(ap, unsigned int);
77648+ cday = va_arg(ap, unsigned int);
77649+ whr = va_arg(ap, int);
77650+ chr = va_arg(ap, int);
77651+ wmin = va_arg(ap, int);
77652+ cmin = va_arg(ap, int);
77653+ wsec = va_arg(ap, int);
77654+ csec = va_arg(ap, int);
77655+ ulong1 = va_arg(ap, unsigned long);
77656+ cred = __task_cred(task);
77657+ pcred = __task_cred(task->real_parent);
77658+
77659+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77660+ }
77661+ break;
77662+ default:
77663+ gr_log_middle(audit, msg, ap);
77664+ }
77665+ va_end(ap);
77666+ // these don't need DEFAULTSECARGS printed on the end
77667+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77668+ gr_log_end(audit, 0);
77669+ else
77670+ gr_log_end(audit, 1);
77671+ END_LOCKS(audit);
77672+}
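
The switch above is the tail of the gr_log_varargs() dispatcher: each GR_* argument-type constant fixes the count and C types of the trailing arguments, so the function can pull them off the va_list in a known order and hand them to a printf-style middle stage. A minimal userspace sketch of that pattern; the enum and names here are illustrative, not the kernel's:

#include <stdarg.h>
#include <stdio.h>

enum log_argtypes { LOG_STR, LOG_TWO_INT, LOG_STR_INT };

/* printf-style middle stage, like gr_log_middle_varargs() */
static void log_middle(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

static void log_varargs(enum log_argtypes argtypes, const char *fmt, ...)
{
	va_list ap;
	char *str1;
	int num1, num2;

	va_start(ap, fmt);
	switch (argtypes) {
	case LOG_STR:
		str1 = va_arg(ap, char *);
		log_middle(fmt, str1);
		break;
	case LOG_TWO_INT:
		num1 = va_arg(ap, int);
		num2 = va_arg(ap, int);
		log_middle(fmt, num1, num2);
		break;
	case LOG_STR_INT:
		str1 = va_arg(ap, char *);
		num1 = va_arg(ap, int);
		log_middle(fmt, str1, num1);
		break;
	}
	va_end(ap);
}

int main(void)
{
	log_varargs(LOG_STR_INT, "user %s hit limit %d\n", "alice", 42);
	return 0;
}

As in the patch, correctness depends on each message type always being paired with a format string that expects exactly those arguments.
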
77673diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77674new file mode 100644
77675index 0000000..0e39d8c
77676--- /dev/null
77677+++ b/grsecurity/grsec_mem.c
77678@@ -0,0 +1,48 @@
77679+#include <linux/kernel.h>
77680+#include <linux/sched.h>
77681+#include <linux/mm.h>
77682+#include <linux/mman.h>
77683+#include <linux/module.h>
77684+#include <linux/grinternal.h>
77685+
77686+void gr_handle_msr_write(void)
77687+{
77688+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77689+ return;
77690+}
77691+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77692+
77693+void
77694+gr_handle_ioperm(void)
77695+{
77696+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77697+ return;
77698+}
77699+
77700+void
77701+gr_handle_iopl(void)
77702+{
77703+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77704+ return;
77705+}
77706+
77707+void
77708+gr_handle_mem_readwrite(u64 from, u64 to)
77709+{
77710+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77711+ return;
77712+}
77713+
77714+void
77715+gr_handle_vm86(void)
77716+{
77717+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77718+ return;
77719+}
77720+
77721+void
77722+gr_log_badprocpid(const char *entry)
77723+{
77724+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77725+ return;
77726+}
77727diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77728new file mode 100644
77729index 0000000..6f9eb73
77730--- /dev/null
77731+++ b/grsecurity/grsec_mount.c
77732@@ -0,0 +1,65 @@
77733+#include <linux/kernel.h>
77734+#include <linux/sched.h>
77735+#include <linux/mount.h>
77736+#include <linux/major.h>
77737+#include <linux/grsecurity.h>
77738+#include <linux/grinternal.h>
77739+
77740+void
77741+gr_log_remount(const char *devname, const int retval)
77742+{
77743+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77744+ if (grsec_enable_mount && (retval >= 0))
77745+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77746+#endif
77747+ return;
77748+}
77749+
77750+void
77751+gr_log_unmount(const char *devname, const int retval)
77752+{
77753+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77754+ if (grsec_enable_mount && (retval >= 0))
77755+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77756+#endif
77757+ return;
77758+}
77759+
77760+void
77761+gr_log_mount(const char *from, struct path *to, const int retval)
77762+{
77763+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77764+ if (grsec_enable_mount && (retval >= 0))
77765+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
77766+#endif
77767+ return;
77768+}
77769+
77770+int
77771+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
77772+{
77773+#ifdef CONFIG_GRKERNSEC_ROFS
77774+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
77775+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
77776+ return -EPERM;
77777+ } else
77778+ return 0;
77779+#endif
77780+ return 0;
77781+}
77782+
77783+int
77784+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
77785+{
77786+#ifdef CONFIG_GRKERNSEC_ROFS
77787+ struct inode *inode = dentry->d_inode;
77788+
77789+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
77790+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
77791+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
77792+ return -EPERM;
77793+ } else
77794+ return 0;
77795+#endif
77796+ return 0;
77797+}
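
gr_handle_rofs_blockwrite() above refuses write-mode access to block devices (and raw-major character devices) while the romount_protect toggle is on. A userspace model of the same test, with a stand-in variable for grsec_enable_rofs and the RAW_MAJOR value from <linux/major.h>:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

static int rofs_enabled = 1;	/* models grsec_enable_rofs */
#define RAW_MAJOR 162		/* value from <linux/major.h> */

static int rofs_blockwrite_ok(const char *path, int want_write)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return -errno;
	if (rofs_enabled && want_write &&
	    (S_ISBLK(st.st_mode) ||
	     (S_ISCHR(st.st_mode) && major(st.st_rdev) == RAW_MAJOR)))
		return -EPERM;	/* same denial the kernel hook returns */
	return 0;
}

int main(void)
{
	printf("open /dev/sda for write -> %d\n",
	       rofs_blockwrite_ok("/dev/sda", 1));
	return 0;
}
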
77798diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
77799new file mode 100644
77800index 0000000..6ee9d50
77801--- /dev/null
77802+++ b/grsecurity/grsec_pax.c
77803@@ -0,0 +1,45 @@
77804+#include <linux/kernel.h>
77805+#include <linux/sched.h>
77806+#include <linux/mm.h>
77807+#include <linux/file.h>
77808+#include <linux/grinternal.h>
77809+#include <linux/grsecurity.h>
77810+
77811+void
77812+gr_log_textrel(struct vm_area_struct * vma)
77813+{
77814+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77815+ if (grsec_enable_log_rwxmaps)
77816+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
77817+#endif
77818+ return;
77819+}
77820+
77821+void gr_log_ptgnustack(struct file *file)
77822+{
77823+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77824+ if (grsec_enable_log_rwxmaps)
77825+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
77826+#endif
77827+ return;
77828+}
77829+
77830+void
77831+gr_log_rwxmmap(struct file *file)
77832+{
77833+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77834+ if (grsec_enable_log_rwxmaps)
77835+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
77836+#endif
77837+ return;
77838+}
77839+
77840+void
77841+gr_log_rwxmprotect(struct vm_area_struct *vma)
77842+{
77843+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77844+ if (grsec_enable_log_rwxmaps)
77845+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
77846+#endif
77847+ return;
77848+}
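
The GR_RWXMAPVMA case earlier in this patch labels an offending mapping with its backing file, or with <stack>/<heap>/<anonymous mapping> when it has none. A rough userspace analogue that classifies this process's own rwx mappings from the documented /proc/self/maps format; on most hardened systems it prints nothing:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		char perms[8], name[256] = "";
		unsigned long start, end;

		/* start-end perms offset dev inode [pathname] */
		if (sscanf(line, "%lx-%lx %7s %*s %*s %*s %255[^\n]",
			   &start, &end, perms, name) < 3)
			continue;
		if (strncmp(perms, "rwx", 3))
			continue;
		printf("rwx mapping %lx-%lx: %s\n", start, end,
		       name[0] ? name : "<anonymous mapping>");
	}
	fclose(f);
	return 0;
}
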
77849diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
77850new file mode 100644
77851index 0000000..2005a3a
77852--- /dev/null
77853+++ b/grsecurity/grsec_proc.c
77854@@ -0,0 +1,20 @@
77855+#include <linux/kernel.h>
77856+#include <linux/sched.h>
77857+#include <linux/grsecurity.h>
77858+#include <linux/grinternal.h>
77859+
77860+int gr_proc_is_restricted(void)
77861+{
77862+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77863+ const struct cred *cred = current_cred();
77864+#endif
77865+
77866+#ifdef CONFIG_GRKERNSEC_PROC_USER
77867+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
77868+ return -EACCES;
77869+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77870+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
77871+ return -EACCES;
77872+#endif
77873+ return 0;
77874+}
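
gr_proc_is_restricted() gates /proc visibility on the caller's fsuid and, in the USERGROUP variant, on membership in grsec_proc_gid. A userspace sketch using the effective uid as the closest stand-in for fsuid; the gid 1001 is an arbitrary placeholder for grsec_proc_gid:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int in_group(gid_t gid)
{
	gid_t groups[64];
	int i, n = getgroups(64, groups);

	for (i = 0; i < n; i++)
		if (groups[i] == gid)
			return 1;
	return 0;
}

static int proc_is_restricted(gid_t proc_gid)
{
	if (geteuid() == 0 || in_group(proc_gid))
		return 0;	/* root or the designated group sees /proc */
	return -EACCES;
}

int main(void)
{
	printf("restricted: %d\n", proc_is_restricted(1001));
	return 0;
}
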
77875diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
77876new file mode 100644
77877index 0000000..f7f29aa
77878--- /dev/null
77879+++ b/grsecurity/grsec_ptrace.c
77880@@ -0,0 +1,30 @@
77881+#include <linux/kernel.h>
77882+#include <linux/sched.h>
77883+#include <linux/grinternal.h>
77884+#include <linux/security.h>
77885+
77886+void
77887+gr_audit_ptrace(struct task_struct *task)
77888+{
77889+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77890+ if (grsec_enable_audit_ptrace)
77891+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
77892+#endif
77893+ return;
77894+}
77895+
77896+int
77897+gr_ptrace_readexec(struct file *file, int unsafe_flags)
77898+{
77899+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77900+ const struct dentry *dentry = file->f_path.dentry;
77901+ const struct vfsmount *mnt = file->f_path.mnt;
77902+
77903+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
77904+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
77905+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
77906+ return -EACCES;
77907+ }
77908+#endif
77909+ return 0;
77910+}
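
The PTRACE_READEXEC hook above denies an exec under ptrace when the tracer could not read the binary, closing the hole where ptrace is used to dump an execute-only file. A toy userspace version of the policy check; access(2) tests against the real uid, so it is only a rough stand-in for the kernel's inode_permission(..., MAY_READ):

#include <stdio.h>
#include <unistd.h>

static int ptrace_readexec_ok(const char *binary)
{
	/* allow the traced exec only if the binary is also readable */
	return access(binary, R_OK) == 0;
}

int main(void)
{
	const char *target = "/bin/ls";

	printf("%s: exec under ptrace %s\n", target,
	       ptrace_readexec_ok(target) ? "allowed" : "denied");
	return 0;
}
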
77911diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
77912new file mode 100644
77913index 0000000..3860c7e
77914--- /dev/null
77915+++ b/grsecurity/grsec_sig.c
77916@@ -0,0 +1,236 @@
77917+#include <linux/kernel.h>
77918+#include <linux/sched.h>
77919+#include <linux/fs.h>
77920+#include <linux/delay.h>
77921+#include <linux/grsecurity.h>
77922+#include <linux/grinternal.h>
77923+#include <linux/hardirq.h>
77924+
77925+char *signames[] = {
77926+ [SIGSEGV] = "Segmentation fault",
77927+ [SIGILL] = "Illegal instruction",
77928+ [SIGABRT] = "Abort",
77929+ [SIGBUS] = "Invalid alignment/Bus error"
77930+};
77931+
77932+void
77933+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
77934+{
77935+#ifdef CONFIG_GRKERNSEC_SIGNAL
77936+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
77937+ (sig == SIGABRT) || (sig == SIGBUS))) {
77938+ if (task_pid_nr(t) == task_pid_nr(current)) {
77939+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
77940+ } else {
77941+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
77942+ }
77943+ }
77944+#endif
77945+ return;
77946+}
77947+
77948+int
77949+gr_handle_signal(const struct task_struct *p, const int sig)
77950+{
77951+#ifdef CONFIG_GRKERNSEC
77952+ /* ignore the 0 signal for protected task checks */
77953+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
77954+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
77955+ return -EPERM;
77956+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
77957+ return -EPERM;
77958+ }
77959+#endif
77960+ return 0;
77961+}
77962+
77963+#ifdef CONFIG_GRKERNSEC
77964+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
77965+
77966+int gr_fake_force_sig(int sig, struct task_struct *t)
77967+{
77968+ unsigned long int flags;
77969+ int ret, blocked, ignored;
77970+ struct k_sigaction *action;
77971+
77972+ spin_lock_irqsave(&t->sighand->siglock, flags);
77973+ action = &t->sighand->action[sig-1];
77974+ ignored = action->sa.sa_handler == SIG_IGN;
77975+ blocked = sigismember(&t->blocked, sig);
77976+ if (blocked || ignored) {
77977+ action->sa.sa_handler = SIG_DFL;
77978+ if (blocked) {
77979+ sigdelset(&t->blocked, sig);
77980+ recalc_sigpending_and_wake(t);
77981+ }
77982+ }
77983+ if (action->sa.sa_handler == SIG_DFL)
77984+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
77985+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
77986+
77987+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
77988+
77989+ return ret;
77990+}
77991+#endif
77992+
77993+#define GR_USER_BAN_TIME (15 * 60)
77994+#define GR_DAEMON_BRUTE_TIME (30 * 60)
77995+
77996+void gr_handle_brute_attach(int dumpable)
77997+{
77998+#ifdef CONFIG_GRKERNSEC_BRUTE
77999+ struct task_struct *p = current;
78000+ kuid_t uid = GLOBAL_ROOT_UID;
78001+ int daemon = 0;
78002+
78003+ if (!grsec_enable_brute)
78004+ return;
78005+
78006+ rcu_read_lock();
78007+ read_lock(&tasklist_lock);
78008+ read_lock(&grsec_exec_file_lock);
78009+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78010+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78011+ p->real_parent->brute = 1;
78012+ daemon = 1;
78013+ } else {
78014+ const struct cred *cred = __task_cred(p), *cred2;
78015+ struct task_struct *tsk, *tsk2;
78016+
78017+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78018+ struct user_struct *user;
78019+
78020+ uid = cred->uid;
78021+
78022+ /* this is put upon execution past expiration */
78023+ user = find_user(uid);
78024+ if (user == NULL)
78025+ goto unlock;
78026+ user->suid_banned = 1;
78027+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78028+ if (user->suid_ban_expires == ~0UL)
78029+ user->suid_ban_expires--;
78030+
78031+ /* only kill other threads of the same binary, from the same user */
78032+ do_each_thread(tsk2, tsk) {
78033+ cred2 = __task_cred(tsk);
78034+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78035+ gr_fake_force_sig(SIGKILL, tsk);
78036+ } while_each_thread(tsk2, tsk);
78037+ }
78038+ }
78039+unlock:
78040+ read_unlock(&grsec_exec_file_lock);
78041+ read_unlock(&tasklist_lock);
78042+ rcu_read_unlock();
78043+
78044+ if (gr_is_global_nonroot(uid))
78045+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78046+ else if (daemon)
78047+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78048+
78049+#endif
78050+ return;
78051+}
78052+
78053+void gr_handle_brute_check(void)
78054+{
78055+#ifdef CONFIG_GRKERNSEC_BRUTE
78056+ struct task_struct *p = current;
78057+
78058+ if (unlikely(p->brute)) {
78059+ if (!grsec_enable_brute)
78060+ p->brute = 0;
78061+ else if (time_before(get_seconds(), p->brute_expires))
78062+ msleep(30 * 1000);
78063+ }
78064+#endif
78065+ return;
78066+}
78067+
78068+void gr_handle_kernel_exploit(void)
78069+{
78070+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78071+ const struct cred *cred;
78072+ struct task_struct *tsk, *tsk2;
78073+ struct user_struct *user;
78074+ kuid_t uid;
78075+
78076+ if (in_irq() || in_serving_softirq() || in_nmi())
78077+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78078+
78079+ uid = current_uid();
78080+
78081+ if (gr_is_global_root(uid))
78082+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78083+ else {
78084+ /* kill all the processes of this user, hold a reference
78085+ to their creds struct, and prevent them from creating
78086+ another process until system reset
78087+ */
78088+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78089+ GR_GLOBAL_UID(uid));
78090+ /* we intentionally leak this ref */
78091+ user = get_uid(current->cred->user);
78092+ if (user)
78093+ user->kernel_banned = 1;
78094+
78095+ /* kill all processes of this user */
78096+ read_lock(&tasklist_lock);
78097+ do_each_thread(tsk2, tsk) {
78098+ cred = __task_cred(tsk);
78099+ if (uid_eq(cred->uid, uid))
78100+ gr_fake_force_sig(SIGKILL, tsk);
78101+ } while_each_thread(tsk2, tsk);
78102+ read_unlock(&tasklist_lock);
78103+ }
78104+#endif
78105+}
78106+
78107+#ifdef CONFIG_GRKERNSEC_BRUTE
78108+static bool suid_ban_expired(struct user_struct *user)
78109+{
78110+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78111+ user->suid_banned = 0;
78112+ user->suid_ban_expires = 0;
78113+ free_uid(user);
78114+ return true;
78115+ }
78116+
78117+ return false;
78118+}
78119+#endif
78120+
78121+int gr_process_kernel_exec_ban(void)
78122+{
78123+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78124+ if (unlikely(current->cred->user->kernel_banned))
78125+ return -EPERM;
78126+#endif
78127+ return 0;
78128+}
78129+
78130+int gr_process_kernel_setuid_ban(struct user_struct *user)
78131+{
78132+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78133+ if (unlikely(user->kernel_banned))
78134+ gr_fake_force_sig(SIGKILL, current);
78135+#endif
78136+ return 0;
78137+}
78138+
78139+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78140+{
78141+#ifdef CONFIG_GRKERNSEC_BRUTE
78142+ struct user_struct *user = current->cred->user;
78143+ if (unlikely(user->suid_banned)) {
78144+ if (suid_ban_expired(user))
78145+ return 0;
78146+ /* disallow execution of suid binaries only */
78147+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78148+ return -EPERM;
78149+ }
78150+#endif
78151+ return 0;
78152+}
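
The brute-force logic above bans a non-root uid for GR_USER_BAN_TIME seconds after a suid crash and clears the ban lazily on the next check (suid_ban_expired()). A userspace model of that bookkeeping, with time(2) in place of get_seconds():

#include <stdio.h>
#include <time.h>

#define BAN_SECONDS (15 * 60)	/* GR_USER_BAN_TIME in the patch */

struct ban { unsigned uid; time_t expires; int active; };
static struct ban table[32];

static void ban_uid(unsigned uid)
{
	for (int i = 0; i < 32; i++)
		if (!table[i].active) {
			table[i] = (struct ban){ uid, time(NULL) + BAN_SECONDS, 1 };
			return;
		}
}

static int uid_banned(unsigned uid)
{
	for (int i = 0; i < 32; i++) {
		if (!table[i].active || table[i].uid != uid)
			continue;
		if (time(NULL) >= table[i].expires) {
			table[i].active = 0;	/* lazy expiry, as in suid_ban_expired() */
			return 0;
		}
		return 1;
	}
	return 0;
}

int main(void)
{
	ban_uid(1000);
	printf("uid 1000 banned: %d\n", uid_banned(1000));
	return 0;
}
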
78153diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78154new file mode 100644
78155index 0000000..e3650b6
78156--- /dev/null
78157+++ b/grsecurity/grsec_sock.c
78158@@ -0,0 +1,244 @@
78159+#include <linux/kernel.h>
78160+#include <linux/module.h>
78161+#include <linux/sched.h>
78162+#include <linux/file.h>
78163+#include <linux/net.h>
78164+#include <linux/in.h>
78165+#include <linux/ip.h>
78166+#include <net/sock.h>
78167+#include <net/inet_sock.h>
78168+#include <linux/grsecurity.h>
78169+#include <linux/grinternal.h>
78170+#include <linux/gracl.h>
78171+
78172+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78173+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78174+
78175+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78176+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78177+
78178+#ifdef CONFIG_UNIX_MODULE
78179+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78180+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78181+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78182+EXPORT_SYMBOL_GPL(gr_handle_create);
78183+#endif
78184+
78185+#ifdef CONFIG_GRKERNSEC
78186+#define gr_conn_table_size 32749
78187+struct conn_table_entry {
78188+ struct conn_table_entry *next;
78189+ struct signal_struct *sig;
78190+};
78191+
78192+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78193+DEFINE_SPINLOCK(gr_conn_table_lock);
78194+
78195+extern const char * gr_socktype_to_name(unsigned char type);
78196+extern const char * gr_proto_to_name(unsigned char proto);
78197+extern const char * gr_sockfamily_to_name(unsigned char family);
78198+
78199+static __inline__ int
78200+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78201+{
78202+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78203+}
78204+
78205+static __inline__ int
78206+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78207+ __u16 sport, __u16 dport)
78208+{
78209+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78210+ sig->gr_sport == sport && sig->gr_dport == dport))
78211+ return 1;
78212+ else
78213+ return 0;
78214+}
78215+
78216+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78217+{
78218+ struct conn_table_entry **match;
78219+ unsigned int index;
78220+
78221+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78222+ sig->gr_sport, sig->gr_dport,
78223+ gr_conn_table_size);
78224+
78225+ newent->sig = sig;
78226+
78227+ match = &gr_conn_table[index];
78228+ newent->next = *match;
78229+ *match = newent;
78230+
78231+ return;
78232+}
78233+
78234+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78235+{
78236+ struct conn_table_entry *match, *last = NULL;
78237+ unsigned int index;
78238+
78239+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78240+ sig->gr_sport, sig->gr_dport,
78241+ gr_conn_table_size);
78242+
78243+ match = gr_conn_table[index];
78244+ while (match && !conn_match(match->sig,
78245+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78246+ sig->gr_dport)) {
78247+ last = match;
78248+ match = match->next;
78249+ }
78250+
78251+ if (match) {
78252+ if (last)
78253+ last->next = match->next;
78254+ else
78255+ gr_conn_table[index] = NULL;
78256+ kfree(match);
78257+ }
78258+
78259+ return;
78260+}
78261+
78262+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78263+ __u16 sport, __u16 dport)
78264+{
78265+ struct conn_table_entry *match;
78266+ unsigned int index;
78267+
78268+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78269+
78270+ match = gr_conn_table[index];
78271+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78272+ match = match->next;
78273+
78274+ if (match)
78275+ return match->sig;
78276+ else
78277+ return NULL;
78278+}
78279+
78280+#endif
78281+
78282+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78283+{
78284+#ifdef CONFIG_GRKERNSEC
78285+ struct signal_struct *sig = current->signal;
78286+ struct conn_table_entry *newent;
78287+
78288+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78289+ if (newent == NULL)
78290+ return;
78291+ /* no bh lock needed since we are called with bh disabled */
78292+ spin_lock(&gr_conn_table_lock);
78293+ gr_del_task_from_ip_table_nolock(sig);
78294+ sig->gr_saddr = inet->inet_rcv_saddr;
78295+ sig->gr_daddr = inet->inet_daddr;
78296+ sig->gr_sport = inet->inet_sport;
78297+ sig->gr_dport = inet->inet_dport;
78298+ gr_add_to_task_ip_table_nolock(sig, newent);
78299+ spin_unlock(&gr_conn_table_lock);
78300+#endif
78301+ return;
78302+}
78303+
78304+void gr_del_task_from_ip_table(struct task_struct *task)
78305+{
78306+#ifdef CONFIG_GRKERNSEC
78307+ spin_lock_bh(&gr_conn_table_lock);
78308+ gr_del_task_from_ip_table_nolock(task->signal);
78309+ spin_unlock_bh(&gr_conn_table_lock);
78310+#endif
78311+ return;
78312+}
78313+
78314+void
78315+gr_attach_curr_ip(const struct sock *sk)
78316+{
78317+#ifdef CONFIG_GRKERNSEC
78318+ struct signal_struct *p, *set;
78319+ const struct inet_sock *inet = inet_sk(sk);
78320+
78321+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78322+ return;
78323+
78324+ set = current->signal;
78325+
78326+ spin_lock_bh(&gr_conn_table_lock);
78327+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78328+ inet->inet_dport, inet->inet_sport);
78329+ if (unlikely(p != NULL)) {
78330+ set->curr_ip = p->curr_ip;
78331+ set->used_accept = 1;
78332+ gr_del_task_from_ip_table_nolock(p);
78333+ spin_unlock_bh(&gr_conn_table_lock);
78334+ return;
78335+ }
78336+ spin_unlock_bh(&gr_conn_table_lock);
78337+
78338+ set->curr_ip = inet->inet_daddr;
78339+ set->used_accept = 1;
78340+#endif
78341+ return;
78342+}
78343+
78344+int
78345+gr_handle_sock_all(const int family, const int type, const int protocol)
78346+{
78347+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78348+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78349+ (family != AF_UNIX)) {
78350+ if (family == AF_INET)
78351+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78352+ else
78353+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78354+ return -EACCES;
78355+ }
78356+#endif
78357+ return 0;
78358+}
78359+
78360+int
78361+gr_handle_sock_server(const struct sockaddr *sck)
78362+{
78363+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78364+ if (grsec_enable_socket_server &&
78365+ in_group_p(grsec_socket_server_gid) &&
78366+ sck && (sck->sa_family != AF_UNIX) &&
78367+ (sck->sa_family != AF_LOCAL)) {
78368+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78369+ return -EACCES;
78370+ }
78371+#endif
78372+ return 0;
78373+}
78374+
78375+int
78376+gr_handle_sock_server_other(const struct sock *sck)
78377+{
78378+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78379+ if (grsec_enable_socket_server &&
78380+ in_group_p(grsec_socket_server_gid) &&
78381+ sck && (sck->sk_family != AF_UNIX) &&
78382+ (sck->sk_family != AF_LOCAL)) {
78383+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78384+ return -EACCES;
78385+ }
78386+#endif
78387+ return 0;
78388+}
78389+
78390+int
78391+gr_handle_sock_client(const struct sockaddr *sck)
78392+{
78393+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78394+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78395+ sck && (sck->sa_family != AF_UNIX) &&
78396+ (sck->sa_family != AF_LOCAL)) {
78397+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78398+ return -EACCES;
78399+ }
78400+#endif
78401+ return 0;
78402+}
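
The connection table in this file is a fixed-size array of singly linked chains, indexed by hashing the (saddr, daddr, sport, dport) tuple. A self-contained userspace version using the same hash, (daddr + saddr + (sport << 8) + (dport << 16)) modulo a prime table size, and the same bucket layout; the owner_pid field stands in for the signal_struct pointer the kernel stores:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 32749	/* prime, as gr_conn_table_size in the patch */

struct conn {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	int owner_pid;
	struct conn *next;
};
static struct conn *table[TABLE_SIZE];

static unsigned conn_hash(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	return (d + s + ((uint32_t)sp << 8) + ((uint32_t)dp << 16)) % TABLE_SIZE;
}

static void conn_add(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp, int pid)
{
	unsigned i = conn_hash(s, d, sp, dp);
	struct conn *c = malloc(sizeof(*c));

	if (!c)
		return;
	*c = (struct conn){ s, d, sp, dp, pid, table[i] };
	table[i] = c;		/* push onto the bucket chain, as the patch does */
}

static struct conn *conn_lookup(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	for (struct conn *c = table[conn_hash(s, d, sp, dp)]; c; c = c->next)
		if (c->saddr == s && c->daddr == d &&
		    c->sport == sp && c->dport == dp)
			return c;
	return NULL;
}

int main(void)
{
	conn_add(0x0a000001, 0x0a000002, 12345, 80, 4242);
	struct conn *c = conn_lookup(0x0a000001, 0x0a000002, 12345, 80);

	printf("owner pid: %d\n", c ? c->owner_pid : -1);
	return 0;
}
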
78403diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78404new file mode 100644
78405index 0000000..cce889e
78406--- /dev/null
78407+++ b/grsecurity/grsec_sysctl.c
78408@@ -0,0 +1,488 @@
78409+#include <linux/kernel.h>
78410+#include <linux/sched.h>
78411+#include <linux/sysctl.h>
78412+#include <linux/grsecurity.h>
78413+#include <linux/grinternal.h>
78414+
78415+int
78416+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78417+{
78418+#ifdef CONFIG_GRKERNSEC_SYSCTL
78419+ if (dirname == NULL || name == NULL)
78420+ return 0;
78421+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78422+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78423+ return -EACCES;
78424+ }
78425+#endif
78426+ return 0;
78427+}
78428+
78429+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78430+static int __maybe_unused __read_only one = 1;
78431+#endif
78432+
78433+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78434+ defined(CONFIG_GRKERNSEC_DENYUSB)
78435+struct ctl_table grsecurity_table[] = {
78436+#ifdef CONFIG_GRKERNSEC_SYSCTL
78437+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78438+#ifdef CONFIG_GRKERNSEC_IO
78439+ {
78440+ .procname = "disable_priv_io",
78441+ .data = &grsec_disable_privio,
78442+ .maxlen = sizeof(int),
78443+ .mode = 0600,
78444+ .proc_handler = &proc_dointvec,
78445+ },
78446+#endif
78447+#endif
78448+#ifdef CONFIG_GRKERNSEC_LINK
78449+ {
78450+ .procname = "linking_restrictions",
78451+ .data = &grsec_enable_link,
78452+ .maxlen = sizeof(int),
78453+ .mode = 0600,
78454+ .proc_handler = &proc_dointvec,
78455+ },
78456+#endif
78457+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78458+ {
78459+ .procname = "enforce_symlinksifowner",
78460+ .data = &grsec_enable_symlinkown,
78461+ .maxlen = sizeof(int),
78462+ .mode = 0600,
78463+ .proc_handler = &proc_dointvec,
78464+ },
78465+ {
78466+ .procname = "symlinkown_gid",
78467+ .data = &grsec_symlinkown_gid,
78468+ .maxlen = sizeof(int),
78469+ .mode = 0600,
78470+ .proc_handler = &proc_dointvec,
78471+ },
78472+#endif
78473+#ifdef CONFIG_GRKERNSEC_BRUTE
78474+ {
78475+ .procname = "deter_bruteforce",
78476+ .data = &grsec_enable_brute,
78477+ .maxlen = sizeof(int),
78478+ .mode = 0600,
78479+ .proc_handler = &proc_dointvec,
78480+ },
78481+#endif
78482+#ifdef CONFIG_GRKERNSEC_FIFO
78483+ {
78484+ .procname = "fifo_restrictions",
78485+ .data = &grsec_enable_fifo,
78486+ .maxlen = sizeof(int),
78487+ .mode = 0600,
78488+ .proc_handler = &proc_dointvec,
78489+ },
78490+#endif
78491+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78492+ {
78493+ .procname = "ptrace_readexec",
78494+ .data = &grsec_enable_ptrace_readexec,
78495+ .maxlen = sizeof(int),
78496+ .mode = 0600,
78497+ .proc_handler = &proc_dointvec,
78498+ },
78499+#endif
78500+#ifdef CONFIG_GRKERNSEC_SETXID
78501+ {
78502+ .procname = "consistent_setxid",
78503+ .data = &grsec_enable_setxid,
78504+ .maxlen = sizeof(int),
78505+ .mode = 0600,
78506+ .proc_handler = &proc_dointvec,
78507+ },
78508+#endif
78509+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78510+ {
78511+ .procname = "ip_blackhole",
78512+ .data = &grsec_enable_blackhole,
78513+ .maxlen = sizeof(int),
78514+ .mode = 0600,
78515+ .proc_handler = &proc_dointvec,
78516+ },
78517+ {
78518+ .procname = "lastack_retries",
78519+ .data = &grsec_lastack_retries,
78520+ .maxlen = sizeof(int),
78521+ .mode = 0600,
78522+ .proc_handler = &proc_dointvec,
78523+ },
78524+#endif
78525+#ifdef CONFIG_GRKERNSEC_EXECLOG
78526+ {
78527+ .procname = "exec_logging",
78528+ .data = &grsec_enable_execlog,
78529+ .maxlen = sizeof(int),
78530+ .mode = 0600,
78531+ .proc_handler = &proc_dointvec,
78532+ },
78533+#endif
78534+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78535+ {
78536+ .procname = "rwxmap_logging",
78537+ .data = &grsec_enable_log_rwxmaps,
78538+ .maxlen = sizeof(int),
78539+ .mode = 0600,
78540+ .proc_handler = &proc_dointvec,
78541+ },
78542+#endif
78543+#ifdef CONFIG_GRKERNSEC_SIGNAL
78544+ {
78545+ .procname = "signal_logging",
78546+ .data = &grsec_enable_signal,
78547+ .maxlen = sizeof(int),
78548+ .mode = 0600,
78549+ .proc_handler = &proc_dointvec,
78550+ },
78551+#endif
78552+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78553+ {
78554+ .procname = "forkfail_logging",
78555+ .data = &grsec_enable_forkfail,
78556+ .maxlen = sizeof(int),
78557+ .mode = 0600,
78558+ .proc_handler = &proc_dointvec,
78559+ },
78560+#endif
78561+#ifdef CONFIG_GRKERNSEC_TIME
78562+ {
78563+ .procname = "timechange_logging",
78564+ .data = &grsec_enable_time,
78565+ .maxlen = sizeof(int),
78566+ .mode = 0600,
78567+ .proc_handler = &proc_dointvec,
78568+ },
78569+#endif
78570+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78571+ {
78572+ .procname = "chroot_deny_shmat",
78573+ .data = &grsec_enable_chroot_shmat,
78574+ .maxlen = sizeof(int),
78575+ .mode = 0600,
78576+ .proc_handler = &proc_dointvec,
78577+ },
78578+#endif
78579+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78580+ {
78581+ .procname = "chroot_deny_unix",
78582+ .data = &grsec_enable_chroot_unix,
78583+ .maxlen = sizeof(int),
78584+ .mode = 0600,
78585+ .proc_handler = &proc_dointvec,
78586+ },
78587+#endif
78588+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78589+ {
78590+ .procname = "chroot_deny_mount",
78591+ .data = &grsec_enable_chroot_mount,
78592+ .maxlen = sizeof(int),
78593+ .mode = 0600,
78594+ .proc_handler = &proc_dointvec,
78595+ },
78596+#endif
78597+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78598+ {
78599+ .procname = "chroot_deny_fchdir",
78600+ .data = &grsec_enable_chroot_fchdir,
78601+ .maxlen = sizeof(int),
78602+ .mode = 0600,
78603+ .proc_handler = &proc_dointvec,
78604+ },
78605+#endif
78606+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78607+ {
78608+ .procname = "chroot_deny_chroot",
78609+ .data = &grsec_enable_chroot_double,
78610+ .maxlen = sizeof(int),
78611+ .mode = 0600,
78612+ .proc_handler = &proc_dointvec,
78613+ },
78614+#endif
78615+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78616+ {
78617+ .procname = "chroot_deny_pivot",
78618+ .data = &grsec_enable_chroot_pivot,
78619+ .maxlen = sizeof(int),
78620+ .mode = 0600,
78621+ .proc_handler = &proc_dointvec,
78622+ },
78623+#endif
78624+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78625+ {
78626+ .procname = "chroot_enforce_chdir",
78627+ .data = &grsec_enable_chroot_chdir,
78628+ .maxlen = sizeof(int),
78629+ .mode = 0600,
78630+ .proc_handler = &proc_dointvec,
78631+ },
78632+#endif
78633+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78634+ {
78635+ .procname = "chroot_deny_chmod",
78636+ .data = &grsec_enable_chroot_chmod,
78637+ .maxlen = sizeof(int),
78638+ .mode = 0600,
78639+ .proc_handler = &proc_dointvec,
78640+ },
78641+#endif
78642+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78643+ {
78644+ .procname = "chroot_deny_mknod",
78645+ .data = &grsec_enable_chroot_mknod,
78646+ .maxlen = sizeof(int),
78647+ .mode = 0600,
78648+ .proc_handler = &proc_dointvec,
78649+ },
78650+#endif
78651+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78652+ {
78653+ .procname = "chroot_restrict_nice",
78654+ .data = &grsec_enable_chroot_nice,
78655+ .maxlen = sizeof(int),
78656+ .mode = 0600,
78657+ .proc_handler = &proc_dointvec,
78658+ },
78659+#endif
78660+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78661+ {
78662+ .procname = "chroot_execlog",
78663+ .data = &grsec_enable_chroot_execlog,
78664+ .maxlen = sizeof(int),
78665+ .mode = 0600,
78666+ .proc_handler = &proc_dointvec,
78667+ },
78668+#endif
78669+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78670+ {
78671+ .procname = "chroot_caps",
78672+ .data = &grsec_enable_chroot_caps,
78673+ .maxlen = sizeof(int),
78674+ .mode = 0600,
78675+ .proc_handler = &proc_dointvec,
78676+ },
78677+#endif
78678+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78679+ {
78680+ .procname = "chroot_deny_bad_rename",
78681+ .data = &grsec_enable_chroot_rename,
78682+ .maxlen = sizeof(int),
78683+ .mode = 0600,
78684+ .proc_handler = &proc_dointvec,
78685+ },
78686+#endif
78687+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78688+ {
78689+ .procname = "chroot_deny_sysctl",
78690+ .data = &grsec_enable_chroot_sysctl,
78691+ .maxlen = sizeof(int),
78692+ .mode = 0600,
78693+ .proc_handler = &proc_dointvec,
78694+ },
78695+#endif
78696+#ifdef CONFIG_GRKERNSEC_TPE
78697+ {
78698+ .procname = "tpe",
78699+ .data = &grsec_enable_tpe,
78700+ .maxlen = sizeof(int),
78701+ .mode = 0600,
78702+ .proc_handler = &proc_dointvec,
78703+ },
78704+ {
78705+ .procname = "tpe_gid",
78706+ .data = &grsec_tpe_gid,
78707+ .maxlen = sizeof(int),
78708+ .mode = 0600,
78709+ .proc_handler = &proc_dointvec,
78710+ },
78711+#endif
78712+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78713+ {
78714+ .procname = "tpe_invert",
78715+ .data = &grsec_enable_tpe_invert,
78716+ .maxlen = sizeof(int),
78717+ .mode = 0600,
78718+ .proc_handler = &proc_dointvec,
78719+ },
78720+#endif
78721+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78722+ {
78723+ .procname = "tpe_restrict_all",
78724+ .data = &grsec_enable_tpe_all,
78725+ .maxlen = sizeof(int),
78726+ .mode = 0600,
78727+ .proc_handler = &proc_dointvec,
78728+ },
78729+#endif
78730+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78731+ {
78732+ .procname = "socket_all",
78733+ .data = &grsec_enable_socket_all,
78734+ .maxlen = sizeof(int),
78735+ .mode = 0600,
78736+ .proc_handler = &proc_dointvec,
78737+ },
78738+ {
78739+ .procname = "socket_all_gid",
78740+ .data = &grsec_socket_all_gid,
78741+ .maxlen = sizeof(int),
78742+ .mode = 0600,
78743+ .proc_handler = &proc_dointvec,
78744+ },
78745+#endif
78746+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78747+ {
78748+ .procname = "socket_client",
78749+ .data = &grsec_enable_socket_client,
78750+ .maxlen = sizeof(int),
78751+ .mode = 0600,
78752+ .proc_handler = &proc_dointvec,
78753+ },
78754+ {
78755+ .procname = "socket_client_gid",
78756+ .data = &grsec_socket_client_gid,
78757+ .maxlen = sizeof(int),
78758+ .mode = 0600,
78759+ .proc_handler = &proc_dointvec,
78760+ },
78761+#endif
78762+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78763+ {
78764+ .procname = "socket_server",
78765+ .data = &grsec_enable_socket_server,
78766+ .maxlen = sizeof(int),
78767+ .mode = 0600,
78768+ .proc_handler = &proc_dointvec,
78769+ },
78770+ {
78771+ .procname = "socket_server_gid",
78772+ .data = &grsec_socket_server_gid,
78773+ .maxlen = sizeof(int),
78774+ .mode = 0600,
78775+ .proc_handler = &proc_dointvec,
78776+ },
78777+#endif
78778+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78779+ {
78780+ .procname = "audit_group",
78781+ .data = &grsec_enable_group,
78782+ .maxlen = sizeof(int),
78783+ .mode = 0600,
78784+ .proc_handler = &proc_dointvec,
78785+ },
78786+ {
78787+ .procname = "audit_gid",
78788+ .data = &grsec_audit_gid,
78789+ .maxlen = sizeof(int),
78790+ .mode = 0600,
78791+ .proc_handler = &proc_dointvec,
78792+ },
78793+#endif
78794+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78795+ {
78796+ .procname = "audit_chdir",
78797+ .data = &grsec_enable_chdir,
78798+ .maxlen = sizeof(int),
78799+ .mode = 0600,
78800+ .proc_handler = &proc_dointvec,
78801+ },
78802+#endif
78803+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78804+ {
78805+ .procname = "audit_mount",
78806+ .data = &grsec_enable_mount,
78807+ .maxlen = sizeof(int),
78808+ .mode = 0600,
78809+ .proc_handler = &proc_dointvec,
78810+ },
78811+#endif
78812+#ifdef CONFIG_GRKERNSEC_DMESG
78813+ {
78814+ .procname = "dmesg",
78815+ .data = &grsec_enable_dmesg,
78816+ .maxlen = sizeof(int),
78817+ .mode = 0600,
78818+ .proc_handler = &proc_dointvec,
78819+ },
78820+#endif
78821+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78822+ {
78823+ .procname = "chroot_findtask",
78824+ .data = &grsec_enable_chroot_findtask,
78825+ .maxlen = sizeof(int),
78826+ .mode = 0600,
78827+ .proc_handler = &proc_dointvec,
78828+ },
78829+#endif
78830+#ifdef CONFIG_GRKERNSEC_RESLOG
78831+ {
78832+ .procname = "resource_logging",
78833+ .data = &grsec_resource_logging,
78834+ .maxlen = sizeof(int),
78835+ .mode = 0600,
78836+ .proc_handler = &proc_dointvec,
78837+ },
78838+#endif
78839+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78840+ {
78841+ .procname = "audit_ptrace",
78842+ .data = &grsec_enable_audit_ptrace,
78843+ .maxlen = sizeof(int),
78844+ .mode = 0600,
78845+ .proc_handler = &proc_dointvec,
78846+ },
78847+#endif
78848+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78849+ {
78850+ .procname = "harden_ptrace",
78851+ .data = &grsec_enable_harden_ptrace,
78852+ .maxlen = sizeof(int),
78853+ .mode = 0600,
78854+ .proc_handler = &proc_dointvec,
78855+ },
78856+#endif
78857+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78858+ {
78859+ .procname = "harden_ipc",
78860+ .data = &grsec_enable_harden_ipc,
78861+ .maxlen = sizeof(int),
78862+ .mode = 0600,
78863+ .proc_handler = &proc_dointvec,
78864+ },
78865+#endif
78866+ {
78867+ .procname = "grsec_lock",
78868+ .data = &grsec_lock,
78869+ .maxlen = sizeof(int),
78870+ .mode = 0600,
78871+ .proc_handler = &proc_dointvec,
78872+ },
78873+#endif
78874+#ifdef CONFIG_GRKERNSEC_ROFS
78875+ {
78876+ .procname = "romount_protect",
78877+ .data = &grsec_enable_rofs,
78878+ .maxlen = sizeof(int),
78879+ .mode = 0600,
78880+ .proc_handler = &proc_dointvec_minmax,
78881+ .extra1 = &one,
78882+ .extra2 = &one,
78883+ },
78884+#endif
78885+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
78886+ {
78887+ .procname = "deny_new_usb",
78888+ .data = &grsec_deny_new_usb,
78889+ .maxlen = sizeof(int),
78890+ .mode = 0600,
78891+ .proc_handler = &proc_dointvec,
78892+ },
78893+#endif
78894+ { }
78895+};
78896+#endif
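
grsecurity_table[] follows the standard ctl_table idiom: one designated-initializer entry per toggle, each compiled in or out by its CONFIG_* guard, with an empty sentinel entry marking the end. A userspace sketch of walking such a sentinel-terminated table; the toggle names and variables are illustrative:

#include <stdio.h>
#include <string.h>

static int enable_link = 1, enable_brute = 0;

struct toggle {
	const char *procname;
	int *data;
};

static struct toggle table[] = {
	{ .procname = "linking_restrictions", .data = &enable_link },
	{ .procname = "deter_bruteforce",     .data = &enable_brute },
	{ 0 }	/* sentinel, like the closing { } entry in grsecurity_table */
};

static int *toggle_lookup(const char *name)
{
	for (struct toggle *t = table; t->procname; t++)
		if (!strcmp(t->procname, name))
			return t->data;
	return NULL;
}

int main(void)
{
	int *v = toggle_lookup("deter_bruteforce");

	if (v)
		*v = 1;		/* models writing "1" to the sysctl file */
	printf("deter_bruteforce = %d\n", enable_brute);
	return 0;
}
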
78897diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
78898new file mode 100644
78899index 0000000..61b514e
78900--- /dev/null
78901+++ b/grsecurity/grsec_time.c
78902@@ -0,0 +1,16 @@
78903+#include <linux/kernel.h>
78904+#include <linux/sched.h>
78905+#include <linux/grinternal.h>
78906+#include <linux/module.h>
78907+
78908+void
78909+gr_log_timechange(void)
78910+{
78911+#ifdef CONFIG_GRKERNSEC_TIME
78912+ if (grsec_enable_time)
78913+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
78914+#endif
78915+ return;
78916+}
78917+
78918+EXPORT_SYMBOL_GPL(gr_log_timechange);
78919diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
78920new file mode 100644
78921index 0000000..d1953de
78922--- /dev/null
78923+++ b/grsecurity/grsec_tpe.c
78924@@ -0,0 +1,78 @@
78925+#include <linux/kernel.h>
78926+#include <linux/sched.h>
78927+#include <linux/file.h>
78928+#include <linux/fs.h>
78929+#include <linux/grinternal.h>
78930+
78931+extern int gr_acl_tpe_check(void);
78932+
78933+int
78934+gr_tpe_allow(const struct file *file)
78935+{
78936+#ifdef CONFIG_GRKERNSEC
78937+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
78938+ struct inode *file_inode = file->f_path.dentry->d_inode;
78939+ const struct cred *cred = current_cred();
78940+ char *msg = NULL;
78941+ char *msg2 = NULL;
78942+
78943+ // never restrict root
78944+ if (gr_is_global_root(cred->uid))
78945+ return 1;
78946+
78947+ if (grsec_enable_tpe) {
78948+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78949+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
78950+ msg = "not being in trusted group";
78951+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
78952+ msg = "being in untrusted group";
78953+#else
78954+ if (in_group_p(grsec_tpe_gid))
78955+ msg = "being in untrusted group";
78956+#endif
78957+ }
78958+ if (!msg && gr_acl_tpe_check())
78959+ msg = "being in untrusted role";
78960+
78961+ // not in any affected group/role
78962+ if (!msg)
78963+ goto next_check;
78964+
78965+ if (gr_is_global_nonroot(inode->i_uid))
78966+ msg2 = "file in non-root-owned directory";
78967+ else if (inode->i_mode & S_IWOTH)
78968+ msg2 = "file in world-writable directory";
78969+ else if (inode->i_mode & S_IWGRP)
78970+ msg2 = "file in group-writable directory";
78971+ else if (file_inode->i_mode & S_IWOTH)
78972+ msg2 = "file is world-writable";
78973+
78974+ if (msg && msg2) {
78975+ char fullmsg[70] = {0};
78976+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
78977+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
78978+ return 0;
78979+ }
78980+ msg = NULL;
78981+next_check:
78982+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78983+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
78984+ return 1;
78985+
78986+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
78987+ msg = "directory not owned by user";
78988+ else if (inode->i_mode & S_IWOTH)
78989+ msg = "file in world-writable directory";
78990+ else if (inode->i_mode & S_IWGRP)
78991+ msg = "file in group-writable directory";
78992+ else if (file_inode->i_mode & S_IWOTH)
78993+ msg = "file is world-writable";
78994+
78995+ if (msg) {
78996+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
78997+ return 0;
78998+ }
78999+#endif
79000+#endif
79001+ return 1;
79002+}
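
gr_tpe_allow() decides trusted-path execution from the ownership and write bits of the binary's parent directory, plus world-writability of the file itself. A userspace reproduction of that same sequence of checks using stat(2):

#include <libgen.h>
#include <stdio.h>
#include <sys/stat.h>

static const char *tpe_reason(const char *path)
{
	char dir[4096];
	struct stat ds, fs;

	snprintf(dir, sizeof(dir), "%s", path);	/* dirname() modifies its arg */
	if (stat(dirname(dir), &ds) || stat(path, &fs))
		return "stat failed";
	if (ds.st_uid != 0)
		return "file in non-root-owned directory";
	if (ds.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (ds.st_mode & S_IWGRP)
		return "file in group-writable directory";
	if (fs.st_mode & S_IWOTH)
		return "file is world-writable";
	return NULL;	/* no objection: execution would be allowed */
}

int main(void)
{
	const char *why = tpe_reason("/tmp/evil");

	printf("%s\n", why ? why : "allowed");
	return 0;
}
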
79003diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79004new file mode 100644
79005index 0000000..ae02d8e
79006--- /dev/null
79007+++ b/grsecurity/grsec_usb.c
79008@@ -0,0 +1,15 @@
79009+#include <linux/kernel.h>
79010+#include <linux/grinternal.h>
79011+#include <linux/module.h>
79012+
79013+int gr_handle_new_usb(void)
79014+{
79015+#ifdef CONFIG_GRKERNSEC_DENYUSB
79016+ if (grsec_deny_new_usb) {
79017+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79018+ return 1;
79019+ }
79020+#endif
79021+ return 0;
79022+}
79023+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79024diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79025new file mode 100644
79026index 0000000..158b330
79027--- /dev/null
79028+++ b/grsecurity/grsum.c
79029@@ -0,0 +1,64 @@
79030+#include <linux/err.h>
79031+#include <linux/kernel.h>
79032+#include <linux/sched.h>
79033+#include <linux/mm.h>
79034+#include <linux/scatterlist.h>
79035+#include <linux/crypto.h>
79036+#include <linux/gracl.h>
79037+
79038+
79039+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79040+#error "crypto and sha256 must be built into the kernel"
79041+#endif
79042+
79043+int
79044+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79045+{
79046+ struct crypto_hash *tfm;
79047+ struct hash_desc desc;
79048+ struct scatterlist sg[2];
79049+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79050+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79051+ unsigned long *sumptr = (unsigned long *)sum;
79052+ int cryptres;
79053+ int retval = 1;
79054+ volatile int mismatched = 0;
79055+ volatile int dummy = 0;
79056+ unsigned int i;
79057+
79058+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79059+ if (IS_ERR(tfm)) {
79060+ /* should never happen, since sha256 should be built in */
79061+ memset(entry->pw, 0, GR_PW_LEN);
79062+ return 1;
79063+ }
79064+
79065+ sg_init_table(sg, 2);
79066+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79067+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79068+
79069+ desc.tfm = tfm;
79070+ desc.flags = 0;
79071+
79072+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79073+ temp_sum);
79074+
79075+ memset(entry->pw, 0, GR_PW_LEN);
79076+
79077+ if (cryptres)
79078+ goto out;
79079+
79080+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79081+ if (sumptr[i] != tmpsumptr[i])
79082+ mismatched = 1;
79083+ else
79084+ dummy = 1; // waste a cycle
79085+
79086+ if (!mismatched)
79087+ retval = dummy - 1;
79088+
79089+out:
79090+ crypto_free_hash(tfm);
79091+
79092+ return retval;
79093+}
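
The comparison loop in chkpw() deliberately walks the whole digest, using volatile flags and a dummy write so that a mismatch near the start costs the same as one near the end. The more common idiom for the same goal is XOR accumulation; a userspace sketch:

#include <stdio.h>
#include <string.h>

static int const_time_eq(const unsigned char *a, const unsigned char *b,
			 size_t len)
{
	unsigned char acc = 0;

	for (size_t i = 0; i < len; i++)
		acc |= a[i] ^ b[i];	/* no data-dependent branch or early exit */
	return acc == 0;
}

int main(void)
{
	unsigned char x[32], y[32];

	memset(x, 0xab, sizeof(x));
	memset(y, 0xab, sizeof(y));
	y[31] ^= 1;
	printf("equal: %d\n", const_time_eq(x, y, sizeof(x)));
	return 0;
}
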
79094diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79095index 77ff547..181834f 100644
79096--- a/include/asm-generic/4level-fixup.h
79097+++ b/include/asm-generic/4level-fixup.h
79098@@ -13,8 +13,10 @@
79099 #define pmd_alloc(mm, pud, address) \
79100 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79101 NULL: pmd_offset(pud, address))
79102+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79103
79104 #define pud_alloc(mm, pgd, address) (pgd)
79105+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79106 #define pud_offset(pgd, start) (pgd)
79107 #define pud_none(pud) 0
79108 #define pud_bad(pud) 0
79109diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79110index b7babf0..1e4b4f1 100644
79111--- a/include/asm-generic/atomic-long.h
79112+++ b/include/asm-generic/atomic-long.h
79113@@ -22,6 +22,12 @@
79114
79115 typedef atomic64_t atomic_long_t;
79116
79117+#ifdef CONFIG_PAX_REFCOUNT
79118+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79119+#else
79120+typedef atomic64_t atomic_long_unchecked_t;
79121+#endif
79122+
79123 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79124
79125 static inline long atomic_long_read(atomic_long_t *l)
79126@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79127 return (long)atomic64_read(v);
79128 }
79129
79130+#ifdef CONFIG_PAX_REFCOUNT
79131+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79132+{
79133+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79134+
79135+ return (long)atomic64_read_unchecked(v);
79136+}
79137+#endif
79138+
79139 static inline void atomic_long_set(atomic_long_t *l, long i)
79140 {
79141 atomic64_t *v = (atomic64_t *)l;
79142@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79143 atomic64_set(v, i);
79144 }
79145
79146+#ifdef CONFIG_PAX_REFCOUNT
79147+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79148+{
79149+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79150+
79151+ atomic64_set_unchecked(v, i);
79152+}
79153+#endif
79154+
79155 static inline void atomic_long_inc(atomic_long_t *l)
79156 {
79157 atomic64_t *v = (atomic64_t *)l;
79158@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79159 atomic64_inc(v);
79160 }
79161
79162+#ifdef CONFIG_PAX_REFCOUNT
79163+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79164+{
79165+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79166+
79167+ atomic64_inc_unchecked(v);
79168+}
79169+#endif
79170+
79171 static inline void atomic_long_dec(atomic_long_t *l)
79172 {
79173 atomic64_t *v = (atomic64_t *)l;
79174@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79175 atomic64_dec(v);
79176 }
79177
79178+#ifdef CONFIG_PAX_REFCOUNT
79179+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79180+{
79181+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79182+
79183+ atomic64_dec_unchecked(v);
79184+}
79185+#endif
79186+
79187 static inline void atomic_long_add(long i, atomic_long_t *l)
79188 {
79189 atomic64_t *v = (atomic64_t *)l;
79190@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79191 atomic64_add(i, v);
79192 }
79193
79194+#ifdef CONFIG_PAX_REFCOUNT
79195+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79196+{
79197+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79198+
79199+ atomic64_add_unchecked(i, v);
79200+}
79201+#endif
79202+
79203 static inline void atomic_long_sub(long i, atomic_long_t *l)
79204 {
79205 atomic64_t *v = (atomic64_t *)l;
79206@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79207 atomic64_sub(i, v);
79208 }
79209
79210+#ifdef CONFIG_PAX_REFCOUNT
79211+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79212+{
79213+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79214+
79215+ atomic64_sub_unchecked(i, v);
79216+}
79217+#endif
79218+
79219 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79220 {
79221 atomic64_t *v = (atomic64_t *)l;
79222@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79223 return atomic64_add_negative(i, v);
79224 }
79225
79226-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79227+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79228 {
79229 atomic64_t *v = (atomic64_t *)l;
79230
79231 return (long)atomic64_add_return(i, v);
79232 }
79233
79234+#ifdef CONFIG_PAX_REFCOUNT
79235+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79236+{
79237+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79238+
79239+ return (long)atomic64_add_return_unchecked(i, v);
79240+}
79241+#endif
79242+
79243 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79244 {
79245 atomic64_t *v = (atomic64_t *)l;
79246@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79247 return (long)atomic64_inc_return(v);
79248 }
79249
79250+#ifdef CONFIG_PAX_REFCOUNT
79251+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79252+{
79253+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79254+
79255+ return (long)atomic64_inc_return_unchecked(v);
79256+}
79257+#endif
79258+
79259 static inline long atomic_long_dec_return(atomic_long_t *l)
79260 {
79261 atomic64_t *v = (atomic64_t *)l;
79262@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79263
79264 typedef atomic_t atomic_long_t;
79265
79266+#ifdef CONFIG_PAX_REFCOUNT
79267+typedef atomic_unchecked_t atomic_long_unchecked_t;
79268+#else
79269+typedef atomic_t atomic_long_unchecked_t;
79270+#endif
79271+
79272 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79273 static inline long atomic_long_read(atomic_long_t *l)
79274 {
79275@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79276 return (long)atomic_read(v);
79277 }
79278
79279+#ifdef CONFIG_PAX_REFCOUNT
79280+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79281+{
79282+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79283+
79284+ return (long)atomic_read_unchecked(v);
79285+}
79286+#endif
79287+
79288 static inline void atomic_long_set(atomic_long_t *l, long i)
79289 {
79290 atomic_t *v = (atomic_t *)l;
79291@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79292 atomic_set(v, i);
79293 }
79294
79295+#ifdef CONFIG_PAX_REFCOUNT
79296+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79297+{
79298+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79299+
79300+ atomic_set_unchecked(v, i);
79301+}
79302+#endif
79303+
79304 static inline void atomic_long_inc(atomic_long_t *l)
79305 {
79306 atomic_t *v = (atomic_t *)l;
79307@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79308 atomic_inc(v);
79309 }
79310
79311+#ifdef CONFIG_PAX_REFCOUNT
79312+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79313+{
79314+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79315+
79316+ atomic_inc_unchecked(v);
79317+}
79318+#endif
79319+
79320 static inline void atomic_long_dec(atomic_long_t *l)
79321 {
79322 atomic_t *v = (atomic_t *)l;
79323@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79324 atomic_dec(v);
79325 }
79326
79327+#ifdef CONFIG_PAX_REFCOUNT
79328+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79329+{
79330+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79331+
79332+ atomic_dec_unchecked(v);
79333+}
79334+#endif
79335+
79336 static inline void atomic_long_add(long i, atomic_long_t *l)
79337 {
79338 atomic_t *v = (atomic_t *)l;
79339@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79340 atomic_add(i, v);
79341 }
79342
79343+#ifdef CONFIG_PAX_REFCOUNT
79344+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79345+{
79346+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79347+
79348+ atomic_add_unchecked(i, v);
79349+}
79350+#endif
79351+
79352 static inline void atomic_long_sub(long i, atomic_long_t *l)
79353 {
79354 atomic_t *v = (atomic_t *)l;
79355@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79356 atomic_sub(i, v);
79357 }
79358
79359+#ifdef CONFIG_PAX_REFCOUNT
79360+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79361+{
79362+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79363+
79364+ atomic_sub_unchecked(i, v);
79365+}
79366+#endif
79367+
79368 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79369 {
79370 atomic_t *v = (atomic_t *)l;
79371@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79372 return atomic_add_negative(i, v);
79373 }
79374
79375-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79376+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79377 {
79378 atomic_t *v = (atomic_t *)l;
79379
79380 return (long)atomic_add_return(i, v);
79381 }
79382
79383+#ifdef CONFIG_PAX_REFCOUNT
79384+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79385+{
79386+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79387+
79388+ return (long)atomic_add_return_unchecked(i, v);
79389+}
79390+
79391+#endif
79392+
79393 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79394 {
79395 atomic_t *v = (atomic_t *)l;
79396@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79397 return (long)atomic_inc_return(v);
79398 }
79399
79400+#ifdef CONFIG_PAX_REFCOUNT
79401+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79402+{
79403+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79404+
79405+ return (long)atomic_inc_return_unchecked(v);
79406+}
79407+#endif
79408+
79409 static inline long atomic_long_dec_return(atomic_long_t *l)
79410 {
79411 atomic_t *v = (atomic_t *)l;
79412@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79413
79414 #endif /* BITS_PER_LONG == 64 */
79415
79416+#ifdef CONFIG_PAX_REFCOUNT
79417+static inline void pax_refcount_needs_these_functions(void)
79418+{
79419+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79420+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79421+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79422+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79423+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79424+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79425+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79426+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79427+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79428+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79429+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79430+#ifdef CONFIG_X86
79431+ atomic_clear_mask_unchecked(0, NULL);
79432+ atomic_set_mask_unchecked(0, NULL);
79433+#endif
79434+
79435+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79436+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79437+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79438+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79439+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79440+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79441+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79442+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79443+}
79444+#else
79445+#define atomic_read_unchecked(v) atomic_read(v)
79446+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79447+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79448+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79449+#define atomic_inc_unchecked(v) atomic_inc(v)
79450+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79451+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79452+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79453+#define atomic_dec_unchecked(v) atomic_dec(v)
79454+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79455+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79456+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79457+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79458+
79459+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79460+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79461+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79462+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79463+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79464+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79465+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79466+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79467+#endif
79468+
79469 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
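
The hunks above give every checked atomic_long operation an _unchecked twin acting on atomic_long_unchecked_t, for counters that are allowed to wrap without triggering the PAX_REFCOUNT overflow trap; pax_refcount_needs_these_functions() exists only to reference each _unchecked primitive once, so an architecture that enables PAX_REFCOUNT without implementing one of them breaks the build instead of failing at runtime. A minimal usage sketch, assuming a PAX_REFCOUNT kernel (struct conn and its fields are hypothetical; the types and helpers are the ones declared above):

    struct conn {
        atomic_t refcnt;                   /* overflow must trap (refcount) */
        atomic_long_unchecked_t rx_bytes;  /* wrapping is harmless (stats)  */
    };

    static void conn_get(struct conn *c)
    {
        atomic_inc(&c->refcnt);                      /* instrumented */
    }

    static void conn_account(struct conn *c, long n)
    {
        atomic_long_add_unchecked(n, &c->rx_bytes);  /* never traps  */
    }

On !PAX_REFCOUNT builds the #else branch maps every _unchecked name straight back to the plain operation, so callers compile identically either way.
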
79470diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79471index 30ad9c8..c70c170 100644
79472--- a/include/asm-generic/atomic64.h
79473+++ b/include/asm-generic/atomic64.h
79474@@ -16,6 +16,8 @@ typedef struct {
79475 long long counter;
79476 } atomic64_t;
79477
79478+typedef atomic64_t atomic64_unchecked_t;
79479+
79480 #define ATOMIC64_INIT(i) { (i) }
79481
79482 extern long long atomic64_read(const atomic64_t *v);
79483@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79484 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79485 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79486
79487+#define atomic64_read_unchecked(v) atomic64_read(v)
79488+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79489+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79490+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79491+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79492+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79493+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79494+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79495+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79496+
79497 #endif /* _ASM_GENERIC_ATOMIC64_H */
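
Here atomic64_unchecked_t is a bare typedef because the generic, spinlock-based atomic64 implementation carries no overflow instrumentation to opt out of; the _unchecked macros therefore collapse to the plain calls. A compile-time view (illustrative):

    atomic64_unchecked_t seq = ATOMIC64_INIT(0);

    /* With this generic fallback, the next line is literally
     * atomic64_inc(&seq) after preprocessing: */
    atomic64_inc_unchecked(&seq);
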
79498diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79499index f5c40b0..e902f9d 100644
79500--- a/include/asm-generic/barrier.h
79501+++ b/include/asm-generic/barrier.h
79502@@ -82,7 +82,7 @@
79503 do { \
79504 compiletime_assert_atomic_type(*p); \
79505 smp_mb(); \
79506- ACCESS_ONCE(*p) = (v); \
79507+ ACCESS_ONCE_RW(*p) = (v); \
79508 } while (0)
79509
79510 #define smp_load_acquire(p) \
79511diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79512index a60a7cc..0fe12f2 100644
79513--- a/include/asm-generic/bitops/__fls.h
79514+++ b/include/asm-generic/bitops/__fls.h
79515@@ -9,7 +9,7 @@
79516 *
79517 * Undefined if no set bit exists, so code should check against 0 first.
79518 */
79519-static __always_inline unsigned long __fls(unsigned long word)
79520+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79521 {
79522 int num = BITS_PER_LONG - 1;
79523
79524diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79525index 0576d1f..dad6c71 100644
79526--- a/include/asm-generic/bitops/fls.h
79527+++ b/include/asm-generic/bitops/fls.h
79528@@ -9,7 +9,7 @@
79529 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79530 */
79531
79532-static __always_inline int fls(int x)
79533+static __always_inline int __intentional_overflow(-1) fls(int x)
79534 {
79535 int r = 32;
79536
79537diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79538index b097cf8..3d40e14 100644
79539--- a/include/asm-generic/bitops/fls64.h
79540+++ b/include/asm-generic/bitops/fls64.h
79541@@ -15,7 +15,7 @@
79542 * at position 64.
79543 */
79544 #if BITS_PER_LONG == 32
79545-static __always_inline int fls64(__u64 x)
79546+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79547 {
79548 __u32 h = x >> 32;
79549 if (h)
79550@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79551 return fls(x);
79552 }
79553 #elif BITS_PER_LONG == 64
79554-static __always_inline int fls64(__u64 x)
79555+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79556 {
79557 if (x == 0)
79558 return 0;
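
fls(), __fls() and fls64() get __intentional_overflow(-1) because their shift-and-compare arithmetic wraps by design; the annotation (defined by the compiler-gcc*.h hunks later in this patch) tells the size_overflow GCC plugin to leave them uninstrumented. The same attribute can mark a caller's own deliberately wrapping helper; a hedged sketch (hash_mix is hypothetical):

    /* Multiplicative hashing wraps modulo 2^32 by design. */
    static inline u32 __intentional_overflow(-1) hash_mix(u32 x)
    {
        return x * 0x9e3779b9u;  /* golden-ratio constant */
    }
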
79559diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79560index 1bfcfe5..e04c5c9 100644
79561--- a/include/asm-generic/cache.h
79562+++ b/include/asm-generic/cache.h
79563@@ -6,7 +6,7 @@
79564 * cache lines need to provide their own cache.h.
79565 */
79566
79567-#define L1_CACHE_SHIFT 5
79568-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79569+#define L1_CACHE_SHIFT 5UL
79570+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79571
79572 #endif /* __ASM_GENERIC_CACHE_H */
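
Switching L1_CACHE_SHIFT to 5UL makes every expression built from it compute in unsigned long from the first operand on, which matters once the constant feeds size arithmetic. A minimal illustration on a 64-bit build (nlines is hypothetical):

    unsigned int nlines = 1U << 28;
    size_t a = nlines * (1  << 5);  /* 32-bit multiply: wraps to 0       */
    size_t b = nlines * (1UL << 5); /* 64-bit multiply: 1UL << 33, exact */
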
79573diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79574index 0d68a1e..b74a761 100644
79575--- a/include/asm-generic/emergency-restart.h
79576+++ b/include/asm-generic/emergency-restart.h
79577@@ -1,7 +1,7 @@
79578 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79579 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79580
79581-static inline void machine_emergency_restart(void)
79582+static inline __noreturn void machine_emergency_restart(void)
79583 {
79584 machine_restart(NULL);
79585 }
79586diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79587index 90f99c7..00ce236 100644
79588--- a/include/asm-generic/kmap_types.h
79589+++ b/include/asm-generic/kmap_types.h
79590@@ -2,9 +2,9 @@
79591 #define _ASM_GENERIC_KMAP_TYPES_H
79592
79593 #ifdef __WITH_KM_FENCE
79594-# define KM_TYPE_NR 41
79595+# define KM_TYPE_NR 42
79596 #else
79597-# define KM_TYPE_NR 20
79598+# define KM_TYPE_NR 21
79599 #endif
79600
79601 #endif
79602diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79603index 9ceb03b..62b0b8f 100644
79604--- a/include/asm-generic/local.h
79605+++ b/include/asm-generic/local.h
79606@@ -23,24 +23,37 @@ typedef struct
79607 atomic_long_t a;
79608 } local_t;
79609
79610+typedef struct {
79611+ atomic_long_unchecked_t a;
79612+} local_unchecked_t;
79613+
79614 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79615
79616 #define local_read(l) atomic_long_read(&(l)->a)
79617+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79618 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79619+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79620 #define local_inc(l) atomic_long_inc(&(l)->a)
79621+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79622 #define local_dec(l) atomic_long_dec(&(l)->a)
79623+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79624 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79625+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79626 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79627+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79628
79629 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79630 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79631 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79632 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79633 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79634+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79635 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79636 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79637+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79638
79639 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79640+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79641 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79642 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79643 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
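
local_unchecked_t mirrors local_t on top of atomic_long_unchecked_t, so per-cpu quantities that may legitimately wrap (ring positions, statistics) stay outside the refcount checking. Note that local_cmpxchg_unchecked deliberately expands to the plain atomic_long_cmpxchg: cmpxchg stores a caller-supplied value and never performs the guarded increment, so there is no unchecked variant to call. Usage sketch (the per-cpu variable is hypothetical):

    static DEFINE_PER_CPU(local_unchecked_t, ring_tail);

    static unsigned long bump_tail(void)
    {
        return local_add_return_unchecked(1, this_cpu_ptr(&ring_tail));
    }
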
79644diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79645index 725612b..9cc513a 100644
79646--- a/include/asm-generic/pgtable-nopmd.h
79647+++ b/include/asm-generic/pgtable-nopmd.h
79648@@ -1,14 +1,19 @@
79649 #ifndef _PGTABLE_NOPMD_H
79650 #define _PGTABLE_NOPMD_H
79651
79652-#ifndef __ASSEMBLY__
79653-
79654 #include <asm-generic/pgtable-nopud.h>
79655
79656-struct mm_struct;
79657-
79658 #define __PAGETABLE_PMD_FOLDED
79659
79660+#define PMD_SHIFT PUD_SHIFT
79661+#define PTRS_PER_PMD 1
79662+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79663+#define PMD_MASK (~(PMD_SIZE-1))
79664+
79665+#ifndef __ASSEMBLY__
79666+
79667+struct mm_struct;
79668+
79669 /*
79670 * Having the pmd type consist of a pud gets the size right, and allows
79671 * us to conceptually access the pud entry that this pmd is folded into
79672@@ -16,11 +21,6 @@ struct mm_struct;
79673 */
79674 typedef struct { pud_t pud; } pmd_t;
79675
79676-#define PMD_SHIFT PUD_SHIFT
79677-#define PTRS_PER_PMD 1
79678-#define PMD_SIZE (1UL << PMD_SHIFT)
79679-#define PMD_MASK (~(PMD_SIZE-1))
79680-
79681 /*
79682 * The "pud_xxx()" functions here are trivial for a folded two-level
79683 * setup: the pmd is never bad, and a pmd always exists (as it's folded
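
Both page-table hunks follow the same pattern: the PMD_*/PUD_* constants move outside the #ifndef __ASSEMBLY__ guard so assembly sources can use them, and the size constants switch from 1UL to _AC(1,UL), the <uapi/linux/const.h> idiom for constants shared between C and the assembler, where a UL suffix would be a syntax error. For context, _AC works roughly like this:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)   X          /* assembler sees a plain 1    */
    #else
    #define __AC(X,Y)  (X##Y)
    #define _AC(X,Y)   __AC(X,Y)  /* C sees the pasted token 1UL */
    #endif
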
79684diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79685index 810431d..0ec4804f 100644
79686--- a/include/asm-generic/pgtable-nopud.h
79687+++ b/include/asm-generic/pgtable-nopud.h
79688@@ -1,10 +1,15 @@
79689 #ifndef _PGTABLE_NOPUD_H
79690 #define _PGTABLE_NOPUD_H
79691
79692-#ifndef __ASSEMBLY__
79693-
79694 #define __PAGETABLE_PUD_FOLDED
79695
79696+#define PUD_SHIFT PGDIR_SHIFT
79697+#define PTRS_PER_PUD 1
79698+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79699+#define PUD_MASK (~(PUD_SIZE-1))
79700+
79701+#ifndef __ASSEMBLY__
79702+
79703 /*
79704 * Having the pud type consist of a pgd gets the size right, and allows
79705 * us to conceptually access the pgd entry that this pud is folded into
79706@@ -12,11 +17,6 @@
79707 */
79708 typedef struct { pgd_t pgd; } pud_t;
79709
79710-#define PUD_SHIFT PGDIR_SHIFT
79711-#define PTRS_PER_PUD 1
79712-#define PUD_SIZE (1UL << PUD_SHIFT)
79713-#define PUD_MASK (~(PUD_SIZE-1))
79714-
79715 /*
79716 * The "pgd_xxx()" functions here are trivial for a folded two-level
79717 * setup: the pud is never bad, and a pud always exists (as it's folded
79718@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79719 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79720
79721 #define pgd_populate(mm, pgd, pud) do { } while (0)
79722+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79723 /*
79724 * (puds are folded into pgds so this doesn't get actually called,
79725 * but the define is needed for a generic inline function.)
79726diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79727index 177d597..2826237 100644
79728--- a/include/asm-generic/pgtable.h
79729+++ b/include/asm-generic/pgtable.h
79730@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79731 }
79732 #endif /* CONFIG_NUMA_BALANCING */
79733
79734+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79735+#ifdef CONFIG_PAX_KERNEXEC
79736+#error KERNEXEC requires pax_open_kernel
79737+#else
79738+static inline unsigned long pax_open_kernel(void) { return 0; }
79739+#endif
79740+#endif
79741+
79742+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
79743+#ifdef CONFIG_PAX_KERNEXEC
79744+#error KERNEXEC requires pax_close_kernel
79745+#else
79746+static inline unsigned long pax_close_kernel(void) { return 0; }
79747+#endif
79748+#endif
79749+
79750 #endif /* CONFIG_MMU */
79751
79752 #endif /* !__ASSEMBLY__ */
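
This is the enforcement half of the KERNEXEC write-protection API: an architecture selecting CONFIG_PAX_KERNEXEC must supply pax_open_kernel()/pax_close_kernel() and announce them via __HAVE_ARCH_PAX_OPEN_KERNEL, or the build stops at the #error; everyone else gets no-op stubs. The calling pattern for modifying an otherwise read-only kernel object looks like this (sketch; struct ops_table is hypothetical):

    static void set_handler(struct ops_table *t, void (*fn)(void))
    {
        pax_open_kernel();   /* briefly lift kernel write protection */
        t->handler = fn;     /* t lives in a read-only mapping       */
        pax_close_kernel();  /* restore it                           */
    }
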
79753diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
79754index 72d8803..cb9749c 100644
79755--- a/include/asm-generic/uaccess.h
79756+++ b/include/asm-generic/uaccess.h
79757@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
79758 return __clear_user(to, n);
79759 }
79760
79761+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
79762+#ifdef CONFIG_PAX_MEMORY_UDEREF
79763+#error UDEREF requires pax_open_userland
79764+#else
79765+static inline unsigned long pax_open_userland(void) { return 0; }
79766+#endif
79767+#endif
79768+
79769+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
79770+#ifdef CONFIG_PAX_MEMORY_UDEREF
79771+#error UDEREF requires pax_close_userland
79772+#else
79773+static inline unsigned long pax_close_userland(void) { return 0; }
79774+#endif
79775+#endif
79776+
79777 #endif /* __ASM_GENERIC_UACCESS_H */
79778diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
79779index bee5d68..8d362d1 100644
79780--- a/include/asm-generic/vmlinux.lds.h
79781+++ b/include/asm-generic/vmlinux.lds.h
79782@@ -234,6 +234,7 @@
79783 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
79784 VMLINUX_SYMBOL(__start_rodata) = .; \
79785 *(.rodata) *(.rodata.*) \
79786+ *(.data..read_only) \
79787 *(__vermagic) /* Kernel version magic */ \
79788 . = ALIGN(8); \
79789 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
79790@@ -726,17 +727,18 @@
79791 * section in the linker script will go there too. @phdr should have
79792 * a leading colon.
79793 *
79794- * Note that this macros defines __per_cpu_load as an absolute symbol.
79795+ * Note that this macro defines per_cpu_load as an absolute symbol.
79796 * If there is no need to put the percpu section at a predetermined
79797 * address, use PERCPU_SECTION.
79798 */
79799 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
79800- VMLINUX_SYMBOL(__per_cpu_load) = .; \
79801- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
79802+ per_cpu_load = .; \
79803+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
79804 - LOAD_OFFSET) { \
79805+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
79806 PERCPU_INPUT(cacheline) \
79807 } phdr \
79808- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
79809+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
79810
79811 /**
79812 * PERCPU_SECTION - define output section for percpu area, simple version
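
Folding *(.data..read_only) into .rodata is what backs the __read_only annotation added to linux/cache.h later in this patch: objects placed there are mapped without write permission once the kernel is up, and legitimate updates must be bracketed by pax_open_kernel()/pax_close_kernel() as sketched above. The PERCPU_VADDR change in the same file defines __per_cpu_load relative to a section-local per_cpu_load symbol instead of as an absolute symbol, so the address participates in relocation. A declaration sketch (max_sessions is hypothetical; on a KERNEXEC arch, __read_only expands to a section attribute placing the object in .data..read_only):

    static int max_sessions __read_only = 64;

    static void set_max_sessions(int n)
    {
        pax_open_kernel();
        max_sessions = n;    /* would fault without open_kernel */
        pax_close_kernel();
    }
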
79813diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
79814index 623a59c..1e79ab9 100644
79815--- a/include/crypto/algapi.h
79816+++ b/include/crypto/algapi.h
79817@@ -34,7 +34,7 @@ struct crypto_type {
79818 unsigned int maskclear;
79819 unsigned int maskset;
79820 unsigned int tfmsize;
79821-};
79822+} __do_const;
79823
79824 struct crypto_instance {
79825 struct crypto_alg alg;
79826diff --git a/include/drm/drmP.h b/include/drm/drmP.h
79827index e1b2e8b..2697bd2 100644
79828--- a/include/drm/drmP.h
79829+++ b/include/drm/drmP.h
79830@@ -59,6 +59,7 @@
79831
79832 #include <asm/mman.h>
79833 #include <asm/pgalloc.h>
79834+#include <asm/local.h>
79835 #include <asm/uaccess.h>
79836
79837 #include <uapi/drm/drm.h>
79838@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
79839 * \param cmd command.
79840 * \param arg argument.
79841 */
79842-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
79843+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
79844+ struct drm_file *file_priv);
79845+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
79846 struct drm_file *file_priv);
79847
79848-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79849+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
79850 unsigned long arg);
79851
79852 #define DRM_IOCTL_NR(n) _IOC_NR(n)
79853@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79854 struct drm_ioctl_desc {
79855 unsigned int cmd;
79856 int flags;
79857- drm_ioctl_t *func;
79858+ drm_ioctl_t func;
79859 unsigned int cmd_drv;
79860 const char *name;
79861-};
79862+} __do_const;
79863
79864 /**
79865 * Creates a driver or general drm_ioctl_desc array entry for the given
79866@@ -629,7 +632,8 @@ struct drm_info_list {
79867 int (*show)(struct seq_file*, void*); /** show callback */
79868 u32 driver_features; /**< Required driver features for this entry */
79869 void *data;
79870-};
79871+} __do_const;
79872+typedef struct drm_info_list __no_const drm_info_list_no_const;
79873
79874 /**
79875 * debugfs node structure. This structure represents a debugfs file.
79876@@ -713,7 +717,7 @@ struct drm_device {
79877
79878 /** \name Usage Counters */
79879 /*@{ */
79880- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79881+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79882 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
79883 int buf_use; /**< Buffers in use -- cannot alloc */
79884 atomic_t buf_alloc; /**< Buffer allocation in progress */
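
Three related hardenings in drmP.h: the ioctl typedefs become pointers-to-const-function so tables of them can live in read-only memory, drm_ioctl_desc is tagged __do_const so the constify plugin makes every instance const, and open_count becomes a local_t so its updates go through the refcount-checked atomic_long helpers. The net effect on a driver (illustrative; foo_ioctls is hypothetical) is as if its table had been written:

    /* __do_const: the plugin treats this as const even though the
     * driver never wrote the qualifier. */
    static const struct drm_ioctl_desc foo_ioctls[] = {
        /* { cmd, flags, func, cmd_drv, name } entries ... */
    };

Drivers that genuinely must build such a descriptor at runtime use the drm_ioctl_no_const_t and drm_info_list_no_const escape hatches added alongside.
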
79885diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
79886index 7adbb65..2a1eb1f 100644
79887--- a/include/drm/drm_crtc_helper.h
79888+++ b/include/drm/drm_crtc_helper.h
79889@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
79890 struct drm_connector *connector);
79891 /* disable encoder when not in use - more explicit than dpms off */
79892 void (*disable)(struct drm_encoder *encoder);
79893-};
79894+} __no_const;
79895
79896 /**
79897 * drm_connector_helper_funcs - helper operations for connectors
79898diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
79899index d016dc5..3951fe0 100644
79900--- a/include/drm/i915_pciids.h
79901+++ b/include/drm/i915_pciids.h
79902@@ -37,7 +37,7 @@
79903 */
79904 #define INTEL_VGA_DEVICE(id, info) { \
79905 0x8086, id, \
79906- ~0, ~0, \
79907+ PCI_ANY_ID, PCI_ANY_ID, \
79908 0x030000, 0xff0000, \
79909 (unsigned long) info }
79910
79911diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
79912index 72dcbe8..8db58d7 100644
79913--- a/include/drm/ttm/ttm_memory.h
79914+++ b/include/drm/ttm/ttm_memory.h
79915@@ -48,7 +48,7 @@
79916
79917 struct ttm_mem_shrink {
79918 int (*do_shrink) (struct ttm_mem_shrink *);
79919-};
79920+} __no_const;
79921
79922 /**
79923 * struct ttm_mem_global - Global memory accounting structure.
79924diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
79925index 49a8284..9643967 100644
79926--- a/include/drm/ttm/ttm_page_alloc.h
79927+++ b/include/drm/ttm/ttm_page_alloc.h
79928@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
79929 */
79930 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
79931
79932+struct device;
79933 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
79934 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
79935
79936diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
79937index 4b840e8..155d235 100644
79938--- a/include/keys/asymmetric-subtype.h
79939+++ b/include/keys/asymmetric-subtype.h
79940@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
79941 /* Verify the signature on a key of this subtype (optional) */
79942 int (*verify_signature)(const struct key *key,
79943 const struct public_key_signature *sig);
79944-};
79945+} __do_const;
79946
79947 /**
79948 * asymmetric_key_subtype - Get the subtype from an asymmetric key
79949diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
79950index c1da539..1dcec55 100644
79951--- a/include/linux/atmdev.h
79952+++ b/include/linux/atmdev.h
79953@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
79954 #endif
79955
79956 struct k_atm_aal_stats {
79957-#define __HANDLE_ITEM(i) atomic_t i
79958+#define __HANDLE_ITEM(i) atomic_unchecked_t i
79959 __AAL_STAT_ITEMS
79960 #undef __HANDLE_ITEM
79961 };
79962@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
79963 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
79964 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
79965 struct module *owner;
79966-};
79967+} __do_const;
79968
79969 struct atmphy_ops {
79970 int (*start)(struct atm_dev *dev);
79971diff --git a/include/linux/atomic.h b/include/linux/atomic.h
79972index 5b08a85..60922fb 100644
79973--- a/include/linux/atomic.h
79974+++ b/include/linux/atomic.h
79975@@ -12,7 +12,7 @@
79976 * Atomically adds @a to @v, so long as @v was not already @u.
79977 * Returns non-zero if @v was not @u, and zero otherwise.
79978 */
79979-static inline int atomic_add_unless(atomic_t *v, int a, int u)
79980+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
79981 {
79982 return __atomic_add_unless(v, a, u) != u;
79983 }
79984diff --git a/include/linux/audit.h b/include/linux/audit.h
79985index af84234..4177a40 100644
79986--- a/include/linux/audit.h
79987+++ b/include/linux/audit.h
79988@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
79989 extern unsigned int audit_serial(void);
79990 extern int auditsc_get_stamp(struct audit_context *ctx,
79991 struct timespec *t, unsigned int *serial);
79992-extern int audit_set_loginuid(kuid_t loginuid);
79993+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
79994
79995 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
79996 {
79997diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
79998index 576e463..28fd926 100644
79999--- a/include/linux/binfmts.h
80000+++ b/include/linux/binfmts.h
80001@@ -44,7 +44,7 @@ struct linux_binprm {
80002 unsigned interp_flags;
80003 unsigned interp_data;
80004 unsigned long loader, exec;
80005-};
80006+} __randomize_layout;
80007
80008 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80009 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80010@@ -77,8 +77,10 @@ struct linux_binfmt {
80011 int (*load_binary)(struct linux_binprm *);
80012 int (*load_shlib)(struct file *);
80013 int (*core_dump)(struct coredump_params *cprm);
80014+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80015+ void (*handle_mmap)(struct file *);
80016 unsigned long min_coredump; /* minimal dump size */
80017-};
80018+} __do_const __randomize_layout;
80019
80020 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80021
80022diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80023index 202e403..16e6617 100644
80024--- a/include/linux/bitmap.h
80025+++ b/include/linux/bitmap.h
80026@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80027 return __bitmap_full(src, nbits);
80028 }
80029
80030-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80031+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80032 {
80033 if (small_const_nbits(nbits))
80034 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80035diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80036index 5d858e0..336c1d9 100644
80037--- a/include/linux/bitops.h
80038+++ b/include/linux/bitops.h
80039@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80040 * @word: value to rotate
80041 * @shift: bits to roll
80042 */
80043-static inline __u32 rol32(__u32 word, unsigned int shift)
80044+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80045 {
80046 return (word << shift) | (word >> (32 - shift));
80047 }
80048@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80049 * @word: value to rotate
80050 * @shift: bits to roll
80051 */
80052-static inline __u32 ror32(__u32 word, unsigned int shift)
80053+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80054 {
80055 return (word >> shift) | (word << (32 - shift));
80056 }
80057@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80058 return (__s32)(value << shift) >> shift;
80059 }
80060
80061-static inline unsigned fls_long(unsigned long l)
80062+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80063 {
80064 if (sizeof(l) == 4)
80065 return fls(l);
80066diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80067index 92f4b4b..483d537 100644
80068--- a/include/linux/blkdev.h
80069+++ b/include/linux/blkdev.h
80070@@ -1613,7 +1613,7 @@ struct block_device_operations {
80071 /* this callback is with swap_lock and sometimes page table lock held */
80072 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80073 struct module *owner;
80074-};
80075+} __do_const;
80076
80077 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80078 unsigned long);
80079diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80080index afc1343..9735539 100644
80081--- a/include/linux/blktrace_api.h
80082+++ b/include/linux/blktrace_api.h
80083@@ -25,7 +25,7 @@ struct blk_trace {
80084 struct dentry *dropped_file;
80085 struct dentry *msg_file;
80086 struct list_head running_list;
80087- atomic_t dropped;
80088+ atomic_unchecked_t dropped;
80089 };
80090
80091 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80092diff --git a/include/linux/cache.h b/include/linux/cache.h
80093index 17e7e82..1d7da26 100644
80094--- a/include/linux/cache.h
80095+++ b/include/linux/cache.h
80096@@ -16,6 +16,14 @@
80097 #define __read_mostly
80098 #endif
80099
80100+#ifndef __read_only
80101+#ifdef CONFIG_PAX_KERNEXEC
80102+#error KERNEXEC requires __read_only
80103+#else
80104+#define __read_only __read_mostly
80105+#endif
80106+#endif
80107+
80108 #ifndef ____cacheline_aligned
80109 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80110 #endif
80111diff --git a/include/linux/capability.h b/include/linux/capability.h
80112index aa93e5e..985a1b0 100644
80113--- a/include/linux/capability.h
80114+++ b/include/linux/capability.h
80115@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80116 extern bool capable(int cap);
80117 extern bool ns_capable(struct user_namespace *ns, int cap);
80118 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80119+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80120 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80121+extern bool capable_nolog(int cap);
80122+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80123
80124 /* audit system wants to get cap info from files as well */
80125 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80126
80127+extern int is_privileged_binary(const struct dentry *dentry);
80128+
80129 #endif /* !_LINUX_CAPABILITY_H */
80130diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80131index 8609d57..86e4d79 100644
80132--- a/include/linux/cdrom.h
80133+++ b/include/linux/cdrom.h
80134@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80135
80136 /* driver specifications */
80137 const int capability; /* capability flags */
80138- int n_minors; /* number of active minor devices */
80139 /* handle uniform packets for scsi type devices (scsi,atapi) */
80140 int (*generic_packet) (struct cdrom_device_info *,
80141 struct packet_command *);
80142diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80143index 4ce9056..86caac6 100644
80144--- a/include/linux/cleancache.h
80145+++ b/include/linux/cleancache.h
80146@@ -31,7 +31,7 @@ struct cleancache_ops {
80147 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80148 void (*invalidate_inode)(int, struct cleancache_filekey);
80149 void (*invalidate_fs)(int);
80150-};
80151+} __no_const;
80152
80153 extern struct cleancache_ops *
80154 cleancache_register_ops(struct cleancache_ops *ops);
80155diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80156index d936409..ce9f842 100644
80157--- a/include/linux/clk-provider.h
80158+++ b/include/linux/clk-provider.h
80159@@ -191,6 +191,7 @@ struct clk_ops {
80160 void (*init)(struct clk_hw *hw);
80161 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80162 };
80163+typedef struct clk_ops __no_const clk_ops_no_const;
80164
80165 /**
80166 * struct clk_init_data - holds init data that's common to all clocks and is
80167diff --git a/include/linux/compat.h b/include/linux/compat.h
80168index 7450ca2..a824b81 100644
80169--- a/include/linux/compat.h
80170+++ b/include/linux/compat.h
80171@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80172 compat_size_t __user *len_ptr);
80173
80174 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80175-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80176+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80177 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80178 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80179 compat_ssize_t msgsz, int msgflg);
80180@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80181 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80182 compat_ulong_t addr, compat_ulong_t data);
80183 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80184- compat_long_t addr, compat_long_t data);
80185+ compat_ulong_t addr, compat_ulong_t data);
80186
80187 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80188 /*
80189diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80190index d1a5582..4424efa 100644
80191--- a/include/linux/compiler-gcc4.h
80192+++ b/include/linux/compiler-gcc4.h
80193@@ -39,9 +39,34 @@
80194 # define __compiletime_warning(message) __attribute__((warning(message)))
80195 # define __compiletime_error(message) __attribute__((error(message)))
80196 #endif /* __CHECKER__ */
80197+
80198+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80199+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80200+#define __bos0(ptr) __bos((ptr), 0)
80201+#define __bos1(ptr) __bos((ptr), 1)
80202 #endif /* GCC_VERSION >= 40300 */
80203
80204 #if GCC_VERSION >= 40500
80205+
80206+#ifdef RANDSTRUCT_PLUGIN
80207+#define __randomize_layout __attribute__((randomize_layout))
80208+#define __no_randomize_layout __attribute__((no_randomize_layout))
80209+#endif
80210+
80211+#ifdef CONSTIFY_PLUGIN
80212+#define __no_const __attribute__((no_const))
80213+#define __do_const __attribute__((do_const))
80214+#endif
80215+
80216+#ifdef SIZE_OVERFLOW_PLUGIN
80217+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80218+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80219+#endif
80220+
80221+#ifdef LATENT_ENTROPY_PLUGIN
80222+#define __latent_entropy __attribute__((latent_entropy))
80223+#endif
80224+
80225 /*
80226 * Mark a position in code as unreachable. This can be used to
80227 * suppress control flow warnings after asm blocks that transfer
80228diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80229index c8c5659..d09f2ad 100644
80230--- a/include/linux/compiler-gcc5.h
80231+++ b/include/linux/compiler-gcc5.h
80232@@ -28,6 +28,28 @@
80233 # define __compiletime_error(message) __attribute__((error(message)))
80234 #endif /* __CHECKER__ */
80235
80236+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80237+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80238+#define __bos0(ptr) __bos((ptr), 0)
80239+#define __bos1(ptr) __bos((ptr), 1)
80240+
80241+#ifdef CONSTIFY_PLUGIN
80242+#error not yet
80243+#define __no_const __attribute__((no_const))
80244+#define __do_const __attribute__((do_const))
80245+#endif
80246+
80247+#ifdef SIZE_OVERFLOW_PLUGIN
80248+#error not yet
80249+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80250+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80251+#endif
80252+
80253+#ifdef LATENT_ENTROPY_PLUGIN
80254+#error not yet
80255+#define __latent_entropy __attribute__((latent_entropy))
80256+#endif
80257+
80258 /*
80259 * Mark a position in code as unreachable. This can be used to
80260 * suppress control flow warnings after asm blocks that transfer
80261diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80262index fa6a314..752a6ef 100644
80263--- a/include/linux/compiler.h
80264+++ b/include/linux/compiler.h
80265@@ -5,11 +5,14 @@
80266
80267 #ifdef __CHECKER__
80268 # define __user __attribute__((noderef, address_space(1)))
80269+# define __force_user __force __user
80270 # define __kernel __attribute__((address_space(0)))
80271+# define __force_kernel __force __kernel
80272 # define __safe __attribute__((safe))
80273 # define __force __attribute__((force))
80274 # define __nocast __attribute__((nocast))
80275 # define __iomem __attribute__((noderef, address_space(2)))
80276+# define __force_iomem __force __iomem
80277 # define __must_hold(x) __attribute__((context(x,1,1)))
80278 # define __acquires(x) __attribute__((context(x,0,1)))
80279 # define __releases(x) __attribute__((context(x,1,0)))
80280@@ -17,20 +20,37 @@
80281 # define __release(x) __context__(x,-1)
80282 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80283 # define __percpu __attribute__((noderef, address_space(3)))
80284+# define __force_percpu __force __percpu
80285 #ifdef CONFIG_SPARSE_RCU_POINTER
80286 # define __rcu __attribute__((noderef, address_space(4)))
80287+# define __force_rcu __force __rcu
80288 #else
80289 # define __rcu
80290+# define __force_rcu
80291 #endif
80292 extern void __chk_user_ptr(const volatile void __user *);
80293 extern void __chk_io_ptr(const volatile void __iomem *);
80294 #else
80295-# define __user
80296-# define __kernel
80297+# ifdef CHECKER_PLUGIN
80298+//# define __user
80299+//# define __force_user
80300+//# define __kernel
80301+//# define __force_kernel
80302+# else
80303+# ifdef STRUCTLEAK_PLUGIN
80304+# define __user __attribute__((user))
80305+# else
80306+# define __user
80307+# endif
80308+# define __force_user
80309+# define __kernel
80310+# define __force_kernel
80311+# endif
80312 # define __safe
80313 # define __force
80314 # define __nocast
80315 # define __iomem
80316+# define __force_iomem
80317 # define __chk_user_ptr(x) (void)0
80318 # define __chk_io_ptr(x) (void)0
80319 # define __builtin_warning(x, y...) (1)
80320@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80321 # define __release(x) (void)0
80322 # define __cond_lock(x,c) (c)
80323 # define __percpu
80324+# define __force_percpu
80325 # define __rcu
80326+# define __force_rcu
80327 #endif
80328
80329 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80330@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80331 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80332 {
80333 switch (size) {
80334- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80335- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80336- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80337+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80338+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80339+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80340 #ifdef CONFIG_64BIT
80341- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80342+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80343 #endif
80344 default:
80345 barrier();
80346- __builtin_memcpy((void *)res, (const void *)p, size);
80347+ __builtin_memcpy(res, (const void *)p, size);
80348 data_access_exceeds_word_size();
80349 barrier();
80350 }
80351 }
80352
80353-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80354+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80355 {
80356 switch (size) {
80357- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80358- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80359- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80360+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80361+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80362+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
80363 #ifdef CONFIG_64BIT
80364- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
80365+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
80366 #endif
80367 default:
80368 barrier();
80369- __builtin_memcpy((void *)p, (const void *)res, size);
80370+ __builtin_memcpy((void *)p, res, size);
80371 data_access_exceeds_word_size();
80372 barrier();
80373 }
80374@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80375 # define __attribute_const__ /* unimplemented */
80376 #endif
80377
80378+#ifndef __randomize_layout
80379+# define __randomize_layout
80380+#endif
80381+
80382+#ifndef __no_randomize_layout
80383+# define __no_randomize_layout
80384+#endif
80385+
80386+#ifndef __no_const
80387+# define __no_const
80388+#endif
80389+
80390+#ifndef __do_const
80391+# define __do_const
80392+#endif
80393+
80394+#ifndef __size_overflow
80395+# define __size_overflow(...)
80396+#endif
80397+
80398+#ifndef __intentional_overflow
80399+# define __intentional_overflow(...)
80400+#endif
80401+
80402+#ifndef __latent_entropy
80403+# define __latent_entropy
80404+#endif
80405+
80406 /*
80407 * Tell gcc if a function is cold. The compiler will assume any path
80408 * directly leading to the call is unlikely.
80409@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80410 #define __cold
80411 #endif
80412
80413+#ifndef __alloc_size
80414+#define __alloc_size(...)
80415+#endif
80416+
80417+#ifndef __bos
80418+#define __bos(ptr, arg)
80419+#endif
80420+
80421+#ifndef __bos0
80422+#define __bos0(ptr)
80423+#endif
80424+
80425+#ifndef __bos1
80426+#define __bos1(ptr)
80427+#endif
80428+
80429 /* Simple shorthand for a section definition */
80430 #ifndef __section
80431 # define __section(S) __attribute__ ((__section__(#S)))
80432@@ -462,8 +528,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80433 */
80434 #define __ACCESS_ONCE(x) ({ \
80435 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
80436- (volatile typeof(x) *)&(x); })
80437+ (volatile const typeof(x) *)&(x); })
80438 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
80439+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80440
80441 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80442 #ifdef CONFIG_KPROBES
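
The ACCESS_ONCE() change is the read/write split used throughout this patch: the cast now yields a pointer to volatile const, so a plain ACCESS_ONCE(x) = v fails to compile, and intentional lockless stores must be spelled ACCESS_ONCE_RW() instead, which is exactly what the smp_store_release() hunk earlier in this patch does. In short (sketch):

    static int flag;

    static void example(void)
    {
        int r = ACCESS_ONCE(flag);  /* reads compile as before           */
        ACCESS_ONCE(flag) = 1;      /* now rejected: assignment to const */
        ACCESS_ONCE_RW(flag) = 1;   /* explicit racy write, allowed      */
        (void)r;
    }
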
80443diff --git a/include/linux/completion.h b/include/linux/completion.h
80444index 5d5aaae..0ea9b84 100644
80445--- a/include/linux/completion.h
80446+++ b/include/linux/completion.h
80447@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80448
80449 extern void wait_for_completion(struct completion *);
80450 extern void wait_for_completion_io(struct completion *);
80451-extern int wait_for_completion_interruptible(struct completion *x);
80452-extern int wait_for_completion_killable(struct completion *x);
80453+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80454+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80455 extern unsigned long wait_for_completion_timeout(struct completion *x,
80456- unsigned long timeout);
80457+ unsigned long timeout) __intentional_overflow(-1);
80458 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80459- unsigned long timeout);
80460+ unsigned long timeout) __intentional_overflow(-1);
80461 extern long wait_for_completion_interruptible_timeout(
80462- struct completion *x, unsigned long timeout);
80463+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80464 extern long wait_for_completion_killable_timeout(
80465- struct completion *x, unsigned long timeout);
80466+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80467 extern bool try_wait_for_completion(struct completion *x);
80468 extern bool completion_done(struct completion *x);
80469
80470diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80471index 34025df..d94bbbc 100644
80472--- a/include/linux/configfs.h
80473+++ b/include/linux/configfs.h
80474@@ -125,7 +125,7 @@ struct configfs_attribute {
80475 const char *ca_name;
80476 struct module *ca_owner;
80477 umode_t ca_mode;
80478-};
80479+} __do_const;
80480
80481 /*
80482 * Users often need to create attribute structures for their configurable
80483diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80484index 4d078ce..c970f4d 100644
80485--- a/include/linux/cpufreq.h
80486+++ b/include/linux/cpufreq.h
80487@@ -206,6 +206,7 @@ struct global_attr {
80488 ssize_t (*store)(struct kobject *a, struct attribute *b,
80489 const char *c, size_t count);
80490 };
80491+typedef struct global_attr __no_const global_attr_no_const;
80492
80493 #define define_one_global_ro(_name) \
80494 static struct global_attr _name = \
80495@@ -277,7 +278,7 @@ struct cpufreq_driver {
80496 bool boost_supported;
80497 bool boost_enabled;
80498 int (*set_boost)(int state);
80499-};
80500+} __do_const;
80501
80502 /* flags */
80503 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80504diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80505index ab70f3b..3ef7771 100644
80506--- a/include/linux/cpuidle.h
80507+++ b/include/linux/cpuidle.h
80508@@ -50,7 +50,8 @@ struct cpuidle_state {
80509 int index);
80510
80511 int (*enter_dead) (struct cpuidle_device *dev, int index);
80512-};
80513+} __do_const;
80514+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80515
80516 /* Idle State Flags */
80517 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
80518@@ -206,7 +207,7 @@ struct cpuidle_governor {
80519 void (*reflect) (struct cpuidle_device *dev, int index);
80520
80521 struct module *owner;
80522-};
80523+} __do_const;
80524
80525 #ifdef CONFIG_CPU_IDLE
80526 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80527diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80528index b950e9d..63810aa 100644
80529--- a/include/linux/cpumask.h
80530+++ b/include/linux/cpumask.h
80531@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80532 }
80533
80534 /* Valid inputs for n are -1 and 0. */
80535-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80536+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80537 {
80538 return n+1;
80539 }
80540
80541-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80542+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80543 {
80544 return n+1;
80545 }
80546
80547-static inline unsigned int cpumask_next_and(int n,
80548+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80549 const struct cpumask *srcp,
80550 const struct cpumask *andp)
80551 {
80552@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80553 *
80554 * Returns >= nr_cpu_ids if no further cpus set.
80555 */
80556-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80557+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80558 {
80559 /* -1 is a legal arg here. */
80560 if (n != -1)
80561@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80562 *
80563 * Returns >= nr_cpu_ids if no further cpus unset.
80564 */
80565-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80566+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80567 {
80568 /* -1 is a legal arg here. */
80569 if (n != -1)
80570@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80571 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80572 }
80573
80574-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80575+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80576 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80577 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80578
80579@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
80580 * cpumask_weight - Count of bits in *srcp
80581 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
80582 */
80583-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
80584+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
80585 {
80586 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
80587 }
80588diff --git a/include/linux/cred.h b/include/linux/cred.h
80589index 2fb2ca2..d6a3340 100644
80590--- a/include/linux/cred.h
80591+++ b/include/linux/cred.h
80592@@ -35,7 +35,7 @@ struct group_info {
80593 int nblocks;
80594 kgid_t small_block[NGROUPS_SMALL];
80595 kgid_t *blocks[0];
80596-};
80597+} __randomize_layout;
80598
80599 /**
80600 * get_group_info - Get a reference to a group info structure
80601@@ -137,7 +137,7 @@ struct cred {
80602 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80603 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80604 struct rcu_head rcu; /* RCU deletion hook */
80605-};
80606+} __randomize_layout;
80607
80608 extern void __put_cred(struct cred *);
80609 extern void exit_creds(struct task_struct *);
80610@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80611 static inline void validate_process_creds(void)
80612 {
80613 }
80614+static inline void validate_task_creds(struct task_struct *task)
80615+{
80616+}
80617 #endif
80618
80619 /**
80620@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
80621
80622 #define task_uid(task) (task_cred_xxx((task), uid))
80623 #define task_euid(task) (task_cred_xxx((task), euid))
80624+#define task_securebits(task) (task_cred_xxx((task), securebits))
80625
80626 #define current_cred_xxx(xxx) \
80627 ({ \
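
struct cred and struct group_info (like the fs.h structures below) are tagged __randomize_layout: with the RANDSTRUCT plugin enabled (wired up in the compiler-gcc4.h hunk above), member order is shuffled per build from a seed, so exploit code that hardcodes offsets into struct cred stops being portable across kernels. Structures whose layout is external ABI must opt out; a hedged sketch (the struct is hypothetical, the attribute is the one this patch defines):

    struct fw_header {
        u32 magic;
        u32 version;
    } __no_randomize_layout;  /* parsed from disk: layout must stay stable */

The empty fallbacks added to linux/compiler.h make both tags disappear on non-plugin builds.
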
80628diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80629index 9c8776d..8c526c2 100644
80630--- a/include/linux/crypto.h
80631+++ b/include/linux/crypto.h
80632@@ -626,7 +626,7 @@ struct cipher_tfm {
80633 const u8 *key, unsigned int keylen);
80634 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80635 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80636-};
80637+} __no_const;
80638
80639 struct hash_tfm {
80640 int (*init)(struct hash_desc *desc);
80641@@ -647,13 +647,13 @@ struct compress_tfm {
80642 int (*cot_decompress)(struct crypto_tfm *tfm,
80643 const u8 *src, unsigned int slen,
80644 u8 *dst, unsigned int *dlen);
80645-};
80646+} __no_const;
80647
80648 struct rng_tfm {
80649 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80650 unsigned int dlen);
80651 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80652-};
80653+} __no_const;
80654
80655 #define crt_ablkcipher crt_u.ablkcipher
80656 #define crt_aead crt_u.aead
80657diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80658index 653589e..4ef254a 100644
80659--- a/include/linux/ctype.h
80660+++ b/include/linux/ctype.h
80661@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80662 * Fast implementation of tolower() for internal usage. Do not use in your
80663 * code.
80664 */
80665-static inline char _tolower(const char c)
80666+static inline unsigned char _tolower(const unsigned char c)
80667 {
80668 return c | 0x20;
80669 }
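
The _tolower() change is a plain-char signedness fix: on architectures where char is signed, a byte >= 0x80 made c negative, and c | 0x20 then produced a sign-extended result; operating on unsigned char keeps the arithmetic in 0..255. Worked example (illustrative):

    char c = '\xC9';                       /* signed char: value -55    */
    int before = c | 0x20;                 /* sign-extended: 0xffffffe9 */
    int after  = (unsigned char)c | 0x20;  /* 0xe9, as intended         */
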
80670diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80671index 5a81398..6bbee30 100644
80672--- a/include/linux/dcache.h
80673+++ b/include/linux/dcache.h
80674@@ -123,6 +123,9 @@ struct dentry {
80675 unsigned long d_time; /* used by d_revalidate */
80676 void *d_fsdata; /* fs-specific data */
80677
80678+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80679+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
80680+#endif
80681 struct list_head d_lru; /* LRU list */
80682 struct list_head d_child; /* child of parent list */
80683 struct list_head d_subdirs; /* our children */
80684@@ -133,7 +136,7 @@ struct dentry {
80685 struct hlist_node d_alias; /* inode alias list */
80686 struct rcu_head d_rcu;
80687 } d_u;
80688-};
80689+} __randomize_layout;
80690
80691 /*
80692 * dentry->d_lock spinlock nesting subclasses:
80693diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80694index 7925bf0..d5143d2 100644
80695--- a/include/linux/decompress/mm.h
80696+++ b/include/linux/decompress/mm.h
80697@@ -77,7 +77,7 @@ static void free(void *where)
80698 * warnings when not needed (indeed large_malloc / large_free are not
80699 * needed by inflate */
80700
80701-#define malloc(a) kmalloc(a, GFP_KERNEL)
80702+#define malloc(a) kmalloc((a), GFP_KERNEL)
80703 #define free(a) kfree(a)
80704
80705 #define large_malloc(a) vmalloc(a)
80706diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80707index ce447f0..83c66bd 100644
80708--- a/include/linux/devfreq.h
80709+++ b/include/linux/devfreq.h
80710@@ -114,7 +114,7 @@ struct devfreq_governor {
80711 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80712 int (*event_handler)(struct devfreq *devfreq,
80713 unsigned int event, void *data);
80714-};
80715+} __do_const;
80716
80717 /**
80718 * struct devfreq - Device devfreq structure
80719diff --git a/include/linux/device.h b/include/linux/device.h
80720index fb50673..ec0b35b 100644
80721--- a/include/linux/device.h
80722+++ b/include/linux/device.h
80723@@ -311,7 +311,7 @@ struct subsys_interface {
80724 struct list_head node;
80725 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80726 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80727-};
80728+} __do_const;
80729
80730 int subsys_interface_register(struct subsys_interface *sif);
80731 void subsys_interface_unregister(struct subsys_interface *sif);
80732@@ -507,7 +507,7 @@ struct device_type {
80733 void (*release)(struct device *dev);
80734
80735 const struct dev_pm_ops *pm;
80736-};
80737+} __do_const;
80738
80739 /* interface for exporting device attributes */
80740 struct device_attribute {
80741@@ -517,11 +517,12 @@ struct device_attribute {
80742 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80743 const char *buf, size_t count);
80744 };
80745+typedef struct device_attribute __no_const device_attribute_no_const;
80746
80747 struct dev_ext_attribute {
80748 struct device_attribute attr;
80749 void *var;
80750-};
80751+} __do_const;
80752
80753 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80754 char *buf);
80755diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80756index c3007cb..43efc8c 100644
80757--- a/include/linux/dma-mapping.h
80758+++ b/include/linux/dma-mapping.h
80759@@ -60,7 +60,7 @@ struct dma_map_ops {
80760 u64 (*get_required_mask)(struct device *dev);
80761 #endif
80762 int is_phys;
80763-};
80764+} __do_const;
80765
80766 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80767
80768diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80769index 40cd75e..38572a9 100644
80770--- a/include/linux/dmaengine.h
80771+++ b/include/linux/dmaengine.h
80772@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
80773 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80774 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80775
80776-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80777+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80778 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80779-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80780+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80781 struct dma_pinned_list *pinned_list, struct page *page,
80782 unsigned int offset, size_t len);
80783
80784diff --git a/include/linux/efi.h b/include/linux/efi.h
80785index 0238d61..34a758f 100644
80786--- a/include/linux/efi.h
80787+++ b/include/linux/efi.h
80788@@ -1054,6 +1054,7 @@ struct efivar_operations {
80789 efi_set_variable_nonblocking_t *set_variable_nonblocking;
80790 efi_query_variable_store_t *query_variable_store;
80791 };
80792+typedef struct efivar_operations __no_const efivar_operations_no_const;
80793
80794 struct efivars {
80795 /*
80796diff --git a/include/linux/elf.h b/include/linux/elf.h
80797index 20fa8d8..3d0dd18 100644
80798--- a/include/linux/elf.h
80799+++ b/include/linux/elf.h
80800@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
80801 #define elf_note elf32_note
80802 #define elf_addr_t Elf32_Off
80803 #define Elf_Half Elf32_Half
80804+#define elf_dyn Elf32_Dyn
80805
80806 #else
80807
80808@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
80809 #define elf_note elf64_note
80810 #define elf_addr_t Elf64_Off
80811 #define Elf_Half Elf64_Half
80812+#define elf_dyn Elf64_Dyn
80813
80814 #endif
80815
80816diff --git a/include/linux/err.h b/include/linux/err.h
80817index a729120..6ede2c9 100644
80818--- a/include/linux/err.h
80819+++ b/include/linux/err.h
80820@@ -20,12 +20,12 @@
80821
80822 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
80823
80824-static inline void * __must_check ERR_PTR(long error)
80825+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
80826 {
80827 return (void *) error;
80828 }
80829
80830-static inline long __must_check PTR_ERR(__force const void *ptr)
80831+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
80832 {
80833 return (long) ptr;
80834 }
80835diff --git a/include/linux/extcon.h b/include/linux/extcon.h
80836index 36f49c4..a2a1f4c 100644
80837--- a/include/linux/extcon.h
80838+++ b/include/linux/extcon.h
80839@@ -135,7 +135,7 @@ struct extcon_dev {
80840 /* /sys/class/extcon/.../mutually_exclusive/... */
80841 struct attribute_group attr_g_muex;
80842 struct attribute **attrs_muex;
80843- struct device_attribute *d_attrs_muex;
80844+ device_attribute_no_const *d_attrs_muex;
80845 };
80846
80847 /**
80848diff --git a/include/linux/fb.h b/include/linux/fb.h
80849index 09bb7a1..d98870a 100644
80850--- a/include/linux/fb.h
80851+++ b/include/linux/fb.h
80852@@ -305,7 +305,7 @@ struct fb_ops {
80853 /* called at KDB enter and leave time to prepare the console */
80854 int (*fb_debug_enter)(struct fb_info *info);
80855 int (*fb_debug_leave)(struct fb_info *info);
80856-};
80857+} __do_const;
80858
80859 #ifdef CONFIG_FB_TILEBLITTING
80860 #define FB_TILE_CURSOR_NONE 0
80861diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
80862index 230f87b..1fd0485 100644
80863--- a/include/linux/fdtable.h
80864+++ b/include/linux/fdtable.h
80865@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
80866 void put_files_struct(struct files_struct *fs);
80867 void reset_files_struct(struct files_struct *);
80868 int unshare_files(struct files_struct **);
80869-struct files_struct *dup_fd(struct files_struct *, int *);
80870+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
80871 void do_close_on_exec(struct files_struct *);
80872 int iterate_fd(struct files_struct *, unsigned,
80873 int (*)(const void *, struct file *, unsigned),
80874diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
80875index 8293262..2b3b8bd 100644
80876--- a/include/linux/frontswap.h
80877+++ b/include/linux/frontswap.h
80878@@ -11,7 +11,7 @@ struct frontswap_ops {
80879 int (*load)(unsigned, pgoff_t, struct page *);
80880 void (*invalidate_page)(unsigned, pgoff_t);
80881 void (*invalidate_area)(unsigned);
80882-};
80883+} __no_const;
80884
80885 extern bool frontswap_enabled;
80886 extern struct frontswap_ops *
80887diff --git a/include/linux/fs.h b/include/linux/fs.h
80888index 42efe13..72d42ee 100644
80889--- a/include/linux/fs.h
80890+++ b/include/linux/fs.h
80891@@ -413,7 +413,7 @@ struct address_space {
80892 spinlock_t private_lock; /* for use by the address_space */
80893 struct list_head private_list; /* ditto */
80894 void *private_data; /* ditto */
80895-} __attribute__((aligned(sizeof(long))));
80896+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
80897 /*
80898 * On most architectures that alignment is already the case; but
80899 * must be enforced here for CRIS, to let the least significant bit
80900@@ -456,7 +456,7 @@ struct block_device {
80901 int bd_fsfreeze_count;
80902 /* Mutex for freeze */
80903 struct mutex bd_fsfreeze_mutex;
80904-};
80905+} __randomize_layout;
80906
80907 /*
80908 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
80909@@ -642,7 +642,7 @@ struct inode {
80910 #endif
80911
80912 void *i_private; /* fs or device private pointer */
80913-};
80914+} __randomize_layout;
80915
80916 static inline int inode_unhashed(struct inode *inode)
80917 {
80918@@ -837,7 +837,7 @@ struct file {
80919 struct list_head f_tfile_llink;
80920 #endif /* #ifdef CONFIG_EPOLL */
80921 struct address_space *f_mapping;
80922-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
80923+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
80924
80925 struct file_handle {
80926 __u32 handle_bytes;
80927@@ -962,7 +962,7 @@ struct file_lock {
80928 int state; /* state of grant or error if -ve */
80929 } afs;
80930 } fl_u;
80931-};
80932+} __randomize_layout;
80933
80934 /* The following constant reflects the upper bound of the file/locking space */
80935 #ifndef OFFSET_MAX
80936@@ -1305,7 +1305,7 @@ struct super_block {
80937 * Indicates how deep in a filesystem stack this SB is
80938 */
80939 int s_stack_depth;
80940-};
80941+} __randomize_layout;
80942
80943 extern struct timespec current_fs_time(struct super_block *sb);
80944
80945@@ -1536,7 +1536,8 @@ struct file_operations {
80946 long (*fallocate)(struct file *file, int mode, loff_t offset,
80947 loff_t len);
80948 void (*show_fdinfo)(struct seq_file *m, struct file *f);
80949-};
80950+} __do_const __randomize_layout;
80951+typedef struct file_operations __no_const file_operations_no_const;
80952
80953 struct inode_operations {
80954 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
80955@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
80956 return !IS_DEADDIR(inode);
80957 }
80958
80959+static inline bool is_sidechannel_device(const struct inode *inode)
80960+{
80961+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
80962+ umode_t mode = inode->i_mode;
80963+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
80964+#else
80965+ return false;
80966+#endif
80967+}
80968+
80969 #endif /* _LINUX_FS_H */
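is_sidechannel_device() singles out character and block devices that are world-readable or world-writable; the fsnotify hooks patched further down use it to suppress access/modify events on such nodes, so a watcher cannot infer another user's terminal or device activity from notification timing. A small userspace rerun of the mode test:

#include <stdio.h>
#include <sys/stat.h>

/* Same predicate as is_sidechannel_device(), over a plain mode_t:
 * a chr/blk device that "other" can read or write. */
static int is_sidechannel_device(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) &&
	       (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
	struct stat st;

	if (stat("/dev/tty", &st) == 0)
		printf("/dev/tty flagged: %d\n",
		       is_sidechannel_device(st.st_mode));
	return 0;
}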
80970diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
80971index 0efc3e6..fd23610 100644
80972--- a/include/linux/fs_struct.h
80973+++ b/include/linux/fs_struct.h
80974@@ -6,13 +6,13 @@
80975 #include <linux/seqlock.h>
80976
80977 struct fs_struct {
80978- int users;
80979+ atomic_t users;
80980 spinlock_t lock;
80981 seqcount_t seq;
80982 int umask;
80983 int in_exec;
80984 struct path root, pwd;
80985-};
80986+} __randomize_layout;
80987
80988 extern struct kmem_cache *fs_cachep;
80989
80990diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
80991index 7714849..a4a5c7a 100644
80992--- a/include/linux/fscache-cache.h
80993+++ b/include/linux/fscache-cache.h
80994@@ -113,7 +113,7 @@ struct fscache_operation {
80995 fscache_operation_release_t release;
80996 };
80997
80998-extern atomic_t fscache_op_debug_id;
80999+extern atomic_unchecked_t fscache_op_debug_id;
81000 extern void fscache_op_work_func(struct work_struct *work);
81001
81002 extern void fscache_enqueue_operation(struct fscache_operation *);
81003@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81004 INIT_WORK(&op->work, fscache_op_work_func);
81005 atomic_set(&op->usage, 1);
81006 op->state = FSCACHE_OP_ST_INITIALISED;
81007- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81008+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81009 op->processor = processor;
81010 op->release = release;
81011 INIT_LIST_HEAD(&op->pend_link);
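Switching fscache_op_debug_id to atomic_unchecked_t reflects the PaX REFCOUNT split: regular atomic_t traps on wraparound to stop reference-count overflows, while atomic_unchecked_t keeps plain wrapping semantics for counters like this debug id, where a wrap is harmless. A sketch of the unchecked flavour in portable C (the kernel types and trap machinery are simplified away):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int fscache_op_debug_id;

/* Wraps silently by design; this is what keeps debug ids cheap and
 * exempt from the overflow checking applied to real refcounts. */
static unsigned int atomic_inc_return_unchecked(_Atomic unsigned int *v)
{
	return atomic_fetch_add(v, 1) + 1;
}

int main(void)
{
	printf("op %u\n", atomic_inc_return_unchecked(&fscache_op_debug_id));
	printf("op %u\n", atomic_inc_return_unchecked(&fscache_op_debug_id));
	return 0;
}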
81012diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81013index 115bb81..e7b812b 100644
81014--- a/include/linux/fscache.h
81015+++ b/include/linux/fscache.h
81016@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81017 * - this is mandatory for any object that may have data
81018 */
81019 void (*now_uncached)(void *cookie_netfs_data);
81020-};
81021+} __do_const;
81022
81023 /*
81024 * fscache cached network filesystem type
81025diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81026index 7ee1774..72505b8 100644
81027--- a/include/linux/fsnotify.h
81028+++ b/include/linux/fsnotify.h
81029@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81030 struct inode *inode = file_inode(file);
81031 __u32 mask = FS_ACCESS;
81032
81033+ if (is_sidechannel_device(inode))
81034+ return;
81035+
81036 if (S_ISDIR(inode->i_mode))
81037 mask |= FS_ISDIR;
81038
81039@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81040 struct inode *inode = file_inode(file);
81041 __u32 mask = FS_MODIFY;
81042
81043+ if (is_sidechannel_device(inode))
81044+ return;
81045+
81046 if (S_ISDIR(inode->i_mode))
81047 mask |= FS_ISDIR;
81048
81049@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81050 */
81051 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81052 {
81053- return kstrdup(name, GFP_KERNEL);
81054+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81055 }
81056
81057 /*
81058diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81059index ec274e0..e678159 100644
81060--- a/include/linux/genhd.h
81061+++ b/include/linux/genhd.h
81062@@ -194,7 +194,7 @@ struct gendisk {
81063 struct kobject *slave_dir;
81064
81065 struct timer_rand_state *random;
81066- atomic_t sync_io; /* RAID */
81067+ atomic_unchecked_t sync_io; /* RAID */
81068 struct disk_events *ev;
81069 #ifdef CONFIG_BLK_DEV_INTEGRITY
81070 struct blk_integrity *integrity;
81071@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81072 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81073
81074 /* drivers/char/random.c */
81075-extern void add_disk_randomness(struct gendisk *disk);
81076+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81077 extern void rand_initialize_disk(struct gendisk *disk);
81078
81079 static inline sector_t get_start_sect(struct block_device *bdev)
81080diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81081index 667c311..abac2a7 100644
81082--- a/include/linux/genl_magic_func.h
81083+++ b/include/linux/genl_magic_func.h
81084@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81085 },
81086
81087 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81088-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81089+static struct genl_ops ZZZ_genl_ops[] = {
81090 #include GENL_MAGIC_INCLUDE_FILE
81091 };
81092
81093diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81094index b840e3b..aeaeef9 100644
81095--- a/include/linux/gfp.h
81096+++ b/include/linux/gfp.h
81097@@ -34,6 +34,13 @@ struct vm_area_struct;
81098 #define ___GFP_NO_KSWAPD 0x400000u
81099 #define ___GFP_OTHER_NODE 0x800000u
81100 #define ___GFP_WRITE 0x1000000u
81101+
81102+#ifdef CONFIG_PAX_USERCOPY_SLABS
81103+#define ___GFP_USERCOPY 0x2000000u
81104+#else
81105+#define ___GFP_USERCOPY 0
81106+#endif
81107+
81108 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81109
81110 /*
81111@@ -90,6 +97,7 @@ struct vm_area_struct;
81112 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81113 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81114 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81115+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81116
81117 /*
81118 * This may seem redundant, but it's a way of annotating false positives vs.
81119@@ -97,7 +105,7 @@ struct vm_area_struct;
81120 */
81121 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81122
81123-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81124+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81125 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81126
81127 /* This equals 0, but use constants in case they ever change */
81128@@ -152,6 +160,8 @@ struct vm_area_struct;
81129 /* 4GB DMA on some platforms */
81130 #define GFP_DMA32 __GFP_DMA32
81131
81132+#define GFP_USERCOPY __GFP_USERCOPY
81133+
81134 /* Convert GFP flags to their corresponding migrate type */
81135 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81136 {
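The new ___GFP_USERCOPY claims bit 25, which is why __GFP_BITS_SHIFT moves from 25 to 26: the mask must keep covering every flag bit. A quick self-check of that bookkeeping, with values copied from the hunk above:

#include <assert.h>

#define ___GFP_WRITE     0x1000000u
#define ___GFP_USERCOPY  0x2000000u
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK  ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
	/* USERCOPY is the next free bit after WRITE... */
	assert(___GFP_USERCOPY == ___GFP_WRITE << 1);
	/* ...and the widened mask still contains it. */
	assert(___GFP_USERCOPY & __GFP_BITS_MASK);
	return 0;
}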
81137diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81138new file mode 100644
81139index 0000000..91858e4
81140--- /dev/null
81141+++ b/include/linux/gracl.h
81142@@ -0,0 +1,342 @@
81143+#ifndef GR_ACL_H
81144+#define GR_ACL_H
81145+
81146+#include <linux/grdefs.h>
81147+#include <linux/resource.h>
81148+#include <linux/capability.h>
81149+#include <linux/dcache.h>
81150+#include <asm/resource.h>
81151+
81152+/* Major status information */
81153+
81154+#define GR_VERSION "grsecurity 3.1"
81155+#define GRSECURITY_VERSION 0x3100
81156+
81157+enum {
81158+ GR_SHUTDOWN = 0,
81159+ GR_ENABLE = 1,
81160+ GR_SPROLE = 2,
81161+ GR_OLDRELOAD = 3,
81162+ GR_SEGVMOD = 4,
81163+ GR_STATUS = 5,
81164+ GR_UNSPROLE = 6,
81165+ GR_PASSSET = 7,
81166+ GR_SPROLEPAM = 8,
81167+ GR_RELOAD = 9,
81168+};
81169+
81170+/* Password setup definitions
81171+ * kernel/grhash.c */
81172+enum {
81173+ GR_PW_LEN = 128,
81174+ GR_SALT_LEN = 16,
81175+ GR_SHA_LEN = 32,
81176+};
81177+
81178+enum {
81179+ GR_SPROLE_LEN = 64,
81180+};
81181+
81182+enum {
81183+ GR_NO_GLOB = 0,
81184+ GR_REG_GLOB,
81185+ GR_CREATE_GLOB
81186+};
81187+
81188+#define GR_NLIMITS 32
81189+
81190+/* Begin Data Structures */
81191+
81192+struct sprole_pw {
81193+ unsigned char *rolename;
81194+ unsigned char salt[GR_SALT_LEN];
81195+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81196+};
81197+
81198+struct name_entry {
81199+ __u32 key;
81200+ u64 inode;
81201+ dev_t device;
81202+ char *name;
81203+ __u16 len;
81204+ __u8 deleted;
81205+ struct name_entry *prev;
81206+ struct name_entry *next;
81207+};
81208+
81209+struct inodev_entry {
81210+ struct name_entry *nentry;
81211+ struct inodev_entry *prev;
81212+ struct inodev_entry *next;
81213+};
81214+
81215+struct acl_role_db {
81216+ struct acl_role_label **r_hash;
81217+ __u32 r_size;
81218+};
81219+
81220+struct inodev_db {
81221+ struct inodev_entry **i_hash;
81222+ __u32 i_size;
81223+};
81224+
81225+struct name_db {
81226+ struct name_entry **n_hash;
81227+ __u32 n_size;
81228+};
81229+
81230+struct crash_uid {
81231+ uid_t uid;
81232+ unsigned long expires;
81233+};
81234+
81235+struct gr_hash_struct {
81236+ void **table;
81237+ void **nametable;
81238+ void *first;
81239+ __u32 table_size;
81240+ __u32 used_size;
81241+ int type;
81242+};
81243+
81244+/* Userspace Grsecurity ACL data structures */
81245+
81246+struct acl_subject_label {
81247+ char *filename;
81248+ u64 inode;
81249+ dev_t device;
81250+ __u32 mode;
81251+ kernel_cap_t cap_mask;
81252+ kernel_cap_t cap_lower;
81253+ kernel_cap_t cap_invert_audit;
81254+
81255+ struct rlimit res[GR_NLIMITS];
81256+ __u32 resmask;
81257+
81258+ __u8 user_trans_type;
81259+ __u8 group_trans_type;
81260+ uid_t *user_transitions;
81261+ gid_t *group_transitions;
81262+ __u16 user_trans_num;
81263+ __u16 group_trans_num;
81264+
81265+ __u32 sock_families[2];
81266+ __u32 ip_proto[8];
81267+ __u32 ip_type;
81268+ struct acl_ip_label **ips;
81269+ __u32 ip_num;
81270+ __u32 inaddr_any_override;
81271+
81272+ __u32 crashes;
81273+ unsigned long expires;
81274+
81275+ struct acl_subject_label *parent_subject;
81276+ struct gr_hash_struct *hash;
81277+ struct acl_subject_label *prev;
81278+ struct acl_subject_label *next;
81279+
81280+ struct acl_object_label **obj_hash;
81281+ __u32 obj_hash_size;
81282+ __u16 pax_flags;
81283+};
81284+
81285+struct role_allowed_ip {
81286+ __u32 addr;
81287+ __u32 netmask;
81288+
81289+ struct role_allowed_ip *prev;
81290+ struct role_allowed_ip *next;
81291+};
81292+
81293+struct role_transition {
81294+ char *rolename;
81295+
81296+ struct role_transition *prev;
81297+ struct role_transition *next;
81298+};
81299+
81300+struct acl_role_label {
81301+ char *rolename;
81302+ uid_t uidgid;
81303+ __u16 roletype;
81304+
81305+ __u16 auth_attempts;
81306+ unsigned long expires;
81307+
81308+ struct acl_subject_label *root_label;
81309+ struct gr_hash_struct *hash;
81310+
81311+ struct acl_role_label *prev;
81312+ struct acl_role_label *next;
81313+
81314+ struct role_transition *transitions;
81315+ struct role_allowed_ip *allowed_ips;
81316+ uid_t *domain_children;
81317+ __u16 domain_child_num;
81318+
81319+ umode_t umask;
81320+
81321+ struct acl_subject_label **subj_hash;
81322+ __u32 subj_hash_size;
81323+};
81324+
81325+struct user_acl_role_db {
81326+ struct acl_role_label **r_table;
81327+ __u32 num_pointers; /* Number of allocations to track */
81328+ __u32 num_roles; /* Number of roles */
81329+ __u32 num_domain_children; /* Number of domain children */
81330+ __u32 num_subjects; /* Number of subjects */
81331+ __u32 num_objects; /* Number of objects */
81332+};
81333+
81334+struct acl_object_label {
81335+ char *filename;
81336+ u64 inode;
81337+ dev_t device;
81338+ __u32 mode;
81339+
81340+ struct acl_subject_label *nested;
81341+ struct acl_object_label *globbed;
81342+
81343+ /* next two structures not used */
81344+
81345+ struct acl_object_label *prev;
81346+ struct acl_object_label *next;
81347+};
81348+
81349+struct acl_ip_label {
81350+ char *iface;
81351+ __u32 addr;
81352+ __u32 netmask;
81353+ __u16 low, high;
81354+ __u8 mode;
81355+ __u32 type;
81356+ __u32 proto[8];
81357+
81358+ /* next two structures not used */
81359+
81360+ struct acl_ip_label *prev;
81361+ struct acl_ip_label *next;
81362+};
81363+
81364+struct gr_arg {
81365+ struct user_acl_role_db role_db;
81366+ unsigned char pw[GR_PW_LEN];
81367+ unsigned char salt[GR_SALT_LEN];
81368+ unsigned char sum[GR_SHA_LEN];
81369+ unsigned char sp_role[GR_SPROLE_LEN];
81370+ struct sprole_pw *sprole_pws;
81371+ dev_t segv_device;
81372+ u64 segv_inode;
81373+ uid_t segv_uid;
81374+ __u16 num_sprole_pws;
81375+ __u16 mode;
81376+};
81377+
81378+struct gr_arg_wrapper {
81379+ struct gr_arg *arg;
81380+ __u32 version;
81381+ __u32 size;
81382+};
81383+
81384+struct subject_map {
81385+ struct acl_subject_label *user;
81386+ struct acl_subject_label *kernel;
81387+ struct subject_map *prev;
81388+ struct subject_map *next;
81389+};
81390+
81391+struct acl_subj_map_db {
81392+ struct subject_map **s_hash;
81393+ __u32 s_size;
81394+};
81395+
81396+struct gr_policy_state {
81397+ struct sprole_pw **acl_special_roles;
81398+ __u16 num_sprole_pws;
81399+ struct acl_role_label *kernel_role;
81400+ struct acl_role_label *role_list;
81401+ struct acl_role_label *default_role;
81402+ struct acl_role_db acl_role_set;
81403+ struct acl_subj_map_db subj_map_set;
81404+ struct name_db name_set;
81405+ struct inodev_db inodev_set;
81406+};
81407+
81408+struct gr_alloc_state {
81409+ unsigned long alloc_stack_next;
81410+ unsigned long alloc_stack_size;
81411+ void **alloc_stack;
81412+};
81413+
81414+struct gr_reload_state {
81415+ struct gr_policy_state oldpolicy;
81416+ struct gr_alloc_state oldalloc;
81417+ struct gr_policy_state newpolicy;
81418+ struct gr_alloc_state newalloc;
81419+ struct gr_policy_state *oldpolicy_ptr;
81420+ struct gr_alloc_state *oldalloc_ptr;
81421+ unsigned char oldmode;
81422+};
81423+
81424+/* End Data Structures Section */
81425+
81426+/* Hash functions generated by empirical testing by Brad Spengler.
81427+   They make good use of the low bits of the inode: generally 0-1 loop
81428+   iterations for a successful match and 0-3 for an unsuccessful one.
81429+   Shift/add algorithm with modulus of table size and an XOR. */
81430+
81431+static __inline__ unsigned int
81432+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81433+{
81434+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81435+}
81436+
81437+static __inline__ unsigned int
81438+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81439+{
81440+ return ((const unsigned long)userp % sz);
81441+}
81442+
81443+static __inline__ unsigned int
81444+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
81445+{
81446+ unsigned int rem;
81447+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
81448+ return rem;
81449+}
81450+
81451+static __inline__ unsigned int
81452+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81453+{
81454+ return full_name_hash((const unsigned char *)name, len) % sz;
81455+}
81456+
81457+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81458+ subj = NULL; \
81459+ iter = 0; \
81460+ while (iter < role->subj_hash_size) { \
81461+ if (subj == NULL) \
81462+ subj = role->subj_hash[iter]; \
81463+ if (subj == NULL) { \
81464+ iter++; \
81465+ continue; \
81466+ }
81467+
81468+#define FOR_EACH_SUBJECT_END(subj,iter) \
81469+ subj = subj->next; \
81470+ if (subj == NULL) \
81471+ iter++; \
81472+ }
81473+
81474+
81475+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81476+ subj = role->hash->first; \
81477+ while (subj != NULL) {
81478+
81479+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81480+ subj = subj->next; \
81481+ }
81482+
81483+#endif
81484+
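The hash helpers at the bottom of gracl.h are cheap enough to rerun in userspace; here is gr_rhash() extracted verbatim, fed a few made-up uids to show how roles spread across buckets (table size and inputs are demo values only):

#include <stdio.h>

static unsigned int gr_rhash(unsigned int uid, unsigned short type,
			     unsigned int sz)
{
	/* Shift/add with a final XOR, reduced modulo the table size. */
	return ((((uid + type) << (16 + type)) ^ uid) % sz);
}

int main(void)
{
	unsigned int uid;

	for (uid = 1000; uid < 1004; uid++)
		printf("uid %u -> bucket %u\n", uid, gr_rhash(uid, 1, 32));
	return 0;
}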
81485diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81486new file mode 100644
81487index 0000000..af64092
81488--- /dev/null
81489+++ b/include/linux/gracl_compat.h
81490@@ -0,0 +1,156 @@
81491+#ifndef GR_ACL_COMPAT_H
81492+#define GR_ACL_COMPAT_H
81493+
81494+#include <linux/resource.h>
81495+#include <asm/resource.h>
81496+
81497+struct sprole_pw_compat {
81498+ compat_uptr_t rolename;
81499+ unsigned char salt[GR_SALT_LEN];
81500+ unsigned char sum[GR_SHA_LEN];
81501+};
81502+
81503+struct gr_hash_struct_compat {
81504+ compat_uptr_t table;
81505+ compat_uptr_t nametable;
81506+ compat_uptr_t first;
81507+ __u32 table_size;
81508+ __u32 used_size;
81509+ int type;
81510+};
81511+
81512+struct acl_subject_label_compat {
81513+ compat_uptr_t filename;
81514+ compat_u64 inode;
81515+ __u32 device;
81516+ __u32 mode;
81517+ kernel_cap_t cap_mask;
81518+ kernel_cap_t cap_lower;
81519+ kernel_cap_t cap_invert_audit;
81520+
81521+ struct compat_rlimit res[GR_NLIMITS];
81522+ __u32 resmask;
81523+
81524+ __u8 user_trans_type;
81525+ __u8 group_trans_type;
81526+ compat_uptr_t user_transitions;
81527+ compat_uptr_t group_transitions;
81528+ __u16 user_trans_num;
81529+ __u16 group_trans_num;
81530+
81531+ __u32 sock_families[2];
81532+ __u32 ip_proto[8];
81533+ __u32 ip_type;
81534+ compat_uptr_t ips;
81535+ __u32 ip_num;
81536+ __u32 inaddr_any_override;
81537+
81538+ __u32 crashes;
81539+ compat_ulong_t expires;
81540+
81541+ compat_uptr_t parent_subject;
81542+ compat_uptr_t hash;
81543+ compat_uptr_t prev;
81544+ compat_uptr_t next;
81545+
81546+ compat_uptr_t obj_hash;
81547+ __u32 obj_hash_size;
81548+ __u16 pax_flags;
81549+};
81550+
81551+struct role_allowed_ip_compat {
81552+ __u32 addr;
81553+ __u32 netmask;
81554+
81555+ compat_uptr_t prev;
81556+ compat_uptr_t next;
81557+};
81558+
81559+struct role_transition_compat {
81560+ compat_uptr_t rolename;
81561+
81562+ compat_uptr_t prev;
81563+ compat_uptr_t next;
81564+};
81565+
81566+struct acl_role_label_compat {
81567+ compat_uptr_t rolename;
81568+ uid_t uidgid;
81569+ __u16 roletype;
81570+
81571+ __u16 auth_attempts;
81572+ compat_ulong_t expires;
81573+
81574+ compat_uptr_t root_label;
81575+ compat_uptr_t hash;
81576+
81577+ compat_uptr_t prev;
81578+ compat_uptr_t next;
81579+
81580+ compat_uptr_t transitions;
81581+ compat_uptr_t allowed_ips;
81582+ compat_uptr_t domain_children;
81583+ __u16 domain_child_num;
81584+
81585+ umode_t umask;
81586+
81587+ compat_uptr_t subj_hash;
81588+ __u32 subj_hash_size;
81589+};
81590+
81591+struct user_acl_role_db_compat {
81592+ compat_uptr_t r_table;
81593+ __u32 num_pointers;
81594+ __u32 num_roles;
81595+ __u32 num_domain_children;
81596+ __u32 num_subjects;
81597+ __u32 num_objects;
81598+};
81599+
81600+struct acl_object_label_compat {
81601+ compat_uptr_t filename;
81602+ compat_u64 inode;
81603+ __u32 device;
81604+ __u32 mode;
81605+
81606+ compat_uptr_t nested;
81607+ compat_uptr_t globbed;
81608+
81609+ compat_uptr_t prev;
81610+ compat_uptr_t next;
81611+};
81612+
81613+struct acl_ip_label_compat {
81614+ compat_uptr_t iface;
81615+ __u32 addr;
81616+ __u32 netmask;
81617+ __u16 low, high;
81618+ __u8 mode;
81619+ __u32 type;
81620+ __u32 proto[8];
81621+
81622+ compat_uptr_t prev;
81623+ compat_uptr_t next;
81624+};
81625+
81626+struct gr_arg_compat {
81627+ struct user_acl_role_db_compat role_db;
81628+ unsigned char pw[GR_PW_LEN];
81629+ unsigned char salt[GR_SALT_LEN];
81630+ unsigned char sum[GR_SHA_LEN];
81631+ unsigned char sp_role[GR_SPROLE_LEN];
81632+ compat_uptr_t sprole_pws;
81633+ __u32 segv_device;
81634+ compat_u64 segv_inode;
81635+ uid_t segv_uid;
81636+ __u16 num_sprole_pws;
81637+ __u16 mode;
81638+};
81639+
81640+struct gr_arg_wrapper_compat {
81641+ compat_uptr_t arg;
81642+ __u32 version;
81643+ __u32 size;
81644+};
81645+
81646+#endif
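Every pointer field in these *_compat structs becomes a compat_uptr_t, a 32-bit handle, so a 64-bit kernel can parse policy blobs submitted by a 32-bit gradm. A sketch of the round-trip, assuming the usual ptr_to_compat()/compat_ptr() semantics (only addresses representable in 32 bits survive, which is exactly the 32-bit-userland case):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

static compat_uptr_t ptr_to_compat(void *p)
{
	return (compat_uptr_t)(uintptr_t)p;	/* truncates on 64-bit */
}

static void *compat_ptr(compat_uptr_t u)
{
	return (void *)(uintptr_t)u;
}

int main(void)
{
	char buf[4];
	compat_uptr_t h = ptr_to_compat(buf);

	printf("%p -> %#" PRIx32 " -> %p\n", (void *)buf, h, compat_ptr(h));
	return 0;
}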
81647diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81648new file mode 100644
81649index 0000000..323ecf2
81650--- /dev/null
81651+++ b/include/linux/gralloc.h
81652@@ -0,0 +1,9 @@
81653+#ifndef __GRALLOC_H
81654+#define __GRALLOC_H
81655+
81656+void acl_free_all(void);
81657+int acl_alloc_stack_init(unsigned long size);
81658+void *acl_alloc(unsigned long len);
81659+void *acl_alloc_num(unsigned long num, unsigned long len);
81660+
81661+#endif
81662diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81663new file mode 100644
81664index 0000000..be66033
81665--- /dev/null
81666+++ b/include/linux/grdefs.h
81667@@ -0,0 +1,140 @@
81668+#ifndef GRDEFS_H
81669+#define GRDEFS_H
81670+
81671+/* Begin grsecurity status declarations */
81672+
81673+enum {
81674+ GR_READY = 0x01,
81675+ GR_STATUS_INIT = 0x00 // disabled state
81676+};
81677+
81678+/* Begin ACL declarations */
81679+
81680+/* Role flags */
81681+
81682+enum {
81683+ GR_ROLE_USER = 0x0001,
81684+ GR_ROLE_GROUP = 0x0002,
81685+ GR_ROLE_DEFAULT = 0x0004,
81686+ GR_ROLE_SPECIAL = 0x0008,
81687+ GR_ROLE_AUTH = 0x0010,
81688+ GR_ROLE_NOPW = 0x0020,
81689+ GR_ROLE_GOD = 0x0040,
81690+ GR_ROLE_LEARN = 0x0080,
81691+ GR_ROLE_TPE = 0x0100,
81692+ GR_ROLE_DOMAIN = 0x0200,
81693+ GR_ROLE_PAM = 0x0400,
81694+ GR_ROLE_PERSIST = 0x0800
81695+};
81696+
81697+/* ACL Subject and Object mode flags */
81698+enum {
81699+ GR_DELETED = 0x80000000
81700+};
81701+
81702+/* ACL Object-only mode flags */
81703+enum {
81704+ GR_READ = 0x00000001,
81705+ GR_APPEND = 0x00000002,
81706+ GR_WRITE = 0x00000004,
81707+ GR_EXEC = 0x00000008,
81708+ GR_FIND = 0x00000010,
81709+ GR_INHERIT = 0x00000020,
81710+ GR_SETID = 0x00000040,
81711+ GR_CREATE = 0x00000080,
81712+ GR_DELETE = 0x00000100,
81713+ GR_LINK = 0x00000200,
81714+ GR_AUDIT_READ = 0x00000400,
81715+ GR_AUDIT_APPEND = 0x00000800,
81716+ GR_AUDIT_WRITE = 0x00001000,
81717+ GR_AUDIT_EXEC = 0x00002000,
81718+ GR_AUDIT_FIND = 0x00004000,
81719+ GR_AUDIT_INHERIT= 0x00008000,
81720+ GR_AUDIT_SETID = 0x00010000,
81721+ GR_AUDIT_CREATE = 0x00020000,
81722+ GR_AUDIT_DELETE = 0x00040000,
81723+ GR_AUDIT_LINK = 0x00080000,
81724+ GR_PTRACERD = 0x00100000,
81725+ GR_NOPTRACE = 0x00200000,
81726+ GR_SUPPRESS = 0x00400000,
81727+ GR_NOLEARN = 0x00800000,
81728+ GR_INIT_TRANSFER= 0x01000000
81729+};
81730+
81731+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81732+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81733+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81734+
81735+/* ACL subject-only mode flags */
81736+enum {
81737+ GR_KILL = 0x00000001,
81738+ GR_VIEW = 0x00000002,
81739+ GR_PROTECTED = 0x00000004,
81740+ GR_LEARN = 0x00000008,
81741+ GR_OVERRIDE = 0x00000010,
81742+ /* just a placeholder; this mode is only used in userspace */
81743+ GR_DUMMY = 0x00000020,
81744+ GR_PROTSHM = 0x00000040,
81745+ GR_KILLPROC = 0x00000080,
81746+ GR_KILLIPPROC = 0x00000100,
81747+ /* just a placeholder; this mode is only used in userspace */
81748+ GR_NOTROJAN = 0x00000200,
81749+ GR_PROTPROCFD = 0x00000400,
81750+ GR_PROCACCT = 0x00000800,
81751+ GR_RELAXPTRACE = 0x00001000,
81752+ //GR_NESTED = 0x00002000,
81753+ GR_INHERITLEARN = 0x00004000,
81754+ GR_PROCFIND = 0x00008000,
81755+ GR_POVERRIDE = 0x00010000,
81756+ GR_KERNELAUTH = 0x00020000,
81757+ GR_ATSECURE = 0x00040000,
81758+ GR_SHMEXEC = 0x00080000
81759+};
81760+
81761+enum {
81762+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
81763+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
81764+ GR_PAX_ENABLE_MPROTECT = 0x0004,
81765+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
81766+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
81767+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
81768+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
81769+ GR_PAX_DISABLE_MPROTECT = 0x0400,
81770+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
81771+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
81772+};
81773+
81774+enum {
81775+ GR_ID_USER = 0x01,
81776+ GR_ID_GROUP = 0x02,
81777+};
81778+
81779+enum {
81780+ GR_ID_ALLOW = 0x01,
81781+ GR_ID_DENY = 0x02,
81782+};
81783+
81784+#define GR_CRASH_RES 31
81785+#define GR_UIDTABLE_MAX 500
81786+
81787+/* begin resource learning section */
81788+enum {
81789+ GR_RLIM_CPU_BUMP = 60,
81790+ GR_RLIM_FSIZE_BUMP = 50000,
81791+ GR_RLIM_DATA_BUMP = 10000,
81792+ GR_RLIM_STACK_BUMP = 1000,
81793+ GR_RLIM_CORE_BUMP = 10000,
81794+ GR_RLIM_RSS_BUMP = 500000,
81795+ GR_RLIM_NPROC_BUMP = 1,
81796+ GR_RLIM_NOFILE_BUMP = 5,
81797+ GR_RLIM_MEMLOCK_BUMP = 50000,
81798+ GR_RLIM_AS_BUMP = 500000,
81799+ GR_RLIM_LOCKS_BUMP = 2,
81800+ GR_RLIM_SIGPENDING_BUMP = 5,
81801+ GR_RLIM_MSGQUEUE_BUMP = 10000,
81802+ GR_RLIM_NICE_BUMP = 1,
81803+ GR_RLIM_RTPRIO_BUMP = 1,
81804+ GR_RLIM_RTTIME_BUMP = 1000000
81805+};
81806+
81807+#endif
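Note the deliberate layout of the object-mode enum above: each audit flag sits exactly 10 bits above its base access flag (GR_READ 0x1 vs GR_AUDIT_READ 0x400, and so on), so audit variants can be derived with a shift rather than a lookup. A quick self-check with values copied from grdefs.h:

#include <assert.h>

#define GR_READ        0x00000001
#define GR_EXEC        0x00000008
#define GR_LINK        0x00000200
#define GR_AUDIT_READ  0x00000400
#define GR_AUDIT_EXEC  0x00002000
#define GR_AUDIT_LINK  0x00080000

int main(void)
{
	assert(GR_AUDIT_READ == GR_READ << 10);
	assert(GR_AUDIT_EXEC == GR_EXEC << 10);
	assert(GR_AUDIT_LINK == GR_LINK << 10);
	return 0;
}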
81808diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
81809new file mode 100644
81810index 0000000..fb1de5d
81811--- /dev/null
81812+++ b/include/linux/grinternal.h
81813@@ -0,0 +1,230 @@
81814+#ifndef __GRINTERNAL_H
81815+#define __GRINTERNAL_H
81816+
81817+#ifdef CONFIG_GRKERNSEC
81818+
81819+#include <linux/fs.h>
81820+#include <linux/mnt_namespace.h>
81821+#include <linux/nsproxy.h>
81822+#include <linux/gracl.h>
81823+#include <linux/grdefs.h>
81824+#include <linux/grmsg.h>
81825+
81826+void gr_add_learn_entry(const char *fmt, ...)
81827+ __attribute__ ((format (printf, 1, 2)));
81828+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
81829+ const struct vfsmount *mnt);
81830+__u32 gr_check_create(const struct dentry *new_dentry,
81831+ const struct dentry *parent,
81832+ const struct vfsmount *mnt, const __u32 mode);
81833+int gr_check_protected_task(const struct task_struct *task);
81834+__u32 to_gr_audit(const __u32 reqmode);
81835+int gr_set_acls(const int type);
81836+int gr_acl_is_enabled(void);
81837+char gr_roletype_to_char(void);
81838+
81839+void gr_handle_alertkill(struct task_struct *task);
81840+char *gr_to_filename(const struct dentry *dentry,
81841+ const struct vfsmount *mnt);
81842+char *gr_to_filename1(const struct dentry *dentry,
81843+ const struct vfsmount *mnt);
81844+char *gr_to_filename2(const struct dentry *dentry,
81845+ const struct vfsmount *mnt);
81846+char *gr_to_filename3(const struct dentry *dentry,
81847+ const struct vfsmount *mnt);
81848+
81849+extern int grsec_enable_ptrace_readexec;
81850+extern int grsec_enable_harden_ptrace;
81851+extern int grsec_enable_link;
81852+extern int grsec_enable_fifo;
81853+extern int grsec_enable_execve;
81854+extern int grsec_enable_shm;
81855+extern int grsec_enable_execlog;
81856+extern int grsec_enable_signal;
81857+extern int grsec_enable_audit_ptrace;
81858+extern int grsec_enable_forkfail;
81859+extern int grsec_enable_time;
81860+extern int grsec_enable_rofs;
81861+extern int grsec_deny_new_usb;
81862+extern int grsec_enable_chroot_shmat;
81863+extern int grsec_enable_chroot_mount;
81864+extern int grsec_enable_chroot_double;
81865+extern int grsec_enable_chroot_pivot;
81866+extern int grsec_enable_chroot_chdir;
81867+extern int grsec_enable_chroot_chmod;
81868+extern int grsec_enable_chroot_mknod;
81869+extern int grsec_enable_chroot_fchdir;
81870+extern int grsec_enable_chroot_nice;
81871+extern int grsec_enable_chroot_execlog;
81872+extern int grsec_enable_chroot_caps;
81873+extern int grsec_enable_chroot_rename;
81874+extern int grsec_enable_chroot_sysctl;
81875+extern int grsec_enable_chroot_unix;
81876+extern int grsec_enable_symlinkown;
81877+extern kgid_t grsec_symlinkown_gid;
81878+extern int grsec_enable_tpe;
81879+extern kgid_t grsec_tpe_gid;
81880+extern int grsec_enable_tpe_all;
81881+extern int grsec_enable_tpe_invert;
81882+extern int grsec_enable_socket_all;
81883+extern kgid_t grsec_socket_all_gid;
81884+extern int grsec_enable_socket_client;
81885+extern kgid_t grsec_socket_client_gid;
81886+extern int grsec_enable_socket_server;
81887+extern kgid_t grsec_socket_server_gid;
81888+extern kgid_t grsec_audit_gid;
81889+extern int grsec_enable_group;
81890+extern int grsec_enable_log_rwxmaps;
81891+extern int grsec_enable_mount;
81892+extern int grsec_enable_chdir;
81893+extern int grsec_resource_logging;
81894+extern int grsec_enable_blackhole;
81895+extern int grsec_lastack_retries;
81896+extern int grsec_enable_brute;
81897+extern int grsec_enable_harden_ipc;
81898+extern int grsec_lock;
81899+
81900+extern spinlock_t grsec_alert_lock;
81901+extern unsigned long grsec_alert_wtime;
81902+extern unsigned long grsec_alert_fyet;
81903+
81904+extern spinlock_t grsec_audit_lock;
81905+
81906+extern rwlock_t grsec_exec_file_lock;
81907+
81908+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
81909+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
81910+ (tsk)->exec_file->f_path.mnt) : "/")
81911+
81912+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
81913+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
81914+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
81915+
81916+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
81917+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
81918+ (tsk)->exec_file->f_path.mnt) : "/")
81919+
81920+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
81921+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
81922+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
81923+
81924+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
81925+
81926+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
81927+
81928+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
81929+{
81930+ if (file1 && file2) {
81931+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
81932+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
81933+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
81934+ return true;
81935+ }
81936+
81937+ return false;
81938+}
81939+
81940+#define GR_CHROOT_CAPS {{ \
81941+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
81942+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
81943+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
81944+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
81945+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
81946+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
81947+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
81948+
81949+#define security_learn(normal_msg,args...) \
81950+({ \
81951+ read_lock(&grsec_exec_file_lock); \
81952+ gr_add_learn_entry(normal_msg "\n", ## args); \
81953+ read_unlock(&grsec_exec_file_lock); \
81954+})
81955+
81956+enum {
81957+ GR_DO_AUDIT,
81958+ GR_DONT_AUDIT,
81959+ /* used for non-audit messages that we shouldn't kill the task on */
81960+ GR_DONT_AUDIT_GOOD
81961+};
81962+
81963+enum {
81964+ GR_TTYSNIFF,
81965+ GR_RBAC,
81966+ GR_RBAC_STR,
81967+ GR_STR_RBAC,
81968+ GR_RBAC_MODE2,
81969+ GR_RBAC_MODE3,
81970+ GR_FILENAME,
81971+ GR_SYSCTL_HIDDEN,
81972+ GR_NOARGS,
81973+ GR_ONE_INT,
81974+ GR_ONE_INT_TWO_STR,
81975+ GR_ONE_STR,
81976+ GR_STR_INT,
81977+ GR_TWO_STR_INT,
81978+ GR_TWO_INT,
81979+ GR_TWO_U64,
81980+ GR_THREE_INT,
81981+ GR_FIVE_INT_TWO_STR,
81982+ GR_TWO_STR,
81983+ GR_THREE_STR,
81984+ GR_FOUR_STR,
81985+ GR_STR_FILENAME,
81986+ GR_FILENAME_STR,
81987+ GR_FILENAME_TWO_INT,
81988+ GR_FILENAME_TWO_INT_STR,
81989+ GR_TEXTREL,
81990+ GR_PTRACE,
81991+ GR_RESOURCE,
81992+ GR_CAP,
81993+ GR_SIG,
81994+ GR_SIG2,
81995+ GR_CRASH1,
81996+ GR_CRASH2,
81997+ GR_PSACCT,
81998+ GR_RWXMAP,
81999+ GR_RWXMAPVMA
82000+};
82001+
82002+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82003+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82004+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82005+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82006+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82007+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82008+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82009+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82010+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82011+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82012+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82013+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82014+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82015+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82016+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82017+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82018+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82019+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82020+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82021+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82022+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82023+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82024+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82025+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82026+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82027+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82028+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82029+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82030+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82031+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82032+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82033+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82034+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82035+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82036+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82037+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82038+
82039+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82040+
82041+#endif
82042+
82043+#endif
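The long block of gr_log_* macros routes every log site through one varargs sink, gr_log_varargs(), tagging the arguments with a GR_* argtypes constant so the sink knows how to pull them back off the va_list. A compilable sketch of that dispatch pattern (signature simplified; only two tags shown):

#include <stdarg.h>
#include <stdio.h>

enum { GR_ONE_INT, GR_TWO_STR };

static void gr_log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case GR_ONE_INT:
		printf("%s: %d\n", msg, va_arg(ap, int));
		break;
	case GR_TWO_STR: {
		const char *a = va_arg(ap, const char *);
		const char *b = va_arg(ap, const char *);

		printf("%s: %s %s\n", msg, a, b);
		break;
	}
	}
	va_end(ap);
}

#define gr_log_int(msg, num)		gr_log_varargs(msg, GR_ONE_INT, num)
#define gr_log_str_str(msg, s1, s2)	gr_log_varargs(msg, GR_TWO_STR, s1, s2)

int main(void)
{
	gr_log_int("demo", 42);
	gr_log_str_str("demo", "a", "b");
	return 0;
}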
82044diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82045new file mode 100644
82046index 0000000..26ef560
82047--- /dev/null
82048+++ b/include/linux/grmsg.h
82049@@ -0,0 +1,118 @@
82050+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82051+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82052+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82053+#define GR_STOPMOD_MSG "denied modification of module state by "
82054+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82055+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82056+#define GR_IOPERM_MSG "denied use of ioperm() by "
82057+#define GR_IOPL_MSG "denied use of iopl() by "
82058+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82059+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82060+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82061+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82062+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82063+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82064+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82065+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82066+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82067+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82068+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82069+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82070+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82071+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82072+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82073+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82074+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82075+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82076+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82077+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82078+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82079+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82080+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82081+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82082+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82083+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82084+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82085+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82086+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82087+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82088+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82089+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82090+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82091+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82092+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82093+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82094+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82095+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82096+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82097+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82098+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82099+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82100+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82101+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82102+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82103+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82104+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82105+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82106+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82107+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82108+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82109+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82110+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82111+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82112+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82113+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82114+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82115+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82116+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82117+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82118+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82119+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82120+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82121+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82122+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82123+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82124+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82125+#define GR_NICE_CHROOT_MSG "denied priority change by "
82126+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82127+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82128+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82129+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82130+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82131+#define GR_TIME_MSG "time set by "
82132+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82133+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82134+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82135+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82136+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82137+#define GR_BIND_MSG "denied bind() by "
82138+#define GR_CONNECT_MSG "denied connect() by "
82139+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82140+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82141+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82142+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82143+#define GR_CAP_ACL_MSG "use of %s denied for "
82144+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82145+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82146+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82147+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82148+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82149+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82150+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82151+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82152+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82153+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82154+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82155+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82156+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82157+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82158+#define GR_VM86_MSG "denied use of vm86 by "
82159+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82160+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82161+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82162+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82163+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82164+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82165+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82166+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82167+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
82168diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82169new file mode 100644
82170index 0000000..63c1850
82171--- /dev/null
82172+++ b/include/linux/grsecurity.h
82173@@ -0,0 +1,250 @@
82174+#ifndef GR_SECURITY_H
82175+#define GR_SECURITY_H
82176+#include <linux/fs.h>
82177+#include <linux/fs_struct.h>
82178+#include <linux/binfmts.h>
82179+#include <linux/gracl.h>
82180+
82181+/* notify of brain-dead configs */
82182+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82183+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82184+#endif
82185+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82186+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82187+#endif
82188+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82189+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82190+#endif
82191+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82192+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82193+#endif
82194+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82195+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82196+#endif
82197+
82198+int gr_handle_new_usb(void);
82199+
82200+void gr_handle_brute_attach(int dumpable);
82201+void gr_handle_brute_check(void);
82202+void gr_handle_kernel_exploit(void);
82203+
82204+char gr_roletype_to_char(void);
82205+
82206+int gr_proc_is_restricted(void);
82207+
82208+int gr_acl_enable_at_secure(void);
82209+
82210+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82211+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82212+
82213+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82214+
82215+void gr_del_task_from_ip_table(struct task_struct *p);
82216+
82217+int gr_pid_is_chrooted(struct task_struct *p);
82218+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82219+int gr_handle_chroot_nice(void);
82220+int gr_handle_chroot_sysctl(const int op);
82221+int gr_handle_chroot_setpriority(struct task_struct *p,
82222+ const int niceval);
82223+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82224+int gr_chroot_fhandle(void);
82225+int gr_handle_chroot_chroot(const struct dentry *dentry,
82226+ const struct vfsmount *mnt);
82227+void gr_handle_chroot_chdir(const struct path *path);
82228+int gr_handle_chroot_chmod(const struct dentry *dentry,
82229+ const struct vfsmount *mnt, const int mode);
82230+int gr_handle_chroot_mknod(const struct dentry *dentry,
82231+ const struct vfsmount *mnt, const int mode);
82232+int gr_handle_chroot_mount(const struct dentry *dentry,
82233+ const struct vfsmount *mnt,
82234+ const char *dev_name);
82235+int gr_handle_chroot_pivot(void);
82236+int gr_handle_chroot_unix(const pid_t pid);
82237+
82238+int gr_handle_rawio(const struct inode *inode);
82239+
82240+void gr_handle_ioperm(void);
82241+void gr_handle_iopl(void);
82242+void gr_handle_msr_write(void);
82243+
82244+umode_t gr_acl_umask(void);
82245+
82246+int gr_tpe_allow(const struct file *file);
82247+
82248+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82249+void gr_clear_chroot_entries(struct task_struct *task);
82250+
82251+void gr_log_forkfail(const int retval);
82252+void gr_log_timechange(void);
82253+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82254+void gr_log_chdir(const struct dentry *dentry,
82255+ const struct vfsmount *mnt);
82256+void gr_log_chroot_exec(const struct dentry *dentry,
82257+ const struct vfsmount *mnt);
82258+void gr_log_remount(const char *devname, const int retval);
82259+void gr_log_unmount(const char *devname, const int retval);
82260+void gr_log_mount(const char *from, struct path *to, const int retval);
82261+void gr_log_textrel(struct vm_area_struct *vma);
82262+void gr_log_ptgnustack(struct file *file);
82263+void gr_log_rwxmmap(struct file *file);
82264+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82265+
82266+int gr_handle_follow_link(const struct inode *parent,
82267+ const struct inode *inode,
82268+ const struct dentry *dentry,
82269+ const struct vfsmount *mnt);
82270+int gr_handle_fifo(const struct dentry *dentry,
82271+ const struct vfsmount *mnt,
82272+ const struct dentry *dir, const int flag,
82273+ const int acc_mode);
82274+int gr_handle_hardlink(const struct dentry *dentry,
82275+ const struct vfsmount *mnt,
82276+ struct inode *inode,
82277+ const int mode, const struct filename *to);
82278+
82279+int gr_is_capable(const int cap);
82280+int gr_is_capable_nolog(const int cap);
82281+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82282+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82283+
82284+void gr_copy_label(struct task_struct *tsk);
82285+void gr_handle_crash(struct task_struct *task, const int sig);
82286+int gr_handle_signal(const struct task_struct *p, const int sig);
82287+int gr_check_crash_uid(const kuid_t uid);
82288+int gr_check_protected_task(const struct task_struct *task);
82289+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82290+int gr_acl_handle_mmap(const struct file *file,
82291+ const unsigned long prot);
82292+int gr_acl_handle_mprotect(const struct file *file,
82293+ const unsigned long prot);
82294+int gr_check_hidden_task(const struct task_struct *tsk);
82295+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82296+ const struct vfsmount *mnt);
82297+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82298+ const struct vfsmount *mnt);
82299+__u32 gr_acl_handle_access(const struct dentry *dentry,
82300+ const struct vfsmount *mnt, const int fmode);
82301+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82302+ const struct vfsmount *mnt, umode_t *mode);
82303+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82304+ const struct vfsmount *mnt);
82305+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82306+ const struct vfsmount *mnt);
82307+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82308+ const struct vfsmount *mnt);
82309+int gr_handle_ptrace(struct task_struct *task, const long request);
82310+int gr_handle_proc_ptrace(struct task_struct *task);
82311+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82312+ const struct vfsmount *mnt);
82313+int gr_check_crash_exec(const struct file *filp);
82314+int gr_acl_is_enabled(void);
82315+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82316+ const kgid_t gid);
82317+int gr_set_proc_label(const struct dentry *dentry,
82318+ const struct vfsmount *mnt,
82319+ const int unsafe_flags);
82320+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82321+ const struct vfsmount *mnt);
82322+__u32 gr_acl_handle_open(const struct dentry *dentry,
82323+ const struct vfsmount *mnt, int acc_mode);
82324+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82325+ const struct dentry *p_dentry,
82326+ const struct vfsmount *p_mnt,
82327+ int open_flags, int acc_mode, const int imode);
82328+void gr_handle_create(const struct dentry *dentry,
82329+ const struct vfsmount *mnt);
82330+void gr_handle_proc_create(const struct dentry *dentry,
82331+ const struct inode *inode);
82332+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82333+ const struct dentry *parent_dentry,
82334+ const struct vfsmount *parent_mnt,
82335+ const int mode);
82336+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82337+ const struct dentry *parent_dentry,
82338+ const struct vfsmount *parent_mnt);
82339+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82340+ const struct vfsmount *mnt);
82341+void gr_handle_delete(const u64 ino, const dev_t dev);
82342+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82343+ const struct vfsmount *mnt);
82344+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82345+ const struct dentry *parent_dentry,
82346+ const struct vfsmount *parent_mnt,
82347+ const struct filename *from);
82348+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82349+ const struct dentry *parent_dentry,
82350+ const struct vfsmount *parent_mnt,
82351+ const struct dentry *old_dentry,
82352+ const struct vfsmount *old_mnt, const struct filename *to);
82353+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82354+int gr_acl_handle_rename(struct dentry *new_dentry,
82355+ struct dentry *parent_dentry,
82356+ const struct vfsmount *parent_mnt,
82357+ struct dentry *old_dentry,
82358+ struct inode *old_parent_inode,
82359+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82360+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82361+ struct dentry *old_dentry,
82362+ struct dentry *new_dentry,
82363+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82364+__u32 gr_check_link(const struct dentry *new_dentry,
82365+ const struct dentry *parent_dentry,
82366+ const struct vfsmount *parent_mnt,
82367+ const struct dentry *old_dentry,
82368+ const struct vfsmount *old_mnt);
82369+int gr_acl_handle_filldir(const struct file *file, const char *name,
82370+ const unsigned int namelen, const u64 ino);
82371+
82372+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82373+ const struct vfsmount *mnt);
82374+void gr_acl_handle_exit(void);
82375+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82376+int gr_acl_handle_procpidmem(const struct task_struct *task);
82377+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82378+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82379+void gr_audit_ptrace(struct task_struct *task);
82380+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82381+u64 gr_get_ino_from_dentry(struct dentry *dentry);
82382+void gr_put_exec_file(struct task_struct *task);
82383+
82384+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82385+
82386+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82387+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82388+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
82389+ struct dentry *newdentry, struct vfsmount *newmnt);
82390+
82391+#ifdef CONFIG_GRKERNSEC_RESLOG
82392+extern void gr_log_resource(const struct task_struct *task, const int res,
82393+ const unsigned long wanted, const int gt);
82394+#else
82395+static inline void gr_log_resource(const struct task_struct *task, const int res,
82396+ const unsigned long wanted, const int gt)
82397+{
82398+}
82399+#endif
82400+
82401+#ifdef CONFIG_GRKERNSEC
82402+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82403+void gr_handle_vm86(void);
82404+void gr_handle_mem_readwrite(u64 from, u64 to);
82405+
82406+void gr_log_badprocpid(const char *entry);
82407+
82408+extern int grsec_enable_dmesg;
82409+extern int grsec_disable_privio;
82410+
82411+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82412+extern kgid_t grsec_proc_gid;
82413+#endif
82414+
82415+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82416+extern int grsec_enable_chroot_findtask;
82417+#endif
82418+#ifdef CONFIG_GRKERNSEC_SETXID
82419+extern int grsec_enable_setxid;
82420+#endif
82421+#endif
82422+
82423+#endif
82424diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82425new file mode 100644
82426index 0000000..e7ffaaf
82427--- /dev/null
82428+++ b/include/linux/grsock.h
82429@@ -0,0 +1,19 @@
82430+#ifndef __GRSOCK_H
82431+#define __GRSOCK_H
82432+
82433+extern void gr_attach_curr_ip(const struct sock *sk);
82434+extern int gr_handle_sock_all(const int family, const int type,
82435+ const int protocol);
82436+extern int gr_handle_sock_server(const struct sockaddr *sck);
82437+extern int gr_handle_sock_server_other(const struct sock *sck);
82438+extern int gr_handle_sock_client(const struct sockaddr *sck);
82439+extern int gr_search_connect(struct socket * sock,
82440+ struct sockaddr_in * addr);
82441+extern int gr_search_bind(struct socket * sock,
82442+ struct sockaddr_in * addr);
82443+extern int gr_search_listen(struct socket * sock);
82444+extern int gr_search_accept(struct socket * sock);
82445+extern int gr_search_socket(const int domain, const int type,
82446+ const int protocol);
82447+
82448+#endif
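The grsock.h declarations form the socket-side hook surface; net/socket.c is patched to consult them before a socket is created, bound, or connected. A minimal sketch of the creation-time check, assuming the usual deny-with--EACCES convention (wrapper name hypothetical):

	static int gr_socket_allowed(int family, int type, int protocol)
	{
		/* 0 from gr_search_socket means policy denies this socket */
		if (!gr_search_socket(family, type, protocol))
			return -EACCES;
		return 0;
	}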
82449diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82450index 9286a46..373f27f 100644
82451--- a/include/linux/highmem.h
82452+++ b/include/linux/highmem.h
82453@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82454 kunmap_atomic(kaddr);
82455 }
82456
82457+static inline void sanitize_highpage(struct page *page)
82458+{
82459+ void *kaddr;
82460+ unsigned long flags;
82461+
82462+ local_irq_save(flags);
82463+ kaddr = kmap_atomic(page);
82464+ clear_page(kaddr);
82465+ kunmap_atomic(kaddr);
82466+ local_irq_restore(flags);
82467+}
82468+
82469 static inline void zero_user_segments(struct page *page,
82470 unsigned start1, unsigned end1,
82471 unsigned start2, unsigned end2)
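sanitize_highpage() above differs from clear_highpage() only in running under local_irq_save(), which makes it callable from the page-free paths that PAX_MEMORY_SANITIZE hooks. A sketch of the intended call pattern, with a hypothetical free-side caller (the real hook lives in the page allocator):

	static void sanitize_freed_pages(struct page *page, unsigned int order)
	{
		unsigned int i;

		/* scrub every subpage before it can be handed out again */
		for (i = 0; i < (1U << order); i++)
			sanitize_highpage(page + i);
	}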
82472diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82473index 1c7b89a..7dda400 100644
82474--- a/include/linux/hwmon-sysfs.h
82475+++ b/include/linux/hwmon-sysfs.h
82476@@ -25,7 +25,8 @@
82477 struct sensor_device_attribute{
82478 struct device_attribute dev_attr;
82479 int index;
82480-};
82481+} __do_const;
82482+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82483 #define to_sensor_dev_attr(_dev_attr) \
82484 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82485
82486@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82487 struct device_attribute dev_attr;
82488 u8 index;
82489 u8 nr;
82490-};
82491+} __do_const;
82492+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82493 #define to_sensor_dev_attr_2(_dev_attr) \
82494 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82495
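This hunk is the first appearance of a pattern repeated throughout the patch: the attribute/ops struct gains __do_const so static instances are placed in read-only memory by the constify plugin, while a __no_const typedef serves as the escape hatch for the few drivers that fill an attribute in at runtime. A sketch of the split (names hypothetical):

	/* constified: the plugin treats static instances as const data */
	static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);

	/* a runtime-built attribute opts out via the typedef */
	static sensor_device_attribute_no_const dyn_attr;

	static void init_dyn_attr(int index)
	{
		dyn_attr.index = index;	/* writable only thanks to __no_const */
	}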
82496diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82497index 7c76959..153e597 100644
82498--- a/include/linux/i2c.h
82499+++ b/include/linux/i2c.h
82500@@ -413,6 +413,7 @@ struct i2c_algorithm {
82501 int (*unreg_slave)(struct i2c_client *client);
82502 #endif
82503 };
82504+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82505
82506 /**
82507 * struct i2c_bus_recovery_info - I2C bus recovery information
82508diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82509index d23c3c2..eb63c81 100644
82510--- a/include/linux/i2o.h
82511+++ b/include/linux/i2o.h
82512@@ -565,7 +565,7 @@ struct i2o_controller {
82513 struct i2o_device *exec; /* Executive */
82514 #if BITS_PER_LONG == 64
82515 spinlock_t context_list_lock; /* lock for context_list */
82516- atomic_t context_list_counter; /* needed for unique contexts */
82517+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82518 struct list_head context_list; /* list of context id's
82519 and pointers */
82520 #endif
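context_list_counter only hands out unique context IDs, so wraparound is harmless; converting it to atomic_unchecked_t exempts it from PAX_REFCOUNT's overflow trap. The matching call-site conversion looks like this sketch (function name hypothetical):

	static u32 i2o_next_context(struct i2o_controller *c)
	{
		/* _unchecked: wrap is intentional, no overflow trap wanted */
		return atomic_inc_return_unchecked(&c->context_list_counter);
	}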
82521diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82522index aff7ad8..3942bbd 100644
82523--- a/include/linux/if_pppox.h
82524+++ b/include/linux/if_pppox.h
82525@@ -76,7 +76,7 @@ struct pppox_proto {
82526 int (*ioctl)(struct socket *sock, unsigned int cmd,
82527 unsigned long arg);
82528 struct module *owner;
82529-};
82530+} __do_const;
82531
82532 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82533 extern void unregister_pppox_proto(int proto_num);
82534diff --git a/include/linux/init.h b/include/linux/init.h
82535index 2df8e8d..3e1280d 100644
82536--- a/include/linux/init.h
82537+++ b/include/linux/init.h
82538@@ -37,9 +37,17 @@
82539 * section.
82540 */
82541
82542+#define add_init_latent_entropy __latent_entropy
82543+
82544+#ifdef CONFIG_MEMORY_HOTPLUG
82545+#define add_meminit_latent_entropy
82546+#else
82547+#define add_meminit_latent_entropy __latent_entropy
82548+#endif
82549+
82550 /* These are for everybody (although not all archs will actually
82551 discard it in modules) */
82552-#define __init __section(.init.text) __cold notrace
82553+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82554 #define __initdata __section(.init.data)
82555 #define __initconst __constsection(.init.rodata)
82556 #define __exitdata __section(.exit.data)
82557@@ -100,7 +108,7 @@
82558 #define __cpuexitconst
82559
82560 /* Used for MEMORY_HOTPLUG */
82561-#define __meminit __section(.meminit.text) __cold notrace
82562+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82563 #define __meminitdata __section(.meminit.data)
82564 #define __meminitconst __constsection(.meminit.rodata)
82565 #define __memexit __section(.memexit.text) __exitused __cold notrace
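With the latent_entropy plugin active, every __init function (and every __meminit function when memory hotplug is off, since only then does .meminit run strictly at init time) carries the __latent_entropy attribute, causing the plugin to stir compile-time random constants into a global entropy value as the function executes. The attribute rides along invisibly, e.g. (function name hypothetical):

	/* expands to: __section(.init.text) __cold notrace __latent_entropy */
	static int __init example_driver_init(void)
	{
		return 0;
	}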
82566diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82567index 3037fc0..c6527ce 100644
82568--- a/include/linux/init_task.h
82569+++ b/include/linux/init_task.h
82570@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
82571
82572 #define INIT_TASK_COMM "swapper"
82573
82574+#ifdef CONFIG_X86
82575+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82576+#else
82577+#define INIT_TASK_THREAD_INFO
82578+#endif
82579+
82580 #ifdef CONFIG_RT_MUTEXES
82581 # define INIT_RT_MUTEXES(tsk) \
82582 .pi_waiters = RB_ROOT, \
82583@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
82584 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82585 .comm = INIT_TASK_COMM, \
82586 .thread = INIT_THREAD, \
82587+ INIT_TASK_THREAD_INFO \
82588 .fs = &init_fs, \
82589 .files = &init_files, \
82590 .signal = &init_signals, \
82591diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82592index d9b05b5..e5f5b7b 100644
82593--- a/include/linux/interrupt.h
82594+++ b/include/linux/interrupt.h
82595@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82596
82597 struct softirq_action
82598 {
82599- void (*action)(struct softirq_action *);
82600-};
82601+ void (*action)(void);
82602+} __no_const;
82603
82604 asmlinkage void do_softirq(void);
82605 asmlinkage void __do_softirq(void);
82606@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
82607 }
82608 #endif
82609
82610-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82611+extern void open_softirq(int nr, void (*action)(void));
82612 extern void softirq_init(void);
82613 extern void __raise_softirq_irqoff(unsigned int nr);
82614
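No softirq handler ever used its struct softirq_action argument, so the patch narrows both the action pointer and open_softirq() to void (void); handlers convert by simply dropping the parameter. Minimal sketch (names hypothetical, softirq slot chosen only for illustration):

	static void example_softirq_action(void)
	{
		/* process pending work; the unused argument is gone */
	}

	static void __init example_softirq_setup(void)
	{
		open_softirq(TASKLET_SOFTIRQ, example_softirq_action);
	}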
82615diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82616index 38daa45..4de4317 100644
82617--- a/include/linux/iommu.h
82618+++ b/include/linux/iommu.h
82619@@ -147,7 +147,7 @@ struct iommu_ops {
82620
82621 unsigned long pgsize_bitmap;
82622 void *priv;
82623-};
82624+} __do_const;
82625
82626 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82627 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82628diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82629index 2c525022..345b106 100644
82630--- a/include/linux/ioport.h
82631+++ b/include/linux/ioport.h
82632@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82633 int adjust_resource(struct resource *res, resource_size_t start,
82634 resource_size_t size);
82635 resource_size_t resource_alignment(struct resource *res);
82636-static inline resource_size_t resource_size(const struct resource *res)
82637+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82638 {
82639 return res->end - res->start + 1;
82640 }
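resource_size() must be allowed to wrap: for a resource spanning the whole address space, end - start + 1 overflows to 0 by design, and __intentional_overflow(-1) (the -1 denoting the return value) tells the size_overflow plugin not to instrument it. A worked illustration:

	struct resource whole = {
		.start	= 0,
		.end	= ~(resource_size_t)0,	/* covers everything */
	};

	/* resource_size(&whole) computes ~0 - 0 + 1, which wraps to 0;
	 * without the marker the plugin would flag this as an overflow */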
82641diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82642index 1eee6bc..9cf4912 100644
82643--- a/include/linux/ipc_namespace.h
82644+++ b/include/linux/ipc_namespace.h
82645@@ -60,7 +60,7 @@ struct ipc_namespace {
82646 struct user_namespace *user_ns;
82647
82648 struct ns_common ns;
82649-};
82650+} __randomize_layout;
82651
82652 extern struct ipc_namespace init_ipc_ns;
82653 extern atomic_t nr_ipc_ns;
82654diff --git a/include/linux/irq.h b/include/linux/irq.h
82655index d09ec7a..f373eb5 100644
82656--- a/include/linux/irq.h
82657+++ b/include/linux/irq.h
82658@@ -364,7 +364,8 @@ struct irq_chip {
82659 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
82660
82661 unsigned long flags;
82662-};
82663+} __do_const;
82664+typedef struct irq_chip __no_const irq_chip_no_const;
82665
82666 /*
82667 * irq_chip specific flags
82668diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82669index 71d706d..817cdec 100644
82670--- a/include/linux/irqchip/arm-gic.h
82671+++ b/include/linux/irqchip/arm-gic.h
82672@@ -95,7 +95,7 @@
82673
82674 struct device_node;
82675
82676-extern struct irq_chip gic_arch_extn;
82677+extern irq_chip_no_const gic_arch_extn;
82678
82679 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82680 u32 offset, struct device_node *);
82681diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
82682index faf433a..7dcb186 100644
82683--- a/include/linux/irqdesc.h
82684+++ b/include/linux/irqdesc.h
82685@@ -61,7 +61,7 @@ struct irq_desc {
82686 unsigned int irq_count; /* For detecting broken IRQs */
82687 unsigned long last_unhandled; /* Aging timer for unhandled count */
82688 unsigned int irqs_unhandled;
82689- atomic_t threads_handled;
82690+ atomic_unchecked_t threads_handled;
82691 int threads_handled_last;
82692 raw_spinlock_t lock;
82693 struct cpumask *percpu_enabled;
82694diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82695index c367cbd..c9b79e6 100644
82696--- a/include/linux/jiffies.h
82697+++ b/include/linux/jiffies.h
82698@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82699 /*
82700 * Convert various time units to each other:
82701 */
82702-extern unsigned int jiffies_to_msecs(const unsigned long j);
82703-extern unsigned int jiffies_to_usecs(const unsigned long j);
82704+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82705+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82706
82707-static inline u64 jiffies_to_nsecs(const unsigned long j)
82708+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82709 {
82710 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82711 }
82712
82713-extern unsigned long msecs_to_jiffies(const unsigned int m);
82714-extern unsigned long usecs_to_jiffies(const unsigned int u);
82715+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82716+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82717 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82718 extern void jiffies_to_timespec(const unsigned long jiffies,
82719- struct timespec *value);
82720-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82721+ struct timespec *value) __intentional_overflow(-1);
82722+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82723 extern void jiffies_to_timeval(const unsigned long jiffies,
82724 struct timeval *value);
82725
82726diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82727index 6883e19..e854fcb 100644
82728--- a/include/linux/kallsyms.h
82729+++ b/include/linux/kallsyms.h
82730@@ -15,7 +15,8 @@
82731
82732 struct module;
82733
82734-#ifdef CONFIG_KALLSYMS
82735+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82736+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82737 /* Lookup the address for a symbol. Returns 0 if not found. */
82738 unsigned long kallsyms_lookup_name(const char *name);
82739
82740@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82741 /* Stupid that this does nothing, but I didn't create this mess. */
82742 #define __print_symbol(fmt, addr)
82743 #endif /*CONFIG_KALLSYMS*/
82744+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82745+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82746+extern unsigned long kallsyms_lookup_name(const char *name);
82747+extern void __print_symbol(const char *fmt, unsigned long address);
82748+extern int sprint_backtrace(char *buffer, unsigned long address);
82749+extern int sprint_symbol(char *buffer, unsigned long address);
82750+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82751+const char *kallsyms_lookup(unsigned long addr,
82752+ unsigned long *symbolsize,
82753+ unsigned long *offset,
82754+ char **modname, char *namebuf);
82755+extern int kallsyms_lookup_size_offset(unsigned long addr,
82756+ unsigned long *symbolsize,
82757+ unsigned long *offset);
82758+#endif
82759
82760 /* This macro allows us to keep printk typechecking */
82761 static __printf(1, 2)
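Under GRKERNSEC_HIDESYM the usual prototypes are compiled out for ordinary includers, closing off symbol resolution as an infoleak vector, while the files listed in the comment get the real declarations back by defining the guard before the include. Sketch of the consumer side, mirroring what the comment describes:

	/* e.g. at the top of kernel/kallsyms.c in the hardened tree */
	#define __INCLUDED_BY_HIDESYM 1
	#include <linux/kallsyms.h>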
82762diff --git a/include/linux/kernel.h b/include/linux/kernel.h
82763index 64ce58b..6bcdbfa 100644
82764--- a/include/linux/kernel.h
82765+++ b/include/linux/kernel.h
82766@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
82767 /* Obsolete, do not use. Use kstrto<foo> instead */
82768
82769 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
82770-extern long simple_strtol(const char *,char **,unsigned int);
82771+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
82772 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
82773 extern long long simple_strtoll(const char *,char **,unsigned int);
82774
82775diff --git a/include/linux/key-type.h b/include/linux/key-type.h
82776index ff9f1d3..6712be5 100644
82777--- a/include/linux/key-type.h
82778+++ b/include/linux/key-type.h
82779@@ -152,7 +152,7 @@ struct key_type {
82780 /* internal fields */
82781 struct list_head link; /* link in types list */
82782 struct lock_class_key lock_class; /* key->sem lock class */
82783-};
82784+} __do_const;
82785
82786 extern struct key_type key_type_keyring;
82787
82788diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
82789index e465bb1..19f605fd 100644
82790--- a/include/linux/kgdb.h
82791+++ b/include/linux/kgdb.h
82792@@ -52,7 +52,7 @@ extern int kgdb_connected;
82793 extern int kgdb_io_module_registered;
82794
82795 extern atomic_t kgdb_setting_breakpoint;
82796-extern atomic_t kgdb_cpu_doing_single_step;
82797+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
82798
82799 extern struct task_struct *kgdb_usethread;
82800 extern struct task_struct *kgdb_contthread;
82801@@ -254,7 +254,7 @@ struct kgdb_arch {
82802 void (*correct_hw_break)(void);
82803
82804 void (*enable_nmi)(bool on);
82805-};
82806+} __do_const;
82807
82808 /**
82809 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
82810@@ -279,7 +279,7 @@ struct kgdb_io {
82811 void (*pre_exception) (void);
82812 void (*post_exception) (void);
82813 int is_console;
82814-};
82815+} __do_const;
82816
82817 extern struct kgdb_arch arch_kgdb_ops;
82818
82819diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
82820index e705467..a92471d 100644
82821--- a/include/linux/kmemleak.h
82822+++ b/include/linux/kmemleak.h
82823@@ -27,7 +27,7 @@
82824
82825 extern void kmemleak_init(void) __ref;
82826 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82827- gfp_t gfp) __ref;
82828+ gfp_t gfp) __ref __size_overflow(2);
82829 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
82830 extern void kmemleak_free(const void *ptr) __ref;
82831 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
82832@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
82833 static inline void kmemleak_init(void)
82834 {
82835 }
82836-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82837+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
82838 gfp_t gfp)
82839 {
82840 }
82841diff --git a/include/linux/kmod.h b/include/linux/kmod.h
82842index 0555cc6..40116ce 100644
82843--- a/include/linux/kmod.h
82844+++ b/include/linux/kmod.h
82845@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
82846 * usually useless though. */
82847 extern __printf(2, 3)
82848 int __request_module(bool wait, const char *name, ...);
82849+extern __printf(3, 4)
82850+int ___request_module(bool wait, char *param_name, const char *name, ...);
82851 #define request_module(mod...) __request_module(true, mod)
82852 #define request_module_nowait(mod...) __request_module(false, mod)
82853 #define try_then_request_module(x, mod...) \
82854@@ -57,6 +59,9 @@ struct subprocess_info {
82855 struct work_struct work;
82856 struct completion *complete;
82857 char *path;
82858+#ifdef CONFIG_GRKERNSEC
82859+ char *origpath;
82860+#endif
82861 char **argv;
82862 char **envp;
82863 int wait;
82864diff --git a/include/linux/kobject.h b/include/linux/kobject.h
82865index 2d61b90..a1d0a13 100644
82866--- a/include/linux/kobject.h
82867+++ b/include/linux/kobject.h
82868@@ -118,7 +118,7 @@ struct kobj_type {
82869 struct attribute **default_attrs;
82870 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
82871 const void *(*namespace)(struct kobject *kobj);
82872-};
82873+} __do_const;
82874
82875 struct kobj_uevent_env {
82876 char *argv[3];
82877@@ -142,6 +142,7 @@ struct kobj_attribute {
82878 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
82879 const char *buf, size_t count);
82880 };
82881+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
82882
82883 extern const struct sysfs_ops kobj_sysfs_ops;
82884
82885@@ -169,7 +170,7 @@ struct kset {
82886 spinlock_t list_lock;
82887 struct kobject kobj;
82888 const struct kset_uevent_ops *uevent_ops;
82889-};
82890+} __randomize_layout;
82891
82892 extern void kset_init(struct kset *kset);
82893 extern int __must_check kset_register(struct kset *kset);
82894diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
82895index df32d25..fb52e27 100644
82896--- a/include/linux/kobject_ns.h
82897+++ b/include/linux/kobject_ns.h
82898@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
82899 const void *(*netlink_ns)(struct sock *sk);
82900 const void *(*initial_ns)(void);
82901 void (*drop_ns)(void *);
82902-};
82903+} __do_const;
82904
82905 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
82906 int kobj_ns_type_registered(enum kobj_ns_type type);
82907diff --git a/include/linux/kref.h b/include/linux/kref.h
82908index 484604d..0f6c5b6 100644
82909--- a/include/linux/kref.h
82910+++ b/include/linux/kref.h
82911@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
82912 static inline int kref_sub(struct kref *kref, unsigned int count,
82913 void (*release)(struct kref *kref))
82914 {
82915- WARN_ON(release == NULL);
82916+ BUG_ON(release == NULL);
82917
82918 if (atomic_sub_and_test((int) count, &kref->refcount)) {
82919 release(kref);
82920diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
82921index 26f1060..bafc04a 100644
82922--- a/include/linux/kvm_host.h
82923+++ b/include/linux/kvm_host.h
82924@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
82925 {
82926 }
82927 #endif
82928-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
82929+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
82930 struct module *module);
82931 void kvm_exit(void);
82932
82933@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
82934 struct kvm_guest_debug *dbg);
82935 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
82936
82937-int kvm_arch_init(void *opaque);
82938+int kvm_arch_init(const void *opaque);
82939 void kvm_arch_exit(void);
82940
82941 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
82942diff --git a/include/linux/libata.h b/include/linux/libata.h
82943index 91f705d..24be831 100644
82944--- a/include/linux/libata.h
82945+++ b/include/linux/libata.h
82946@@ -979,7 +979,7 @@ struct ata_port_operations {
82947 * fields must be pointers.
82948 */
82949 const struct ata_port_operations *inherits;
82950-};
82951+} __do_const;
82952
82953 struct ata_port_info {
82954 unsigned long flags;
82955diff --git a/include/linux/linkage.h b/include/linux/linkage.h
82956index a6a42dd..6c5ebce 100644
82957--- a/include/linux/linkage.h
82958+++ b/include/linux/linkage.h
82959@@ -36,6 +36,7 @@
82960 #endif
82961
82962 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
82963+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
82964 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
82965
82966 /*
82967diff --git a/include/linux/list.h b/include/linux/list.h
82968index feb773c..98f3075 100644
82969--- a/include/linux/list.h
82970+++ b/include/linux/list.h
82971@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
82972 extern void list_del(struct list_head *entry);
82973 #endif
82974
82975+extern void __pax_list_add(struct list_head *new,
82976+ struct list_head *prev,
82977+ struct list_head *next);
82978+static inline void pax_list_add(struct list_head *new, struct list_head *head)
82979+{
82980+ __pax_list_add(new, head, head->next);
82981+}
82982+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
82983+{
82984+ __pax_list_add(new, head->prev, head);
82985+}
82986+extern void pax_list_del(struct list_head *entry);
82987+
82988 /**
82989 * list_replace - replace old entry by new one
82990 * @old : the element to be replaced
82991@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
82992 INIT_LIST_HEAD(entry);
82993 }
82994
82995+extern void pax_list_del_init(struct list_head *entry);
82996+
82997 /**
82998 * list_move - delete from one list and add as another's head
82999 * @list: the entry to move
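The pax_list_* wrappers mirror list_add()/list_del()/list_del_init() but are defined out of line so they can open a KERNEXEC write window around nodes that live in read-only data (module lists, notifier chains). Converted call sites change mechanically; a sketch:

	static void requeue_ro_entry(struct list_head *entry, struct list_head *head)
	{
		pax_list_del(entry);		/* was: list_del(entry) */
		pax_list_add_tail(entry, head);	/* was: list_add_tail(...) */
	}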
83000diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83001index 4bfde0e..d6e2e09 100644
83002--- a/include/linux/lockref.h
83003+++ b/include/linux/lockref.h
83004@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83005 return ((int)l->count < 0);
83006 }
83007
83008+static inline unsigned int __lockref_read(struct lockref *lockref)
83009+{
83010+ return lockref->count;
83011+}
83012+
83013+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83014+{
83015+ lockref->count = count;
83016+}
83017+
83018+static inline void __lockref_inc(struct lockref *lockref)
83019+{
83020+
83021+#ifdef CONFIG_PAX_REFCOUNT
83022+ atomic_inc((atomic_t *)&lockref->count);
83023+#else
83024+ lockref->count++;
83025+#endif
83026+
83027+}
83028+
83029+static inline void __lockref_dec(struct lockref *lockref)
83030+{
83031+
83032+#ifdef CONFIG_PAX_REFCOUNT
83033+ atomic_dec((atomic_t *)&lockref->count);
83034+#else
83035+ lockref->count--;
83036+#endif
83037+
83038+}
83039+
83040 #endif /* __LINUX_LOCKREF_H */
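The __lockref accessors funnel every count manipulation through one place: under PAX_REFCOUNT the inc/dec become (checked) atomic operations on the count field, otherwise they compile back to plain field accesses. A converted call site looks like this sketch, with dentry as the typical lockref user (helper name hypothetical):

	static void pin_dentry(struct dentry *dentry)
	{
		spin_lock(&dentry->d_lock);
		__lockref_inc(&dentry->d_lockref); /* was: d_lockref.count++ */
		spin_unlock(&dentry->d_lock);
	}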
83041diff --git a/include/linux/math64.h b/include/linux/math64.h
83042index c45c089..298841c 100644
83043--- a/include/linux/math64.h
83044+++ b/include/linux/math64.h
83045@@ -15,7 +15,7 @@
83046 * This is commonly provided by 32bit archs to provide an optimized 64bit
83047 * divide.
83048 */
83049-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83050+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83051 {
83052 *remainder = dividend % divisor;
83053 return dividend / divisor;
83054@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83055 /**
83056 * div64_u64 - unsigned 64bit divide with 64bit divisor
83057 */
83058-static inline u64 div64_u64(u64 dividend, u64 divisor)
83059+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83060 {
83061 return dividend / divisor;
83062 }
83063@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83064 #define div64_ul(x, y) div_u64((x), (y))
83065
83066 #ifndef div_u64_rem
83067-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83068+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83069 {
83070 *remainder = do_div(dividend, divisor);
83071 return dividend;
83072@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83073 #endif
83074
83075 #ifndef div64_u64
83076-extern u64 div64_u64(u64 dividend, u64 divisor);
83077+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83078 #endif
83079
83080 #ifndef div64_s64
83081@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83082 * divide.
83083 */
83084 #ifndef div_u64
83085-static inline u64 div_u64(u64 dividend, u32 divisor)
83086+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83087 {
83088 u32 remainder;
83089 return div_u64_rem(dividend, divisor, &remainder);
83090diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83091index 3d385c8..deacb6a 100644
83092--- a/include/linux/mempolicy.h
83093+++ b/include/linux/mempolicy.h
83094@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83095 }
83096
83097 #define vma_policy(vma) ((vma)->vm_policy)
83098+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83099+{
83100+ vma->vm_policy = pol;
83101+}
83102
83103 static inline void mpol_get(struct mempolicy *pol)
83104 {
83105@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83106 }
83107
83108 #define vma_policy(vma) NULL
83109+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83110+{
83111+}
83112
83113 static inline int
83114 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
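The new set_vma_policy() helper, stubbed out on !CONFIG_NUMA builds, lets callers store a policy unconditionally instead of hiding the assignment behind #ifdef CONFIG_NUMA, presumably for the PaX vma-mirroring code added elsewhere in this patch. Sketch of a caller (name hypothetical):

	static void copy_policy(struct vm_area_struct *dst, struct vm_area_struct *src)
	{
		/* no-op without NUMA, a plain pointer store with it */
		set_vma_policy(dst, mpol_dup(vma_policy(src)));
	}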
83115diff --git a/include/linux/mm.h b/include/linux/mm.h
83116index dd5ea30..cf81cd1 100644
83117--- a/include/linux/mm.h
83118+++ b/include/linux/mm.h
83119@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83120
83121 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83122 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83123+
83124+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83125+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83126+#endif
83127+
83128 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83129 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83130 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83131@@ -256,8 +261,8 @@ struct vm_operations_struct {
83132 /* called by access_process_vm when get_user_pages() fails, typically
83133 * for use by special VMAs that can switch between memory and hardware
83134 */
83135- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83136- void *buf, int len, int write);
83137+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83138+ void *buf, size_t len, int write);
83139
83140 /* Called by the /proc/PID/maps code to ask the vma whether it
83141 * has a special name. Returning non-NULL will also cause this
83142@@ -291,6 +296,7 @@ struct vm_operations_struct {
83143 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83144 unsigned long size, pgoff_t pgoff);
83145 };
83146+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83147
83148 struct mmu_gather;
83149 struct inode;
83150@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83151 unsigned long *pfn);
83152 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83153 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83154-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83155- void *buf, int len, int write);
83156+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83157+ void *buf, size_t len, int write);
83158
83159 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83160 loff_t const holebegin, loff_t const holelen)
83161@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83162 }
83163 #endif
83164
83165-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83166-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83167- void *buf, int len, int write);
83168+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83169+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83170+ void *buf, size_t len, int write);
83171
83172 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83173 unsigned long start, unsigned long nr_pages,
83174@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
83175 int clear_page_dirty_for_io(struct page *page);
83176 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83177
83178-/* Is the vma a continuation of the stack vma above it? */
83179-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83180-{
83181- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83182-}
83183-
83184-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83185- unsigned long addr)
83186-{
83187- return (vma->vm_flags & VM_GROWSDOWN) &&
83188- (vma->vm_start == addr) &&
83189- !vma_growsdown(vma->vm_prev, addr);
83190-}
83191-
83192-/* Is the vma a continuation of the stack vma below it? */
83193-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83194-{
83195- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83196-}
83197-
83198-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83199- unsigned long addr)
83200-{
83201- return (vma->vm_flags & VM_GROWSUP) &&
83202- (vma->vm_end == addr) &&
83203- !vma_growsup(vma->vm_next, addr);
83204-}
83205-
83206 extern struct task_struct *task_of_stack(struct task_struct *task,
83207 struct vm_area_struct *vma, bool in_group);
83208
83209@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83210 {
83211 return 0;
83212 }
83213+
83214+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83215+ unsigned long address)
83216+{
83217+ return 0;
83218+}
83219 #else
83220 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83221+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83222 #endif
83223
83224 #ifdef __PAGETABLE_PMD_FOLDED
83225@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83226 {
83227 return 0;
83228 }
83229+
83230+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83231+ unsigned long address)
83232+{
83233+ return 0;
83234+}
83235 #else
83236 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83237+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83238 #endif
83239
83240 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83241@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83242 NULL: pud_offset(pgd, address);
83243 }
83244
83245+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83246+{
83247+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83248+ NULL: pud_offset(pgd, address);
83249+}
83250+
83251 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83252 {
83253 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83254 NULL: pmd_offset(pud, address);
83255 }
83256+
83257+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83258+{
83259+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83260+ NULL: pmd_offset(pud, address);
83261+}
83262 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83263
83264 #if USE_SPLIT_PTE_PTLOCKS
83265@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83266 bool *need_rmap_locks);
83267 extern void exit_mmap(struct mm_struct *);
83268
83269+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83270+extern void gr_learn_resource(const struct task_struct *task, const int res,
83271+ const unsigned long wanted, const int gt);
83272+#else
83273+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83274+ const unsigned long wanted, const int gt)
83275+{
83276+}
83277+#endif
83278+
83279 static inline int check_data_rlimit(unsigned long rlim,
83280 unsigned long new,
83281 unsigned long start,
83282 unsigned long end_data,
83283 unsigned long start_data)
83284 {
83285+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83286 if (rlim < RLIM_INFINITY) {
83287 if (((new - start) + (end_data - start_data)) > rlim)
83288 return -ENOSPC;
83289@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83290 unsigned long addr, unsigned long len,
83291 unsigned long flags, struct page **pages);
83292
83293-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83294+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83295
83296 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83297 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83298@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83299 unsigned long len, unsigned long prot, unsigned long flags,
83300 unsigned long pgoff, unsigned long *populate);
83301 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83302+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83303
83304 #ifdef CONFIG_MMU
83305 extern int __mm_populate(unsigned long addr, unsigned long len,
83306@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
83307 unsigned long high_limit;
83308 unsigned long align_mask;
83309 unsigned long align_offset;
83310+ unsigned long threadstack_offset;
83311 };
83312
83313-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83314-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83315+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83316+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83317
83318 /*
83319 * Search for an unmapped address range.
83320@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83321 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83322 */
83323 static inline unsigned long
83324-vm_unmapped_area(struct vm_unmapped_area_info *info)
83325+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83326 {
83327 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83328 return unmapped_area(info);
83329@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83330 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83331 struct vm_area_struct **pprev);
83332
83333+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83334+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83335+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83336+
83337 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83338 NULL if none. Assume start_addr < end_addr. */
83339 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83340@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83341 }
83342
83343 #ifdef CONFIG_MMU
83344-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83345+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83346 void vma_set_page_prot(struct vm_area_struct *vma);
83347 #else
83348-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83349+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83350 {
83351 return __pgprot(0);
83352 }
83353@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83354 static inline void vm_stat_account(struct mm_struct *mm,
83355 unsigned long flags, struct file *file, long pages)
83356 {
83357+
83358+#ifdef CONFIG_PAX_RANDMMAP
83359+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83360+#endif
83361+
83362 mm->total_vm += pages;
83363 }
83364 #endif /* CONFIG_PROC_FS */
83365@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
83366 extern int sysctl_memory_failure_early_kill;
83367 extern int sysctl_memory_failure_recovery;
83368 extern void shake_page(struct page *p, int access);
83369-extern atomic_long_t num_poisoned_pages;
83370+extern atomic_long_unchecked_t num_poisoned_pages;
83371 extern int soft_offline_page(struct page *page, int flags);
83372
83373 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83374@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
83375 static inline void setup_nr_node_ids(void) {}
83376 #endif
83377
83378+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83379+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83380+#else
83381+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83382+#endif
83383+
83384 #endif /* __KERNEL__ */
83385 #endif /* _LINUX_MM_H */
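The pud_alloc_kernel()/pmd_alloc_kernel() variants added above parallel the normal allocators for walks of init_mm, where the PaX arch code (per-CPU PGD, KERNEXEC) needs bookkeeping separate from user page tables. A sketch of a kernel-address walk built on them:

	static pmd_t *walk_kernel_pmd(unsigned long addr)
	{
		pgd_t *pgd = pgd_offset_k(addr);	/* init_mm's page table */
		pud_t *pud = pud_alloc_kernel(&init_mm, pgd, addr);

		return pud ? pmd_alloc_kernel(&init_mm, pud, addr) : NULL;
	}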
83386diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83387index 6d34aa2..d73d848 100644
83388--- a/include/linux/mm_types.h
83389+++ b/include/linux/mm_types.h
83390@@ -309,7 +309,9 @@ struct vm_area_struct {
83391 #ifdef CONFIG_NUMA
83392 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83393 #endif
83394-};
83395+
83396+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83397+} __randomize_layout;
83398
83399 struct core_thread {
83400 struct task_struct *task;
83401@@ -459,7 +461,25 @@ struct mm_struct {
83402 /* address of the bounds directory */
83403 void __user *bd_addr;
83404 #endif
83405-};
83406+
83407+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83408+ unsigned long pax_flags;
83409+#endif
83410+
83411+#ifdef CONFIG_PAX_DLRESOLVE
83412+ unsigned long call_dl_resolve;
83413+#endif
83414+
83415+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83416+ unsigned long call_syscall;
83417+#endif
83418+
83419+#ifdef CONFIG_PAX_ASLR
83420+ unsigned long delta_mmap; /* randomized offset */
83421+ unsigned long delta_stack; /* randomized offset */
83422+#endif
83423+
83424+} __randomize_layout;
83425
83426 static inline void mm_init_cpumask(struct mm_struct *mm)
83427 {
83428diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83429index c5d5278..f0b68c8 100644
83430--- a/include/linux/mmiotrace.h
83431+++ b/include/linux/mmiotrace.h
83432@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83433 /* Called from ioremap.c */
83434 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83435 void __iomem *addr);
83436-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83437+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83438
83439 /* For anyone to insert markers. Remember trailing newline. */
83440 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83441@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83442 {
83443 }
83444
83445-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83446+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83447 {
83448 }
83449
83450diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83451index 2f0856d..5a4bc1e 100644
83452--- a/include/linux/mmzone.h
83453+++ b/include/linux/mmzone.h
83454@@ -527,7 +527,7 @@ struct zone {
83455
83456 ZONE_PADDING(_pad3_)
83457 /* Zone statistics */
83458- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83459+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83460 } ____cacheline_internodealigned_in_smp;
83461
83462 enum zone_flags {
83463diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83464index 745def8..08a820b 100644
83465--- a/include/linux/mod_devicetable.h
83466+++ b/include/linux/mod_devicetable.h
83467@@ -139,7 +139,7 @@ struct usb_device_id {
83468 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83469 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83470
83471-#define HID_ANY_ID (~0)
83472+#define HID_ANY_ID (~0U)
83473 #define HID_BUS_ANY 0xffff
83474 #define HID_GROUP_ANY 0x0000
83475
83476@@ -475,7 +475,7 @@ struct dmi_system_id {
83477 const char *ident;
83478 struct dmi_strmatch matches[4];
83479 void *driver_data;
83480-};
83481+} __do_const;
83482 /*
83483 * struct dmi_device_id appears during expansion of
83484 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83485diff --git a/include/linux/module.h b/include/linux/module.h
83486index b653d7c..22a238f 100644
83487--- a/include/linux/module.h
83488+++ b/include/linux/module.h
83489@@ -17,9 +17,11 @@
83490 #include <linux/moduleparam.h>
83491 #include <linux/jump_label.h>
83492 #include <linux/export.h>
83493+#include <linux/fs.h>
83494
83495 #include <linux/percpu.h>
83496 #include <asm/module.h>
83497+#include <asm/pgtable.h>
83498
83499 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83500 #define MODULE_SIG_STRING "~Module signature appended~\n"
83501@@ -42,7 +44,7 @@ struct module_kobject {
83502 struct kobject *drivers_dir;
83503 struct module_param_attrs *mp;
83504 struct completion *kobj_completion;
83505-};
83506+} __randomize_layout;
83507
83508 struct module_attribute {
83509 struct attribute attr;
83510@@ -54,12 +56,13 @@ struct module_attribute {
83511 int (*test)(struct module *);
83512 void (*free)(struct module *);
83513 };
83514+typedef struct module_attribute __no_const module_attribute_no_const;
83515
83516 struct module_version_attribute {
83517 struct module_attribute mattr;
83518 const char *module_name;
83519 const char *version;
83520-} __attribute__ ((__aligned__(sizeof(void *))));
83521+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83522
83523 extern ssize_t __modver_version_show(struct module_attribute *,
83524 struct module_kobject *, char *);
83525@@ -221,7 +224,7 @@ struct module {
83526
83527 /* Sysfs stuff. */
83528 struct module_kobject mkobj;
83529- struct module_attribute *modinfo_attrs;
83530+ module_attribute_no_const *modinfo_attrs;
83531 const char *version;
83532 const char *srcversion;
83533 struct kobject *holders_dir;
83534@@ -270,19 +273,16 @@ struct module {
83535 int (*init)(void);
83536
83537 /* If this is non-NULL, vfree after init() returns */
83538- void *module_init;
83539+ void *module_init_rx, *module_init_rw;
83540
83541 /* Here is the actual code + data, vfree'd on unload. */
83542- void *module_core;
83543+ void *module_core_rx, *module_core_rw;
83544
83545 /* Here are the sizes of the init and core sections */
83546- unsigned int init_size, core_size;
83547+ unsigned int init_size_rw, core_size_rw;
83548
83549 /* The size of the executable code in each section. */
83550- unsigned int init_text_size, core_text_size;
83551-
83552- /* Size of RO sections of the module (text+rodata) */
83553- unsigned int init_ro_size, core_ro_size;
83554+ unsigned int init_size_rx, core_size_rx;
83555
83556 /* Arch-specific module values */
83557 struct mod_arch_specific arch;
83558@@ -338,6 +338,10 @@ struct module {
83559 #ifdef CONFIG_EVENT_TRACING
83560 struct ftrace_event_call **trace_events;
83561 unsigned int num_trace_events;
83562+ struct file_operations trace_id;
83563+ struct file_operations trace_enable;
83564+ struct file_operations trace_format;
83565+ struct file_operations trace_filter;
83566 #endif
83567 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83568 unsigned int num_ftrace_callsites;
83569@@ -361,7 +365,7 @@ struct module {
83570 ctor_fn_t *ctors;
83571 unsigned int num_ctors;
83572 #endif
83573-};
83574+} __randomize_layout;
83575 #ifndef MODULE_ARCH_INIT
83576 #define MODULE_ARCH_INIT {}
83577 #endif
83578@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
83579 bool is_module_percpu_address(unsigned long addr);
83580 bool is_module_text_address(unsigned long addr);
83581
83582+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83583+{
83584+
83585+#ifdef CONFIG_PAX_KERNEXEC
83586+ if (ktla_ktva(addr) >= (unsigned long)start &&
83587+ ktla_ktva(addr) < (unsigned long)start + size)
83588+ return 1;
83589+#endif
83590+
83591+ return ((void *)addr >= start && (void *)addr < start + size);
83592+}
83593+
83594+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83595+{
83596+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83597+}
83598+
83599+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83600+{
83601+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83602+}
83603+
83604+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83605+{
83606+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83607+}
83608+
83609+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83610+{
83611+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83612+}
83613+
83614 static inline bool within_module_core(unsigned long addr,
83615 const struct module *mod)
83616 {
83617- return (unsigned long)mod->module_core <= addr &&
83618- addr < (unsigned long)mod->module_core + mod->core_size;
83619+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83620 }
83621
83622 static inline bool within_module_init(unsigned long addr,
83623 const struct module *mod)
83624 {
83625- return (unsigned long)mod->module_init <= addr &&
83626- addr < (unsigned long)mod->module_init + mod->init_size;
83627+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83628 }
83629
83630 static inline bool within_module(unsigned long addr, const struct module *mod)
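Splitting module_core/module_init into _rx and _rw halves is the core of KERNEXEC's module support: code (plus read-only data) lives in the RX mapping, writable data in RW, and the old single-range checks become the disjunctions above. Narrower predicates compose the same way; e.g. an executable-address test would use only the RX halves (helper name hypothetical):

	static inline bool is_module_text_addr(unsigned long addr,
					       const struct module *mod)
	{
		return within_module_core_rx(addr, mod) ||
		       within_module_init_rx(addr, mod);
	}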
83631diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83632index f755626..641f822 100644
83633--- a/include/linux/moduleloader.h
83634+++ b/include/linux/moduleloader.h
83635@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83636 sections. Returns NULL on failure. */
83637 void *module_alloc(unsigned long size);
83638
83639+#ifdef CONFIG_PAX_KERNEXEC
83640+void *module_alloc_exec(unsigned long size);
83641+#else
83642+#define module_alloc_exec(x) module_alloc(x)
83643+#endif
83644+
83645 /* Free memory returned from module_alloc. */
83646 void module_memfree(void *module_region);
83647
83648+#ifdef CONFIG_PAX_KERNEXEC
83649+void module_memfree_exec(void *module_region);
83650+#else
83651+#define module_memfree_exec(x) module_memfree((x))
83652+#endif
83653+
83654 /*
83655 * Apply the given relocation to the (simplified) ELF. Return -error
83656 * or 0.
83657@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83658 unsigned int relsec,
83659 struct module *me)
83660 {
83661+#ifdef CONFIG_MODULES
83662 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83663 module_name(me));
83664+#endif
83665 return -ENOEXEC;
83666 }
83667 #endif
83668@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83669 unsigned int relsec,
83670 struct module *me)
83671 {
83672+#ifdef CONFIG_MODULES
83673 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83674 module_name(me));
83675+#endif
83676 return -ENOEXEC;
83677 }
83678 #endif
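module_alloc_exec() pairs with the module.h split: the loader draws the executable half from the KERNEXEC-managed region and the writable half from the ordinary module area. kernel/module.c is patched roughly along these lines (sketch only, error unwinding elided):

	static int alloc_module_halves(struct module *mod)
	{
		mod->module_core_rx = module_alloc_exec(mod->core_size_rx);
		mod->module_core_rw = module_alloc(mod->core_size_rw);
		if (!mod->module_core_rx || !mod->module_core_rw)
			return -ENOMEM;	/* real code frees the surviving half */
		return 0;
	}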
83679diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83680index 1c9effa..1160bdd 100644
83681--- a/include/linux/moduleparam.h
83682+++ b/include/linux/moduleparam.h
83683@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
83684 * @len is usually just sizeof(string).
83685 */
83686 #define module_param_string(name, string, len, perm) \
83687- static const struct kparam_string __param_string_##name \
83688+ static const struct kparam_string __param_string_##name __used \
83689 = { len, string }; \
83690 __module_param_call(MODULE_PARAM_PREFIX, name, \
83691 &param_ops_string, \
83692@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83693 */
83694 #define module_param_array_named(name, array, type, nump, perm) \
83695 param_check_##type(name, &(array)[0]); \
83696- static const struct kparam_array __param_arr_##name \
83697+ static const struct kparam_array __param_arr_##name __used \
83698 = { .max = ARRAY_SIZE(array), .num = nump, \
83699 .ops = &param_ops_##type, \
83700 .elemsize = sizeof(array[0]), .elem = array }; \
83701diff --git a/include/linux/mount.h b/include/linux/mount.h
83702index c2c561d..a5f2a8c 100644
83703--- a/include/linux/mount.h
83704+++ b/include/linux/mount.h
83705@@ -66,7 +66,7 @@ struct vfsmount {
83706 struct dentry *mnt_root; /* root of the mounted tree */
83707 struct super_block *mnt_sb; /* pointer to superblock */
83708 int mnt_flags;
83709-};
83710+} __randomize_layout;
83711
83712 struct file; /* forward dec */
83713 struct path;
83714diff --git a/include/linux/namei.h b/include/linux/namei.h
83715index c899077..b9a2010 100644
83716--- a/include/linux/namei.h
83717+++ b/include/linux/namei.h
83718@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
83719 extern void unlock_rename(struct dentry *, struct dentry *);
83720
83721 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83722-extern void nd_set_link(struct nameidata *nd, char *path);
83723-extern char *nd_get_link(struct nameidata *nd);
83724+extern void nd_set_link(struct nameidata *nd, const char *path);
83725+extern const char *nd_get_link(const struct nameidata *nd);
83726
83727 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
83728 {
83729diff --git a/include/linux/net.h b/include/linux/net.h
83730index 17d8339..81656c0 100644
83731--- a/include/linux/net.h
83732+++ b/include/linux/net.h
83733@@ -192,7 +192,7 @@ struct net_proto_family {
83734 int (*create)(struct net *net, struct socket *sock,
83735 int protocol, int kern);
83736 struct module *owner;
83737-};
83738+} __do_const;
83739
83740 struct iovec;
83741 struct kvec;
83742diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83743index 52fd8e8..19430a1 100644
83744--- a/include/linux/netdevice.h
83745+++ b/include/linux/netdevice.h
83746@@ -1191,6 +1191,7 @@ struct net_device_ops {
83747 u8 state);
83748 #endif
83749 };
83750+typedef struct net_device_ops __no_const net_device_ops_no_const;
83751
83752 /**
83753 * enum net_device_priv_flags - &struct net_device priv_flags
83754@@ -1537,10 +1538,10 @@ struct net_device {
83755
83756 struct net_device_stats stats;
83757
83758- atomic_long_t rx_dropped;
83759- atomic_long_t tx_dropped;
83760+ atomic_long_unchecked_t rx_dropped;
83761+ atomic_long_unchecked_t tx_dropped;
83762
83763- atomic_t carrier_changes;
83764+ atomic_unchecked_t carrier_changes;
83765
83766 #ifdef CONFIG_WIRELESS_EXT
83767 const struct iw_handler_def * wireless_handlers;
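
rx_dropped, tx_dropped and carrier_changes become _unchecked atomics here. Under PaX REFCOUNT, plain atomic_t arithmetic traps on overflow to stop refcount-overflow exploits; pure statistics counters are expected to wrap, so they are opted out via the _unchecked types. A user-space model of the distinction (these typedefs are stand-ins, not the kernel implementation):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;           /* overflow-checked under REFCOUNT */
typedef struct { int counter; } atomic_unchecked_t; /* free-running statistic */

static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX)
		abort();        /* models the PaX overflow trap */
	v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* wrap via unsigned math; the kernel does this in asm */
	v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
	atomic_unchecked_t drops = { INT_MAX };
	atomic_t refs = { INT_MAX };

	atomic_inc_unchecked(&drops); /* wraps, by design */
	printf("drops wrapped to %d\n", drops.counter);
	atomic_inc(&refs);            /* this one aborts instead */
	return 0;
}
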
83768diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83769index 2517ece..0bbfcfb 100644
83770--- a/include/linux/netfilter.h
83771+++ b/include/linux/netfilter.h
83772@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83773 #endif
83774 /* Use the module struct to lock set/get code in place */
83775 struct module *owner;
83776-};
83777+} __do_const;
83778
83779 /* Function to register/unregister hook points. */
83780 int nf_register_hook(struct nf_hook_ops *reg);
83781diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
83782index e955d47..04a5338 100644
83783--- a/include/linux/netfilter/nfnetlink.h
83784+++ b/include/linux/netfilter/nfnetlink.h
83785@@ -19,7 +19,7 @@ struct nfnl_callback {
83786 const struct nlattr * const cda[]);
83787 const struct nla_policy *policy; /* netlink attribute policy */
83788 const u_int16_t attr_count; /* number of nlattr's */
83789-};
83790+} __do_const;
83791
83792 struct nfnetlink_subsystem {
83793 const char *name;
83794diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
83795new file mode 100644
83796index 0000000..33f4af8
83797--- /dev/null
83798+++ b/include/linux/netfilter/xt_gradm.h
83799@@ -0,0 +1,9 @@
83800+#ifndef _LINUX_NETFILTER_XT_GRADM_H
83801+#define _LINUX_NETFILTER_XT_GRADM_H 1
83802+
83803+struct xt_gradm_mtinfo {
83804+ __u16 flags;
83805+ __u16 invflags;
83806+};
83807+
83808+#endif
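
xt_gradm.h is a brand-new UAPI-style header for grsecurity's iptables "gradm" match, which keys on whether the RBAC system is enabled. flags/invflags follow the usual xtables convention: a bit in flags enables a test, the same bit in invflags inverts it. A hedged user-space sketch of that convention (the gradm_enabled predicate and the bit value are assumptions; only the struct comes from the patch):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned short __u16;

struct xt_gradm_mtinfo {
	__u16 flags;
	__u16 invflags;
};

/* Stand-in for "is the grsecurity RBAC system enabled right now". */
static bool gradm_enabled = false;

/* Usual xtables idiom: evaluate, then flip if the invert bit is set. */
static bool gradm_mt(const struct xt_gradm_mtinfo *info)
{
	bool hit = gradm_enabled;

	if (info->invflags & 1)
		hit = !hit;
	return hit;
}

int main(void)
{
	struct xt_gradm_mtinfo inverted = { .flags = 1, .invflags = 1 };

	printf("matches while RBAC is off: %d\n", gradm_mt(&inverted));
	return 0;
}
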
83809diff --git a/include/linux/nls.h b/include/linux/nls.h
83810index 520681b..2b7fabb 100644
83811--- a/include/linux/nls.h
83812+++ b/include/linux/nls.h
83813@@ -31,7 +31,7 @@ struct nls_table {
83814 const unsigned char *charset2upper;
83815 struct module *owner;
83816 struct nls_table *next;
83817-};
83818+} __do_const;
83819
83820 /* this value hold the maximum octet of charset */
83821 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
83822@@ -46,7 +46,7 @@ enum utf16_endian {
83823 /* nls_base.c */
83824 extern int __register_nls(struct nls_table *, struct module *);
83825 extern int unregister_nls(struct nls_table *);
83826-extern struct nls_table *load_nls(char *);
83827+extern struct nls_table *load_nls(const char *);
83828 extern void unload_nls(struct nls_table *);
83829 extern struct nls_table *load_nls_default(void);
83830 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
83831diff --git a/include/linux/notifier.h b/include/linux/notifier.h
83832index d14a4c3..a078786 100644
83833--- a/include/linux/notifier.h
83834+++ b/include/linux/notifier.h
83835@@ -54,7 +54,8 @@ struct notifier_block {
83836 notifier_fn_t notifier_call;
83837 struct notifier_block __rcu *next;
83838 int priority;
83839-};
83840+} __do_const;
83841+typedef struct notifier_block __no_const notifier_block_no_const;
83842
83843 struct atomic_notifier_head {
83844 spinlock_t lock;
83845diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
83846index b2a0f15..4d7da32 100644
83847--- a/include/linux/oprofile.h
83848+++ b/include/linux/oprofile.h
83849@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
83850 int oprofilefs_create_ro_ulong(struct dentry * root,
83851 char const * name, ulong * val);
83852
83853-/** Create a file for read-only access to an atomic_t. */
83854+/** Create a file for read-only access to an atomic_unchecked_t. */
83855 int oprofilefs_create_ro_atomic(struct dentry * root,
83856- char const * name, atomic_t * val);
83857+ char const * name, atomic_unchecked_t * val);
83858
83859 /** create a directory */
83860 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
83861diff --git a/include/linux/padata.h b/include/linux/padata.h
83862index 4386946..f50c615 100644
83863--- a/include/linux/padata.h
83864+++ b/include/linux/padata.h
83865@@ -129,7 +129,7 @@ struct parallel_data {
83866 struct padata_serial_queue __percpu *squeue;
83867 atomic_t reorder_objects;
83868 atomic_t refcnt;
83869- atomic_t seq_nr;
83870+ atomic_unchecked_t seq_nr;
83871 struct padata_cpumask cpumask;
83872 spinlock_t lock ____cacheline_aligned;
83873 unsigned int processed;
83874diff --git a/include/linux/path.h b/include/linux/path.h
83875index d137218..be0c176 100644
83876--- a/include/linux/path.h
83877+++ b/include/linux/path.h
83878@@ -1,13 +1,15 @@
83879 #ifndef _LINUX_PATH_H
83880 #define _LINUX_PATH_H
83881
83882+#include <linux/compiler.h>
83883+
83884 struct dentry;
83885 struct vfsmount;
83886
83887 struct path {
83888 struct vfsmount *mnt;
83889 struct dentry *dentry;
83890-};
83891+} __randomize_layout;
83892
83893 extern void path_get(const struct path *);
83894 extern void path_put(const struct path *);
83895diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
83896index 8c78950..0d74ed9 100644
83897--- a/include/linux/pci_hotplug.h
83898+++ b/include/linux/pci_hotplug.h
83899@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
83900 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
83901 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
83902 int (*reset_slot) (struct hotplug_slot *slot, int probe);
83903-};
83904+} __do_const;
83905+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
83906
83907 /**
83908 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
83909diff --git a/include/linux/percpu.h b/include/linux/percpu.h
83910index caebf2a..4c3ae9d 100644
83911--- a/include/linux/percpu.h
83912+++ b/include/linux/percpu.h
83913@@ -34,7 +34,7 @@
83914 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
83915 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
83916 */
83917-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
83918+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
83919 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
83920
83921 /*
83922diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
83923index 664de5a..b3e1bf4 100644
83924--- a/include/linux/perf_event.h
83925+++ b/include/linux/perf_event.h
83926@@ -336,8 +336,8 @@ struct perf_event {
83927
83928 enum perf_event_active_state state;
83929 unsigned int attach_state;
83930- local64_t count;
83931- atomic64_t child_count;
83932+ local64_t count; /* PaX: fix it one day */
83933+ atomic64_unchecked_t child_count;
83934
83935 /*
83936 * These are the total time in nanoseconds that the event
83937@@ -388,8 +388,8 @@ struct perf_event {
83938 * These accumulate total time (in nanoseconds) that children
83939 * events have been enabled and running, respectively.
83940 */
83941- atomic64_t child_total_time_enabled;
83942- atomic64_t child_total_time_running;
83943+ atomic64_unchecked_t child_total_time_enabled;
83944+ atomic64_unchecked_t child_total_time_running;
83945
83946 /*
83947 * Protect attach/detach and child_list:
83948@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
83949 entry->ip[entry->nr++] = ip;
83950 }
83951
83952-extern int sysctl_perf_event_paranoid;
83953+extern int sysctl_perf_event_legitimately_concerned;
83954 extern int sysctl_perf_event_mlock;
83955 extern int sysctl_perf_event_sample_rate;
83956 extern int sysctl_perf_cpu_time_max_percent;
83957@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
83958 loff_t *ppos);
83959
83960
83961+static inline bool perf_paranoid_any(void)
83962+{
83963+ return sysctl_perf_event_legitimately_concerned > 2;
83964+}
83965+
83966 static inline bool perf_paranoid_tracepoint_raw(void)
83967 {
83968- return sysctl_perf_event_paranoid > -1;
83969+ return sysctl_perf_event_legitimately_concerned > -1;
83970 }
83971
83972 static inline bool perf_paranoid_cpu(void)
83973 {
83974- return sysctl_perf_event_paranoid > 0;
83975+ return sysctl_perf_event_legitimately_concerned > 0;
83976 }
83977
83978 static inline bool perf_paranoid_kernel(void)
83979 {
83980- return sysctl_perf_event_paranoid > 1;
83981+ return sysctl_perf_event_legitimately_concerned > 1;
83982 }
83983
83984 extern void perf_event_init(void);
83985@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
83986 struct device_attribute attr;
83987 u64 id;
83988 const char *event_str;
83989-};
83990+} __do_const;
83991
83992 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
83993 static struct perf_pmu_events_attr _var = { \
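
Besides renaming sysctl_perf_event_paranoid to make the grsecurity stance explicit, the perf_event.h hunk adds a fourth paranoia tier: perf_paranoid_any() is true for values above 2, which grsecurity's perf-hardening option uses to deny unprivileged perf_event_open() outright. A compact model of the tiers:

#include <stdbool.h>
#include <stdio.h>

static int sysctl_perf_event_legitimately_concerned = 3;

static bool perf_paranoid_tracepoint_raw(void) { return sysctl_perf_event_legitimately_concerned > -1; }
static bool perf_paranoid_cpu(void)            { return sysctl_perf_event_legitimately_concerned > 0; }
static bool perf_paranoid_kernel(void)         { return sysctl_perf_event_legitimately_concerned > 1; }
static bool perf_paranoid_any(void)            { return sysctl_perf_event_legitimately_concerned > 2; }

int main(void)
{
	/* At level 3 every tier triggers, modelling a full lockout. */
	printf("raw=%d cpu=%d kernel=%d any=%d\n",
	       perf_paranoid_tracepoint_raw(), perf_paranoid_cpu(),
	       perf_paranoid_kernel(), perf_paranoid_any());
	return 0;
}
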
83994diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
83995index b9cf6c5..5462472 100644
83996--- a/include/linux/pid_namespace.h
83997+++ b/include/linux/pid_namespace.h
83998@@ -45,7 +45,7 @@ struct pid_namespace {
83999 int hide_pid;
84000 int reboot; /* group exit code if this pidns was rebooted */
84001 struct ns_common ns;
84002-};
84003+} __randomize_layout;
84004
84005 extern struct pid_namespace init_pid_ns;
84006
84007diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84008index eb8b8ac..62649e1 100644
84009--- a/include/linux/pipe_fs_i.h
84010+++ b/include/linux/pipe_fs_i.h
84011@@ -47,10 +47,10 @@ struct pipe_inode_info {
84012 struct mutex mutex;
84013 wait_queue_head_t wait;
84014 unsigned int nrbufs, curbuf, buffers;
84015- unsigned int readers;
84016- unsigned int writers;
84017- unsigned int files;
84018- unsigned int waiting_writers;
84019+ atomic_t readers;
84020+ atomic_t writers;
84021+ atomic_t files;
84022+ atomic_t waiting_writers;
84023 unsigned int r_counter;
84024 unsigned int w_counter;
84025 struct page *tmp_page;
84026diff --git a/include/linux/pm.h b/include/linux/pm.h
84027index 8b59763..8a05939 100644
84028--- a/include/linux/pm.h
84029+++ b/include/linux/pm.h
84030@@ -608,6 +608,7 @@ struct dev_pm_domain {
84031 struct dev_pm_ops ops;
84032 void (*detach)(struct device *dev, bool power_off);
84033 };
84034+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84035
84036 /*
84037 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84038diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84039index a9edab2..8bada56 100644
84040--- a/include/linux/pm_domain.h
84041+++ b/include/linux/pm_domain.h
84042@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84043 int (*save_state)(struct device *dev);
84044 int (*restore_state)(struct device *dev);
84045 bool (*active_wakeup)(struct device *dev);
84046-};
84047+} __no_const;
84048
84049 struct gpd_cpuidle_data {
84050 unsigned int saved_exit_latency;
84051- struct cpuidle_state *idle_state;
84052+ cpuidle_state_no_const *idle_state;
84053 };
84054
84055 struct generic_pm_domain {
84056diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84057index 30e84d4..22278b4 100644
84058--- a/include/linux/pm_runtime.h
84059+++ b/include/linux/pm_runtime.h
84060@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84061
84062 static inline void pm_runtime_mark_last_busy(struct device *dev)
84063 {
84064- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84065+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84066 }
84067
84068 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84069diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84070index 195aafc..49a7bc2 100644
84071--- a/include/linux/pnp.h
84072+++ b/include/linux/pnp.h
84073@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84074 struct pnp_fixup {
84075 char id[7];
84076 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84077-};
84078+} __do_const;
84079
84080 /* config parameters */
84081 #define PNP_CONFIG_NORMAL 0x0001
84082diff --git a/include/linux/poison.h b/include/linux/poison.h
84083index 2110a81..13a11bb 100644
84084--- a/include/linux/poison.h
84085+++ b/include/linux/poison.h
84086@@ -19,8 +19,8 @@
84087 * under normal circumstances, used to verify that nobody uses
84088 * non-initialized list entries.
84089 */
84090-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84091-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84092+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84093+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84094
84095 /********** include/linux/timer.h **********/
84096 /*
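
The stock poison constants sit just above 1 MiB, an address range a local attacker can often back with mmap(), turning a stale walk of a poisoned list into a controlled dereference. The replacements live at the top of the 32-bit range; on 32-bit kernels 0xFFFFFF01 is well inside kernel space (above PAGE_OFFSET), so a stray dereference faults instead of reading attacker-supplied memory. Quick demo of the two values:

#include <stdio.h>

#define POISON_POINTER_DELTA 0 /* kernel default without CONFIG_ILLEGAL_POINTER_VALUE */

int main(void)
{
	void *old1 = (void *)(0x00100100 + POISON_POINTER_DELTA);
	void *new1 = (void *)(long)0xFFFFFF01;

	/* 0x00100100 is barely above 1 MiB: a process allowed to map low
	 * addresses can back it with its own data.  The replacement is
	 * far outside that historically abused range. */
	printf("old LIST_POISON1 = %p\n", old1);
	printf("new LIST_POISON1 = %p\n", new1);
	return 0;
}
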
84097diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84098index d8b187c3..9a9257a 100644
84099--- a/include/linux/power/smartreflex.h
84100+++ b/include/linux/power/smartreflex.h
84101@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84102 int (*notify)(struct omap_sr *sr, u32 status);
84103 u8 notify_flags;
84104 u8 class_type;
84105-};
84106+} __do_const;
84107
84108 /**
84109 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84110diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84111index 4ea1d37..80f4b33 100644
84112--- a/include/linux/ppp-comp.h
84113+++ b/include/linux/ppp-comp.h
84114@@ -84,7 +84,7 @@ struct compressor {
84115 struct module *owner;
84116 /* Extra skb space needed by the compressor algorithm */
84117 unsigned int comp_extra;
84118-};
84119+} __do_const;
84120
84121 /*
84122 * The return value from decompress routine is the length of the
84123diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84124index de83b4e..c4b997d 100644
84125--- a/include/linux/preempt.h
84126+++ b/include/linux/preempt.h
84127@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84128 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84129 #endif
84130
84131+#define raw_preempt_count_add(val) __preempt_count_add(val)
84132+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84133+
84134 #define __preempt_count_inc() __preempt_count_add(1)
84135 #define __preempt_count_dec() __preempt_count_sub(1)
84136
84137 #define preempt_count_inc() preempt_count_add(1)
84138+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84139 #define preempt_count_dec() preempt_count_sub(1)
84140+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84141
84142 #ifdef CONFIG_PREEMPT_COUNT
84143
84144@@ -41,6 +46,12 @@ do { \
84145 barrier(); \
84146 } while (0)
84147
84148+#define raw_preempt_disable() \
84149+do { \
84150+ raw_preempt_count_inc(); \
84151+ barrier(); \
84152+} while (0)
84153+
84154 #define sched_preempt_enable_no_resched() \
84155 do { \
84156 barrier(); \
84157@@ -49,6 +60,12 @@ do { \
84158
84159 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84160
84161+#define raw_preempt_enable_no_resched() \
84162+do { \
84163+ barrier(); \
84164+ raw_preempt_count_dec(); \
84165+} while (0)
84166+
84167 #ifdef CONFIG_PREEMPT
84168 #define preempt_enable() \
84169 do { \
84170@@ -113,8 +130,10 @@ do { \
84171 * region.
84172 */
84173 #define preempt_disable() barrier()
84174+#define raw_preempt_disable() barrier()
84175 #define sched_preempt_enable_no_resched() barrier()
84176 #define preempt_enable_no_resched() barrier()
84177+#define raw_preempt_enable_no_resched() barrier()
84178 #define preempt_enable() barrier()
84179 #define preempt_check_resched() do { } while (0)
84180
84181@@ -128,11 +147,13 @@ do { \
84182 /*
84183 * Modules have no business playing preemption tricks.
84184 */
84185+#ifndef CONFIG_PAX_KERNEXEC
84186 #undef sched_preempt_enable_no_resched
84187 #undef preempt_enable_no_resched
84188 #undef preempt_enable_no_resched_notrace
84189 #undef preempt_check_resched
84190 #endif
84191+#endif
84192
84193 #define preempt_set_need_resched() \
84194 do { \
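
The raw_ family mirrors preempt_disable()/preempt_enable_no_resched() but calls the bare __preempt_count_add()/__preempt_count_sub() helpers, skipping the traced preempt_count_add() path; that lets PaX code toggle preemption from inside instrumentation itself. Note also that under CONFIG_PAX_KERNEXEC the no_resched variants stay visible to modules instead of being #undef'd. A user-space model of the traced-vs-raw split:

#include <stdio.h>

static int preempt_count;
static int trace_events;

/* Instrumented path: what preempt_count_add() models (tracing hook). */
static void preempt_count_add(int val) { trace_events++; preempt_count += val; }
/* Raw path: __preempt_count_add() with no instrumentation at all. */
static void __preempt_count_add(int val) { preempt_count += val; }

#define preempt_disable()     do { preempt_count_add(1); } while (0)
#define raw_preempt_disable() do { __preempt_count_add(1); } while (0)

int main(void)
{
	preempt_disable();     /* visible to the (modelled) tracer */
	raw_preempt_disable(); /* invisible: safe inside the tracer itself */
	printf("count=%d traced=%d\n", preempt_count, trace_events);
	return 0;
}
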
84195diff --git a/include/linux/printk.h b/include/linux/printk.h
84196index 4d5bf57..d94eccf 100644
84197--- a/include/linux/printk.h
84198+++ b/include/linux/printk.h
84199@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84200 #endif
84201
84202 typedef int(*printk_func_t)(const char *fmt, va_list args);
84203+extern int kptr_restrict;
84204
84205 #ifdef CONFIG_PRINTK
84206 asmlinkage __printf(5, 0)
84207@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84208
84209 extern int printk_delay_msec;
84210 extern int dmesg_restrict;
84211-extern int kptr_restrict;
84212
84213 extern void wake_up_klogd(void);
84214
84215diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84216index b97bf2e..f14c92d4 100644
84217--- a/include/linux/proc_fs.h
84218+++ b/include/linux/proc_fs.h
84219@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84220 extern struct proc_dir_entry *proc_symlink(const char *,
84221 struct proc_dir_entry *, const char *);
84222 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84223+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84224 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84225 struct proc_dir_entry *, void *);
84226+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84227+ struct proc_dir_entry *, void *);
84228 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84229 struct proc_dir_entry *);
84230
84231@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84232 return proc_create_data(name, mode, parent, proc_fops, NULL);
84233 }
84234
84235+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84236+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84237+{
84238+#ifdef CONFIG_GRKERNSEC_PROC_USER
84239+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84240+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84241+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84242+#else
84243+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84244+#endif
84245+}
84246+
84247+
84248 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84249 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84250 extern void *PDE_DATA(const struct inode *);
84251@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84252 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84253 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84254 struct proc_dir_entry *parent) {return NULL;}
84255+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84256+ struct proc_dir_entry *parent) { return NULL; }
84257 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84258 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84259+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84260+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84261 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84262 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84263 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84264@@ -79,7 +99,7 @@ struct net;
84265 static inline struct proc_dir_entry *proc_net_mkdir(
84266 struct net *net, const char *name, struct proc_dir_entry *parent)
84267 {
84268- return proc_mkdir_data(name, 0, parent, net);
84269+ return proc_mkdir_data_restrict(name, 0, parent, net);
84270 }
84271
84272 #endif /* _LINUX_PROC_FS_H */
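
proc_create_grsec() makes /proc restriction uniform: whatever mode the caller asked for, GRKERNSEC_PROC_USER clamps it to 0400 and GRKERNSEC_PROC_USERGROUP to 0440, and proc_net_mkdir() is rerouted through proc_mkdir_data_restrict() so per-namespace net directories inherit the same policy. The mode selection in isolation:

#include <stdio.h>
#include <sys/stat.h>

#define CONFIG_GRKERNSEC_PROC_USERGROUP /* pick one policy for the demo */

static mode_t grsec_proc_mode(mode_t requested)
{
	(void)requested; /* unused under the restricted policies */
#ifdef CONFIG_GRKERNSEC_PROC_USER
	return S_IRUSR;            /* 0400: root only */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	return S_IRUSR | S_IRGRP;  /* 0440: root plus the proc group */
#else
	return requested;          /* unrestricted kernel: honour the caller */
#endif
}

int main(void)
{
	printf("requested 0444 -> effective %04o\n",
	       (unsigned int)grsec_proc_mode(0444));
	return 0;
}
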
84273diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84274index 42dfc61..8113a99 100644
84275--- a/include/linux/proc_ns.h
84276+++ b/include/linux/proc_ns.h
84277@@ -16,7 +16,7 @@ struct proc_ns_operations {
84278 struct ns_common *(*get)(struct task_struct *task);
84279 void (*put)(struct ns_common *ns);
84280 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84281-};
84282+} __do_const __randomize_layout;
84283
84284 extern const struct proc_ns_operations netns_operations;
84285 extern const struct proc_ns_operations utsns_operations;
84286diff --git a/include/linux/quota.h b/include/linux/quota.h
84287index b86df49..8002997 100644
84288--- a/include/linux/quota.h
84289+++ b/include/linux/quota.h
84290@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84291
84292 extern bool qid_eq(struct kqid left, struct kqid right);
84293 extern bool qid_lt(struct kqid left, struct kqid right);
84294-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84295+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84296 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84297 extern bool qid_valid(struct kqid qid);
84298
84299diff --git a/include/linux/random.h b/include/linux/random.h
84300index b05856e..0a9f14e 100644
84301--- a/include/linux/random.h
84302+++ b/include/linux/random.h
84303@@ -9,9 +9,19 @@
84304 #include <uapi/linux/random.h>
84305
84306 extern void add_device_randomness(const void *, unsigned int);
84307+
84308+static inline void add_latent_entropy(void)
84309+{
84310+
84311+#ifdef LATENT_ENTROPY_PLUGIN
84312+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84313+#endif
84314+
84315+}
84316+
84317 extern void add_input_randomness(unsigned int type, unsigned int code,
84318- unsigned int value);
84319-extern void add_interrupt_randomness(int irq, int irq_flags);
84320+ unsigned int value) __latent_entropy;
84321+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84322
84323 extern void get_random_bytes(void *buf, int nbytes);
84324 extern void get_random_bytes_arch(void *buf, int nbytes);
84325@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84326 extern const struct file_operations random_fops, urandom_fops;
84327 #endif
84328
84329-unsigned int get_random_int(void);
84330+unsigned int __intentional_overflow(-1) get_random_int(void);
84331 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84332
84333-u32 prandom_u32(void);
84334+u32 prandom_u32(void) __intentional_overflow(-1);
84335 void prandom_bytes(void *buf, size_t nbytes);
84336 void prandom_seed(u32 seed);
84337 void prandom_reseed_late(void);
84338@@ -37,6 +47,11 @@ struct rnd_state {
84339 u32 prandom_u32_state(struct rnd_state *state);
84340 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84341
84342+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84343+{
84344+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84345+}
84346+
84347 /**
84348 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84349 * @ep_ro: right open interval endpoint
84350@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84351 *
84352 * Returns: pseudo-random number in interval [0, ep_ro)
84353 */
84354-static inline u32 prandom_u32_max(u32 ep_ro)
84355+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84356 {
84357 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84358 }
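
pax_get_random_long() is how PaX derives full-width random longs (for ASLR offsets and the like) from the 32-bit prandom_u32(): the first draw fills the low word and, when long is 64-bit, a second draw is shifted into the high word. The '+' can never carry because the two operands occupy disjoint bit ranges. User-space sketch (prandom_u32 replaced by a libc stand-in; as in the patch, the shift branch assumes a 64-bit long):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's prandom_u32(). */
static uint32_t prandom_u32(void) { return (uint32_t)rand(); }

static unsigned long pax_get_random_long(void)
{
	/* Low word from one draw; on 64-bit, high word from a second.
	 * '+' acts like '|' here since the operands share no bits. */
	return prandom_u32() +
	       (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
}

int main(void)
{
	srand(1);
	printf("%#lx\n", pax_get_random_long());
	return 0;
}
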
84359diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84360index 378c5ee..aa84a47 100644
84361--- a/include/linux/rbtree_augmented.h
84362+++ b/include/linux/rbtree_augmented.h
84363@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84364 old->rbaugmented = rbcompute(old); \
84365 } \
84366 rbstatic const struct rb_augment_callbacks rbname = { \
84367- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84368+ .propagate = rbname ## _propagate, \
84369+ .copy = rbname ## _copy, \
84370+ .rotate = rbname ## _rotate \
84371 };
84372
84373
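
Only the initializer style changes in the rbtree hunk: positional becomes designated. Name-based initialization keeps the macro correct even if rb_augment_callbacks is ever reordered; grsecurity converts such initializers wholesale, presumably as groundwork for the constification and layout-randomization plugins, which depend on field names rather than positions. The difference in miniature:

#include <stdio.h>

struct rb_augment_callbacks {
	void (*propagate)(void);
	void (*copy)(void);
	void (*rotate)(void);
};

static void demo_propagate(void) { puts("propagate"); }
static void demo_copy(void)      { puts("copy"); }
static void demo_rotate(void)    { puts("rotate"); }

/* Designated initializers bind by name, so this stays correct no
 * matter how the struct's fields end up ordered. */
static const struct rb_augment_callbacks demo_cb = {
	.propagate = demo_propagate,
	.copy      = demo_copy,
	.rotate    = demo_rotate,
};

int main(void)
{
	demo_cb.rotate();
	return 0;
}
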
84374diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84375index 529bc94..82ce778 100644
84376--- a/include/linux/rculist.h
84377+++ b/include/linux/rculist.h
84378@@ -29,8 +29,8 @@
84379 */
84380 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84381 {
84382- ACCESS_ONCE(list->next) = list;
84383- ACCESS_ONCE(list->prev) = list;
84384+ ACCESS_ONCE_RW(list->next) = list;
84385+ ACCESS_ONCE_RW(list->prev) = list;
84386 }
84387
84388 /*
84389@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84390 struct list_head *prev, struct list_head *next);
84391 #endif
84392
84393+void __pax_list_add_rcu(struct list_head *new,
84394+ struct list_head *prev, struct list_head *next);
84395+
84396 /**
84397 * list_add_rcu - add a new entry to rcu-protected list
84398 * @new: new entry to be added
84399@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84400 __list_add_rcu(new, head, head->next);
84401 }
84402
84403+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84404+{
84405+ __pax_list_add_rcu(new, head, head->next);
84406+}
84407+
84408 /**
84409 * list_add_tail_rcu - add a new entry to rcu-protected list
84410 * @new: new entry to be added
84411@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84412 __list_add_rcu(new, head->prev, head);
84413 }
84414
84415+static inline void pax_list_add_tail_rcu(struct list_head *new,
84416+ struct list_head *head)
84417+{
84418+ __pax_list_add_rcu(new, head->prev, head);
84419+}
84420+
84421 /**
84422 * list_del_rcu - deletes entry from list without re-initialization
84423 * @entry: the element to delete from the list.
84424@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84425 entry->prev = LIST_POISON2;
84426 }
84427
84428+extern void pax_list_del_rcu(struct list_head *entry);
84429+
84430 /**
84431 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84432 * @n: the element to delete from the hash list.
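
pax_list_add_rcu()/pax_list_del_rcu() are out-of-line twins of the stock RCU list primitives. The point of the out-of-line implementation is presumably to bracket the pointer updates with pax_open_kernel()/pax_close_kernel(), letting KERNEXEC keep list nodes in read-only memory the rest of the time; that is an assumption about the .c side, since this hunk only declares them. A user-space model using mprotect() for the open/close window (assumes 4 KiB pages):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

struct list_head { struct list_head *next, *prev; };

/* Stand-ins for pax_open_kernel()/pax_close_kernel(): briefly make
 * the protected page writable around the list update. */
static void pax_open(void *page)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
static void pax_close(void *page) { mprotect(page, 4096, PROT_READ); }

int main(void)
{
	struct list_head *head = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct list_head node;

	if (head == MAP_FAILED)
		return 1;
	head->next = head->prev = head;
	pax_close(head);              /* list now lives in "read-only" memory */

	pax_open(head);               /* __pax_list_add_rcu(), modelled */
	node.next = head->next;
	node.prev = head;
	head->next->prev = &node;
	head->next = &node;
	pax_close(head);

	printf("linked: %d\n", head->next == &node);
	return 0;
}
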
84433diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
84434index ed4f593..8a51501 100644
84435--- a/include/linux/rcupdate.h
84436+++ b/include/linux/rcupdate.h
84437@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
84438 #define rcu_note_voluntary_context_switch(t) \
84439 do { \
84440 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
84441- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
84442+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
84443 } while (0)
84444 #else /* #ifdef CONFIG_TASKS_RCU */
84445 #define TASKS_RCU(x) do { } while (0)
84446diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84447index 67fc8fc..a90f7d8 100644
84448--- a/include/linux/reboot.h
84449+++ b/include/linux/reboot.h
84450@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
84451 */
84452
84453 extern void migrate_to_reboot_cpu(void);
84454-extern void machine_restart(char *cmd);
84455-extern void machine_halt(void);
84456-extern void machine_power_off(void);
84457+extern void machine_restart(char *cmd) __noreturn;
84458+extern void machine_halt(void) __noreturn;
84459+extern void machine_power_off(void) __noreturn;
84460
84461 extern void machine_shutdown(void);
84462 struct pt_regs;
84463@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84464 */
84465
84466 extern void kernel_restart_prepare(char *cmd);
84467-extern void kernel_restart(char *cmd);
84468-extern void kernel_halt(void);
84469-extern void kernel_power_off(void);
84470+extern void kernel_restart(char *cmd) __noreturn;
84471+extern void kernel_halt(void) __noreturn;
84472+extern void kernel_power_off(void) __noreturn;
84473
84474 extern int C_A_D; /* for sysctl */
84475 void ctrl_alt_del(void);
84476@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
84477 * Emergency restart, callable from an interrupt handler.
84478 */
84479
84480-extern void emergency_restart(void);
84481+extern void emergency_restart(void) __noreturn;
84482 #include <asm/emergency-restart.h>
84483
84484 #endif /* _LINUX_REBOOT_H */
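
All the halt/restart entry points gain __noreturn. These functions genuinely never return, and saying so lets the compiler discard dead code after the calls and silence bogus fall-through warnings in callers; it plausibly also tightens the control-flow analysis the PaX plugins perform, though that last point is an inference, not something this hunk states. The attribute in isolation:

#include <stdio.h>
#include <stdlib.h>

#define __noreturn __attribute__((noreturn))

static __noreturn void machine_halt_demo(void)
{
	puts("halting");
	exit(0); /* satisfies the attribute: control never returns */
}

int main(void)
{
	machine_halt_demo();
	/* The compiler knows this point is unreachable: no "missing
	 * return" warning, and any code here could be discarded. */
}
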
84485diff --git a/include/linux/regset.h b/include/linux/regset.h
84486index 8e0c9fe..ac4d221 100644
84487--- a/include/linux/regset.h
84488+++ b/include/linux/regset.h
84489@@ -161,7 +161,8 @@ struct user_regset {
84490 unsigned int align;
84491 unsigned int bias;
84492 unsigned int core_note_type;
84493-};
84494+} __do_const;
84495+typedef struct user_regset __no_const user_regset_no_const;
84496
84497 /**
84498 * struct user_regset_view - available regsets
84499diff --git a/include/linux/relay.h b/include/linux/relay.h
84500index d7c8359..818daf5 100644
84501--- a/include/linux/relay.h
84502+++ b/include/linux/relay.h
84503@@ -157,7 +157,7 @@ struct rchan_callbacks
84504 * The callback should return 0 if successful, negative if not.
84505 */
84506 int (*remove_buf_file)(struct dentry *dentry);
84507-};
84508+} __no_const;
84509
84510 /*
84511 * CONFIG_RELAY kernel API, kernel/relay.c
84512diff --git a/include/linux/rio.h b/include/linux/rio.h
84513index 6bda06f..bf39a9b 100644
84514--- a/include/linux/rio.h
84515+++ b/include/linux/rio.h
84516@@ -358,7 +358,7 @@ struct rio_ops {
84517 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84518 u64 rstart, u32 size, u32 flags);
84519 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84520-};
84521+} __no_const;
84522
84523 #define RIO_RESOURCE_MEM 0x00000100
84524 #define RIO_RESOURCE_DOORBELL 0x00000200
84525diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84526index d9d7e7e..86f47ac 100644
84527--- a/include/linux/rmap.h
84528+++ b/include/linux/rmap.h
84529@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84530 void anon_vma_init(void); /* create anon_vma_cachep */
84531 int anon_vma_prepare(struct vm_area_struct *);
84532 void unlink_anon_vmas(struct vm_area_struct *);
84533-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84534-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84535+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84536+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84537
84538 static inline void anon_vma_merge(struct vm_area_struct *vma,
84539 struct vm_area_struct *next)
84540diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84541index ed8f9e7..999bc96 100644
84542--- a/include/linux/scatterlist.h
84543+++ b/include/linux/scatterlist.h
84544@@ -1,6 +1,7 @@
84545 #ifndef _LINUX_SCATTERLIST_H
84546 #define _LINUX_SCATTERLIST_H
84547
84548+#include <linux/sched.h>
84549 #include <linux/string.h>
84550 #include <linux/bug.h>
84551 #include <linux/mm.h>
84552@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84553 #ifdef CONFIG_DEBUG_SG
84554 BUG_ON(!virt_addr_valid(buf));
84555 #endif
84556+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84557+ if (object_starts_on_stack(buf)) {
84558+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84559+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84560+ } else
84561+#endif
84562 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84563 }
84564
84565diff --git a/include/linux/sched.h b/include/linux/sched.h
84566index 8db31ef..0af1f81 100644
84567--- a/include/linux/sched.h
84568+++ b/include/linux/sched.h
84569@@ -133,6 +133,7 @@ struct fs_struct;
84570 struct perf_event_context;
84571 struct blk_plug;
84572 struct filename;
84573+struct linux_binprm;
84574
84575 #define VMACACHE_BITS 2
84576 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84577@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
84578 extern int in_sched_functions(unsigned long addr);
84579
84580 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84581-extern signed long schedule_timeout(signed long timeout);
84582+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84583 extern signed long schedule_timeout_interruptible(signed long timeout);
84584 extern signed long schedule_timeout_killable(signed long timeout);
84585 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84586@@ -426,6 +427,19 @@ struct nsproxy;
84587 struct user_namespace;
84588
84589 #ifdef CONFIG_MMU
84590+
84591+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84592+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84593+#else
84594+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84595+{
84596+ return 0;
84597+}
84598+#endif
84599+
84600+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84601+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84602+
84603 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84604 extern unsigned long
84605 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84606@@ -724,6 +738,17 @@ struct signal_struct {
84607 #ifdef CONFIG_TASKSTATS
84608 struct taskstats *stats;
84609 #endif
84610+
84611+#ifdef CONFIG_GRKERNSEC
84612+ u32 curr_ip;
84613+ u32 saved_ip;
84614+ u32 gr_saddr;
84615+ u32 gr_daddr;
84616+ u16 gr_sport;
84617+ u16 gr_dport;
84618+ u8 used_accept:1;
84619+#endif
84620+
84621 #ifdef CONFIG_AUDIT
84622 unsigned audit_tty;
84623 unsigned audit_tty_log_passwd;
84624@@ -750,7 +775,7 @@ struct signal_struct {
84625 struct mutex cred_guard_mutex; /* guard against foreign influences on
84626 * credential calculations
84627 * (notably. ptrace) */
84628-};
84629+} __randomize_layout;
84630
84631 /*
84632 * Bits in flags field of signal_struct.
84633@@ -803,6 +828,14 @@ struct user_struct {
84634 struct key *session_keyring; /* UID's default session keyring */
84635 #endif
84636
84637+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84638+ unsigned char kernel_banned;
84639+#endif
84640+#ifdef CONFIG_GRKERNSEC_BRUTE
84641+ unsigned char suid_banned;
84642+ unsigned long suid_ban_expires;
84643+#endif
84644+
84645 /* Hash table maintenance information */
84646 struct hlist_node uidhash_node;
84647 kuid_t uid;
84648@@ -810,7 +843,7 @@ struct user_struct {
84649 #ifdef CONFIG_PERF_EVENTS
84650 atomic_long_t locked_vm;
84651 #endif
84652-};
84653+} __randomize_layout;
84654
84655 extern int uids_sysfs_init(void);
84656
84657@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
84658 struct task_struct {
84659 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84660 void *stack;
84661+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84662+ void *lowmem_stack;
84663+#endif
84664 atomic_t usage;
84665 unsigned int flags; /* per process flags, defined below */
84666 unsigned int ptrace;
84667@@ -1405,8 +1441,8 @@ struct task_struct {
84668 struct list_head thread_node;
84669
84670 struct completion *vfork_done; /* for vfork() */
84671- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84672- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84673+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84674+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84675
84676 cputime_t utime, stime, utimescaled, stimescaled;
84677 cputime_t gtime;
84678@@ -1431,11 +1467,6 @@ struct task_struct {
84679 struct task_cputime cputime_expires;
84680 struct list_head cpu_timers[3];
84681
84682-/* process credentials */
84683- const struct cred __rcu *real_cred; /* objective and real subjective task
84684- * credentials (COW) */
84685- const struct cred __rcu *cred; /* effective (overridable) subjective task
84686- * credentials (COW) */
84687 char comm[TASK_COMM_LEN]; /* executable name excluding path
84688 - access with [gs]et_task_comm (which lock
84689 it with task_lock())
84690@@ -1453,6 +1484,10 @@ struct task_struct {
84691 #endif
84692 /* CPU-specific state of this task */
84693 struct thread_struct thread;
84694+/* thread_info moved to task_struct */
84695+#ifdef CONFIG_X86
84696+ struct thread_info tinfo;
84697+#endif
84698 /* filesystem information */
84699 struct fs_struct *fs;
84700 /* open file information */
84701@@ -1527,6 +1562,10 @@ struct task_struct {
84702 gfp_t lockdep_reclaim_gfp;
84703 #endif
84704
84705+/* process credentials */
84706+ const struct cred __rcu *real_cred; /* objective and real subjective task
84707+ * credentials (COW) */
84708+
84709 /* journalling filesystem info */
84710 void *journal_info;
84711
84712@@ -1565,6 +1604,10 @@ struct task_struct {
84713 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84714 struct list_head cg_list;
84715 #endif
84716+
84717+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84718+ * credentials (COW) */
84719+
84720 #ifdef CONFIG_FUTEX
84721 struct robust_list_head __user *robust_list;
84722 #ifdef CONFIG_COMPAT
84723@@ -1673,7 +1716,7 @@ struct task_struct {
84724 * Number of functions that haven't been traced
84725 * because of depth overrun.
84726 */
84727- atomic_t trace_overrun;
84728+ atomic_unchecked_t trace_overrun;
84729 /* Pause for the tracing */
84730 atomic_t tracing_graph_pause;
84731 #endif
84732@@ -1701,7 +1744,78 @@ struct task_struct {
84733 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
84734 unsigned long task_state_change;
84735 #endif
84736-};
84737+
84738+#ifdef CONFIG_GRKERNSEC
84739+ /* grsecurity */
84740+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84741+ u64 exec_id;
84742+#endif
84743+#ifdef CONFIG_GRKERNSEC_SETXID
84744+ const struct cred *delayed_cred;
84745+#endif
84746+ struct dentry *gr_chroot_dentry;
84747+ struct acl_subject_label *acl;
84748+ struct acl_subject_label *tmpacl;
84749+ struct acl_role_label *role;
84750+ struct file *exec_file;
84751+ unsigned long brute_expires;
84752+ u16 acl_role_id;
84753+ u8 inherited;
84754+ /* is this the task that authenticated to the special role */
84755+ u8 acl_sp_role;
84756+ u8 is_writable;
84757+ u8 brute;
84758+ u8 gr_is_chrooted;
84759+#endif
84760+
84761+} __randomize_layout;
84762+
84763+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84764+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84765+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84766+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84767+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84768+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84769+
84770+#ifdef CONFIG_PAX_SOFTMODE
84771+extern int pax_softmode;
84772+#endif
84773+
84774+extern int pax_check_flags(unsigned long *);
84775+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84776+
84777+/* if tsk != current then task_lock must be held on it */
84778+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84779+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84780+{
84781+ if (likely(tsk->mm))
84782+ return tsk->mm->pax_flags;
84783+ else
84784+ return 0UL;
84785+}
84786+
84787+/* if tsk != current then task_lock must be held on it */
84788+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84789+{
84790+ if (likely(tsk->mm)) {
84791+ tsk->mm->pax_flags = flags;
84792+ return 0;
84793+ }
84794+ return -EINVAL;
84795+}
84796+#endif
84797+
84798+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84799+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84800+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84801+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84802+#endif
84803+
84804+struct path;
84805+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
84806+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
84807+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
84808+extern void pax_report_refcount_overflow(struct pt_regs *regs);
84809
84810 /* Future-safe accessor for struct task_struct's cpus_allowed. */
84811 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
84812@@ -1783,7 +1897,7 @@ struct pid_namespace;
84813 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
84814 struct pid_namespace *ns);
84815
84816-static inline pid_t task_pid_nr(struct task_struct *tsk)
84817+static inline pid_t task_pid_nr(const struct task_struct *tsk)
84818 {
84819 return tsk->pid;
84820 }
84821@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
84822
84823 extern void sched_clock_init(void);
84824
84825+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84826+static inline void populate_stack(void)
84827+{
84828+ struct task_struct *curtask = current;
84829+ int c;
84830+ int *ptr = curtask->stack;
84831+ int *end = curtask->stack + THREAD_SIZE;
84832+
84833+ while (ptr < end) {
84834+ c = *(volatile int *)ptr;
84835+ ptr += PAGE_SIZE/sizeof(int);
84836+ }
84837+}
84838+#else
84839+static inline void populate_stack(void)
84840+{
84841+}
84842+#endif
84843+
84844 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
84845 static inline void sched_clock_tick(void)
84846 {
84847@@ -2283,7 +2416,9 @@ void yield(void);
84848 extern struct exec_domain default_exec_domain;
84849
84850 union thread_union {
84851+#ifndef CONFIG_X86
84852 struct thread_info thread_info;
84853+#endif
84854 unsigned long stack[THREAD_SIZE/sizeof(long)];
84855 };
84856
84857@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
84858 */
84859
84860 extern struct task_struct *find_task_by_vpid(pid_t nr);
84861+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
84862 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
84863 struct pid_namespace *ns);
84864
84865@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
84866 extern void exit_itimers(struct signal_struct *);
84867 extern void flush_itimer_signals(void);
84868
84869-extern void do_group_exit(int);
84870+extern __noreturn void do_group_exit(int);
84871
84872 extern int do_execve(struct filename *,
84873 const char __user * const __user *,
84874@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
84875 #define task_stack_end_corrupted(task) \
84876 (*(end_of_stack(task)) != STACK_END_MAGIC)
84877
84878-static inline int object_is_on_stack(void *obj)
84879+static inline int object_starts_on_stack(const void *obj)
84880 {
84881- void *stack = task_stack_page(current);
84882+ const void *stack = task_stack_page(current);
84883
84884 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
84885 }
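
The sched.h changes are wide-ranging (the two cred pointers split far apart inside task_struct to frustrate single-overwrite credential attacks, grsecurity bookkeeping fields, const-correct accessors), but populate_stack() deserves a note: vmalloc-backed stacks are demand-faulted, so it reads one word per page through a volatile load, pre-faulting the whole stack at a moment when taking a fault is safe rather than deep inside some later kernel path. A user-space rendering of the walk:

#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE (16 * 1024)
#define PAGE_SIZE   (4 * 1024)

/* Touch one int per page so every stack page is faulted in now,
 * not at some arbitrary later point. */
static void populate_stack(void *stack)
{
	int *ptr = stack;
	int *end = (int *)((char *)stack + THREAD_SIZE);
	int c;

	while (ptr < end) {
		c = *(volatile int *)ptr; /* volatile: the load must happen */
		ptr += PAGE_SIZE / sizeof(int);
	}
	(void)c;
}

int main(void)
{
	void *stack = calloc(1, THREAD_SIZE);

	if (!stack)
		return 1;
	populate_stack(stack);
	puts("stack pre-faulted");
	free(stack);
	return 0;
}
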
84886diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
84887index 596a0e0..bea77ec 100644
84888--- a/include/linux/sched/sysctl.h
84889+++ b/include/linux/sched/sysctl.h
84890@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
84891 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
84892
84893 extern int sysctl_max_map_count;
84894+extern unsigned long sysctl_heap_stack_gap;
84895
84896 extern unsigned int sysctl_sched_latency;
84897 extern unsigned int sysctl_sched_min_granularity;
84898diff --git a/include/linux/security.h b/include/linux/security.h
84899index ba96471..74fb3f6 100644
84900--- a/include/linux/security.h
84901+++ b/include/linux/security.h
84902@@ -27,6 +27,7 @@
84903 #include <linux/slab.h>
84904 #include <linux/err.h>
84905 #include <linux/string.h>
84906+#include <linux/grsecurity.h>
84907
84908 struct linux_binprm;
84909 struct cred;
84910@@ -116,8 +117,6 @@ struct seq_file;
84911
84912 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
84913
84914-void reset_security_ops(void);
84915-
84916 #ifdef CONFIG_MMU
84917 extern unsigned long mmap_min_addr;
84918 extern unsigned long dac_mmap_min_addr;
84919@@ -1729,7 +1728,7 @@ struct security_operations {
84920 struct audit_context *actx);
84921 void (*audit_rule_free) (void *lsmrule);
84922 #endif /* CONFIG_AUDIT */
84923-};
84924+} __randomize_layout;
84925
84926 /* prototypes */
84927 extern int security_init(void);
84928diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
84929index dc368b8..e895209 100644
84930--- a/include/linux/semaphore.h
84931+++ b/include/linux/semaphore.h
84932@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
84933 }
84934
84935 extern void down(struct semaphore *sem);
84936-extern int __must_check down_interruptible(struct semaphore *sem);
84937+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
84938 extern int __must_check down_killable(struct semaphore *sem);
84939 extern int __must_check down_trylock(struct semaphore *sem);
84940 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
84941diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
84942index cf6a9da..bd86b1f 100644
84943--- a/include/linux/seq_file.h
84944+++ b/include/linux/seq_file.h
84945@@ -27,6 +27,9 @@ struct seq_file {
84946 struct mutex lock;
84947 const struct seq_operations *op;
84948 int poll_event;
84949+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84950+ u64 exec_id;
84951+#endif
84952 #ifdef CONFIG_USER_NS
84953 struct user_namespace *user_ns;
84954 #endif
84955@@ -39,6 +42,7 @@ struct seq_operations {
84956 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
84957 int (*show) (struct seq_file *m, void *v);
84958 };
84959+typedef struct seq_operations __no_const seq_operations_no_const;
84960
84961 #define SEQ_SKIP 1
84962
84963@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
84964
84965 char *mangle_path(char *s, const char *p, const char *esc);
84966 int seq_open(struct file *, const struct seq_operations *);
84967+int seq_open_restrict(struct file *, const struct seq_operations *);
84968 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
84969 loff_t seq_lseek(struct file *, loff_t, int);
84970 int seq_release(struct inode *, struct file *);
84971@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
84972 }
84973
84974 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
84975+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
84976 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
84977 int single_release(struct inode *, struct file *);
84978 void *__seq_open_private(struct file *, const struct seq_operations *, int);
84979diff --git a/include/linux/shm.h b/include/linux/shm.h
84980index 6fb8016..ab4465e 100644
84981--- a/include/linux/shm.h
84982+++ b/include/linux/shm.h
84983@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
84984 /* The task created the shm object. NULL if the task is dead. */
84985 struct task_struct *shm_creator;
84986 struct list_head shm_clist; /* list by creator */
84987+#ifdef CONFIG_GRKERNSEC
84988+ u64 shm_createtime;
84989+ pid_t shm_lapid;
84990+#endif
84991 };
84992
84993 /* shm_mode upper byte flags */
84994diff --git a/include/linux/signal.h b/include/linux/signal.h
84995index ab1e039..ad4229e 100644
84996--- a/include/linux/signal.h
84997+++ b/include/linux/signal.h
84998@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
84999 * know it'll be handled, so that they don't get converted to
85000 * SIGKILL or just silently dropped.
85001 */
85002- kernel_sigaction(sig, (__force __sighandler_t)2);
85003+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85004 }
85005
85006 static inline void disallow_signal(int sig)
85007diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85008index 85ab7d7..eb1585a 100644
85009--- a/include/linux/skbuff.h
85010+++ b/include/linux/skbuff.h
85011@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85012 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85013 int node);
85014 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85015-static inline struct sk_buff *alloc_skb(unsigned int size,
85016+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85017 gfp_t priority)
85018 {
85019 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85020@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85021 return skb->inner_transport_header - skb->inner_network_header;
85022 }
85023
85024-static inline int skb_network_offset(const struct sk_buff *skb)
85025+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85026 {
85027 return skb_network_header(skb) - skb->data;
85028 }
85029@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85030 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85031 */
85032 #ifndef NET_SKB_PAD
85033-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85034+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85035 #endif
85036
85037 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85038@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85039 int *err);
85040 unsigned int datagram_poll(struct file *file, struct socket *sock,
85041 struct poll_table_struct *wait);
85042-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85043+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85044 struct iov_iter *to, int size);
85045-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85046+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85047 struct msghdr *msg, int size)
85048 {
85049 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85050@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
85051 nf_bridge_put(skb->nf_bridge);
85052 skb->nf_bridge = NULL;
85053 #endif
85054+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85055+ skb->nf_trace = 0;
85056+#endif
85057 }
85058
85059 static inline void nf_reset_trace(struct sk_buff *skb)
85060diff --git a/include/linux/slab.h b/include/linux/slab.h
85061index 9a139b6..aab37b4 100644
85062--- a/include/linux/slab.h
85063+++ b/include/linux/slab.h
85064@@ -14,15 +14,29 @@
85065 #include <linux/gfp.h>
85066 #include <linux/types.h>
85067 #include <linux/workqueue.h>
85068-
85069+#include <linux/err.h>
85070
85071 /*
85072 * Flags to pass to kmem_cache_create().
85073 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85074 */
85075 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85076+
85077+#ifdef CONFIG_PAX_USERCOPY_SLABS
85078+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85079+#else
85080+#define SLAB_USERCOPY 0x00000000UL
85081+#endif
85082+
85083 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85084 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85085+
85086+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85087+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85088+#else
85089+#define SLAB_NO_SANITIZE 0x00000000UL
85090+#endif
85091+
85092 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85093 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85094 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85095@@ -98,10 +112,13 @@
85096 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85097 * Both make kfree a no-op.
85098 */
85099-#define ZERO_SIZE_PTR ((void *)16)
85100+#define ZERO_SIZE_PTR \
85101+({ \
85102+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85103+ (void *)(-MAX_ERRNO-1L); \
85104+})
85105
85106-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85107- (unsigned long)ZERO_SIZE_PTR)
85108+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85109
85110 #include <linux/kmemleak.h>
85111
85112@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85113 void kfree(const void *);
85114 void kzfree(const void *);
85115 size_t ksize(const void *);
85116+const char *check_heap_object(const void *ptr, unsigned long n);
85117+bool is_usercopy_object(const void *ptr);
85118
85119 /*
85120 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85121@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85122 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85123 #endif
85124
85125+#ifdef CONFIG_PAX_USERCOPY_SLABS
85126+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85127+#endif
85128+
85129 /*
85130 * Figure out which kmalloc slab an allocation of a certain size
85131 * belongs to.
85132@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85133 * 2 = 120 .. 192 bytes
85134 * n = 2^(n-1) .. 2^n -1
85135 */
85136-static __always_inline int kmalloc_index(size_t size)
85137+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85138 {
85139 if (!size)
85140 return 0;
85141@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
85142 }
85143 #endif /* !CONFIG_SLOB */
85144
85145-void *__kmalloc(size_t size, gfp_t flags);
85146+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85147 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85148
85149 #ifdef CONFIG_NUMA
85150-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85151+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85152 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85153 #else
85154-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85155+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85156 {
85157 return __kmalloc(size, flags);
85158 }
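
Two related hardening tweaks in the slab.h hunk: ZERO_SIZE_PTR moves from (void *)16, a userland-mappable address, to -MAX_ERRNO-1 (0x...F000 on 64-bit), the slot immediately below the ERR_PTR range; and ZERO_OR_NULL_PTR becomes a single wrap-around comparison that is true for NULL, for ZERO_SIZE_PTR, and for every error pointer above it, so kfree() and friends also swallow ERR_PTR values instead of dereferencing near-NULL memory. The arithmetic, checked in user space:

#include <stdio.h>

#define MAX_ERRNO 4095UL

#define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	void *err = (void *)-12L; /* an ERR_PTR(-ENOMEM)-style value */

	/* One unsigned comparison covers NULL (0 - 1 wraps to ULONG_MAX),
	 * ZERO_SIZE_PTR itself, and every error pointer above it. */
	printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));
	printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));
	printf("ERR_PTR       -> %d\n", ZERO_OR_NULL_PTR(err));
	printf("real pointer  -> %d\n", ZERO_OR_NULL_PTR(&err));
	return 0;
}
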
85159diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85160index b869d16..1453c73 100644
85161--- a/include/linux/slab_def.h
85162+++ b/include/linux/slab_def.h
85163@@ -40,7 +40,7 @@ struct kmem_cache {
85164 /* 4) cache creation/removal */
85165 const char *name;
85166 struct list_head list;
85167- int refcount;
85168+ atomic_t refcount;
85169 int object_size;
85170 int align;
85171
85172@@ -56,10 +56,14 @@ struct kmem_cache {
85173 unsigned long node_allocs;
85174 unsigned long node_frees;
85175 unsigned long node_overflow;
85176- atomic_t allochit;
85177- atomic_t allocmiss;
85178- atomic_t freehit;
85179- atomic_t freemiss;
85180+ atomic_unchecked_t allochit;
85181+ atomic_unchecked_t allocmiss;
85182+ atomic_unchecked_t freehit;
85183+ atomic_unchecked_t freemiss;
85184+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85185+ atomic_unchecked_t sanitized;
85186+ atomic_unchecked_t not_sanitized;
85187+#endif
85188
85189 /*
85190 * If debugging is enabled, then the allocator can add additional
85191diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85192index d82abd4..408c3a0 100644
85193--- a/include/linux/slub_def.h
85194+++ b/include/linux/slub_def.h
85195@@ -74,7 +74,7 @@ struct kmem_cache {
85196 struct kmem_cache_order_objects max;
85197 struct kmem_cache_order_objects min;
85198 gfp_t allocflags; /* gfp flags to use on each alloc */
85199- int refcount; /* Refcount for slab cache destroy */
85200+ atomic_t refcount; /* Refcount for slab cache destroy */
85201 void (*ctor)(void *);
85202 int inuse; /* Offset to metadata */
85203 int align; /* Alignment */
85204diff --git a/include/linux/smp.h b/include/linux/smp.h
85205index 93dff5f..933c561 100644
85206--- a/include/linux/smp.h
85207+++ b/include/linux/smp.h
85208@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) { }
85209 #endif
85210
85211 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85212+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85213 #define put_cpu() preempt_enable()
85214+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85215
85216 /*
85217 * Callback to arch code if there's nosmp or maxcpus=0 on the
85218diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85219index 46cca4c..3323536 100644
85220--- a/include/linux/sock_diag.h
85221+++ b/include/linux/sock_diag.h
85222@@ -11,7 +11,7 @@ struct sock;
85223 struct sock_diag_handler {
85224 __u8 family;
85225 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85226-};
85227+} __do_const;
85228
85229 int sock_diag_register(const struct sock_diag_handler *h);
85230 void sock_diag_unregister(const struct sock_diag_handler *h);
85231diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85232index 680f9a3..f13aeb0 100644
85233--- a/include/linux/sonet.h
85234+++ b/include/linux/sonet.h
85235@@ -7,7 +7,7 @@
85236 #include <uapi/linux/sonet.h>
85237
85238 struct k_sonet_stats {
85239-#define __HANDLE_ITEM(i) atomic_t i
85240+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85241 __SONET_ITEMS
85242 #undef __HANDLE_ITEM
85243 };
85244diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85245index 07d8e53..dc934c9 100644
85246--- a/include/linux/sunrpc/addr.h
85247+++ b/include/linux/sunrpc/addr.h
85248@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85249 {
85250 switch (sap->sa_family) {
85251 case AF_INET:
85252- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85253+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85254 case AF_INET6:
85255- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85256+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85257 }
85258 return 0;
85259 }
85260@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85261 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85262 const struct sockaddr *src)
85263 {
85264- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85265+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85266 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85267
85268 dsin->sin_family = ssin->sin_family;
85269@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85270 if (sa->sa_family != AF_INET6)
85271 return 0;
85272
85273- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85274+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85275 }
85276
85277 #endif /* _LINUX_SUNRPC_ADDR_H */
85278diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85279index 598ba80..d90cba6 100644
85280--- a/include/linux/sunrpc/clnt.h
85281+++ b/include/linux/sunrpc/clnt.h
85282@@ -100,7 +100,7 @@ struct rpc_procinfo {
85283 unsigned int p_timer; /* Which RTT timer to use */
85284 u32 p_statidx; /* Which procedure to account */
85285 const char * p_name; /* name of procedure */
85286-};
85287+} __do_const;
85288
85289 #ifdef __KERNEL__
85290
85291diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85292index 6f22cfe..9fd0909 100644
85293--- a/include/linux/sunrpc/svc.h
85294+++ b/include/linux/sunrpc/svc.h
85295@@ -420,7 +420,7 @@ struct svc_procedure {
85296 unsigned int pc_count; /* call count */
85297 unsigned int pc_cachetype; /* cache info (NFS) */
85298 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85299-};
85300+} __do_const;
85301
85302 /*
85303 * Function prototypes.
85304diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85305index 975da75..318c083 100644
85306--- a/include/linux/sunrpc/svc_rdma.h
85307+++ b/include/linux/sunrpc/svc_rdma.h
85308@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85309 extern unsigned int svcrdma_max_requests;
85310 extern unsigned int svcrdma_max_req_size;
85311
85312-extern atomic_t rdma_stat_recv;
85313-extern atomic_t rdma_stat_read;
85314-extern atomic_t rdma_stat_write;
85315-extern atomic_t rdma_stat_sq_starve;
85316-extern atomic_t rdma_stat_rq_starve;
85317-extern atomic_t rdma_stat_rq_poll;
85318-extern atomic_t rdma_stat_rq_prod;
85319-extern atomic_t rdma_stat_sq_poll;
85320-extern atomic_t rdma_stat_sq_prod;
85321+extern atomic_unchecked_t rdma_stat_recv;
85322+extern atomic_unchecked_t rdma_stat_read;
85323+extern atomic_unchecked_t rdma_stat_write;
85324+extern atomic_unchecked_t rdma_stat_sq_starve;
85325+extern atomic_unchecked_t rdma_stat_rq_starve;
85326+extern atomic_unchecked_t rdma_stat_rq_poll;
85327+extern atomic_unchecked_t rdma_stat_rq_prod;
85328+extern atomic_unchecked_t rdma_stat_sq_poll;
85329+extern atomic_unchecked_t rdma_stat_sq_prod;
85330
85331 #define RPCRDMA_VERSION 1
85332
85333diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85334index 8d71d65..f79586e 100644
85335--- a/include/linux/sunrpc/svcauth.h
85336+++ b/include/linux/sunrpc/svcauth.h
85337@@ -120,7 +120,7 @@ struct auth_ops {
85338 int (*release)(struct svc_rqst *rq);
85339 void (*domain_release)(struct auth_domain *);
85340 int (*set_client)(struct svc_rqst *rq);
85341-};
85342+} __do_const;
85343
85344 #define SVC_GARBAGE 1
85345 #define SVC_SYSERR 2
85346diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85347index e7a018e..49f8b17 100644
85348--- a/include/linux/swiotlb.h
85349+++ b/include/linux/swiotlb.h
85350@@ -60,7 +60,8 @@ extern void
85351
85352 extern void
85353 swiotlb_free_coherent(struct device *hwdev, size_t size,
85354- void *vaddr, dma_addr_t dma_handle);
85355+ void *vaddr, dma_addr_t dma_handle,
85356+ struct dma_attrs *attrs);
85357
85358 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85359 unsigned long offset, size_t size,
85360diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85361index 85893d7..4923581 100644
85362--- a/include/linux/syscalls.h
85363+++ b/include/linux/syscalls.h
85364@@ -99,10 +99,16 @@ union bpf_attr;
85365 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
85366
85367 #define __SC_DECL(t, a) t a
85368+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
85369 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85370 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85371 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85372-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85373+#define __SC_LONG(t, a) __typeof( \
85374+ __builtin_choose_expr( \
85375+ sizeof(t) > sizeof(int), \
85376+ (t) 0, \
85377+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
85378+ )) a
85379 #define __SC_CAST(t, a) (t) a
85380 #define __SC_ARGS(t, a) a
85381 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85382@@ -384,11 +390,11 @@ asmlinkage long sys_sync(void);
85383 asmlinkage long sys_fsync(unsigned int fd);
85384 asmlinkage long sys_fdatasync(unsigned int fd);
85385 asmlinkage long sys_bdflush(int func, long data);
85386-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85387- char __user *type, unsigned long flags,
85388+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85389+ const char __user *type, unsigned long flags,
85390 void __user *data);
85391-asmlinkage long sys_umount(char __user *name, int flags);
85392-asmlinkage long sys_oldumount(char __user *name);
85393+asmlinkage long sys_umount(const char __user *name, int flags);
85394+asmlinkage long sys_oldumount(const char __user *name);
85395 asmlinkage long sys_truncate(const char __user *path, long length);
85396 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85397 asmlinkage long sys_stat(const char __user *filename,
85398@@ -600,7 +606,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85399 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85400 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85401 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85402- struct sockaddr __user *, int);
85403+ struct sockaddr __user *, int) __intentional_overflow(0);
85404 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
85405 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85406 unsigned int vlen, unsigned flags);
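
The __SC_LONG rewrite above changes how sub-long syscall arguments are widened in the SYSCALL_DEFINE wrappers: unsigned 32-bit arguments are now zero-extended instead of sign-extended, closing a class of bugs where a large user-supplied size becomes a negative long on 64-bit kernels. A small demonstration, assuming a 64-bit long:

#include <stdio.h>

int main(void)
{
	unsigned int len = 0xffffffffu;	/* user-controlled size argument */

	long sign_extended = (long)(int)len;           /* old widening: -1 */
	long zero_extended = (long)(unsigned int)len;  /* new widening: 4294967295 */

	printf("%ld vs %ld\n", sign_extended, zero_extended);
	return 0;
}
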
85407diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85408index 27b3b0b..e093dd9 100644
85409--- a/include/linux/syscore_ops.h
85410+++ b/include/linux/syscore_ops.h
85411@@ -16,7 +16,7 @@ struct syscore_ops {
85412 int (*suspend)(void);
85413 void (*resume)(void);
85414 void (*shutdown)(void);
85415-};
85416+} __do_const;
85417
85418 extern void register_syscore_ops(struct syscore_ops *ops);
85419 extern void unregister_syscore_ops(struct syscore_ops *ops);
85420diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85421index b7361f8..341a15a 100644
85422--- a/include/linux/sysctl.h
85423+++ b/include/linux/sysctl.h
85424@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85425
85426 extern int proc_dostring(struct ctl_table *, int,
85427 void __user *, size_t *, loff_t *);
85428+extern int proc_dostring_modpriv(struct ctl_table *, int,
85429+ void __user *, size_t *, loff_t *);
85430 extern int proc_dointvec(struct ctl_table *, int,
85431 void __user *, size_t *, loff_t *);
85432 extern int proc_dointvec_minmax(struct ctl_table *, int,
85433@@ -113,7 +115,8 @@ struct ctl_table
85434 struct ctl_table_poll *poll;
85435 void *extra1;
85436 void *extra2;
85437-};
85438+} __do_const __randomize_layout;
85439+typedef struct ctl_table __no_const ctl_table_no_const;
85440
85441 struct ctl_node {
85442 struct rb_node node;
85443diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85444index ddad161..a3efd26 100644
85445--- a/include/linux/sysfs.h
85446+++ b/include/linux/sysfs.h
85447@@ -34,7 +34,8 @@ struct attribute {
85448 struct lock_class_key *key;
85449 struct lock_class_key skey;
85450 #endif
85451-};
85452+} __do_const;
85453+typedef struct attribute __no_const attribute_no_const;
85454
85455 /**
85456 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85457@@ -63,7 +64,8 @@ struct attribute_group {
85458 struct attribute *, int);
85459 struct attribute **attrs;
85460 struct bin_attribute **bin_attrs;
85461-};
85462+} __do_const;
85463+typedef struct attribute_group __no_const attribute_group_no_const;
85464
85465 /**
85466 * Use these macros to make defining attributes easier. See include/linux/device.h
85467@@ -137,7 +139,8 @@ struct bin_attribute {
85468 char *, loff_t, size_t);
85469 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85470 struct vm_area_struct *vma);
85471-};
85472+} __do_const;
85473+typedef struct bin_attribute __no_const bin_attribute_no_const;
85474
85475 /**
85476 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
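
Every __do_const added above feeds the constify gcc plugin: structures consisting (almost) entirely of function pointers are forced const so they land in read-only memory and cannot be hijacked at runtime, and the generated *_no_const typedefs (attribute_no_const, bin_attribute_no_const, ctl_table_no_const, ...) are the opt-out for the few instances that genuinely must be built or patched dynamically. A rough user-space illustration of the pattern:

#include <stdio.h>

struct ops {
	void (*show)(void);
};

typedef struct ops ops_no_const;	/* escape hatch for writable copies */

static void hello(void) { puts("hello"); }

/* with the plugin this lands in .rodata; writes through it fault */
static const struct ops fixed_ops = { .show = hello };

int main(void)
{
	ops_no_const runtime_ops = { .show = NULL };

	runtime_ops.show = hello;	/* fine: this instance opted out */
	fixed_ops.show();
	runtime_ops.show();
	return 0;
}
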
85477diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85478index 387fa7d..3fcde6b 100644
85479--- a/include/linux/sysrq.h
85480+++ b/include/linux/sysrq.h
85481@@ -16,6 +16,7 @@
85482
85483 #include <linux/errno.h>
85484 #include <linux/types.h>
85485+#include <linux/compiler.h>
85486
85487 /* Possible values of bitmask for enabling sysrq functions */
85488 /* 0x0001 is reserved for enable everything */
85489@@ -33,7 +34,7 @@ struct sysrq_key_op {
85490 char *help_msg;
85491 char *action_msg;
85492 int enable_mask;
85493-};
85494+} __do_const;
85495
85496 #ifdef CONFIG_MAGIC_SYSRQ
85497
85498diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85499index ff307b5..f1a4468 100644
85500--- a/include/linux/thread_info.h
85501+++ b/include/linux/thread_info.h
85502@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85503 #error "no set_restore_sigmask() provided and default one won't work"
85504 #endif
85505
85506+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85507+
85508+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85509+{
85510+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85511+}
85512+
85513 #endif /* __KERNEL__ */
85514
85515 #endif /* _LINUX_THREAD_INFO_H */
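
check_object_size() above is the hook for the USERCOPY hardening: each copy between kernel and user space is bounds-checked against the object backing the kernel pointer, and __builtin_constant_p(n) lets compile-time-sized copies take a cheaper path. A toy sketch of the runtime check against one known buffer (the real __check_object_size walks slab and stack metadata; __builtin_constant_p is the GCC builtin):

#include <stdbool.h>
#include <stdio.h>

static char obj[64];	/* stand-in for a slab object */

static void __check_object_size(const void *ptr, unsigned long n,
				bool to_user, bool const_size)
{
	const char *p = ptr;

	(void)to_user;
	if (const_size)		/* compile-time sizes: cheap path */
		return;
	/* the kernel would report and kill the offender here */
	if (p < obj || n > sizeof(obj) || p + n > obj + sizeof(obj))
		fprintf(stderr, "usercopy: %lu-byte copy overflows object\n", n);
}

static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{
	__check_object_size(ptr, n, to_user, __builtin_constant_p(n));
}

int main(void)
{
	unsigned long n = sizeof(obj) + 1;	/* runtime size, one too big */

	check_object_size(obj, n, true);
	return 0;
}
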
85516diff --git a/include/linux/tty.h b/include/linux/tty.h
85517index 7d66ae5..0327149 100644
85518--- a/include/linux/tty.h
85519+++ b/include/linux/tty.h
85520@@ -202,7 +202,7 @@ struct tty_port {
85521 const struct tty_port_operations *ops; /* Port operations */
85522 spinlock_t lock; /* Lock protecting tty field */
85523 int blocked_open; /* Waiting to open */
85524- int count; /* Usage count */
85525+ atomic_t count; /* Usage count */
85526 wait_queue_head_t open_wait; /* Open waiters */
85527 wait_queue_head_t close_wait; /* Close waiters */
85528 wait_queue_head_t delta_msr_wait; /* Modem status change */
85529@@ -290,7 +290,7 @@ struct tty_struct {
85530 /* If the tty has a pending do_SAK, queue it here - akpm */
85531 struct work_struct SAK_work;
85532 struct tty_port *port;
85533-};
85534+} __randomize_layout;
85535
85536 /* Each of a tty's open files has private_data pointing to tty_file_private */
85537 struct tty_file_private {
85538@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
85539 struct tty_struct *tty, struct file *filp);
85540 static inline int tty_port_users(struct tty_port *port)
85541 {
85542- return port->count + port->blocked_open;
85543+ return atomic_read(&port->count) + port->blocked_open;
85544 }
85545
85546 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
85547diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85548index 92e337c..f46757b 100644
85549--- a/include/linux/tty_driver.h
85550+++ b/include/linux/tty_driver.h
85551@@ -291,7 +291,7 @@ struct tty_operations {
85552 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85553 #endif
85554 const struct file_operations *proc_fops;
85555-};
85556+} __do_const __randomize_layout;
85557
85558 struct tty_driver {
85559 int magic; /* magic number for this structure */
85560@@ -325,7 +325,7 @@ struct tty_driver {
85561
85562 const struct tty_operations *ops;
85563 struct list_head tty_drivers;
85564-};
85565+} __randomize_layout;
85566
85567 extern struct list_head tty_drivers;
85568
85569diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85570index 00c9d68..bc0188b 100644
85571--- a/include/linux/tty_ldisc.h
85572+++ b/include/linux/tty_ldisc.h
85573@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85574
85575 struct module *owner;
85576
85577- int refcount;
85578+ atomic_t refcount;
85579 };
85580
85581 struct tty_ldisc {
85582diff --git a/include/linux/types.h b/include/linux/types.h
85583index a0bb704..f511c77 100644
85584--- a/include/linux/types.h
85585+++ b/include/linux/types.h
85586@@ -177,10 +177,26 @@ typedef struct {
85587 int counter;
85588 } atomic_t;
85589
85590+#ifdef CONFIG_PAX_REFCOUNT
85591+typedef struct {
85592+ int counter;
85593+} atomic_unchecked_t;
85594+#else
85595+typedef atomic_t atomic_unchecked_t;
85596+#endif
85597+
85598 #ifdef CONFIG_64BIT
85599 typedef struct {
85600 long counter;
85601 } atomic64_t;
85602+
85603+#ifdef CONFIG_PAX_REFCOUNT
85604+typedef struct {
85605+ long counter;
85606+} atomic64_unchecked_t;
85607+#else
85608+typedef atomic64_t atomic64_unchecked_t;
85609+#endif
85610 #endif
85611
85612 struct list_head {
85613diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85614index ecd3319..8a36ded 100644
85615--- a/include/linux/uaccess.h
85616+++ b/include/linux/uaccess.h
85617@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85618 long ret; \
85619 mm_segment_t old_fs = get_fs(); \
85620 \
85621- set_fs(KERNEL_DS); \
85622 pagefault_disable(); \
85623- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85624- pagefault_enable(); \
85625+ set_fs(KERNEL_DS); \
85626+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85627 set_fs(old_fs); \
85628+ pagefault_enable(); \
85629 ret; \
85630 })
85631
85632diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85633index 2d1f9b6..d7a9fce 100644
85634--- a/include/linux/uidgid.h
85635+++ b/include/linux/uidgid.h
85636@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85637
85638 #endif /* CONFIG_USER_NS */
85639
85640+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85641+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85642+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85643+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85644+
85645 #endif /* _LINUX_UIDGID_H */
85646diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
85647index 32c0e83..671eb35 100644
85648--- a/include/linux/uio_driver.h
85649+++ b/include/linux/uio_driver.h
85650@@ -67,7 +67,7 @@ struct uio_device {
85651 struct module *owner;
85652 struct device *dev;
85653 int minor;
85654- atomic_t event;
85655+ atomic_unchecked_t event;
85656 struct fasync_struct *async_queue;
85657 wait_queue_head_t wait;
85658 struct uio_info *info;
85659diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85660index 99c1b4d..562e6f3 100644
85661--- a/include/linux/unaligned/access_ok.h
85662+++ b/include/linux/unaligned/access_ok.h
85663@@ -4,34 +4,34 @@
85664 #include <linux/kernel.h>
85665 #include <asm/byteorder.h>
85666
85667-static inline u16 get_unaligned_le16(const void *p)
85668+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85669 {
85670- return le16_to_cpup((__le16 *)p);
85671+ return le16_to_cpup((const __le16 *)p);
85672 }
85673
85674-static inline u32 get_unaligned_le32(const void *p)
85675+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85676 {
85677- return le32_to_cpup((__le32 *)p);
85678+ return le32_to_cpup((const __le32 *)p);
85679 }
85680
85681-static inline u64 get_unaligned_le64(const void *p)
85682+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85683 {
85684- return le64_to_cpup((__le64 *)p);
85685+ return le64_to_cpup((const __le64 *)p);
85686 }
85687
85688-static inline u16 get_unaligned_be16(const void *p)
85689+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85690 {
85691- return be16_to_cpup((__be16 *)p);
85692+ return be16_to_cpup((const __be16 *)p);
85693 }
85694
85695-static inline u32 get_unaligned_be32(const void *p)
85696+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85697 {
85698- return be32_to_cpup((__be32 *)p);
85699+ return be32_to_cpup((const __be32 *)p);
85700 }
85701
85702-static inline u64 get_unaligned_be64(const void *p)
85703+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85704 {
85705- return be64_to_cpup((__be64 *)p);
85706+ return be64_to_cpup((const __be64 *)p);
85707 }
85708
85709 static inline void put_unaligned_le16(u16 val, void *p)
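
The get_unaligned_* changes above are annotations only: the const casts and __intentional_overflow(-1) markers tell the size_overflow plugin that any wrap-around in these byte-assembly helpers is deliberate, without changing what they compute. For reference, what get_unaligned_le16() computes, sketched for a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t get_unaligned_le16(const void *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));	/* safe for unaligned addresses */
	return v;			/* assumes a little-endian host */
}

int main(void)
{
	const uint8_t buf[] = { 0x34, 0x12 };

	printf("0x%04x\n", get_unaligned_le16(buf));	/* 0x1234 */
	return 0;
}
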
85710diff --git a/include/linux/usb.h b/include/linux/usb.h
85711index 058a769..c17a1c2c 100644
85712--- a/include/linux/usb.h
85713+++ b/include/linux/usb.h
85714@@ -566,7 +566,7 @@ struct usb_device {
85715 int maxchild;
85716
85717 u32 quirks;
85718- atomic_t urbnum;
85719+ atomic_unchecked_t urbnum;
85720
85721 unsigned long active_duration;
85722
85723@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85724
85725 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85726 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85727- void *data, __u16 size, int timeout);
85728+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85729 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85730 void *data, int len, int *actual_length, int timeout);
85731 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85732diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85733index 9fd9e48..e2c5f35 100644
85734--- a/include/linux/usb/renesas_usbhs.h
85735+++ b/include/linux/usb/renesas_usbhs.h
85736@@ -39,7 +39,7 @@ enum {
85737 */
85738 struct renesas_usbhs_driver_callback {
85739 int (*notify_hotplug)(struct platform_device *pdev);
85740-};
85741+} __no_const;
85742
85743 /*
85744 * callback functions for platform
85745diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85746index 8297e5b..0dfae27 100644
85747--- a/include/linux/user_namespace.h
85748+++ b/include/linux/user_namespace.h
85749@@ -39,7 +39,7 @@ struct user_namespace {
85750 struct key *persistent_keyring_register;
85751 struct rw_semaphore persistent_keyring_register_sem;
85752 #endif
85753-};
85754+} __randomize_layout;
85755
85756 extern struct user_namespace init_user_ns;
85757
85758diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85759index 5093f58..c103e58 100644
85760--- a/include/linux/utsname.h
85761+++ b/include/linux/utsname.h
85762@@ -25,7 +25,7 @@ struct uts_namespace {
85763 struct new_utsname name;
85764 struct user_namespace *user_ns;
85765 struct ns_common ns;
85766-};
85767+} __randomize_layout;
85768 extern struct uts_namespace init_uts_ns;
85769
85770 #ifdef CONFIG_UTS_NS
85771diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85772index 6f8fbcf..4efc177 100644
85773--- a/include/linux/vermagic.h
85774+++ b/include/linux/vermagic.h
85775@@ -25,9 +25,42 @@
85776 #define MODULE_ARCH_VERMAGIC ""
85777 #endif
85778
85779+#ifdef CONFIG_PAX_REFCOUNT
85780+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85781+#else
85782+#define MODULE_PAX_REFCOUNT ""
85783+#endif
85784+
85785+#ifdef CONSTIFY_PLUGIN
85786+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85787+#else
85788+#define MODULE_CONSTIFY_PLUGIN ""
85789+#endif
85790+
85791+#ifdef STACKLEAK_PLUGIN
85792+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
85793+#else
85794+#define MODULE_STACKLEAK_PLUGIN ""
85795+#endif
85796+
85797+#ifdef RANDSTRUCT_PLUGIN
85798+#include <generated/randomize_layout_hash.h>
85799+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
85800+#else
85801+#define MODULE_RANDSTRUCT_PLUGIN
85802+#endif
85803+
85804+#ifdef CONFIG_GRKERNSEC
85805+#define MODULE_GRSEC "GRSEC "
85806+#else
85807+#define MODULE_GRSEC ""
85808+#endif
85809+
85810 #define VERMAGIC_STRING \
85811 UTS_RELEASE " " \
85812 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
85813 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
85814- MODULE_ARCH_VERMAGIC
85815+ MODULE_ARCH_VERMAGIC \
85816+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
85817+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
85818
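
The VERMAGIC_STRING extension above is the enforcement point: module loading compares the module's vermagic string against the kernel's, so a module built without the matching REFCOUNT/CONSTIFY/GRSEC feature set is refused instead of corrupting hardened data structures. A schematic of the check (the vermagic strings shown are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *kernel = "3.19.3 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC ";
	const char *module = "3.19.3 SMP mod_unload ";

	if (strcmp(kernel, module) != 0)
		puts("module rejected: vermagic mismatch");
	return 0;
}
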
85819diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
85820index b483abd..af305ad 100644
85821--- a/include/linux/vga_switcheroo.h
85822+++ b/include/linux/vga_switcheroo.h
85823@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
85824
85825 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
85826
85827-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
85828+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
85829 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
85830-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
85831+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
85832 #else
85833
85834 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
85835@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
85836
85837 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
85838
85839-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85840+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85841 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
85842-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85843+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85844
85845 #endif
85846 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
85847diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
85848index b87696f..1d11de7 100644
85849--- a/include/linux/vmalloc.h
85850+++ b/include/linux/vmalloc.h
85851@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
85852 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
85853 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
85854 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
85855+
85856+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85857+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
85858+#endif
85859+
85860 /* bits [20..32] reserved for arch specific ioremap internals */
85861
85862 /*
85863@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
85864 unsigned long flags, pgprot_t prot);
85865 extern void vunmap(const void *addr);
85866
85867+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85868+extern void unmap_process_stacks(struct task_struct *task);
85869+#endif
85870+
85871 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
85872 unsigned long uaddr, void *kaddr,
85873 unsigned long size);
85874@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
85875
85876 /* for /dev/kmem */
85877 extern long vread(char *buf, char *addr, unsigned long count);
85878-extern long vwrite(char *buf, char *addr, unsigned long count);
85879+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
85880
85881 /*
85882 * Internals. Dont't use..
85883diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
85884index 82e7db7..f8ce3d0 100644
85885--- a/include/linux/vmstat.h
85886+++ b/include/linux/vmstat.h
85887@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
85888 /*
85889 * Zone based page accounting with per cpu differentials.
85890 */
85891-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85892+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85893
85894 static inline void zone_page_state_add(long x, struct zone *zone,
85895 enum zone_stat_item item)
85896 {
85897- atomic_long_add(x, &zone->vm_stat[item]);
85898- atomic_long_add(x, &vm_stat[item]);
85899+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
85900+ atomic_long_add_unchecked(x, &vm_stat[item]);
85901 }
85902
85903-static inline unsigned long global_page_state(enum zone_stat_item item)
85904+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
85905 {
85906- long x = atomic_long_read(&vm_stat[item]);
85907+ long x = atomic_long_read_unchecked(&vm_stat[item]);
85908 #ifdef CONFIG_SMP
85909 if (x < 0)
85910 x = 0;
85911@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
85912 return x;
85913 }
85914
85915-static inline unsigned long zone_page_state(struct zone *zone,
85916+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
85917 enum zone_stat_item item)
85918 {
85919- long x = atomic_long_read(&zone->vm_stat[item]);
85920+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
85921 #ifdef CONFIG_SMP
85922 if (x < 0)
85923 x = 0;
85924@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
85925 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
85926 enum zone_stat_item item)
85927 {
85928- long x = atomic_long_read(&zone->vm_stat[item]);
85929+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
85930
85931 #ifdef CONFIG_SMP
85932 int cpu;
85933@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
85934
85935 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
85936 {
85937- atomic_long_inc(&zone->vm_stat[item]);
85938- atomic_long_inc(&vm_stat[item]);
85939+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
85940+ atomic_long_inc_unchecked(&vm_stat[item]);
85941 }
85942
85943 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
85944 {
85945- atomic_long_dec(&zone->vm_stat[item]);
85946- atomic_long_dec(&vm_stat[item]);
85947+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
85948+ atomic_long_dec_unchecked(&vm_stat[item]);
85949 }
85950
85951 static inline void __inc_zone_page_state(struct page *page,
85952diff --git a/include/linux/xattr.h b/include/linux/xattr.h
85953index 91b0a68..0e9adf6 100644
85954--- a/include/linux/xattr.h
85955+++ b/include/linux/xattr.h
85956@@ -28,7 +28,7 @@ struct xattr_handler {
85957 size_t size, int handler_flags);
85958 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
85959 size_t size, int flags, int handler_flags);
85960-};
85961+} __do_const;
85962
85963 struct xattr {
85964 const char *name;
85965@@ -37,6 +37,9 @@ struct xattr {
85966 };
85967
85968 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
85969+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
85970+ssize_t pax_getxattr(struct dentry *, void *, size_t);
85971+#endif
85972 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
85973 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
85974 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
85975diff --git a/include/linux/zlib.h b/include/linux/zlib.h
85976index 92dbbd3..13ab0b3 100644
85977--- a/include/linux/zlib.h
85978+++ b/include/linux/zlib.h
85979@@ -31,6 +31,7 @@
85980 #define _ZLIB_H
85981
85982 #include <linux/zconf.h>
85983+#include <linux/compiler.h>
85984
85985 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
85986 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
85987@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
85988
85989 /* basic functions */
85990
85991-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
85992+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
85993 /*
85994 Returns the number of bytes that needs to be allocated for a per-
85995 stream workspace with the specified parameters. A pointer to this
85996diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
85997index eb76cfd..9fd0e7c 100644
85998--- a/include/media/v4l2-dev.h
85999+++ b/include/media/v4l2-dev.h
86000@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86001 int (*mmap) (struct file *, struct vm_area_struct *);
86002 int (*open) (struct file *);
86003 int (*release) (struct file *);
86004-};
86005+} __do_const;
86006
86007 /*
86008 * Newer version of video_device, handled by videodev2.c
86009diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86010index ffb69da..040393e 100644
86011--- a/include/media/v4l2-device.h
86012+++ b/include/media/v4l2-device.h
86013@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86014 this function returns 0. If the name ends with a digit (e.g. cx18),
86015 then the name will be set to cx18-0 since cx180 looks really odd. */
86016 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86017- atomic_t *instance);
86018+ atomic_unchecked_t *instance);
86019
86020 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86021 Since the parent disappears this ensures that v4l2_dev doesn't have an
86022diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86023index 2a25dec..bf6dd8a 100644
86024--- a/include/net/9p/transport.h
86025+++ b/include/net/9p/transport.h
86026@@ -62,7 +62,7 @@ struct p9_trans_module {
86027 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86028 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86029 char *, char *, int , int, int, int);
86030-};
86031+} __do_const;
86032
86033 void v9fs_register_trans(struct p9_trans_module *m);
86034 void v9fs_unregister_trans(struct p9_trans_module *m);
86035diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86036index a175ba4..196eb8242 100644
86037--- a/include/net/af_unix.h
86038+++ b/include/net/af_unix.h
86039@@ -36,7 +36,7 @@ struct unix_skb_parms {
86040 u32 secid; /* Security ID */
86041 #endif
86042 u32 consumed;
86043-};
86044+} __randomize_layout;
86045
86046 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86047 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86048diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86049index d1bb342..e12f7d2 100644
86050--- a/include/net/bluetooth/l2cap.h
86051+++ b/include/net/bluetooth/l2cap.h
86052@@ -608,7 +608,7 @@ struct l2cap_ops {
86053 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86054 unsigned long hdr_len,
86055 unsigned long len, int nb);
86056-};
86057+} __do_const;
86058
86059 struct l2cap_conn {
86060 struct hci_conn *hcon;
86061diff --git a/include/net/bonding.h b/include/net/bonding.h
86062index 983a94b..7aa9b16 100644
86063--- a/include/net/bonding.h
86064+++ b/include/net/bonding.h
86065@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
86066
86067 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86068 {
86069- atomic_long_inc(&dev->tx_dropped);
86070+ atomic_long_inc_unchecked(&dev->tx_dropped);
86071 dev_kfree_skb_any(skb);
86072 }
86073
86074diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86075index f2ae33d..c457cf0 100644
86076--- a/include/net/caif/cfctrl.h
86077+++ b/include/net/caif/cfctrl.h
86078@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86079 void (*radioset_rsp)(void);
86080 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86081 struct cflayer *client_layer);
86082-};
86083+} __no_const;
86084
86085 /* Link Setup Parameters for CAIF-Links. */
86086 struct cfctrl_link_param {
86087@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86088 struct cfctrl {
86089 struct cfsrvl serv;
86090 struct cfctrl_rsp res;
86091- atomic_t req_seq_no;
86092- atomic_t rsp_seq_no;
86093+ atomic_unchecked_t req_seq_no;
86094+ atomic_unchecked_t rsp_seq_no;
86095 struct list_head list;
86096 /* Protects from simultaneous access to first_req list */
86097 spinlock_t info_list_lock;
86098diff --git a/include/net/flow.h b/include/net/flow.h
86099index 8109a15..504466d 100644
86100--- a/include/net/flow.h
86101+++ b/include/net/flow.h
86102@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86103
86104 void flow_cache_flush(struct net *net);
86105 void flow_cache_flush_deferred(struct net *net);
86106-extern atomic_t flow_cache_genid;
86107+extern atomic_unchecked_t flow_cache_genid;
86108
86109 #endif
86110diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86111index 6c92415..3a352d8 100644
86112--- a/include/net/genetlink.h
86113+++ b/include/net/genetlink.h
86114@@ -130,7 +130,7 @@ struct genl_ops {
86115 u8 cmd;
86116 u8 internal_flags;
86117 u8 flags;
86118-};
86119+} __do_const;
86120
86121 int __genl_register_family(struct genl_family *family);
86122
86123diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86124index 734d9b5..48a9a4b 100644
86125--- a/include/net/gro_cells.h
86126+++ b/include/net/gro_cells.h
86127@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86128 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86129
86130 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86131- atomic_long_inc(&dev->rx_dropped);
86132+ atomic_long_inc_unchecked(&dev->rx_dropped);
86133 kfree_skb(skb);
86134 return;
86135 }
86136diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86137index 848e85c..051c7de 100644
86138--- a/include/net/inet_connection_sock.h
86139+++ b/include/net/inet_connection_sock.h
86140@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86141 int (*bind_conflict)(const struct sock *sk,
86142 const struct inet_bind_bucket *tb, bool relax);
86143 void (*mtu_reduced)(struct sock *sk);
86144-};
86145+} __do_const;
86146
86147 /** inet_connection_sock - INET connection oriented sock
86148 *
86149diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86150index 80479ab..0c3f647 100644
86151--- a/include/net/inetpeer.h
86152+++ b/include/net/inetpeer.h
86153@@ -47,7 +47,7 @@ struct inet_peer {
86154 */
86155 union {
86156 struct {
86157- atomic_t rid; /* Frag reception counter */
86158+ atomic_unchecked_t rid; /* Frag reception counter */
86159 };
86160 struct rcu_head rcu;
86161 struct inet_peer *gc_next;
86162diff --git a/include/net/ip.h b/include/net/ip.h
86163index 09cf5ae..ab62fcf 100644
86164--- a/include/net/ip.h
86165+++ b/include/net/ip.h
86166@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86167 }
86168 }
86169
86170-u32 ip_idents_reserve(u32 hash, int segs);
86171+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86172 void __ip_select_ident(struct iphdr *iph, int segs);
86173
86174 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86175diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86176index 09a819e..3ab9e14 100644
86177--- a/include/net/ip_fib.h
86178+++ b/include/net/ip_fib.h
86179@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86180
86181 #define FIB_RES_SADDR(net, res) \
86182 ((FIB_RES_NH(res).nh_saddr_genid == \
86183- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86184+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86185 FIB_RES_NH(res).nh_saddr : \
86186 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86187 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86188diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86189index 615b20b..fd4cbd8 100644
86190--- a/include/net/ip_vs.h
86191+++ b/include/net/ip_vs.h
86192@@ -534,7 +534,7 @@ struct ip_vs_conn {
86193 struct ip_vs_conn *control; /* Master control connection */
86194 atomic_t n_control; /* Number of controlled ones */
86195 struct ip_vs_dest *dest; /* real server */
86196- atomic_t in_pkts; /* incoming packet counter */
86197+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86198
86199 /* Packet transmitter for different forwarding methods. If it
86200 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86201@@ -682,7 +682,7 @@ struct ip_vs_dest {
86202 __be16 port; /* port number of the server */
86203 union nf_inet_addr addr; /* IP address of the server */
86204 volatile unsigned int flags; /* dest status flags */
86205- atomic_t conn_flags; /* flags to copy to conn */
86206+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86207 atomic_t weight; /* server weight */
86208
86209 atomic_t refcnt; /* reference counter */
86210@@ -928,11 +928,11 @@ struct netns_ipvs {
86211 /* ip_vs_lblc */
86212 int sysctl_lblc_expiration;
86213 struct ctl_table_header *lblc_ctl_header;
86214- struct ctl_table *lblc_ctl_table;
86215+ ctl_table_no_const *lblc_ctl_table;
86216 /* ip_vs_lblcr */
86217 int sysctl_lblcr_expiration;
86218 struct ctl_table_header *lblcr_ctl_header;
86219- struct ctl_table *lblcr_ctl_table;
86220+ ctl_table_no_const *lblcr_ctl_table;
86221 /* ip_vs_est */
86222 struct list_head est_list; /* estimator list */
86223 spinlock_t est_lock;
86224diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86225index 8d4f588..2e37ad2 100644
86226--- a/include/net/irda/ircomm_tty.h
86227+++ b/include/net/irda/ircomm_tty.h
86228@@ -33,6 +33,7 @@
86229 #include <linux/termios.h>
86230 #include <linux/timer.h>
86231 #include <linux/tty.h> /* struct tty_struct */
86232+#include <asm/local.h>
86233
86234 #include <net/irda/irias_object.h>
86235 #include <net/irda/ircomm_core.h>
86236diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86237index 714cc9a..ea05f3e 100644
86238--- a/include/net/iucv/af_iucv.h
86239+++ b/include/net/iucv/af_iucv.h
86240@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86241 struct iucv_sock_list {
86242 struct hlist_head head;
86243 rwlock_t lock;
86244- atomic_t autobind_name;
86245+ atomic_unchecked_t autobind_name;
86246 };
86247
86248 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86249diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86250index f3be818..bf46196 100644
86251--- a/include/net/llc_c_ac.h
86252+++ b/include/net/llc_c_ac.h
86253@@ -87,7 +87,7 @@
86254 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86255 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86256
86257-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86258+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86259
86260 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86261 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86262diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86263index 3948cf1..83b28c4 100644
86264--- a/include/net/llc_c_ev.h
86265+++ b/include/net/llc_c_ev.h
86266@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86267 return (struct llc_conn_state_ev *)skb->cb;
86268 }
86269
86270-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86271-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86272+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86273+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86274
86275 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86276 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86277diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86278index 48f3f89..0e92c50 100644
86279--- a/include/net/llc_c_st.h
86280+++ b/include/net/llc_c_st.h
86281@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86282 u8 next_state;
86283 const llc_conn_ev_qfyr_t *ev_qualifiers;
86284 const llc_conn_action_t *ev_actions;
86285-};
86286+} __do_const;
86287
86288 struct llc_conn_state {
86289 u8 current_state;
86290diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86291index a61b98c..aade1eb 100644
86292--- a/include/net/llc_s_ac.h
86293+++ b/include/net/llc_s_ac.h
86294@@ -23,7 +23,7 @@
86295 #define SAP_ACT_TEST_IND 9
86296
86297 /* All action functions must look like this */
86298-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86299+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86300
86301 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86302 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86303diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86304index c4359e2..76dbc4a 100644
86305--- a/include/net/llc_s_st.h
86306+++ b/include/net/llc_s_st.h
86307@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86308 llc_sap_ev_t ev;
86309 u8 next_state;
86310 const llc_sap_action_t *ev_actions;
86311-};
86312+} __do_const;
86313
86314 struct llc_sap_state {
86315 u8 curr_state;
86316diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86317index 29c7be8..746bd73 100644
86318--- a/include/net/mac80211.h
86319+++ b/include/net/mac80211.h
86320@@ -4869,7 +4869,7 @@ struct rate_control_ops {
86321 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86322
86323 u32 (*get_expected_throughput)(void *priv_sta);
86324-};
86325+} __do_const;
86326
86327 static inline int rate_supported(struct ieee80211_sta *sta,
86328 enum ieee80211_band band,
86329diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86330index 76f7084..8f36e39 100644
86331--- a/include/net/neighbour.h
86332+++ b/include/net/neighbour.h
86333@@ -163,7 +163,7 @@ struct neigh_ops {
86334 void (*error_report)(struct neighbour *, struct sk_buff *);
86335 int (*output)(struct neighbour *, struct sk_buff *);
86336 int (*connected_output)(struct neighbour *, struct sk_buff *);
86337-};
86338+} __do_const;
86339
86340 struct pneigh_entry {
86341 struct pneigh_entry *next;
86342@@ -217,7 +217,7 @@ struct neigh_table {
86343 struct neigh_statistics __percpu *stats;
86344 struct neigh_hash_table __rcu *nht;
86345 struct pneigh_entry **phash_buckets;
86346-};
86347+} __randomize_layout;
86348
86349 enum {
86350 NEIGH_ARP_TABLE = 0,
86351diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86352index 2e8756b8..0bd0083 100644
86353--- a/include/net/net_namespace.h
86354+++ b/include/net/net_namespace.h
86355@@ -130,8 +130,8 @@ struct net {
86356 struct netns_ipvs *ipvs;
86357 #endif
86358 struct sock *diag_nlsk;
86359- atomic_t fnhe_genid;
86360-};
86361+ atomic_unchecked_t fnhe_genid;
86362+} __randomize_layout;
86363
86364 #include <linux/seq_file_net.h>
86365
86366@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86367 #define __net_init __init
86368 #define __net_exit __exit_refok
86369 #define __net_initdata __initdata
86370+#ifdef CONSTIFY_PLUGIN
86371 #define __net_initconst __initconst
86372+#else
86373+#define __net_initconst __initdata
86374+#endif
86375 #endif
86376
86377 struct pernet_operations {
86378@@ -297,7 +301,7 @@ struct pernet_operations {
86379 void (*exit_batch)(struct list_head *net_exit_list);
86380 int *id;
86381 size_t size;
86382-};
86383+} __do_const;
86384
86385 /*
86386 * Use these carefully. If you implement a network device and it
86387@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86388
86389 static inline int rt_genid_ipv4(struct net *net)
86390 {
86391- return atomic_read(&net->ipv4.rt_genid);
86392+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86393 }
86394
86395 static inline void rt_genid_bump_ipv4(struct net *net)
86396 {
86397- atomic_inc(&net->ipv4.rt_genid);
86398+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86399 }
86400
86401 extern void (*__fib6_flush_trees)(struct net *net);
86402@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
86403
86404 static inline int fnhe_genid(struct net *net)
86405 {
86406- return atomic_read(&net->fnhe_genid);
86407+ return atomic_read_unchecked(&net->fnhe_genid);
86408 }
86409
86410 static inline void fnhe_genid_bump(struct net *net)
86411 {
86412- atomic_inc(&net->fnhe_genid);
86413+ atomic_inc_unchecked(&net->fnhe_genid);
86414 }
86415
86416 #endif /* __NET_NET_NAMESPACE_H */
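
rt_genid and fnhe_genid above follow the generation-counter pattern: a bump only has to make cached entries compare unequal, the absolute value carries no meaning, so wrap-around is harmless and the counters can safely become atomic_unchecked_t. The pattern in miniature:

#include <stdio.h>

static unsigned int rt_genid;

struct cached_route {
	unsigned int genid;	/* snapshot taken when the entry was built */
};

static int cache_valid(const struct cached_route *rt)
{
	return rt->genid == rt_genid;	/* stale after any bump */
}

int main(void)
{
	struct cached_route rt = { .genid = rt_genid };

	rt_genid++;	/* rt_genid_bump(): invalidates every cached entry */
	printf("valid=%d\n", cache_valid(&rt));	/* prints 0 */
	return 0;
}
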
86417diff --git a/include/net/netlink.h b/include/net/netlink.h
86418index 6415835..ab96d87 100644
86419--- a/include/net/netlink.h
86420+++ b/include/net/netlink.h
86421@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86422 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86423 {
86424 if (mark)
86425- skb_trim(skb, (unsigned char *) mark - skb->data);
86426+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86427 }
86428
86429 /**
86430diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86431index 29d6a94..235d3d84 100644
86432--- a/include/net/netns/conntrack.h
86433+++ b/include/net/netns/conntrack.h
86434@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86435 struct nf_proto_net {
86436 #ifdef CONFIG_SYSCTL
86437 struct ctl_table_header *ctl_table_header;
86438- struct ctl_table *ctl_table;
86439+ ctl_table_no_const *ctl_table;
86440 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86441 struct ctl_table_header *ctl_compat_header;
86442- struct ctl_table *ctl_compat_table;
86443+ ctl_table_no_const *ctl_compat_table;
86444 #endif
86445 #endif
86446 unsigned int users;
86447@@ -60,7 +60,7 @@ struct nf_ip_net {
86448 struct nf_icmp_net icmpv6;
86449 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86450 struct ctl_table_header *ctl_table_header;
86451- struct ctl_table *ctl_table;
86452+ ctl_table_no_const *ctl_table;
86453 #endif
86454 };
86455
86456diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86457index 0ffef1a..2ce1ceb 100644
86458--- a/include/net/netns/ipv4.h
86459+++ b/include/net/netns/ipv4.h
86460@@ -84,7 +84,7 @@ struct netns_ipv4 {
86461
86462 struct ping_group_range ping_group_range;
86463
86464- atomic_t dev_addr_genid;
86465+ atomic_unchecked_t dev_addr_genid;
86466
86467 #ifdef CONFIG_SYSCTL
86468 unsigned long *sysctl_local_reserved_ports;
86469@@ -98,6 +98,6 @@ struct netns_ipv4 {
86470 struct fib_rules_ops *mr_rules_ops;
86471 #endif
86472 #endif
86473- atomic_t rt_genid;
86474+ atomic_unchecked_t rt_genid;
86475 };
86476 #endif
86477diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86478index 69ae41f..4f94868 100644
86479--- a/include/net/netns/ipv6.h
86480+++ b/include/net/netns/ipv6.h
86481@@ -75,8 +75,8 @@ struct netns_ipv6 {
86482 struct fib_rules_ops *mr6_rules_ops;
86483 #endif
86484 #endif
86485- atomic_t dev_addr_genid;
86486- atomic_t fib6_sernum;
86487+ atomic_unchecked_t dev_addr_genid;
86488+ atomic_unchecked_t fib6_sernum;
86489 };
86490
86491 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86492diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86493index 730d82a..045f2c4 100644
86494--- a/include/net/netns/xfrm.h
86495+++ b/include/net/netns/xfrm.h
86496@@ -78,7 +78,7 @@ struct netns_xfrm {
86497
86498 /* flow cache part */
86499 struct flow_cache flow_cache_global;
86500- atomic_t flow_cache_genid;
86501+ atomic_unchecked_t flow_cache_genid;
86502 struct list_head flow_cache_gc_list;
86503 spinlock_t flow_cache_gc_lock;
86504 struct work_struct flow_cache_gc_work;
86505diff --git a/include/net/ping.h b/include/net/ping.h
86506index f074060..830fba0 100644
86507--- a/include/net/ping.h
86508+++ b/include/net/ping.h
86509@@ -54,7 +54,7 @@ struct ping_iter_state {
86510
86511 extern struct proto ping_prot;
86512 #if IS_ENABLED(CONFIG_IPV6)
86513-extern struct pingv6_ops pingv6_ops;
86514+extern struct pingv6_ops *pingv6_ops;
86515 #endif
86516
86517 struct pingfakehdr {
86518diff --git a/include/net/protocol.h b/include/net/protocol.h
86519index d6fcc1f..ca277058 100644
86520--- a/include/net/protocol.h
86521+++ b/include/net/protocol.h
86522@@ -49,7 +49,7 @@ struct net_protocol {
86523 * socket lookup?
86524 */
86525 icmp_strict_tag_validation:1;
86526-};
86527+} __do_const;
86528
86529 #if IS_ENABLED(CONFIG_IPV6)
86530 struct inet6_protocol {
86531@@ -62,7 +62,7 @@ struct inet6_protocol {
86532 u8 type, u8 code, int offset,
86533 __be32 info);
86534 unsigned int flags; /* INET6_PROTO_xxx */
86535-};
86536+} __do_const;
86537
86538 #define INET6_PROTO_NOPOLICY 0x1
86539 #define INET6_PROTO_FINAL 0x2
86540diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86541index e21b9f9..0191ef0 100644
86542--- a/include/net/rtnetlink.h
86543+++ b/include/net/rtnetlink.h
86544@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86545 int (*fill_slave_info)(struct sk_buff *skb,
86546 const struct net_device *dev,
86547 const struct net_device *slave_dev);
86548-};
86549+} __do_const;
86550
86551 int __rtnl_link_register(struct rtnl_link_ops *ops);
86552 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86553diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86554index 4a5b9a3..ca27d73 100644
86555--- a/include/net/sctp/checksum.h
86556+++ b/include/net/sctp/checksum.h
86557@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86558 unsigned int offset)
86559 {
86560 struct sctphdr *sh = sctp_hdr(skb);
86561- __le32 ret, old = sh->checksum;
86562- const struct skb_checksum_ops ops = {
86563+ __le32 ret, old = sh->checksum;
86564+ static const struct skb_checksum_ops ops = {
86565 .update = sctp_csum_update,
86566 .combine = sctp_csum_combine,
86567 };
86568diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86569index 487ef34..d457f98 100644
86570--- a/include/net/sctp/sm.h
86571+++ b/include/net/sctp/sm.h
86572@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86573 typedef struct {
86574 sctp_state_fn_t *fn;
86575 const char *name;
86576-} sctp_sm_table_entry_t;
86577+} __do_const sctp_sm_table_entry_t;
86578
86579 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86580 * currently in use.
86581@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86582 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86583
86584 /* Extern declarations for major data structures. */
86585-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86586+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86587
86588
86589 /* Get the size of a DATA chunk payload. */
86590diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86591index 2bb2fcf..d17c291 100644
86592--- a/include/net/sctp/structs.h
86593+++ b/include/net/sctp/structs.h
86594@@ -509,7 +509,7 @@ struct sctp_pf {
86595 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86596 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86597 struct sctp_af *af;
86598-};
86599+} __do_const;
86600
86601
86602 /* Structure to track chunk fragments that have been acked, but peer
86603diff --git a/include/net/sock.h b/include/net/sock.h
86604index 2210fec..2249ad0 100644
86605--- a/include/net/sock.h
86606+++ b/include/net/sock.h
86607@@ -362,7 +362,7 @@ struct sock {
86608 unsigned int sk_napi_id;
86609 unsigned int sk_ll_usec;
86610 #endif
86611- atomic_t sk_drops;
86612+ atomic_unchecked_t sk_drops;
86613 int sk_rcvbuf;
86614
86615 struct sk_filter __rcu *sk_filter;
86616@@ -1061,7 +1061,7 @@ struct proto {
86617 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86618 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86619 #endif
86620-};
86621+} __randomize_layout;
86622
86623 /*
86624 * Bits in struct cg_proto.flags
86625@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
86626 page_counter_uncharge(&prot->memory_allocated, amt);
86627 }
86628
86629-static inline long
86630+static inline long __intentional_overflow(-1)
86631 sk_memory_allocated(const struct sock *sk)
86632 {
86633 struct proto *prot = sk->sk_prot;
86634@@ -1385,7 +1385,7 @@ struct sock_iocb {
86635 struct scm_cookie *scm;
86636 struct msghdr *msg, async_msg;
86637 struct kiocb *kiocb;
86638-};
86639+} __randomize_layout;
86640
86641 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86642 {
86643@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86644 }
86645
86646 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86647- char __user *from, char *to,
86648+ char __user *from, unsigned char *to,
86649 int copy, int offset)
86650 {
86651 if (skb->ip_summed == CHECKSUM_NONE) {
86652@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86653 }
86654 }
86655
86656-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86657+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86658
86659 /**
86660 * sk_page_frag - return an appropriate page_frag
86661diff --git a/include/net/tcp.h b/include/net/tcp.h
86662index 9d9111e..349c847 100644
86663--- a/include/net/tcp.h
86664+++ b/include/net/tcp.h
86665@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
86666 void tcp_xmit_retransmit_queue(struct sock *);
86667 void tcp_simple_retransmit(struct sock *);
86668 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86669-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86670+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86671
86672 void tcp_send_probe0(struct sock *);
86673 void tcp_send_partial(struct sock *);
86674@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
86675 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
86676 */
86677 struct tcp_skb_cb {
86678- __u32 seq; /* Starting sequence number */
86679- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86680+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86681+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86682 union {
86683 /* Note : tcp_tw_isn is used in input path only
86684 * (isn chosen by tcp_timewait_state_process())
86685@@ -715,7 +715,7 @@ struct tcp_skb_cb {
86686
86687 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86688 /* 1 byte hole */
86689- __u32 ack_seq; /* Sequence number ACK'd */
86690+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86691 union {
86692 struct inet_skb_parm h4;
86693 #if IS_ENABLED(CONFIG_IPV6)
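
Marking seq, end_seq and ack_seq with __intentional_overflow(0) above records that TCP sequence arithmetic is defined modulo 2^32, so the size_overflow plugin must not instrument it. The canonical wrap-safe comparison these fields rely on:

#include <stdint.h>
#include <stdio.h>

static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;	/* wrap-safe ordering */
}

int main(void)
{
	uint32_t a = 0xfffffff0u, b = 0x10;	/* b follows a across the wrap */

	printf("before(a,b)=%d\n", before(a, b));	/* prints 1 */
	return 0;
}
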
86694diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86695index dc4865e..152ee4c 100644
86696--- a/include/net/xfrm.h
86697+++ b/include/net/xfrm.h
86698@@ -285,7 +285,6 @@ struct xfrm_dst;
86699 struct xfrm_policy_afinfo {
86700 unsigned short family;
86701 struct dst_ops *dst_ops;
86702- void (*garbage_collect)(struct net *net);
86703 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86704 const xfrm_address_t *saddr,
86705 const xfrm_address_t *daddr);
86706@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86707 struct net_device *dev,
86708 const struct flowi *fl);
86709 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86710-};
86711+} __do_const;
86712
86713 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86714 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86715@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86716 int (*transport_finish)(struct sk_buff *skb,
86717 int async);
86718 void (*local_error)(struct sk_buff *skb, u32 mtu);
86719-};
86720+} __do_const;
86721
86722 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86723 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86724@@ -437,7 +436,7 @@ struct xfrm_mode {
86725 struct module *owner;
86726 unsigned int encap;
86727 int flags;
86728-};
86729+} __do_const;
86730
86731 /* Flags for xfrm_mode. */
86732 enum {
86733@@ -534,7 +533,7 @@ struct xfrm_policy {
86734 struct timer_list timer;
86735
86736 struct flow_cache_object flo;
86737- atomic_t genid;
86738+ atomic_unchecked_t genid;
86739 u32 priority;
86740 u32 index;
86741 struct xfrm_mark mark;
86742@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86743 }
86744
86745 void xfrm_garbage_collect(struct net *net);
86746+void xfrm_garbage_collect_deferred(struct net *net);
86747
86748 #else
86749
86750@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86751 static inline void xfrm_garbage_collect(struct net *net)
86752 {
86753 }
86754+static inline void xfrm_garbage_collect_deferred(struct net *net)
86755+{
86756+}
86757 #endif
86758
86759 static __inline__
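
[editor's note] __do_const and __no_const drive the PaX constify GCC plugin, which moves function-pointer tables into read-only memory: __do_const forces a structure const even where the source doesn't say so (the xfrm afinfo and mode tables above), while __no_const exempts a type that genuinely must be written at runtime. A sketch of how the pair would be wired up, assuming the usual plugin-attribute pattern:

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))
#define __no_const __attribute__((no_const))
#else
#define __do_const
#define __no_const
#endif
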
86760diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86761index 1017e0b..227aa4d 100644
86762--- a/include/rdma/iw_cm.h
86763+++ b/include/rdma/iw_cm.h
86764@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86765 int backlog);
86766
86767 int (*destroy_listen)(struct iw_cm_id *cm_id);
86768-};
86769+} __no_const;
86770
86771 /**
86772 * iw_create_cm_id - Create an IW CM identifier.
86773diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86774index 93d14da..734b3d8 100644
86775--- a/include/scsi/libfc.h
86776+++ b/include/scsi/libfc.h
86777@@ -771,6 +771,7 @@ struct libfc_function_template {
86778 */
86779 void (*disc_stop_final) (struct fc_lport *);
86780 };
86781+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86782
86783 /**
86784 * struct fc_disc - Discovery context
86785@@ -875,7 +876,7 @@ struct fc_lport {
86786 struct fc_vport *vport;
86787
86788 /* Operational Information */
86789- struct libfc_function_template tt;
86790+ libfc_function_template_no_const tt;
86791 u8 link_up;
86792 u8 qfull;
86793 enum fc_lport_state state;
86794diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
86795index 3a4edd1..feb2e3e 100644
86796--- a/include/scsi/scsi_device.h
86797+++ b/include/scsi/scsi_device.h
86798@@ -185,9 +185,9 @@ struct scsi_device {
86799 unsigned int max_device_blocked; /* what device_blocked counts down from */
86800 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
86801
86802- atomic_t iorequest_cnt;
86803- atomic_t iodone_cnt;
86804- atomic_t ioerr_cnt;
86805+ atomic_unchecked_t iorequest_cnt;
86806+ atomic_unchecked_t iodone_cnt;
86807+ atomic_unchecked_t ioerr_cnt;
86808
86809 struct device sdev_gendev,
86810 sdev_dev;
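
[editor's note] atomic_unchecked_t is the opt-out type for the PaX REFCOUNT defense: plain atomic_t operations gain overflow detection to stop refcount-overflow exploits, so counters that may legitimately wrap — pure statistics like iorequest_cnt/iodone_cnt/ioerr_cnt here, or the audit_lost counter later in this patch — are converted to the unchecked variant. A minimal x86-flavoured sketch of the idea, assuming REFCOUNT is enabled (the real helpers live in each arch's atomic.h):

typedef struct {
	int counter;
} atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* no overflow trap: wrapping is acceptable for this counter */
	asm volatile("lock; incl %0" : "+m" (v->counter));
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile const int *)&v->counter);
}
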
86811diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
86812index 007a0bc..7188db8 100644
86813--- a/include/scsi/scsi_transport_fc.h
86814+++ b/include/scsi/scsi_transport_fc.h
86815@@ -756,7 +756,8 @@ struct fc_function_template {
86816 unsigned long show_host_system_hostname:1;
86817
86818 unsigned long disable_target_scan:1;
86819-};
86820+} __do_const;
86821+typedef struct fc_function_template __no_const fc_function_template_no_const;
86822
86823
86824 /**
86825diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
86826index 396e8f7..b037e89 100644
86827--- a/include/sound/compress_driver.h
86828+++ b/include/sound/compress_driver.h
86829@@ -129,7 +129,7 @@ struct snd_compr_ops {
86830 struct snd_compr_caps *caps);
86831 int (*get_codec_caps) (struct snd_compr_stream *stream,
86832 struct snd_compr_codec_caps *codec);
86833-};
86834+} __no_const;
86835
86836 /**
86837 * struct snd_compr: Compressed device
86838diff --git a/include/sound/soc.h b/include/sound/soc.h
86839index ac8b333..59c3692 100644
86840--- a/include/sound/soc.h
86841+++ b/include/sound/soc.h
86842@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
86843 enum snd_soc_dapm_type, int);
86844
86845 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
86846-};
86847+} __do_const;
86848
86849 /* SoC platform interface */
86850 struct snd_soc_platform_driver {
86851@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
86852 const struct snd_compr_ops *compr_ops;
86853
86854 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
86855-};
86856+} __do_const;
86857
86858 struct snd_soc_dai_link_component {
86859 const char *name;
86860diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
86861index 672150b..9d4bec4 100644
86862--- a/include/target/target_core_base.h
86863+++ b/include/target/target_core_base.h
86864@@ -767,7 +767,7 @@ struct se_device {
86865 atomic_long_t write_bytes;
86866 /* Active commands on this virtual SE device */
86867 atomic_t simple_cmds;
86868- atomic_t dev_ordered_id;
86869+ atomic_unchecked_t dev_ordered_id;
86870 atomic_t dev_ordered_sync;
86871 atomic_t dev_qf_count;
86872 int export_count;
86873diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
86874new file mode 100644
86875index 0000000..fb634b7
86876--- /dev/null
86877+++ b/include/trace/events/fs.h
86878@@ -0,0 +1,53 @@
86879+#undef TRACE_SYSTEM
86880+#define TRACE_SYSTEM fs
86881+
86882+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
86883+#define _TRACE_FS_H
86884+
86885+#include <linux/fs.h>
86886+#include <linux/tracepoint.h>
86887+
86888+TRACE_EVENT(do_sys_open,
86889+
86890+ TP_PROTO(const char *filename, int flags, int mode),
86891+
86892+ TP_ARGS(filename, flags, mode),
86893+
86894+ TP_STRUCT__entry(
86895+ __string( filename, filename )
86896+ __field( int, flags )
86897+ __field( int, mode )
86898+ ),
86899+
86900+ TP_fast_assign(
86901+ __assign_str(filename, filename);
86902+ __entry->flags = flags;
86903+ __entry->mode = mode;
86904+ ),
86905+
86906+ TP_printk("\"%s\" %x %o",
86907+ __get_str(filename), __entry->flags, __entry->mode)
86908+);
86909+
86910+TRACE_EVENT(open_exec,
86911+
86912+ TP_PROTO(const char *filename),
86913+
86914+ TP_ARGS(filename),
86915+
86916+ TP_STRUCT__entry(
86917+ __string( filename, filename )
86918+ ),
86919+
86920+ TP_fast_assign(
86921+ __assign_str(filename, filename);
86922+ ),
86923+
86924+ TP_printk("\"%s\"",
86925+ __get_str(filename))
86926+);
86927+
86928+#endif /* _TRACE_FS_H */
86929+
86930+/* This part must be outside protection */
86931+#include <trace/define_trace.h>
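
[editor's note] TRACE_EVENT() only declares the events; they fire where the matching trace_* calls are placed. For this new header those call sites are in the VFS (fs/open.c and fs/exec.c in other hunks of this patch — the exact placement here is an assumption), roughly:

/* in do_sys_open(), once the filename string is available: */
trace_do_sys_open(tmp->name, flags, mode);

/* in open_exec(), before the file is opened: */
trace_open_exec(name);

Consumers then read them like any other tracepoint, e.g. via /sys/kernel/debug/tracing/events/fs/do_sys_open/enable.
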
86932diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
86933index 3608beb..df39d8a 100644
86934--- a/include/trace/events/irq.h
86935+++ b/include/trace/events/irq.h
86936@@ -36,7 +36,7 @@ struct softirq_action;
86937 */
86938 TRACE_EVENT(irq_handler_entry,
86939
86940- TP_PROTO(int irq, struct irqaction *action),
86941+ TP_PROTO(int irq, const struct irqaction *action),
86942
86943 TP_ARGS(irq, action),
86944
86945@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
86946 */
86947 TRACE_EVENT(irq_handler_exit,
86948
86949- TP_PROTO(int irq, struct irqaction *action, int ret),
86950+ TP_PROTO(int irq, const struct irqaction *action, int ret),
86951
86952 TP_ARGS(irq, action, ret),
86953
86954diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
86955index 7caf44c..23c6f27 100644
86956--- a/include/uapi/linux/a.out.h
86957+++ b/include/uapi/linux/a.out.h
86958@@ -39,6 +39,14 @@ enum machine_type {
86959 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
86960 };
86961
86962+/* Constants for the N_FLAGS field */
86963+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
86964+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
86965+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
86966+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
86967+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
86968+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
86969+
86970 #if !defined (N_MAGIC)
86971 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
86972 #endif
86973diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
86974index 22b6ad3..aeba37e 100644
86975--- a/include/uapi/linux/bcache.h
86976+++ b/include/uapi/linux/bcache.h
86977@@ -5,6 +5,7 @@
86978 * Bcache on disk data structures
86979 */
86980
86981+#include <linux/compiler.h>
86982 #include <asm/types.h>
86983
86984 #define BITMASK(name, type, field, offset, size) \
86985@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
86986 /* Btree keys - all units are in sectors */
86987
86988 struct bkey {
86989- __u64 high;
86990- __u64 low;
86991+ __u64 high __intentional_overflow(-1);
86992+ __u64 low __intentional_overflow(-1);
86993 __u64 ptr[];
86994 };
86995
86996diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
86997index d876736..ccce5c0 100644
86998--- a/include/uapi/linux/byteorder/little_endian.h
86999+++ b/include/uapi/linux/byteorder/little_endian.h
87000@@ -42,51 +42,51 @@
87001
87002 static inline __le64 __cpu_to_le64p(const __u64 *p)
87003 {
87004- return (__force __le64)*p;
87005+ return (__force const __le64)*p;
87006 }
87007-static inline __u64 __le64_to_cpup(const __le64 *p)
87008+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87009 {
87010- return (__force __u64)*p;
87011+ return (__force const __u64)*p;
87012 }
87013 static inline __le32 __cpu_to_le32p(const __u32 *p)
87014 {
87015- return (__force __le32)*p;
87016+ return (__force const __le32)*p;
87017 }
87018 static inline __u32 __le32_to_cpup(const __le32 *p)
87019 {
87020- return (__force __u32)*p;
87021+ return (__force const __u32)*p;
87022 }
87023 static inline __le16 __cpu_to_le16p(const __u16 *p)
87024 {
87025- return (__force __le16)*p;
87026+ return (__force const __le16)*p;
87027 }
87028 static inline __u16 __le16_to_cpup(const __le16 *p)
87029 {
87030- return (__force __u16)*p;
87031+ return (__force const __u16)*p;
87032 }
87033 static inline __be64 __cpu_to_be64p(const __u64 *p)
87034 {
87035- return (__force __be64)__swab64p(p);
87036+ return (__force const __be64)__swab64p(p);
87037 }
87038 static inline __u64 __be64_to_cpup(const __be64 *p)
87039 {
87040- return __swab64p((__u64 *)p);
87041+ return __swab64p((const __u64 *)p);
87042 }
87043 static inline __be32 __cpu_to_be32p(const __u32 *p)
87044 {
87045- return (__force __be32)__swab32p(p);
87046+ return (__force const __be32)__swab32p(p);
87047 }
87048-static inline __u32 __be32_to_cpup(const __be32 *p)
87049+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87050 {
87051- return __swab32p((__u32 *)p);
87052+ return __swab32p((const __u32 *)p);
87053 }
87054 static inline __be16 __cpu_to_be16p(const __u16 *p)
87055 {
87056- return (__force __be16)__swab16p(p);
87057+ return (__force const __be16)__swab16p(p);
87058 }
87059 static inline __u16 __be16_to_cpup(const __be16 *p)
87060 {
87061- return __swab16p((__u16 *)p);
87062+ return __swab16p((const __u16 *)p);
87063 }
87064 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87065 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87066diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87067index 71e1d0e..6cc9caf 100644
87068--- a/include/uapi/linux/elf.h
87069+++ b/include/uapi/linux/elf.h
87070@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87071 #define PT_GNU_EH_FRAME 0x6474e550
87072
87073 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87074+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87075+
87076+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87077+
87078+/* Constants for the e_flags field */
87079+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87080+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87081+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87082+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87083+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87084+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87085
87086 /*
87087 * Extended Numbering
87088@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87089 #define DT_DEBUG 21
87090 #define DT_TEXTREL 22
87091 #define DT_JMPREL 23
87092+#define DT_FLAGS 30
87093+ #define DF_TEXTREL 0x00000004
87094 #define DT_ENCODING 32
87095 #define OLD_DT_LOOS 0x60000000
87096 #define DT_LOOS 0x6000000d
87097@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87098 #define PF_W 0x2
87099 #define PF_X 0x1
87100
87101+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87102+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87103+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87104+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87105+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87106+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87107+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87108+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87109+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87110+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87111+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87112+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87113+
87114 typedef struct elf32_phdr{
87115 Elf32_Word p_type;
87116 Elf32_Off p_offset;
87117@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87118 #define EI_OSABI 7
87119 #define EI_PAD 8
87120
87121+#define EI_PAX 14
87122+
87123 #define ELFMAG0 0x7f /* EI_MAG */
87124 #define ELFMAG1 'E'
87125 #define ELFMAG2 'L'
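
[editor's note] Each PF_x/PF_NOx pair lets a binary explicitly enable or explicitly disable one PaX feature via the p_flags word of a dedicated PT_PAX_FLAGS program header; with neither bit set, system policy decides. A hedged userspace sketch that reports two of the flags, assuming the program headers have already been read into memory (the PT_/PF_ constants are the ones defined above, not part of standard elf.h):

#include <elf.h>
#include <stdio.h>

static void dump_pax_flags(const Elf64_Phdr *phdr, size_t phnum)
{
	size_t i;

	for (i = 0; i < phnum; i++) {
		if (phdr[i].p_type != PT_PAX_FLAGS)
			continue;
		printf("MPROTECT: %s\n",
		       (phdr[i].p_flags & PF_MPROTECT)   ? "forced on" :
		       (phdr[i].p_flags & PF_NOMPROTECT) ? "forced off" : "default");
		printf("RANDMMAP: %s\n",
		       (phdr[i].p_flags & PF_RANDMMAP)   ? "forced on" :
		       (phdr[i].p_flags & PF_NORANDMMAP) ? "forced off" : "default");
	}
}
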
87126diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87127index aa169c4..6a2771d 100644
87128--- a/include/uapi/linux/personality.h
87129+++ b/include/uapi/linux/personality.h
87130@@ -30,6 +30,7 @@ enum {
87131 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87132 ADDR_NO_RANDOMIZE | \
87133 ADDR_COMPAT_LAYOUT | \
87134+ ADDR_LIMIT_3GB | \
87135 MMAP_PAGE_ZERO)
87136
87137 /*
87138diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87139index 7530e74..e714828 100644
87140--- a/include/uapi/linux/screen_info.h
87141+++ b/include/uapi/linux/screen_info.h
87142@@ -43,7 +43,8 @@ struct screen_info {
87143 __u16 pages; /* 0x32 */
87144 __u16 vesa_attributes; /* 0x34 */
87145 __u32 capabilities; /* 0x36 */
87146- __u8 _reserved[6]; /* 0x3a */
87147+ __u16 vesapm_size; /* 0x3a */
87148+ __u8 _reserved[4]; /* 0x3c */
87149 } __attribute__((packed));
87150
87151 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87152diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87153index 0e011eb..82681b1 100644
87154--- a/include/uapi/linux/swab.h
87155+++ b/include/uapi/linux/swab.h
87156@@ -43,7 +43,7 @@
87157 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87158 */
87159
87160-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87161+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87162 {
87163 #ifdef __HAVE_BUILTIN_BSWAP16__
87164 return __builtin_bswap16(val);
87165@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87166 #endif
87167 }
87168
87169-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87170+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87171 {
87172 #ifdef __HAVE_BUILTIN_BSWAP32__
87173 return __builtin_bswap32(val);
87174@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87175 #endif
87176 }
87177
87178-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87179+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87180 {
87181 #ifdef __HAVE_BUILTIN_BSWAP64__
87182 return __builtin_bswap64(val);
87183diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87184index 1590c49..5eab462 100644
87185--- a/include/uapi/linux/xattr.h
87186+++ b/include/uapi/linux/xattr.h
87187@@ -73,5 +73,9 @@
87188 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87189 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87190
87191+/* User namespace */
87192+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87193+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87194+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87195
87196 #endif /* _UAPI_LINUX_XATTR_H */
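
[editor's note] The user.pax.flags xattr mirrors the same per-binary switches on the filesystem side, for cases where rewriting the ELF header is undesirable (e.g. signed binaries). A sketch of marking a file, assuming the paxctl-style letter encoding in which a lowercase letter disables a feature ("m" = disable MPROTECT):

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	if (argc < 2)
		return 1;
	/* equivalent to: setfattr -n user.pax.flags -v m <file> */
	if (setxattr(argv[1], "user.pax.flags", "m", 1, 0) != 0) {
		perror("setxattr");
		return 1;
	}
	return 0;
}
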
87197diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87198index f9466fa..f4e2b81 100644
87199--- a/include/video/udlfb.h
87200+++ b/include/video/udlfb.h
87201@@ -53,10 +53,10 @@ struct dlfb_data {
87202 u32 pseudo_palette[256];
87203 int blank_mode; /*one of FB_BLANK_ */
87204 /* blit-only rendering path metrics, exposed through sysfs */
87205- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87206- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87207- atomic_t bytes_sent; /* to usb, after compression including overhead */
87208- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87209+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87210+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87211+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87212+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87213 };
87214
87215 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87216diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87217index 30f5362..8ed8ac9 100644
87218--- a/include/video/uvesafb.h
87219+++ b/include/video/uvesafb.h
87220@@ -122,6 +122,7 @@ struct uvesafb_par {
87221 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87222 u8 pmi_setpal; /* PMI for palette changes */
87223 u16 *pmi_base; /* protected mode interface location */
87224+ u8 *pmi_code; /* protected mode code location */
87225 void *pmi_start;
87226 void *pmi_pal;
87227 u8 *vbe_state_orig; /*
87228diff --git a/init/Kconfig b/init/Kconfig
87229index 9afb971..27d6fca 100644
87230--- a/init/Kconfig
87231+++ b/init/Kconfig
87232@@ -1129,6 +1129,7 @@ endif # CGROUPS
87233
87234 config CHECKPOINT_RESTORE
87235 bool "Checkpoint/restore support" if EXPERT
87236+ depends on !GRKERNSEC
87237 default n
87238 help
87239 Enables additional kernel features in a sake of checkpoint/restore.
87240@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
87241
87242 config COMPAT_BRK
87243 bool "Disable heap randomization"
87244- default y
87245+ default n
87246 help
87247 Randomizing heap placement makes heap exploits harder, but it
87248 also breaks ancient binaries (including anything libc5 based).
87249@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
87250 config STOP_MACHINE
87251 bool
87252 default y
87253- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87254+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87255 help
87256 Need stop_machine() primitive.
87257
87258diff --git a/init/Makefile b/init/Makefile
87259index 7bc47ee..6da2dc7 100644
87260--- a/init/Makefile
87261+++ b/init/Makefile
87262@@ -2,6 +2,9 @@
87263 # Makefile for the linux kernel.
87264 #
87265
87266+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87267+asflags-y := $(GCC_PLUGINS_AFLAGS)
87268+
87269 obj-y := main.o version.o mounts.o
87270 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87271 obj-y += noinitramfs.o
87272diff --git a/init/do_mounts.c b/init/do_mounts.c
87273index eb41008..f5dbbf9 100644
87274--- a/init/do_mounts.c
87275+++ b/init/do_mounts.c
87276@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87277 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87278 {
87279 struct super_block *s;
87280- int err = sys_mount(name, "/root", fs, flags, data);
87281+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87282 if (err)
87283 return err;
87284
87285- sys_chdir("/root");
87286+ sys_chdir((const char __force_user *)"/root");
87287 s = current->fs->pwd.dentry->d_sb;
87288 ROOT_DEV = s->s_dev;
87289 printk(KERN_INFO
87290@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87291 va_start(args, fmt);
87292 vsprintf(buf, fmt, args);
87293 va_end(args);
87294- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87295+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87296 if (fd >= 0) {
87297 sys_ioctl(fd, FDEJECT, 0);
87298 sys_close(fd);
87299 }
87300 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87301- fd = sys_open("/dev/console", O_RDWR, 0);
87302+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87303 if (fd >= 0) {
87304 sys_ioctl(fd, TCGETS, (long)&termios);
87305 termios.c_lflag &= ~ICANON;
87306 sys_ioctl(fd, TCSETSF, (long)&termios);
87307- sys_read(fd, &c, 1);
87308+ sys_read(fd, (char __user *)&c, 1);
87309 termios.c_lflag |= ICANON;
87310 sys_ioctl(fd, TCSETSF, (long)&termios);
87311 sys_close(fd);
87312@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87313 mount_root();
87314 out:
87315 devtmpfs_mount("dev");
87316- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87317- sys_chroot(".");
87318+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87319+ sys_chroot((const char __force_user *)".");
87320 }
87321
87322 static bool is_tmpfs;
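
[editor's note] The __force_user casts in do_mounts.c (and the many init/ and kernel/ hunks below) are for sparse, not for codegen: the sys_* entry points are declared with __user pointers, but early-init code calls them on kernel strings while set_fs(KERNEL_DS) makes that legal. The annotation acknowledges the deliberate address-space crossing instead of silencing sparse globally. A sketch of the macros as they would sit in compiler.h — an assumption, the actual definition is in another hunk of this patch:

#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif
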
87323diff --git a/init/do_mounts.h b/init/do_mounts.h
87324index f5b978a..69dbfe8 100644
87325--- a/init/do_mounts.h
87326+++ b/init/do_mounts.h
87327@@ -15,15 +15,15 @@ extern int root_mountflags;
87328
87329 static inline int create_dev(char *name, dev_t dev)
87330 {
87331- sys_unlink(name);
87332- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87333+ sys_unlink((char __force_user *)name);
87334+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87335 }
87336
87337 #if BITS_PER_LONG == 32
87338 static inline u32 bstat(char *name)
87339 {
87340 struct stat64 stat;
87341- if (sys_stat64(name, &stat) != 0)
87342+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87343 return 0;
87344 if (!S_ISBLK(stat.st_mode))
87345 return 0;
87346@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87347 static inline u32 bstat(char *name)
87348 {
87349 struct stat stat;
87350- if (sys_newstat(name, &stat) != 0)
87351+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87352 return 0;
87353 if (!S_ISBLK(stat.st_mode))
87354 return 0;
87355diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87356index 3e0878e..8a9d7a0 100644
87357--- a/init/do_mounts_initrd.c
87358+++ b/init/do_mounts_initrd.c
87359@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87360 {
87361 sys_unshare(CLONE_FS | CLONE_FILES);
87362 /* stdin/stdout/stderr for /linuxrc */
87363- sys_open("/dev/console", O_RDWR, 0);
87364+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87365 sys_dup(0);
87366 sys_dup(0);
87367 /* move initrd over / and chdir/chroot in initrd root */
87368- sys_chdir("/root");
87369- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87370- sys_chroot(".");
87371+ sys_chdir((const char __force_user *)"/root");
87372+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87373+ sys_chroot((const char __force_user *)".");
87374 sys_setsid();
87375 return 0;
87376 }
87377@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87378 create_dev("/dev/root.old", Root_RAM0);
87379 /* mount initrd on rootfs' /root */
87380 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87381- sys_mkdir("/old", 0700);
87382- sys_chdir("/old");
87383+ sys_mkdir((const char __force_user *)"/old", 0700);
87384+ sys_chdir((const char __force_user *)"/old");
87385
87386 /* try loading default modules from initrd */
87387 load_default_modules();
87388@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87389 current->flags &= ~PF_FREEZER_SKIP;
87390
87391 /* move initrd to rootfs' /old */
87392- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87393+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87394 /* switch root and cwd back to / of rootfs */
87395- sys_chroot("..");
87396+ sys_chroot((const char __force_user *)"..");
87397
87398 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87399- sys_chdir("/old");
87400+ sys_chdir((const char __force_user *)"/old");
87401 return;
87402 }
87403
87404- sys_chdir("/");
87405+ sys_chdir((const char __force_user *)"/");
87406 ROOT_DEV = new_decode_dev(real_root_dev);
87407 mount_root();
87408
87409 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87410- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87411+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87412 if (!error)
87413 printk("okay\n");
87414 else {
87415- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87416+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87417 if (error == -ENOENT)
87418 printk("/initrd does not exist. Ignored.\n");
87419 else
87420 printk("failed\n");
87421 printk(KERN_NOTICE "Unmounting old root\n");
87422- sys_umount("/old", MNT_DETACH);
87423+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87424 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87425 if (fd < 0) {
87426 error = fd;
87427@@ -127,11 +127,11 @@ int __init initrd_load(void)
87428 * mounted in the normal path.
87429 */
87430 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87431- sys_unlink("/initrd.image");
87432+ sys_unlink((const char __force_user *)"/initrd.image");
87433 handle_initrd();
87434 return 1;
87435 }
87436 }
87437- sys_unlink("/initrd.image");
87438+ sys_unlink((const char __force_user *)"/initrd.image");
87439 return 0;
87440 }
87441diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87442index 8cb6db5..d729f50 100644
87443--- a/init/do_mounts_md.c
87444+++ b/init/do_mounts_md.c
87445@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87446 partitioned ? "_d" : "", minor,
87447 md_setup_args[ent].device_names);
87448
87449- fd = sys_open(name, 0, 0);
87450+ fd = sys_open((char __force_user *)name, 0, 0);
87451 if (fd < 0) {
87452 printk(KERN_ERR "md: open failed - cannot start "
87453 "array %s\n", name);
87454@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87455 * array without it
87456 */
87457 sys_close(fd);
87458- fd = sys_open(name, 0, 0);
87459+ fd = sys_open((char __force_user *)name, 0, 0);
87460 sys_ioctl(fd, BLKRRPART, 0);
87461 }
87462 sys_close(fd);
87463@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87464
87465 wait_for_device_probe();
87466
87467- fd = sys_open("/dev/md0", 0, 0);
87468+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87469 if (fd >= 0) {
87470 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87471 sys_close(fd);
87472diff --git a/init/init_task.c b/init/init_task.c
87473index ba0a7f36..2bcf1d5 100644
87474--- a/init/init_task.c
87475+++ b/init/init_task.c
87476@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87477 * Initial thread structure. Alignment of this is handled by a special
87478 * linker map entry.
87479 */
87480+#ifdef CONFIG_X86
87481+union thread_union init_thread_union __init_task_data;
87482+#else
87483 union thread_union init_thread_union __init_task_data =
87484 { INIT_THREAD_INFO(init_task) };
87485+#endif
87486diff --git a/init/initramfs.c b/init/initramfs.c
87487index ad1bd77..dca2c1b 100644
87488--- a/init/initramfs.c
87489+++ b/init/initramfs.c
87490@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87491
87492 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87493 while (count) {
87494- ssize_t rv = sys_write(fd, p, count);
87495+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87496
87497 if (rv < 0) {
87498 if (rv == -EINTR || rv == -EAGAIN)
87499@@ -107,7 +107,7 @@ static void __init free_hash(void)
87500 }
87501 }
87502
87503-static long __init do_utime(char *filename, time_t mtime)
87504+static long __init do_utime(char __force_user *filename, time_t mtime)
87505 {
87506 struct timespec t[2];
87507
87508@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87509 struct dir_entry *de, *tmp;
87510 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87511 list_del(&de->list);
87512- do_utime(de->name, de->mtime);
87513+ do_utime((char __force_user *)de->name, de->mtime);
87514 kfree(de->name);
87515 kfree(de);
87516 }
87517@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87518 if (nlink >= 2) {
87519 char *old = find_link(major, minor, ino, mode, collected);
87520 if (old)
87521- return (sys_link(old, collected) < 0) ? -1 : 1;
87522+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87523 }
87524 return 0;
87525 }
87526@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
87527 {
87528 struct stat st;
87529
87530- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
87531+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
87532 if (S_ISDIR(st.st_mode))
87533- sys_rmdir(path);
87534+ sys_rmdir((char __force_user *)path);
87535 else
87536- sys_unlink(path);
87537+ sys_unlink((char __force_user *)path);
87538 }
87539 }
87540
87541@@ -338,7 +338,7 @@ static int __init do_name(void)
87542 int openflags = O_WRONLY|O_CREAT;
87543 if (ml != 1)
87544 openflags |= O_TRUNC;
87545- wfd = sys_open(collected, openflags, mode);
87546+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87547
87548 if (wfd >= 0) {
87549 sys_fchown(wfd, uid, gid);
87550@@ -350,17 +350,17 @@ static int __init do_name(void)
87551 }
87552 }
87553 } else if (S_ISDIR(mode)) {
87554- sys_mkdir(collected, mode);
87555- sys_chown(collected, uid, gid);
87556- sys_chmod(collected, mode);
87557+ sys_mkdir((char __force_user *)collected, mode);
87558+ sys_chown((char __force_user *)collected, uid, gid);
87559+ sys_chmod((char __force_user *)collected, mode);
87560 dir_add(collected, mtime);
87561 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87562 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87563 if (maybe_link() == 0) {
87564- sys_mknod(collected, mode, rdev);
87565- sys_chown(collected, uid, gid);
87566- sys_chmod(collected, mode);
87567- do_utime(collected, mtime);
87568+ sys_mknod((char __force_user *)collected, mode, rdev);
87569+ sys_chown((char __force_user *)collected, uid, gid);
87570+ sys_chmod((char __force_user *)collected, mode);
87571+ do_utime((char __force_user *)collected, mtime);
87572 }
87573 }
87574 return 0;
87575@@ -372,7 +372,7 @@ static int __init do_copy(void)
87576 if (xwrite(wfd, victim, body_len) != body_len)
87577 error("write error");
87578 sys_close(wfd);
87579- do_utime(vcollected, mtime);
87580+ do_utime((char __force_user *)vcollected, mtime);
87581 kfree(vcollected);
87582 eat(body_len);
87583 state = SkipIt;
87584@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87585 {
87586 collected[N_ALIGN(name_len) + body_len] = '\0';
87587 clean_path(collected, 0);
87588- sys_symlink(collected + N_ALIGN(name_len), collected);
87589- sys_lchown(collected, uid, gid);
87590- do_utime(collected, mtime);
87591+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87592+ sys_lchown((char __force_user *)collected, uid, gid);
87593+ do_utime((char __force_user *)collected, mtime);
87594 state = SkipIt;
87595 next_state = Reset;
87596 return 0;
87597diff --git a/init/main.c b/init/main.c
87598index 61b99376..85893612d 100644
87599--- a/init/main.c
87600+++ b/init/main.c
87601@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
87602 static inline void mark_rodata_ro(void) { }
87603 #endif
87604
87605+extern void grsecurity_init(void);
87606+
87607 /*
87608 * Debug helper: via this flag we know that we are in 'early bootup code'
87609 * where only the boot processor is running with IRQ disabled. This means
87610@@ -161,6 +163,75 @@ static int __init set_reset_devices(char *str)
87611
87612 __setup("reset_devices", set_reset_devices);
87613
87614+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87615+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87616+static int __init setup_grsec_proc_gid(char *str)
87617+{
87618+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87619+ return 1;
87620+}
87621+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87622+#endif
87623+
87624+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87625+unsigned long pax_user_shadow_base __read_only;
87626+EXPORT_SYMBOL(pax_user_shadow_base);
87627+extern char pax_enter_kernel_user[];
87628+extern char pax_exit_kernel_user[];
87629+#endif
87630+
87631+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87632+static int __init setup_pax_nouderef(char *str)
87633+{
87634+#ifdef CONFIG_X86_32
87635+ unsigned int cpu;
87636+ struct desc_struct *gdt;
87637+
87638+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87639+ gdt = get_cpu_gdt_table(cpu);
87640+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87641+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87642+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87643+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87644+ }
87645+ loadsegment(ds, __KERNEL_DS);
87646+ loadsegment(es, __KERNEL_DS);
87647+ loadsegment(ss, __KERNEL_DS);
87648+#else
87649+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87650+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87651+ clone_pgd_mask = ~(pgdval_t)0UL;
87652+ pax_user_shadow_base = 0UL;
87653+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87654+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87655+#endif
87656+
87657+ return 0;
87658+}
87659+early_param("pax_nouderef", setup_pax_nouderef);
87660+
87661+#ifdef CONFIG_X86_64
87662+static int __init setup_pax_weakuderef(char *str)
87663+{
87664+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87665+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87666+ return 1;
87667+}
87668+__setup("pax_weakuderef", setup_pax_weakuderef);
87669+#endif
87670+#endif
87671+
87672+#ifdef CONFIG_PAX_SOFTMODE
87673+int pax_softmode;
87674+
87675+static int __init setup_pax_softmode(char *str)
87676+{
87677+ get_option(&str, &pax_softmode);
87678+ return 1;
87679+}
87680+__setup("pax_softmode=", setup_pax_softmode);
87681+#endif
87682+
87683 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87684 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87685 static const char *panic_later, *panic_param;
87686@@ -735,7 +806,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87687 struct blacklist_entry *entry;
87688 char *fn_name;
87689
87690- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87691+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87692 if (!fn_name)
87693 return false;
87694
87695@@ -787,7 +858,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87696 {
87697 int count = preempt_count();
87698 int ret;
87699- char msgbuf[64];
87700+ const char *msg1 = "", *msg2 = "";
87701
87702 if (initcall_blacklisted(fn))
87703 return -EPERM;
87704@@ -797,18 +868,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87705 else
87706 ret = fn();
87707
87708- msgbuf[0] = 0;
87709-
87710 if (preempt_count() != count) {
87711- sprintf(msgbuf, "preemption imbalance ");
87712+ msg1 = " preemption imbalance";
87713 preempt_count_set(count);
87714 }
87715 if (irqs_disabled()) {
87716- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87717+ msg2 = " disabled interrupts";
87718 local_irq_enable();
87719 }
87720- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87721+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87722
87723+ add_latent_entropy();
87724 return ret;
87725 }
87726
87727@@ -914,8 +984,8 @@ static int run_init_process(const char *init_filename)
87728 {
87729 argv_init[0] = init_filename;
87730 return do_execve(getname_kernel(init_filename),
87731- (const char __user *const __user *)argv_init,
87732- (const char __user *const __user *)envp_init);
87733+ (const char __user *const __force_user *)argv_init,
87734+ (const char __user *const __force_user *)envp_init);
87735 }
87736
87737 static int try_to_run_init_process(const char *init_filename)
87738@@ -932,6 +1002,10 @@ static int try_to_run_init_process(const char *init_filename)
87739 return ret;
87740 }
87741
87742+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87743+extern int gr_init_ran;
87744+#endif
87745+
87746 static noinline void __init kernel_init_freeable(void);
87747
87748 static int __ref kernel_init(void *unused)
87749@@ -956,6 +1030,11 @@ static int __ref kernel_init(void *unused)
87750 ramdisk_execute_command, ret);
87751 }
87752
87753+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87754+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87755+ gr_init_ran = 1;
87756+#endif
87757+
87758 /*
87759 * We try each of these until one succeeds.
87760 *
87761@@ -1016,7 +1095,7 @@ static noinline void __init kernel_init_freeable(void)
87762 do_basic_setup();
87763
87764 /* Open the /dev/console on the rootfs, this should never fail */
87765- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
87766+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
87767 pr_err("Warning: unable to open an initial console.\n");
87768
87769 (void) sys_dup(0);
87770@@ -1029,11 +1108,13 @@ static noinline void __init kernel_init_freeable(void)
87771 if (!ramdisk_execute_command)
87772 ramdisk_execute_command = "/init";
87773
87774- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
87775+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
87776 ramdisk_execute_command = NULL;
87777 prepare_namespace();
87778 }
87779
87780+ grsecurity_init();
87781+
87782 /*
87783 * Ok, we have completed the initial bootup, and
87784 * we're essentially up and running. Get rid of the
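
[editor's note] The __setup()/early_param() handlers added above register boot-time switches rather than runtime knobs; they are driven from the kernel command line, e.g. booting with pax_softmode=1 to start PaX in soft mode, pax_nouderef or pax_weakuderef to disable or relax UDEREF, or grsec_proc_gid=1001 to pick the /proc-visibility group (the values are illustrative).
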
87785diff --git a/ipc/compat.c b/ipc/compat.c
87786index 9b3c85f..1c4d897 100644
87787--- a/ipc/compat.c
87788+++ b/ipc/compat.c
87789@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
87790 COMPAT_SHMLBA);
87791 if (err < 0)
87792 return err;
87793- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
87794+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
87795 }
87796 case SHMDT:
87797 return sys_shmdt(compat_ptr(ptr));
87798diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
87799index 8ad93c2..efd80f8 100644
87800--- a/ipc/ipc_sysctl.c
87801+++ b/ipc/ipc_sysctl.c
87802@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
87803 static int proc_ipc_dointvec(struct ctl_table *table, int write,
87804 void __user *buffer, size_t *lenp, loff_t *ppos)
87805 {
87806- struct ctl_table ipc_table;
87807+ ctl_table_no_const ipc_table;
87808
87809 memcpy(&ipc_table, table, sizeof(ipc_table));
87810 ipc_table.data = get_ipc(table);
87811@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
87812 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
87813 void __user *buffer, size_t *lenp, loff_t *ppos)
87814 {
87815- struct ctl_table ipc_table;
87816+ ctl_table_no_const ipc_table;
87817
87818 memcpy(&ipc_table, table, sizeof(ipc_table));
87819 ipc_table.data = get_ipc(table);
87820@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
87821 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87822 void __user *buffer, size_t *lenp, loff_t *ppos)
87823 {
87824- struct ctl_table ipc_table;
87825+ ctl_table_no_const ipc_table;
87826 memcpy(&ipc_table, table, sizeof(ipc_table));
87827 ipc_table.data = get_ipc(table);
87828
87829@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87830 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
87831 void __user *buffer, size_t *lenp, loff_t *ppos)
87832 {
87833- struct ctl_table ipc_table;
87834+ ctl_table_no_const ipc_table;
87835 int dummy = 0;
87836
87837 memcpy(&ipc_table, table, sizeof(ipc_table));
87838diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
87839index 68d4e95..1477ded 100644
87840--- a/ipc/mq_sysctl.c
87841+++ b/ipc/mq_sysctl.c
87842@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
87843 static int proc_mq_dointvec(struct ctl_table *table, int write,
87844 void __user *buffer, size_t *lenp, loff_t *ppos)
87845 {
87846- struct ctl_table mq_table;
87847+ ctl_table_no_const mq_table;
87848 memcpy(&mq_table, table, sizeof(mq_table));
87849 mq_table.data = get_mq(table);
87850
87851@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
87852 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
87853 void __user *buffer, size_t *lenp, loff_t *ppos)
87854 {
87855- struct ctl_table mq_table;
87856+ ctl_table_no_const mq_table;
87857 memcpy(&mq_table, table, sizeof(mq_table));
87858 mq_table.data = get_mq(table);
87859
87860diff --git a/ipc/mqueue.c b/ipc/mqueue.c
87861index 7635a1c..7432cb6 100644
87862--- a/ipc/mqueue.c
87863+++ b/ipc/mqueue.c
87864@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
87865 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
87866 info->attr.mq_msgsize);
87867
87868+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
87869 spin_lock(&mq_lock);
87870 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
87871 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
87872diff --git a/ipc/shm.c b/ipc/shm.c
87873index 19633b4..d454904 100644
87874--- a/ipc/shm.c
87875+++ b/ipc/shm.c
87876@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
87877 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
87878 #endif
87879
87880+#ifdef CONFIG_GRKERNSEC
87881+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
87882+ const u64 shm_createtime, const kuid_t cuid,
87883+ const int shmid);
87884+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
87885+ const u64 shm_createtime);
87886+#endif
87887+
87888 void shm_init_ns(struct ipc_namespace *ns)
87889 {
87890 ns->shm_ctlmax = SHMMAX;
87891@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
87892 shp->shm_lprid = 0;
87893 shp->shm_atim = shp->shm_dtim = 0;
87894 shp->shm_ctim = get_seconds();
87895+#ifdef CONFIG_GRKERNSEC
87896+ shp->shm_createtime = ktime_get_ns();
87897+#endif
87898 shp->shm_segsz = size;
87899 shp->shm_nattch = 0;
87900 shp->shm_file = file;
87901@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87902 f_mode = FMODE_READ | FMODE_WRITE;
87903 }
87904 if (shmflg & SHM_EXEC) {
87905+
87906+#ifdef CONFIG_PAX_MPROTECT
87907+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
87908+ goto out;
87909+#endif
87910+
87911 prot |= PROT_EXEC;
87912 acc_mode |= S_IXUGO;
87913 }
87914@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87915 if (err)
87916 goto out_unlock;
87917
87918+#ifdef CONFIG_GRKERNSEC
87919+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
87920+ shp->shm_perm.cuid, shmid) ||
87921+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
87922+ err = -EACCES;
87923+ goto out_unlock;
87924+ }
87925+#endif
87926+
87927 ipc_lock_object(&shp->shm_perm);
87928
87929 /* check if shm_destroy() is tearing down shp */
87930@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87931 path = shp->shm_file->f_path;
87932 path_get(&path);
87933 shp->shm_nattch++;
87934+#ifdef CONFIG_GRKERNSEC
87935+ shp->shm_lapid = current->pid;
87936+#endif
87937 size = i_size_read(path.dentry->d_inode);
87938 ipc_unlock_object(&shp->shm_perm);
87939 rcu_read_unlock();
87940diff --git a/ipc/util.c b/ipc/util.c
87941index 106bed0..f851429 100644
87942--- a/ipc/util.c
87943+++ b/ipc/util.c
87944@@ -71,6 +71,8 @@ struct ipc_proc_iface {
87945 int (*show)(struct seq_file *, void *);
87946 };
87947
87948+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
87949+
87950 /**
87951 * ipc_init - initialise ipc subsystem
87952 *
87953@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
87954 granted_mode >>= 6;
87955 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
87956 granted_mode >>= 3;
87957+
87958+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
87959+ return -1;
87960+
87961 /* is there some bit set in requested_mode but not in granted_mode? */
87962 if ((requested_mode & ~granted_mode & 0007) &&
87963 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
87964diff --git a/kernel/audit.c b/kernel/audit.c
87965index 72ab759..757deba 100644
87966--- a/kernel/audit.c
87967+++ b/kernel/audit.c
87968@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
87969 3) suppressed due to audit_rate_limit
87970 4) suppressed due to audit_backlog_limit
87971 */
87972-static atomic_t audit_lost = ATOMIC_INIT(0);
87973+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
87974
87975 /* The netlink socket. */
87976 static struct sock *audit_sock;
87977@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
87978 unsigned long now;
87979 int print;
87980
87981- atomic_inc(&audit_lost);
87982+ atomic_inc_unchecked(&audit_lost);
87983
87984 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
87985
87986@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
87987 if (print) {
87988 if (printk_ratelimit())
87989 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
87990- atomic_read(&audit_lost),
87991+ atomic_read_unchecked(&audit_lost),
87992 audit_rate_limit,
87993 audit_backlog_limit);
87994 audit_panic(message);
87995@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
87996 s.pid = audit_pid;
87997 s.rate_limit = audit_rate_limit;
87998 s.backlog_limit = audit_backlog_limit;
87999- s.lost = atomic_read(&audit_lost);
88000+ s.lost = atomic_read_unchecked(&audit_lost);
88001 s.backlog = skb_queue_len(&audit_skb_queue);
88002 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88003 s.backlog_wait_time = audit_backlog_wait_time;
88004diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88005index 072566d..1190489 100644
88006--- a/kernel/auditsc.c
88007+++ b/kernel/auditsc.c
88008@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88009 }
88010
88011 /* global counter which is incremented every time something logs in */
88012-static atomic_t session_id = ATOMIC_INIT(0);
88013+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88014
88015 static int audit_set_loginuid_perm(kuid_t loginuid)
88016 {
88017@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
88018
88019 /* are we setting or clearing? */
88020 if (uid_valid(loginuid))
88021- sessionid = (unsigned int)atomic_inc_return(&session_id);
88022+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88023
88024 task->sessionid = sessionid;
88025 task->loginuid = loginuid;
88026diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88027index a64e7a2..2e69448 100644
88028--- a/kernel/bpf/core.c
88029+++ b/kernel/bpf/core.c
88030@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88031 * random section of illegal instructions.
88032 */
88033 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88034- hdr = module_alloc(size);
88035+ hdr = module_alloc_exec(size);
88036 if (hdr == NULL)
88037 return NULL;
88038
88039 /* Fill space with illegal/arch-dep instructions. */
88040 bpf_fill_ill_insns(hdr, size);
88041
88042+ pax_open_kernel();
88043 hdr->pages = size / PAGE_SIZE;
88044+ pax_close_kernel();
88045+
88046 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88047 PAGE_SIZE - sizeof(*hdr));
88048 start = (prandom_u32() % hole) & ~(alignment - 1);
88049@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88050
88051 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88052 {
88053- module_memfree(hdr);
88054+ module_memfree_exec(hdr);
88055 }
88056 #endif /* CONFIG_BPF_JIT */
88057
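
[editor's note] Under KERNEXEC the JIT buffer from module_alloc_exec() is mapped read-only and executable, so even the single store to hdr->pages has to be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly permit kernel writes to read-only pages. On x86 the classic mechanism is toggling CR0.WP; a sketch of that idea, not necessarily this patch's exact implementation:

static inline unsigned long pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear write-protect */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;
}

static inline unsigned long pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* restore write-protect */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable_no_resched();
	return cr0 ^ X86_CR0_WP;
}
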
88058diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88059index 536edc2..d28c85d 100644
88060--- a/kernel/bpf/syscall.c
88061+++ b/kernel/bpf/syscall.c
88062@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88063 int err;
88064
88065 /* the syscall is limited to root temporarily. This restriction will be
88066- * lifted when security audit is clean. Note that eBPF+tracing must have
88067- * this restriction, since it may pass kernel data to user space
88068+ * lifted by upstream when a half-assed security audit is clean. Note
88069+ * that eBPF+tracing must have this restriction, since it may pass
88070+ * kernel data to user space
88071 */
88072 if (!capable(CAP_SYS_ADMIN))
88073 return -EPERM;
88074+#ifdef CONFIG_GRKERNSEC
88075+ return -EPERM;
88076+#endif
88077
88078 if (!access_ok(VERIFY_READ, uattr, 1))
88079 return -EFAULT;
88080diff --git a/kernel/capability.c b/kernel/capability.c
88081index 989f5bf..d317ca0 100644
88082--- a/kernel/capability.c
88083+++ b/kernel/capability.c
88084@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88085 * before modification is attempted and the application
88086 * fails.
88087 */
88088+ if (tocopy > ARRAY_SIZE(kdata))
88089+ return -EFAULT;
88090+
88091 if (copy_to_user(dataptr, kdata, tocopy
88092 * sizeof(struct __user_cap_data_struct))) {
88093 return -EFAULT;
88094@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88095 int ret;
88096
88097 rcu_read_lock();
88098- ret = security_capable(__task_cred(t), ns, cap);
88099+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88100+ gr_task_is_capable(t, __task_cred(t), cap);
88101 rcu_read_unlock();
88102
88103- return (ret == 0);
88104+ return ret;
88105 }
88106
88107 /**
88108@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88109 int ret;
88110
88111 rcu_read_lock();
88112- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88113+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88114 rcu_read_unlock();
88115
88116- return (ret == 0);
88117+ return ret;
88118 }
88119
88120 /**
88121@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88122 BUG();
88123 }
88124
88125- if (security_capable(current_cred(), ns, cap) == 0) {
88126+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88127 current->flags |= PF_SUPERPRIV;
88128 return true;
88129 }
88130@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88131 }
88132 EXPORT_SYMBOL(ns_capable);
88133
88134+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88135+{
88136+ if (unlikely(!cap_valid(cap))) {
88137+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88138+ BUG();
88139+ }
88140+
88141+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88142+ current->flags |= PF_SUPERPRIV;
88143+ return true;
88144+ }
88145+ return false;
88146+}
88147+EXPORT_SYMBOL(ns_capable_nolog);
88148+
88149 /**
88150 * file_ns_capable - Determine if the file's opener had a capability in effect
88151 * @file: The file we want to check
88152@@ -427,6 +446,12 @@ bool capable(int cap)
88153 }
88154 EXPORT_SYMBOL(capable);
88155
88156+bool capable_nolog(int cap)
88157+{
88158+ return ns_capable_nolog(&init_user_ns, cap);
88159+}
88160+EXPORT_SYMBOL(capable_nolog);
88161+
88162 /**
88163 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88164 * @inode: The inode in question
88165@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88166 kgid_has_mapping(ns, inode->i_gid);
88167 }
88168 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88169+
88170+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88171+{
88172+ struct user_namespace *ns = current_user_ns();
88173+
88174+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88175+ kgid_has_mapping(ns, inode->i_gid);
88176+}
88177+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
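
[editor's note] The *_nolog capability variants pair security_capable_noaudit() with gr_is_capable_nolog(): the same decision as capable()/ns_capable(), but neither the LSM audit path nor grsecurity's own logging fires, so kernel code that merely probes for a capability on a hot or speculative path doesn't flood the logs. Callers whose denials should remain visible keep using the logging variants.
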
88178diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88179index 04cfe8a..adadcc0 100644
88180--- a/kernel/cgroup.c
88181+++ b/kernel/cgroup.c
88182@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
88183 if (!pathbuf || !agentbuf)
88184 goto out;
88185
88186+ if (agentbuf[0] == '\0')
88187+ goto out;
88188+
88189 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88190 if (!path)
88191 goto out;
88192@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88193 struct task_struct *task;
88194 int count = 0;
88195
88196- seq_printf(seq, "css_set %p\n", cset);
88197+ seq_printf(seq, "css_set %pK\n", cset);
88198
88199 list_for_each_entry(task, &cset->tasks, cg_list) {
88200 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88201diff --git a/kernel/compat.c b/kernel/compat.c
88202index ebb3c36..1df606e 100644
88203--- a/kernel/compat.c
88204+++ b/kernel/compat.c
88205@@ -13,6 +13,7 @@
88206
88207 #include <linux/linkage.h>
88208 #include <linux/compat.h>
88209+#include <linux/module.h>
88210 #include <linux/errno.h>
88211 #include <linux/time.h>
88212 #include <linux/signal.h>
88213@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88214 mm_segment_t oldfs;
88215 long ret;
88216
88217- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88218+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88219 oldfs = get_fs();
88220 set_fs(KERNEL_DS);
88221 ret = hrtimer_nanosleep_restart(restart);
88222@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88223 oldfs = get_fs();
88224 set_fs(KERNEL_DS);
88225 ret = hrtimer_nanosleep(&tu,
88226- rmtp ? (struct timespec __user *)&rmt : NULL,
88227+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88228 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88229 set_fs(oldfs);
88230
88231@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88232 mm_segment_t old_fs = get_fs();
88233
88234 set_fs(KERNEL_DS);
88235- ret = sys_sigpending((old_sigset_t __user *) &s);
88236+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88237 set_fs(old_fs);
88238 if (ret == 0)
88239 ret = put_user(s, set);
88240@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88241 mm_segment_t old_fs = get_fs();
88242
88243 set_fs(KERNEL_DS);
88244- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88245+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88246 set_fs(old_fs);
88247
88248 if (!ret) {
88249@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88250 set_fs (KERNEL_DS);
88251 ret = sys_wait4(pid,
88252 (stat_addr ?
88253- (unsigned int __user *) &status : NULL),
88254- options, (struct rusage __user *) &r);
88255+ (unsigned int __force_user *) &status : NULL),
88256+ options, (struct rusage __force_user *) &r);
88257 set_fs (old_fs);
88258
88259 if (ret > 0) {
88260@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88261 memset(&info, 0, sizeof(info));
88262
88263 set_fs(KERNEL_DS);
88264- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88265- uru ? (struct rusage __user *)&ru : NULL);
88266+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88267+ uru ? (struct rusage __force_user *)&ru : NULL);
88268 set_fs(old_fs);
88269
88270 if ((ret < 0) || (info.si_signo == 0))
88271@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88272 oldfs = get_fs();
88273 set_fs(KERNEL_DS);
88274 err = sys_timer_settime(timer_id, flags,
88275- (struct itimerspec __user *) &newts,
88276- (struct itimerspec __user *) &oldts);
88277+ (struct itimerspec __force_user *) &newts,
88278+ (struct itimerspec __force_user *) &oldts);
88279 set_fs(oldfs);
88280 if (!err && old && put_compat_itimerspec(old, &oldts))
88281 return -EFAULT;
88282@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88283 oldfs = get_fs();
88284 set_fs(KERNEL_DS);
88285 err = sys_timer_gettime(timer_id,
88286- (struct itimerspec __user *) &ts);
88287+ (struct itimerspec __force_user *) &ts);
88288 set_fs(oldfs);
88289 if (!err && put_compat_itimerspec(setting, &ts))
88290 return -EFAULT;
88291@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88292 oldfs = get_fs();
88293 set_fs(KERNEL_DS);
88294 err = sys_clock_settime(which_clock,
88295- (struct timespec __user *) &ts);
88296+ (struct timespec __force_user *) &ts);
88297 set_fs(oldfs);
88298 return err;
88299 }
88300@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88301 oldfs = get_fs();
88302 set_fs(KERNEL_DS);
88303 err = sys_clock_gettime(which_clock,
88304- (struct timespec __user *) &ts);
88305+ (struct timespec __force_user *) &ts);
88306 set_fs(oldfs);
88307 if (!err && compat_put_timespec(&ts, tp))
88308 return -EFAULT;
88309@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88310
88311 oldfs = get_fs();
88312 set_fs(KERNEL_DS);
88313- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88314+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88315 set_fs(oldfs);
88316
88317 err = compat_put_timex(utp, &txc);
88318@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88319 oldfs = get_fs();
88320 set_fs(KERNEL_DS);
88321 err = sys_clock_getres(which_clock,
88322- (struct timespec __user *) &ts);
88323+ (struct timespec __force_user *) &ts);
88324 set_fs(oldfs);
88325 if (!err && tp && compat_put_timespec(&ts, tp))
88326 return -EFAULT;
88327@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88328 struct timespec tu;
88329 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88330
88331- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88332+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88333 oldfs = get_fs();
88334 set_fs(KERNEL_DS);
88335 err = clock_nanosleep_restart(restart);
88336@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88337 oldfs = get_fs();
88338 set_fs(KERNEL_DS);
88339 err = sys_clock_nanosleep(which_clock, flags,
88340- (struct timespec __user *) &in,
88341- (struct timespec __user *) &out);
88342+ (struct timespec __force_user *) &in,
88343+ (struct timespec __force_user *) &out);
88344 set_fs(oldfs);
88345
88346 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88347@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88348 mm_segment_t old_fs = get_fs();
88349
88350 set_fs(KERNEL_DS);
88351- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88352+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88353 set_fs(old_fs);
88354 if (compat_put_timespec(&t, interval))
88355 return -EFAULT;
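Every kernel/compat.c hunk above applies one pattern: where a compat shim passes a kernel stack buffer into a native syscall under set_fs(KERNEL_DS), the cast changes from __user to __force_user, telling the checker (whose address-space annotations this patch tightens) that the crossing is deliberate rather than an infoleak bug. The pattern, condensed from the hunks into one illustrative shim; compat_shim_sketch itself is not a function in the patch:

static long compat_shim_sketch(struct compat_timespec __user *utp)
{
	struct timespec ts;		/* kernel-space scratch buffer */
	mm_segment_t oldfs = get_fs();
	long err;

	set_fs(KERNEL_DS);		/* access_ok() now admits kernel addresses */
	err = sys_clock_gettime(CLOCK_MONOTONIC,
				(struct timespec __force_user *)&ts);
	set_fs(oldfs);

	if (!err && compat_put_timespec(&ts, utp))
		return -EFAULT;		/* copy out in the compat layout */
	return err;
}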
88356diff --git a/kernel/configs.c b/kernel/configs.c
88357index c18b1f1..b9a0132 100644
88358--- a/kernel/configs.c
88359+++ b/kernel/configs.c
88360@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88361 struct proc_dir_entry *entry;
88362
88363 /* create the current config file */
88364+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88365+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88366+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88367+ &ikconfig_file_ops);
88368+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88369+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88370+ &ikconfig_file_ops);
88371+#endif
88372+#else
88373 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88374 &ikconfig_file_ops);
88375+#endif
88376+
88377 if (!entry)
88378 return -ENOMEM;
88379
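The ifdef ladder above tightens /proc/config.gz from world-readable (S_IRUGO) to root-only or root-plus-group, depending on the GRKERNSEC_PROC_* options; when CONFIG_GRKERNSEC_PROC_ADD is set, Kconfig guarantees that one of the two inner branches applies, so entry is always assigned. A flattened, behavior-equivalent form of the mode selection, shown only for clarity:

umode_t mode = S_IFREG | S_IRUGO;		/* stock kernel: world-readable */

#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
mode = S_IFREG | S_IRUSR;			/* root only */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
mode = S_IFREG | S_IRUSR | S_IRGRP;		/* root plus the configured group */
#endif

entry = proc_create("config.gz", mode, NULL, &ikconfig_file_ops);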
88380diff --git a/kernel/cred.c b/kernel/cred.c
88381index e0573a4..26c0fd3 100644
88382--- a/kernel/cred.c
88383+++ b/kernel/cred.c
88384@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88385 validate_creds(cred);
88386 alter_cred_subscribers(cred, -1);
88387 put_cred(cred);
88388+
88389+#ifdef CONFIG_GRKERNSEC_SETXID
88390+ cred = (struct cred *) tsk->delayed_cred;
88391+ if (cred != NULL) {
88392+ tsk->delayed_cred = NULL;
88393+ validate_creds(cred);
88394+ alter_cred_subscribers(cred, -1);
88395+ put_cred(cred);
88396+ }
88397+#endif
88398 }
88399
88400 /**
88401@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88402 * Always returns 0 thus allowing this function to be tail-called at the end
88403 * of, say, sys_setgid().
88404 */
88405-int commit_creds(struct cred *new)
88406+static int __commit_creds(struct cred *new)
88407 {
88408 struct task_struct *task = current;
88409 const struct cred *old = task->real_cred;
88410@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88411
88412 get_cred(new); /* we will require a ref for the subj creds too */
88413
88414+ gr_set_role_label(task, new->uid, new->gid);
88415+
88416 /* dumpability changes */
88417 if (!uid_eq(old->euid, new->euid) ||
88418 !gid_eq(old->egid, new->egid) ||
88419@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88420 put_cred(old);
88421 return 0;
88422 }
88423+#ifdef CONFIG_GRKERNSEC_SETXID
88424+extern int set_user(struct cred *new);
88425+
88426+void gr_delayed_cred_worker(void)
88427+{
88428+ const struct cred *new = current->delayed_cred;
88429+ struct cred *ncred;
88430+
88431+ current->delayed_cred = NULL;
88432+
88433+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88434+ // drop the reference taken via get_cred() when this cred was queued
88435+ put_cred(new);
88436+ return;
88437+ } else if (new == NULL)
88438+ return;
88439+
88440+ ncred = prepare_creds();
88441+ if (!ncred)
88442+ goto die;
88443+ // uids
88444+ ncred->uid = new->uid;
88445+ ncred->euid = new->euid;
88446+ ncred->suid = new->suid;
88447+ ncred->fsuid = new->fsuid;
88448+ // gids
88449+ ncred->gid = new->gid;
88450+ ncred->egid = new->egid;
88451+ ncred->sgid = new->sgid;
88452+ ncred->fsgid = new->fsgid;
88453+ // groups
88454+ set_groups(ncred, new->group_info);
88455+ // caps
88456+ ncred->securebits = new->securebits;
88457+ ncred->cap_inheritable = new->cap_inheritable;
88458+ ncred->cap_permitted = new->cap_permitted;
88459+ ncred->cap_effective = new->cap_effective;
88460+ ncred->cap_bset = new->cap_bset;
88461+
88462+ if (set_user(ncred)) {
88463+ abort_creds(ncred);
88464+ goto die;
88465+ }
88466+
88467+ // drop the reference taken via get_cred() when this cred was queued
88468+ put_cred(new);
88469+
88470+ __commit_creds(ncred);
88471+ return;
88472+die:
88473+ // drop the reference taken via get_cred() when this cred was queued
88474+ put_cred(new);
88475+ do_group_exit(SIGKILL);
88476+}
88477+#endif
88478+
88479+int commit_creds(struct cred *new)
88480+{
88481+#ifdef CONFIG_GRKERNSEC_SETXID
88482+ int ret;
88483+ int schedule_it = 0;
88484+ struct task_struct *t;
88485+ unsigned oldsecurebits = current_cred()->securebits;
88486+
88487+ /* we won't get called with tasklist_lock held for writing
88488+ and interrupts disabled as the cred struct in that case is
88489+ init_cred
88490+ */
88491+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88492+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88493+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88494+ schedule_it = 1;
88495+ }
88496+ ret = __commit_creds(new);
88497+ if (schedule_it) {
88498+ rcu_read_lock();
88499+ read_lock(&tasklist_lock);
88500+ for (t = next_thread(current); t != current;
88501+ t = next_thread(t)) {
88502+ /* we'll check if the thread has uid 0 in
88503+ * the delayed worker routine
88504+ */
88505+ if (task_securebits(t) == oldsecurebits &&
88506+ t->delayed_cred == NULL) {
88507+ t->delayed_cred = get_cred(new);
88508+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88509+ set_tsk_need_resched(t);
88510+ }
88511+ }
88512+ read_unlock(&tasklist_lock);
88513+ rcu_read_unlock();
88514+ }
88515+
88516+ return ret;
88517+#else
88518+ return __commit_creds(new);
88519+#endif
88520+}
88521+
88522 EXPORT_SYMBOL(commit_creds);
88523
88524 /**
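The cred.c block closes a multithreaded privilege-drop gap. commit_creds() becomes a thin wrapper around the original body (now __commit_creds), and with GRKERNSEC_SETXID enabled, when a root-owned multithreaded process drops to a non-root uid, every sibling thread receives a reference to the new cred in ->delayed_cred, the TIF_GRSEC_SETXID flag, and a forced reschedule; each thread then applies the drop itself in gr_delayed_cred_worker(), and a failure to do so kills the whole group. The consumer of the flag is not in this hunk; it sits in the per-arch syscall work path elsewhere in the patch and looks roughly like this sketch:

#ifdef CONFIG_GRKERNSEC_SETXID
	/* run in syscall-entry/exit work: apply any queued uid drop */
	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
		gr_delayed_cred_worker();
#endif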
88525diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88526index ac5c0f9..4b1c6c2 100644
88527--- a/kernel/debug/debug_core.c
88528+++ b/kernel/debug/debug_core.c
88529@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88530 */
88531 static atomic_t masters_in_kgdb;
88532 static atomic_t slaves_in_kgdb;
88533-static atomic_t kgdb_break_tasklet_var;
88534+static atomic_unchecked_t kgdb_break_tasklet_var;
88535 atomic_t kgdb_setting_breakpoint;
88536
88537 struct task_struct *kgdb_usethread;
88538@@ -137,7 +137,7 @@ int kgdb_single_step;
88539 static pid_t kgdb_sstep_pid;
88540
88541 /* to keep track of the CPU which is doing the single stepping*/
88542-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88543+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88544
88545 /*
88546 * If you are debugging a problem where roundup (the collection of
88547@@ -552,7 +552,7 @@ return_normal:
88548 * kernel will only try for the value of sstep_tries before
88549 * giving up and continuing on.
88550 */
88551- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88552+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88553 (kgdb_info[cpu].task &&
88554 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88555 atomic_set(&kgdb_active, -1);
88556@@ -654,8 +654,8 @@ cpu_master_loop:
88557 }
88558
88559 kgdb_restore:
88560- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88561- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88562+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88563+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88564 if (kgdb_info[sstep_cpu].task)
88565 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88566 else
88567@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
88568 static void kgdb_tasklet_bpt(unsigned long ing)
88569 {
88570 kgdb_breakpoint();
88571- atomic_set(&kgdb_break_tasklet_var, 0);
88572+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88573 }
88574
88575 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88576
88577 void kgdb_schedule_breakpoint(void)
88578 {
88579- if (atomic_read(&kgdb_break_tasklet_var) ||
88580+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88581 atomic_read(&kgdb_active) != -1 ||
88582 atomic_read(&kgdb_setting_breakpoint))
88583 return;
88584- atomic_inc(&kgdb_break_tasklet_var);
88585+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88586 tasklet_schedule(&kgdb_tasklet_breakpoint);
88587 }
88588 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
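These conversions serve PaX's REFCOUNT hardening: when it is enabled, ordinary atomic_t arithmetic traps on signed overflow to neutralize reference-count overflow exploits, so counters that are not refcounts, such as kgdb's single-step CPU marker and tasklet guard here, move to atomic_unchecked_t and the *_unchecked accessors, which omit the trap. A conceptual sketch of the split on x86, written from the idea rather than copied from the PaX headers:

typedef struct { int counter; } atomic_t;		/* checked: ops trap on overflow */
typedef struct { int counter; } atomic_unchecked_t;	/* unchecked: may wrap freely */

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* same lock-prefixed increment, minus the overflow trap sequence */
	asm volatile("lock; incl %0" : "+m" (v->counter));
}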
88589diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88590index 60f6bb8..104bb07 100644
88591--- a/kernel/debug/kdb/kdb_main.c
88592+++ b/kernel/debug/kdb/kdb_main.c
88593@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
88594 continue;
88595
88596 kdb_printf("%-20s%8u 0x%p ", mod->name,
88597- mod->core_size, (void *)mod);
88598+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88599 #ifdef CONFIG_MODULE_UNLOAD
88600 kdb_printf("%4d ", module_refcount(mod));
88601 #endif
88602@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
88603 kdb_printf(" (Loading)");
88604 else
88605 kdb_printf(" (Live)");
88606- kdb_printf(" 0x%p", mod->module_core);
88607+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88608
88609 #ifdef CONFIG_MODULE_UNLOAD
88610 {
88611diff --git a/kernel/events/core.c b/kernel/events/core.c
88612index 19efcf133..7c05c93 100644
88613--- a/kernel/events/core.c
88614+++ b/kernel/events/core.c
88615@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
88616 * 0 - disallow raw tracepoint access for unpriv
88617 * 1 - disallow cpu events for unpriv
88618 * 2 - disallow kernel profiling for unpriv
88619+ * 3 - disallow all unpriv perf event use
88620 */
88621-int sysctl_perf_event_paranoid __read_mostly = 1;
88622+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88623+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88624+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88625+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88626+#else
88627+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88628+#endif
88629
88630 /* Minimum for 512 kiB + 1 user control page */
88631 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88632@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
88633
88634 tmp *= sysctl_perf_cpu_time_max_percent;
88635 do_div(tmp, 100);
88636- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88637+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88638 }
88639
88640 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88641@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88642 }
88643 }
88644
88645-static atomic64_t perf_event_id;
88646+static atomic64_unchecked_t perf_event_id;
88647
88648 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88649 enum event_type_t event_type);
88650@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
88651
88652 static inline u64 perf_event_count(struct perf_event *event)
88653 {
88654- return local64_read(&event->count) + atomic64_read(&event->child_count);
88655+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88656 }
88657
88658 static u64 perf_event_read(struct perf_event *event)
88659@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88660 mutex_lock(&event->child_mutex);
88661 total += perf_event_read(event);
88662 *enabled += event->total_time_enabled +
88663- atomic64_read(&event->child_total_time_enabled);
88664+ atomic64_read_unchecked(&event->child_total_time_enabled);
88665 *running += event->total_time_running +
88666- atomic64_read(&event->child_total_time_running);
88667+ atomic64_read_unchecked(&event->child_total_time_running);
88668
88669 list_for_each_entry(child, &event->child_list, child_list) {
88670 total += perf_event_read(child);
88671@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
88672 userpg->offset -= local64_read(&event->hw.prev_count);
88673
88674 userpg->time_enabled = enabled +
88675- atomic64_read(&event->child_total_time_enabled);
88676+ atomic64_read_unchecked(&event->child_total_time_enabled);
88677
88678 userpg->time_running = running +
88679- atomic64_read(&event->child_total_time_running);
88680+ atomic64_read_unchecked(&event->child_total_time_running);
88681
88682 arch_perf_update_userpage(userpg, now);
88683
88684@@ -4568,7 +4575,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88685
88686 /* Data. */
88687 sp = perf_user_stack_pointer(regs);
88688- rem = __output_copy_user(handle, (void *) sp, dump_size);
88689+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88690 dyn_size = dump_size - rem;
88691
88692 perf_output_skip(handle, rem);
88693@@ -4659,11 +4666,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88694 values[n++] = perf_event_count(event);
88695 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88696 values[n++] = enabled +
88697- atomic64_read(&event->child_total_time_enabled);
88698+ atomic64_read_unchecked(&event->child_total_time_enabled);
88699 }
88700 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
88701 values[n++] = running +
88702- atomic64_read(&event->child_total_time_running);
88703+ atomic64_read_unchecked(&event->child_total_time_running);
88704 }
88705 if (read_format & PERF_FORMAT_ID)
88706 values[n++] = primary_event_id(event);
88707@@ -6994,7 +7001,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
88708 event->parent = parent_event;
88709
88710 event->ns = get_pid_ns(task_active_pid_ns(current));
88711- event->id = atomic64_inc_return(&perf_event_id);
88712+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
88713
88714 event->state = PERF_EVENT_STATE_INACTIVE;
88715
88716@@ -7275,6 +7282,11 @@ SYSCALL_DEFINE5(perf_event_open,
88717 if (flags & ~PERF_FLAG_ALL)
88718 return -EINVAL;
88719
88720+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88721+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
88722+ return -EACCES;
88723+#endif
88724+
88725 err = perf_copy_attr(attr_uptr, &attr);
88726 if (err)
88727 return err;
88728@@ -7642,10 +7654,10 @@ static void sync_child_event(struct perf_event *child_event,
88729 /*
88730 * Add back the child's count to the parent's count:
88731 */
88732- atomic64_add(child_val, &parent_event->child_count);
88733- atomic64_add(child_event->total_time_enabled,
88734+ atomic64_add_unchecked(child_val, &parent_event->child_count);
88735+ atomic64_add_unchecked(child_event->total_time_enabled,
88736 &parent_event->child_total_time_enabled);
88737- atomic64_add(child_event->total_time_running,
88738+ atomic64_add_unchecked(child_event->total_time_running,
88739 &parent_event->child_total_time_running);
88740
88741 /*
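Alongside more *_unchecked conversions (same REFCOUNT rationale as in debug_core.c above), the substantive change is a new perf_event_paranoid level 3, denying perf_event_open() to anything without CAP_SYS_ADMIN, which becomes the default under GRKERNSEC_PERF_HARDEN; the sysctl variable is also renamed to sysctl_perf_event_legitimately_concerned. The gate calls perf_paranoid_any(), defined in include/linux/perf_event.h elsewhere in this patch; its presumed shape, matching the level-3 comment above:

static inline bool perf_paranoid_any(void)
{
	/* level 3 and up: no unprivileged perf events at all */
	return sysctl_perf_event_legitimately_concerned > 2;
}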
88742diff --git a/kernel/events/internal.h b/kernel/events/internal.h
88743index 569b2187..19940d9 100644
88744--- a/kernel/events/internal.h
88745+++ b/kernel/events/internal.h
88746@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
88747 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
88748 }
88749
88750-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
88751+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
88752 static inline unsigned long \
88753 func_name(struct perf_output_handle *handle, \
88754- const void *buf, unsigned long len) \
88755+ const void user *buf, unsigned long len) \
88756 { \
88757 unsigned long size, written; \
88758 \
88759@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
88760 return 0;
88761 }
88762
88763-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
88764+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
88765
88766 static inline unsigned long
88767 memcpy_skip(void *dst, const void *src, unsigned long n)
88768@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
88769 return 0;
88770 }
88771
88772-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
88773+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
88774
88775 #ifndef arch_perf_out_copy_user
88776 #define arch_perf_out_copy_user arch_perf_out_copy_user
88777@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
88778 }
88779 #endif
88780
88781-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
88782+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
88783
88784 /* Callchain handling */
88785 extern struct perf_callchain_entry *
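DEFINE_OUTPUT_COPY grows a third parameter carrying the sparse address-space qualifier of the source buffer, so the kernel-to-kernel helpers instantiate with an empty qualifier while __output_copy_user instantiates with __user, and the checker can verify perf_output_sample_ustack()'s cast in core.c. Spelled out, the two instantiations now declare (expansion sketch, bodies elided):

static inline unsigned long
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned long len);		/* user = <empty> */

static inline unsigned long
__output_copy_user(struct perf_output_handle *handle,
		   const void __user *buf, unsigned long len);	/* user = __user */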
88786diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
88787index cb346f2..e4dc317 100644
88788--- a/kernel/events/uprobes.c
88789+++ b/kernel/events/uprobes.c
88790@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
88791 {
88792 struct page *page;
88793 uprobe_opcode_t opcode;
88794- int result;
88795+ long result;
88796
88797 pagefault_disable();
88798 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
88799diff --git a/kernel/exit.c b/kernel/exit.c
88800index 6806c55..a5fb128 100644
88801--- a/kernel/exit.c
88802+++ b/kernel/exit.c
88803@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
88804 struct task_struct *leader;
88805 int zap_leader;
88806 repeat:
88807+#ifdef CONFIG_NET
88808+ gr_del_task_from_ip_table(p);
88809+#endif
88810+
88811 /* don't need to get the RCU readlock here - the process is dead and
88812 * can't be modifying its own credentials. But shut RCU-lockdep up */
88813 rcu_read_lock();
88814@@ -655,6 +659,8 @@ void do_exit(long code)
88815 int group_dead;
88816 TASKS_RCU(int tasks_rcu_i);
88817
88818+ set_fs(USER_DS);
88819+
88820 profile_task_exit(tsk);
88821
88822 WARN_ON(blk_needs_flush_plug(tsk));
88823@@ -671,7 +677,6 @@ void do_exit(long code)
88824 * mm_release()->clear_child_tid() from writing to a user-controlled
88825 * kernel address.
88826 */
88827- set_fs(USER_DS);
88828
88829 ptrace_event(PTRACE_EVENT_EXIT, code);
88830
88831@@ -729,6 +734,9 @@ void do_exit(long code)
88832 tsk->exit_code = code;
88833 taskstats_exit(tsk, group_dead);
88834
88835+ gr_acl_handle_psacct(tsk, code);
88836+ gr_acl_handle_exit();
88837+
88838 exit_mm(tsk);
88839
88840 if (group_dead)
88841@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
88842 * Take down every thread in the group. This is called by fatal signals
88843 * as well as by sys_exit_group (below).
88844 */
88845-void
88846+__noreturn void
88847 do_group_exit(int exit_code)
88848 {
88849 struct signal_struct *sig = current->signal;
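Two small moves in exit.c: set_fs(USER_DS) is hoisted to the very top of do_exit(), shrinking the window in which a task dying while still under KERNEL_DS could have "user" accesses resolve against kernel addresses during exit processing, and do_group_exit() gains __noreturn so the compiler can check and exploit the fact that it never returns. An illustrative caller (not from the patch):

__noreturn void do_group_exit(int exit_code);

static void fatal_path(void)
{
	do_group_exit(SIGKILL);
	/* the compiler treats this point as unreachable: no missing-return
	 * warnings, and any code after the call can be discarded */
}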
88850diff --git a/kernel/fork.c b/kernel/fork.c
88851index 4dc2dda..651add0 100644
88852--- a/kernel/fork.c
88853+++ b/kernel/fork.c
88854@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
88855 void thread_info_cache_init(void)
88856 {
88857 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
88858- THREAD_SIZE, 0, NULL);
88859+ THREAD_SIZE, SLAB_USERCOPY, NULL);
88860 BUG_ON(thread_info_cache == NULL);
88861 }
88862 # endif
88863 #endif
88864
88865+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88866+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88867+ int node, void **lowmem_stack)
88868+{
88869+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
88870+ void *ret = NULL;
88871+ unsigned int i;
88872+
88873+ *lowmem_stack = alloc_thread_info_node(tsk, node);
88874+ if (*lowmem_stack == NULL)
88875+ goto out;
88876+
88877+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
88878+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
88879+
88880+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
88881+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
88882+ if (ret == NULL) {
88883+ free_thread_info(*lowmem_stack);
88884+ *lowmem_stack = NULL;
88885+ }
88886+
88887+out:
88888+ return ret;
88889+}
88890+
88891+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
88892+{
88893+ unmap_process_stacks(tsk);
88894+}
88895+#else
88896+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88897+ int node, void **lowmem_stack)
88898+{
88899+ return alloc_thread_info_node(tsk, node);
88900+}
88901+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
88902+{
88903+ free_thread_info(ti);
88904+}
88905+#endif
88906+
88907 /* SLAB cache for signal_struct structures (tsk->signal) */
88908 static struct kmem_cache *signal_cachep;
88909
88910@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
88911 /* SLAB cache for mm_struct structures (tsk->mm) */
88912 static struct kmem_cache *mm_cachep;
88913
88914-static void account_kernel_stack(struct thread_info *ti, int account)
88915+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
88916 {
88917+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88918+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
88919+#else
88920 struct zone *zone = page_zone(virt_to_page(ti));
88921+#endif
88922
88923 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
88924 }
88925
88926 void free_task(struct task_struct *tsk)
88927 {
88928- account_kernel_stack(tsk->stack, -1);
88929+ account_kernel_stack(tsk, tsk->stack, -1);
88930 arch_release_thread_info(tsk->stack);
88931- free_thread_info(tsk->stack);
88932+ gr_free_thread_info(tsk, tsk->stack);
88933 rt_mutex_debug_task_free(tsk);
88934 ftrace_graph_exit_task(tsk);
88935 put_seccomp_filter(tsk);
88936@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88937 {
88938 struct task_struct *tsk;
88939 struct thread_info *ti;
88940+ void *lowmem_stack;
88941 int node = tsk_fork_get_node(orig);
88942 int err;
88943
88944@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88945 if (!tsk)
88946 return NULL;
88947
88948- ti = alloc_thread_info_node(tsk, node);
88949+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
88950 if (!ti)
88951 goto free_tsk;
88952
88953@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88954 goto free_ti;
88955
88956 tsk->stack = ti;
88957+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88958+ tsk->lowmem_stack = lowmem_stack;
88959+#endif
88960 #ifdef CONFIG_SECCOMP
88961 /*
88962 * We must handle setting up seccomp filters once we're under
88963@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88964 set_task_stack_end_magic(tsk);
88965
88966 #ifdef CONFIG_CC_STACKPROTECTOR
88967- tsk->stack_canary = get_random_int();
88968+ tsk->stack_canary = pax_get_random_long();
88969 #endif
88970
88971 /*
88972@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88973 tsk->splice_pipe = NULL;
88974 tsk->task_frag.page = NULL;
88975
88976- account_kernel_stack(ti, 1);
88977+ account_kernel_stack(tsk, ti, 1);
88978
88979 return tsk;
88980
88981 free_ti:
88982- free_thread_info(ti);
88983+ gr_free_thread_info(tsk, ti);
88984 free_tsk:
88985 free_task_struct(tsk);
88986 return NULL;
88987 }
88988
88989 #ifdef CONFIG_MMU
88990-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
88991+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
88992+{
88993+ struct vm_area_struct *tmp;
88994+ unsigned long charge;
88995+ struct file *file;
88996+ int retval;
88997+
88998+ charge = 0;
88999+ if (mpnt->vm_flags & VM_ACCOUNT) {
89000+ unsigned long len = vma_pages(mpnt);
89001+
89002+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89003+ goto fail_nomem;
89004+ charge = len;
89005+ }
89006+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89007+ if (!tmp)
89008+ goto fail_nomem;
89009+ *tmp = *mpnt;
89010+ tmp->vm_mm = mm;
89011+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89012+ retval = vma_dup_policy(mpnt, tmp);
89013+ if (retval)
89014+ goto fail_nomem_policy;
89015+ if (anon_vma_fork(tmp, mpnt))
89016+ goto fail_nomem_anon_vma_fork;
89017+ tmp->vm_flags &= ~VM_LOCKED;
89018+ tmp->vm_next = tmp->vm_prev = NULL;
89019+ tmp->vm_mirror = NULL;
89020+ file = tmp->vm_file;
89021+ if (file) {
89022+ struct inode *inode = file_inode(file);
89023+ struct address_space *mapping = file->f_mapping;
89024+
89025+ get_file(file);
89026+ if (tmp->vm_flags & VM_DENYWRITE)
89027+ atomic_dec(&inode->i_writecount);
89028+ i_mmap_lock_write(mapping);
89029+ if (tmp->vm_flags & VM_SHARED)
89030+ atomic_inc(&mapping->i_mmap_writable);
89031+ flush_dcache_mmap_lock(mapping);
89032+ /* insert tmp into the share list, just after mpnt */
89033+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89034+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89035+ else
89036+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89037+ flush_dcache_mmap_unlock(mapping);
89038+ i_mmap_unlock_write(mapping);
89039+ }
89040+
89041+ /*
89042+ * Clear hugetlb-related page reserves for children. This only
89043+ * affects MAP_PRIVATE mappings. Faults generated by the child
89044+ * are not guaranteed to succeed, even if read-only
89045+ */
89046+ if (is_vm_hugetlb_page(tmp))
89047+ reset_vma_resv_huge_pages(tmp);
89048+
89049+ return tmp;
89050+
89051+fail_nomem_anon_vma_fork:
89052+ mpol_put(vma_policy(tmp));
89053+fail_nomem_policy:
89054+ kmem_cache_free(vm_area_cachep, tmp);
89055+fail_nomem:
89056+ vm_unacct_memory(charge);
89057+ return NULL;
89058+}
89059+
89060+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89061 {
89062 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89063 struct rb_node **rb_link, *rb_parent;
89064 int retval;
89065- unsigned long charge;
89066
89067 uprobe_start_dup_mmap();
89068 down_write(&oldmm->mmap_sem);
89069@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89070
89071 prev = NULL;
89072 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89073- struct file *file;
89074-
89075 if (mpnt->vm_flags & VM_DONTCOPY) {
89076 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89077 -vma_pages(mpnt));
89078 continue;
89079 }
89080- charge = 0;
89081- if (mpnt->vm_flags & VM_ACCOUNT) {
89082- unsigned long len = vma_pages(mpnt);
89083-
89084- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89085- goto fail_nomem;
89086- charge = len;
89087- }
89088- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89089- if (!tmp)
89090- goto fail_nomem;
89091- *tmp = *mpnt;
89092- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89093- retval = vma_dup_policy(mpnt, tmp);
89094- if (retval)
89095- goto fail_nomem_policy;
89096- tmp->vm_mm = mm;
89097- if (anon_vma_fork(tmp, mpnt))
89098- goto fail_nomem_anon_vma_fork;
89099- tmp->vm_flags &= ~VM_LOCKED;
89100- tmp->vm_next = tmp->vm_prev = NULL;
89101- file = tmp->vm_file;
89102- if (file) {
89103- struct inode *inode = file_inode(file);
89104- struct address_space *mapping = file->f_mapping;
89105-
89106- get_file(file);
89107- if (tmp->vm_flags & VM_DENYWRITE)
89108- atomic_dec(&inode->i_writecount);
89109- i_mmap_lock_write(mapping);
89110- if (tmp->vm_flags & VM_SHARED)
89111- atomic_inc(&mapping->i_mmap_writable);
89112- flush_dcache_mmap_lock(mapping);
89113- /* insert tmp into the share list, just after mpnt */
89114- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89115- vma_nonlinear_insert(tmp,
89116- &mapping->i_mmap_nonlinear);
89117- else
89118- vma_interval_tree_insert_after(tmp, mpnt,
89119- &mapping->i_mmap);
89120- flush_dcache_mmap_unlock(mapping);
89121- i_mmap_unlock_write(mapping);
89122+ tmp = dup_vma(mm, oldmm, mpnt);
89123+ if (!tmp) {
89124+ retval = -ENOMEM;
89125+ goto out;
89126 }
89127
89128 /*
89129@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89130 if (retval)
89131 goto out;
89132 }
89133+
89134+#ifdef CONFIG_PAX_SEGMEXEC
89135+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89136+ struct vm_area_struct *mpnt_m;
89137+
89138+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89139+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89140+
89141+ if (!mpnt->vm_mirror)
89142+ continue;
89143+
89144+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89145+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89146+ mpnt->vm_mirror = mpnt_m;
89147+ } else {
89148+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89149+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89150+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89151+ mpnt->vm_mirror->vm_mirror = mpnt;
89152+ }
89153+ }
89154+ BUG_ON(mpnt_m);
89155+ }
89156+#endif
89157+
89158 /* a new mm has just been created */
89159 arch_dup_mmap(oldmm, mm);
89160 retval = 0;
89161@@ -486,14 +589,6 @@ out:
89162 up_write(&oldmm->mmap_sem);
89163 uprobe_end_dup_mmap();
89164 return retval;
89165-fail_nomem_anon_vma_fork:
89166- mpol_put(vma_policy(tmp));
89167-fail_nomem_policy:
89168- kmem_cache_free(vm_area_cachep, tmp);
89169-fail_nomem:
89170- retval = -ENOMEM;
89171- vm_unacct_memory(charge);
89172- goto out;
89173 }
89174
89175 static inline int mm_alloc_pgd(struct mm_struct *mm)
89176@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89177 return ERR_PTR(err);
89178
89179 mm = get_task_mm(task);
89180- if (mm && mm != current->mm &&
89181- !ptrace_may_access(task, mode)) {
89182+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89183+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89184 mmput(mm);
89185 mm = ERR_PTR(-EACCES);
89186 }
89187@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89188 spin_unlock(&fs->lock);
89189 return -EAGAIN;
89190 }
89191- fs->users++;
89192+ atomic_inc(&fs->users);
89193 spin_unlock(&fs->lock);
89194 return 0;
89195 }
89196 tsk->fs = copy_fs_struct(fs);
89197 if (!tsk->fs)
89198 return -ENOMEM;
89199+ /* Carry through gr_chroot_dentry and is_chrooted instead
89200+ of recomputing it here. Already copied when the task struct
89201+ is duplicated. This allows pivot_root to not be treated as
89202+ a chroot
89203+ */
89204+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89205+
89206 return 0;
89207 }
89208
89209@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89210 * parts of the process environment (as per the clone
89211 * flags). The actual kick-off is left to the caller.
89212 */
89213-static struct task_struct *copy_process(unsigned long clone_flags,
89214+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89215 unsigned long stack_start,
89216 unsigned long stack_size,
89217 int __user *child_tidptr,
89218@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89219 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89220 #endif
89221 retval = -EAGAIN;
89222+
89223+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89224+
89225 if (atomic_read(&p->real_cred->user->processes) >=
89226 task_rlimit(p, RLIMIT_NPROC)) {
89227 if (p->real_cred->user != INIT_USER &&
89228@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89229 goto bad_fork_free_pid;
89230 }
89231
89232+ /* synchronizes with gr_set_acls()
89233+ we need to call this past the point of no return for fork()
89234+ */
89235+ gr_copy_label(p);
89236+
89237 if (likely(p->pid)) {
89238 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89239
89240@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
89241 bad_fork_free:
89242 free_task(p);
89243 fork_out:
89244+ gr_log_forkfail(retval);
89245+
89246 return ERR_PTR(retval);
89247 }
89248
89249@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
89250
89251 p = copy_process(clone_flags, stack_start, stack_size,
89252 child_tidptr, NULL, trace);
89253+ add_latent_entropy();
89254 /*
89255 * Do this prior waking up the new thread - the thread pointer
89256 * might get invalid after that point, if the thread exits quickly.
89257@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
89258 if (clone_flags & CLONE_PARENT_SETTID)
89259 put_user(nr, parent_tidptr);
89260
89261+ gr_handle_brute_check();
89262+
89263 if (clone_flags & CLONE_VFORK) {
89264 p->vfork_done = &vfork;
89265 init_completion(&vfork);
89266@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
89267 mm_cachep = kmem_cache_create("mm_struct",
89268 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89269 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89270- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89271+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89272 mmap_init();
89273 nsproxy_cache_init();
89274 }
89275@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89276 return 0;
89277
89278 /* don't need lock here; in the worst case we'll do useless copy */
89279- if (fs->users == 1)
89280+ if (atomic_read(&fs->users) == 1)
89281 return 0;
89282
89283 *new_fsp = copy_fs_struct(fs);
89284@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89285 fs = current->fs;
89286 spin_lock(&fs->lock);
89287 current->fs = new_fs;
89288- if (--fs->users)
89289+ gr_set_chroot_entries(current, &current->fs->root);
89290+ if (atomic_dec_return(&fs->users))
89291 new_fs = NULL;
89292 else
89293 new_fs = fs;
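The fork.c block bundles several techniques. Under GRKERNSEC_KSTACKOVERFLOW the thread stack is re-served through vmap(), with VM_IOREMAP borrowed purely to obtain THREAD_SIZE alignment, so a stack overflow runs into an unmapped vmalloc guard page and faults instead of silently corrupting a neighboring linear-map object; the backing pages remain reachable through tsk->lowmem_stack for zone accounting and freeing. The VMA-copy body of dup_mmap() is factored out into dup_vma() so the PAX_SEGMEXEC pass can re-link vm_mirror pairs over the completed VMA list; fs_struct's users count becomes a real atomic; and __latent_entropy plus add_latent_entropy() feed the latent-entropy plugin from fork activity. The guard-page effect in one line (sketch, not patch code):

/* [stack, stack + THREAD_SIZE) is mapped; the vmalloc allocator leaves the
 * next virtual page unmapped, so overrunning the stack faults immediately
 * rather than scribbling over whatever happens to be adjacent. */
void *stack = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);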
89294diff --git a/kernel/futex.c b/kernel/futex.c
89295index 63678b5..512f9af 100644
89296--- a/kernel/futex.c
89297+++ b/kernel/futex.c
89298@@ -201,7 +201,7 @@ struct futex_pi_state {
89299 atomic_t refcount;
89300
89301 union futex_key key;
89302-};
89303+} __randomize_layout;
89304
89305 /**
89306 * struct futex_q - The hashed futex queue entry, one per waiting task
89307@@ -235,7 +235,7 @@ struct futex_q {
89308 struct rt_mutex_waiter *rt_waiter;
89309 union futex_key *requeue_pi_key;
89310 u32 bitset;
89311-};
89312+} __randomize_layout;
89313
89314 static const struct futex_q futex_q_init = {
89315 /* list gets initialized in queue_me()*/
89316@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89317 struct page *page, *page_head;
89318 int err, ro = 0;
89319
89320+#ifdef CONFIG_PAX_SEGMEXEC
89321+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89322+ return -EFAULT;
89323+#endif
89324+
89325 /*
89326 * The futex address must be "naturally" aligned.
89327 */
89328@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89329
89330 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89331 {
89332- int ret;
89333+ unsigned long ret;
89334
89335 pagefault_disable();
89336 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89337@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
89338 {
89339 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89340 u32 curval;
89341+ mm_segment_t oldfs;
89342
89343 /*
89344 * This will fail and we want it. Some arch implementations do
89345@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
89346 * implementation, the non-functional ones will return
89347 * -ENOSYS.
89348 */
89349+ oldfs = get_fs();
89350+ set_fs(USER_DS);
89351 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89352 futex_cmpxchg_enabled = 1;
89353+ set_fs(oldfs);
89354 #endif
89355 }
89356
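Besides tagging the futex structures __randomize_layout (an opt-in for the RANDSTRUCT plugin) and rejecting futex addresses above SEGMEXEC_TASK_SIZE under PAX_SEGMEXEC, the notable fix is in futex_detect_cmpxchg(): the boot-time probe deliberately faults on a NULL user pointer, which only behaves as intended while the task is under USER_DS, so the probe is now bracketed with set_fs(USER_DS) and restored afterwards. Condensed from the hunk:

mm_segment_t oldfs = get_fs();

set_fs(USER_DS);	/* make the NULL probe count as a user access */
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
	futex_cmpxchg_enabled = 1;	/* the arch faulted cleanly: cmpxchg usable */
set_fs(oldfs);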
89357diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89358index 55c8c93..9ba7ad6 100644
89359--- a/kernel/futex_compat.c
89360+++ b/kernel/futex_compat.c
89361@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89362 return 0;
89363 }
89364
89365-static void __user *futex_uaddr(struct robust_list __user *entry,
89366+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89367 compat_long_t futex_offset)
89368 {
89369 compat_uptr_t base = ptr_to_compat(entry);
89370diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89371index b358a80..fc25240 100644
89372--- a/kernel/gcov/base.c
89373+++ b/kernel/gcov/base.c
89374@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89375 }
89376
89377 #ifdef CONFIG_MODULES
89378-static inline int within(void *addr, void *start, unsigned long size)
89379-{
89380- return ((addr >= start) && (addr < start + size));
89381-}
89382-
89383 /* Update list and generate events when modules are unloaded. */
89384 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89385 void *data)
89386@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89387
89388 /* Remove entries located in module from linked list. */
89389 while ((info = gcov_info_next(info))) {
89390- if (within(info, mod->module_core, mod->core_size)) {
89391+ if (within_module_core_rw((unsigned long)info, mod)) {
89392 gcov_info_unlink(prev, info);
89393 if (gcov_events_enabled)
89394 gcov_event(GCOV_REMOVE, info);
89395diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
89396index 8069237..fe712d0 100644
89397--- a/kernel/irq/manage.c
89398+++ b/kernel/irq/manage.c
89399@@ -871,7 +871,7 @@ static int irq_thread(void *data)
89400
89401 action_ret = handler_fn(desc, action);
89402 if (action_ret == IRQ_HANDLED)
89403- atomic_inc(&desc->threads_handled);
89404+ atomic_inc_unchecked(&desc->threads_handled);
89405
89406 wake_threads_waitq(desc);
89407 }
89408diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
89409index e2514b0..de3dfe0 100644
89410--- a/kernel/irq/spurious.c
89411+++ b/kernel/irq/spurious.c
89412@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
89413 * count. We just care about the count being
89414 * different than the one we saw before.
89415 */
89416- handled = atomic_read(&desc->threads_handled);
89417+ handled = atomic_read_unchecked(&desc->threads_handled);
89418 handled |= SPURIOUS_DEFERRED;
89419 if (handled != desc->threads_handled_last) {
89420 action_ret = IRQ_HANDLED;
89421diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89422index 9019f15..9a3c42e 100644
89423--- a/kernel/jump_label.c
89424+++ b/kernel/jump_label.c
89425@@ -14,6 +14,7 @@
89426 #include <linux/err.h>
89427 #include <linux/static_key.h>
89428 #include <linux/jump_label_ratelimit.h>
89429+#include <linux/mm.h>
89430
89431 #ifdef HAVE_JUMP_LABEL
89432
89433@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89434
89435 size = (((unsigned long)stop - (unsigned long)start)
89436 / sizeof(struct jump_entry));
89437+ pax_open_kernel();
89438 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89439+ pax_close_kernel();
89440 }
89441
89442 static void jump_label_update(struct static_key *key, int enable);
89443@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89444 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89445 struct jump_entry *iter;
89446
89447+ pax_open_kernel();
89448 for (iter = iter_start; iter < iter_stop; iter++) {
89449 if (within_module_init(iter->code, mod))
89450 iter->code = 0;
89451 }
89452+ pax_close_kernel();
89453 }
89454
89455 static int
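Under PAX_KERNEXEC the jump tables live in read-only memory, so the in-place sort and the init-entry invalidation above must be wrapped in pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection on the current CPU. A conceptual x86 rendering of the bracket; the real PaX versions live in the arch headers and manage more state, so treat this purely as orientation:

static inline void pax_open_kernel(void)
{
	preempt_disable();
	barrier();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* permit stores to RO pages */
}

static inline void pax_close_kernel(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);	/* re-arm write protection */
	barrier();
	preempt_enable();
}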
89456diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89457index 5c5987f..bc502b0 100644
89458--- a/kernel/kallsyms.c
89459+++ b/kernel/kallsyms.c
89460@@ -11,6 +11,9 @@
89461 * Changed the compression method from stem compression to "table lookup"
89462 * compression (see scripts/kallsyms.c for a more complete description)
89463 */
89464+#ifdef CONFIG_GRKERNSEC_HIDESYM
89465+#define __INCLUDED_BY_HIDESYM 1
89466+#endif
89467 #include <linux/kallsyms.h>
89468 #include <linux/module.h>
89469 #include <linux/init.h>
89470@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89471
89472 static inline int is_kernel_inittext(unsigned long addr)
89473 {
89474+ if (system_state != SYSTEM_BOOTING)
89475+ return 0;
89476+
89477 if (addr >= (unsigned long)_sinittext
89478 && addr <= (unsigned long)_einittext)
89479 return 1;
89480 return 0;
89481 }
89482
89483+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89484+#ifdef CONFIG_MODULES
89485+static inline int is_module_text(unsigned long addr)
89486+{
89487+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89488+ return 1;
89489+
89490+ addr = ktla_ktva(addr);
89491+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89492+}
89493+#else
89494+static inline int is_module_text(unsigned long addr)
89495+{
89496+ return 0;
89497+}
89498+#endif
89499+#endif
89500+
89501 static inline int is_kernel_text(unsigned long addr)
89502 {
89503 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89504@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89505
89506 static inline int is_kernel(unsigned long addr)
89507 {
89508+
89509+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89510+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89511+ return 1;
89512+
89513+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89514+#else
89515 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89516+#endif
89517+
89518 return 1;
89519 return in_gate_area_no_mm(addr);
89520 }
89521
89522 static int is_ksym_addr(unsigned long addr)
89523 {
89524+
89525+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89526+ if (is_module_text(addr))
89527+ return 0;
89528+#endif
89529+
89530 if (all_var)
89531 return is_kernel(addr);
89532
89533@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89534
89535 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89536 {
89537- iter->name[0] = '\0';
89538 iter->nameoff = get_symbol_offset(new_pos);
89539 iter->pos = new_pos;
89540 }
89541@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89542 {
89543 struct kallsym_iter *iter = m->private;
89544
89545+#ifdef CONFIG_GRKERNSEC_HIDESYM
89546+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89547+ return 0;
89548+#endif
89549+
89550 /* Some debugging symbols have no name. Ignore them. */
89551 if (!iter->name[0])
89552 return 0;
89553@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89554 */
89555 type = iter->exported ? toupper(iter->type) :
89556 tolower(iter->type);
89557+
89558 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89559 type, iter->name, iter->module_name);
89560 } else
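Two views are being reconciled here. GRKERNSEC_HIDESYM makes /proc/kallsyms read back empty for anyone but root (s_show() returns 0 early, and is_kernel_inittext() stops matching once boot completes), while on 32-bit x86 with PAX_KERNEXEC the kernel text has two virtual aliases, so is_kernel() and is_ksym_addr() must translate through ktla_ktva() and exclude the module executable window. The alias translation is just a constant shift; for i386 KERNEXEC it is essentially:

/* "kernel text linear address" to "kernel text virtual address":
 * the executable alias of the text is offset by a build-time constant. */
#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)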
89561diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89562index 0aa69ea..a7fcafb 100644
89563--- a/kernel/kcmp.c
89564+++ b/kernel/kcmp.c
89565@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89566 struct task_struct *task1, *task2;
89567 int ret;
89568
89569+#ifdef CONFIG_GRKERNSEC
89570+ return -ENOSYS;
89571+#endif
89572+
89573 rcu_read_lock();
89574
89575 /*
89576diff --git a/kernel/kexec.c b/kernel/kexec.c
89577index 9a8a01a..3c35dd6 100644
89578--- a/kernel/kexec.c
89579+++ b/kernel/kexec.c
89580@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89581 compat_ulong_t, flags)
89582 {
89583 struct compat_kexec_segment in;
89584- struct kexec_segment out, __user *ksegments;
89585+ struct kexec_segment out;
89586+ struct kexec_segment __user *ksegments;
89587 unsigned long i, result;
89588
89589 /* Don't allow clients that don't understand the native
89590diff --git a/kernel/kmod.c b/kernel/kmod.c
89591index 2777f40..a26e825 100644
89592--- a/kernel/kmod.c
89593+++ b/kernel/kmod.c
89594@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89595 kfree(info->argv);
89596 }
89597
89598-static int call_modprobe(char *module_name, int wait)
89599+static int call_modprobe(char *module_name, char *module_param, int wait)
89600 {
89601 struct subprocess_info *info;
89602 static char *envp[] = {
89603@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
89604 NULL
89605 };
89606
89607- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89608+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89609 if (!argv)
89610 goto out;
89611
89612@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
89613 argv[1] = "-q";
89614 argv[2] = "--";
89615 argv[3] = module_name; /* check free_modprobe_argv() */
89616- argv[4] = NULL;
89617+ argv[4] = module_param;
89618+ argv[5] = NULL;
89619
89620 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89621 NULL, free_modprobe_argv, NULL);
89622@@ -122,9 +123,8 @@ out:
89623 * If module auto-loading support is disabled then this function
89624 * becomes a no-operation.
89625 */
89626-int __request_module(bool wait, const char *fmt, ...)
89627+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89628 {
89629- va_list args;
89630 char module_name[MODULE_NAME_LEN];
89631 unsigned int max_modprobes;
89632 int ret;
89633@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
89634 if (!modprobe_path[0])
89635 return 0;
89636
89637- va_start(args, fmt);
89638- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89639- va_end(args);
89640+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89641 if (ret >= MODULE_NAME_LEN)
89642 return -ENAMETOOLONG;
89643
89644@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
89645 if (ret)
89646 return ret;
89647
89648+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89649+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89650+ /* hack to workaround consolekit/udisks stupidity */
89651+ read_lock(&tasklist_lock);
89652+ if (!strcmp(current->comm, "mount") &&
89653+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89654+ read_unlock(&tasklist_lock);
89655+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89656+ return -EPERM;
89657+ }
89658+ read_unlock(&tasklist_lock);
89659+ }
89660+#endif
89661+
89662 /* If modprobe needs a service that is in a module, we get a recursive
89663 * loop. Limit the number of running kmod threads to max_threads/2 or
89664 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89665@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
89666
89667 trace_module_request(module_name, wait, _RET_IP_);
89668
89669- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89670+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89671
89672 atomic_dec(&kmod_concurrent);
89673 return ret;
89674 }
89675+
89676+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89677+{
89678+ va_list args;
89679+ int ret;
89680+
89681+ va_start(args, fmt);
89682+ ret = ____request_module(wait, module_param, fmt, args);
89683+ va_end(args);
89684+
89685+ return ret;
89686+}
89687+
89688+int __request_module(bool wait, const char *fmt, ...)
89689+{
89690+ va_list args;
89691+ int ret;
89692+
89693+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89694+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89695+ char module_param[MODULE_NAME_LEN];
89696+
89697+ memset(module_param, 0, sizeof(module_param));
89698+
89699+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89700+
89701+ va_start(args, fmt);
89702+ ret = ____request_module(wait, module_param, fmt, args);
89703+ va_end(args);
89704+
89705+ return ret;
89706+ }
89707+#endif
89708+
89709+ va_start(args, fmt);
89710+ ret = ____request_module(wait, NULL, fmt, args);
89711+ va_end(args);
89712+
89713+ return ret;
89714+}
89715+
89716 EXPORT_SYMBOL(__request_module);
89717 #endif /* CONFIG_MODULES */
89718
89719 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
89720 {
89721+#ifdef CONFIG_GRKERNSEC
89722+ kfree(info->path);
89723+ info->path = info->origpath;
89724+#endif
89725 if (info->cleanup)
89726 (*info->cleanup)(info);
89727 kfree(info);
89728@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
89729 */
89730 set_user_nice(current, 0);
89731
89732+#ifdef CONFIG_GRKERNSEC
89733+ /* this is race-free as far as userland is concerned as we copied
89734+ out the path to be used prior to this point and are now operating
89735+ on that copy
89736+ */
89737+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
89738+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
89739+ strncmp(sub_info->path, "/usr/libexec/", 13) &&
89740+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
89741+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
89742+ retval = -EPERM;
89743+ goto out;
89744+ }
89745+#endif
89746+
89747 retval = -ENOMEM;
89748 new = prepare_kernel_cred(current);
89749 if (!new)
89750@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
89751 commit_creds(new);
89752
89753 retval = do_execve(getname_kernel(sub_info->path),
89754- (const char __user *const __user *)sub_info->argv,
89755- (const char __user *const __user *)sub_info->envp);
89756+ (const char __user *const __force_user *)sub_info->argv,
89757+ (const char __user *const __force_user *)sub_info->envp);
89758 out:
89759 sub_info->retval = retval;
89760 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
89761@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
89762 *
89763 * Thus the __user pointer cast is valid here.
89764 */
89765- sys_wait4(pid, (int __user *)&ret, 0, NULL);
89766+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
89767
89768 /*
89769 * If ret is 0, either ____call_usermodehelper failed and the
89770@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
89771 goto out;
89772
89773 INIT_WORK(&sub_info->work, __call_usermodehelper);
89774+#ifdef CONFIG_GRKERNSEC
89775+ sub_info->origpath = path;
89776+ sub_info->path = kstrdup(path, gfp_mask);
89777+#else
89778 sub_info->path = path;
89779+#endif
89780 sub_info->argv = argv;
89781 sub_info->envp = envp;
89782
89783@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
89784 static int proc_cap_handler(struct ctl_table *table, int write,
89785 void __user *buffer, size_t *lenp, loff_t *ppos)
89786 {
89787- struct ctl_table t;
89788+ ctl_table_no_const t;
89789 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
89790 kernel_cap_t new_cap;
89791 int err, i;
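The kmod.c changes implement GRKERNSEC_MODHARDEN plus a usermode-helper whitelist. __request_module() is split around a va_list core (____request_module) so that for non-root callers a uid-tagged marker is appended to the modprobe command line, letting userspace policy recognize and refuse unprivileged auto-load triggers, with a hardcoded denial for udisks-driven filesystem module probing; ____call_usermodehelper() additionally refuses helper binaries outside /sbin, /lib, /lib64, /usr/lib, /usr/libexec and one apport path, operating on a kstrdup'd copy of the path so userland cannot swap it after the check. The argv an unprivileged auto-load ends up with, sketched with uid 1000 as an example:

char *argv[] = {
	modprobe_path,			/* e.g. "/sbin/modprobe" */
	"-q",
	"--",
	module_name,			/* e.g. "net-pf-10" */
	"grsec_modharden_normal1000_",	/* uid-tagged marker; NULL for root callers */
	NULL
};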
89792diff --git a/kernel/kprobes.c b/kernel/kprobes.c
89793index ee61992..62142b1 100644
89794--- a/kernel/kprobes.c
89795+++ b/kernel/kprobes.c
89796@@ -31,6 +31,9 @@
89797 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
89798 * <prasanna@in.ibm.com> added function-return probes.
89799 */
89800+#ifdef CONFIG_GRKERNSEC_HIDESYM
89801+#define __INCLUDED_BY_HIDESYM 1
89802+#endif
89803 #include <linux/kprobes.h>
89804 #include <linux/hash.h>
89805 #include <linux/init.h>
89806@@ -122,12 +125,12 @@ enum kprobe_slot_state {
89807
89808 static void *alloc_insn_page(void)
89809 {
89810- return module_alloc(PAGE_SIZE);
89811+ return module_alloc_exec(PAGE_SIZE);
89812 }
89813
89814 static void free_insn_page(void *page)
89815 {
89816- module_memfree(page);
89817+ module_memfree_exec(page);
89818 }
89819
89820 struct kprobe_insn_cache kprobe_insn_slots = {
89821@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
89822 kprobe_type = "k";
89823
89824 if (sym)
89825- seq_printf(pi, "%p %s %s+0x%x %s ",
89826+ seq_printf(pi, "%pK %s %s+0x%x %s ",
89827 p->addr, kprobe_type, sym, offset,
89828 (modname ? modname : " "));
89829 else
89830- seq_printf(pi, "%p %s %p ",
89831+ seq_printf(pi, "%pK %s %pK ",
89832 p->addr, kprobe_type, p->addr);
89833
89834 if (!pp)
89835diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
89836index 6683cce..daf8999 100644
89837--- a/kernel/ksysfs.c
89838+++ b/kernel/ksysfs.c
89839@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
89840 {
89841 if (count+1 > UEVENT_HELPER_PATH_LEN)
89842 return -ENOENT;
89843+ if (!capable(CAP_SYS_ADMIN))
89844+ return -EPERM;
89845 memcpy(uevent_helper, buf, count);
89846 uevent_helper[count] = '\0';
89847 if (count && uevent_helper[count-1] == '\n')
89848@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
89849 return count;
89850 }
89851
89852-static struct bin_attribute notes_attr = {
89853+static bin_attribute_no_const notes_attr __read_only = {
89854 .attr = {
89855 .name = "notes",
89856 .mode = S_IRUGO,
89857diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
89858index 88d0d44..e9ce0ee 100644
89859--- a/kernel/locking/lockdep.c
89860+++ b/kernel/locking/lockdep.c
89861@@ -599,6 +599,10 @@ static int static_obj(void *obj)
89862 end = (unsigned long) &_end,
89863 addr = (unsigned long) obj;
89864
89865+#ifdef CONFIG_PAX_KERNEXEC
89866+ start = ktla_ktva(start);
89867+#endif
89868+
89869 /*
89870 * static variable?
89871 */
89872@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
89873 if (!static_obj(lock->key)) {
89874 debug_locks_off();
89875 printk("INFO: trying to register non-static key.\n");
89876+ printk("lock:%pS key:%pS.\n", lock, lock->key);
89877 printk("the code is fine but needs lockdep annotation.\n");
89878 printk("turning off the locking correctness validator.\n");
89879 dump_stack();
89880@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
89881 if (!class)
89882 return 0;
89883 }
89884- atomic_inc((atomic_t *)&class->ops);
89885+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
89886 if (very_verbose(class)) {
89887 printk("\nacquire class [%p] %s", class->key, class->name);
89888 if (class->name_version > 1)
89889diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
89890index ef43ac4..2720dfa 100644
89891--- a/kernel/locking/lockdep_proc.c
89892+++ b/kernel/locking/lockdep_proc.c
89893@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
89894 return 0;
89895 }
89896
89897- seq_printf(m, "%p", class->key);
89898+ seq_printf(m, "%pK", class->key);
89899 #ifdef CONFIG_DEBUG_LOCKDEP
89900 seq_printf(m, " OPS:%8ld", class->ops);
89901 #endif
89902@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
89903
89904 list_for_each_entry(entry, &class->locks_after, entry) {
89905 if (entry->distance == 1) {
89906- seq_printf(m, " -> [%p] ", entry->class->key);
89907+ seq_printf(m, " -> [%pK] ", entry->class->key);
89908 print_name(m, entry->class);
89909 seq_puts(m, "\n");
89910 }
89911@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
89912 if (!class->key)
89913 continue;
89914
89915- seq_printf(m, "[%p] ", class->key);
89916+ seq_printf(m, "[%pK] ", class->key);
89917 print_name(m, class);
89918 seq_puts(m, "\n");
89919 }
89920@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
89921 if (!i)
89922 seq_line(m, '-', 40-namelen, namelen);
89923
89924- snprintf(ip, sizeof(ip), "[<%p>]",
89925+ snprintf(ip, sizeof(ip), "[<%pK>]",
89926 (void *)class->contention_point[i]);
89927 seq_printf(m, "%40s %14lu %29s %pS\n",
89928 name, stats->contention_point[i],
89929@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
89930 if (!i)
89931 seq_line(m, '-', 40-namelen, namelen);
89932
89933- snprintf(ip, sizeof(ip), "[<%p>]",
89934+ snprintf(ip, sizeof(ip), "[<%pK>]",
89935 (void *)class->contending_point[i]);
89936 seq_printf(m, "%40s %14lu %29s %pS\n",
89937 name, stats->contending_point[i],
89938diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
89939index 9887a90..0cd2b1d 100644
89940--- a/kernel/locking/mcs_spinlock.c
89941+++ b/kernel/locking/mcs_spinlock.c
89942@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
89943
89944 prev = decode_cpu(old);
89945 node->prev = prev;
89946- ACCESS_ONCE(prev->next) = node;
89947+ ACCESS_ONCE_RW(prev->next) = node;
89948
89949 /*
89950 * Normally @prev is untouchable after the above store; because at that
89951@@ -172,8 +172,8 @@ unqueue:
89952 * it will wait in Step-A.
89953 */
89954
89955- ACCESS_ONCE(next->prev) = prev;
89956- ACCESS_ONCE(prev->next) = next;
89957+ ACCESS_ONCE_RW(next->prev) = prev;
89958+ ACCESS_ONCE_RW(prev->next) = next;
89959
89960 return false;
89961 }
89962@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
89963 node = this_cpu_ptr(&osq_node);
89964 next = xchg(&node->next, NULL);
89965 if (next) {
89966- ACCESS_ONCE(next->locked) = 1;
89967+ ACCESS_ONCE_RW(next->locked) = 1;
89968 return;
89969 }
89970
89971 next = osq_wait_next(lock, node, NULL);
89972 if (next)
89973- ACCESS_ONCE(next->locked) = 1;
89974+ ACCESS_ONCE_RW(next->locked) = 1;
89975 }
89976
89977 #endif
89978diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
89979index 4d60986..5d351c1 100644
89980--- a/kernel/locking/mcs_spinlock.h
89981+++ b/kernel/locking/mcs_spinlock.h
89982@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
89983 */
89984 return;
89985 }
89986- ACCESS_ONCE(prev->next) = node;
89987+ ACCESS_ONCE_RW(prev->next) = node;
89988
89989 /* Wait until the lock holder passes the lock down. */
89990 arch_mcs_spin_lock_contended(&node->locked);
89991diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
89992index 3ef3736..9c951fa 100644
89993--- a/kernel/locking/mutex-debug.c
89994+++ b/kernel/locking/mutex-debug.c
89995@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
89996 }
89997
89998 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
89999- struct thread_info *ti)
90000+ struct task_struct *task)
90001 {
90002 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90003
90004 /* Mark the current thread as blocked on the lock: */
90005- ti->task->blocked_on = waiter;
90006+ task->blocked_on = waiter;
90007 }
90008
90009 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90010- struct thread_info *ti)
90011+ struct task_struct *task)
90012 {
90013 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90014- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90015- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90016- ti->task->blocked_on = NULL;
90017+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90018+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90019+ task->blocked_on = NULL;
90020
90021 list_del_init(&waiter->list);
90022 waiter->task = NULL;
90023diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90024index 0799fd3..d06ae3b 100644
90025--- a/kernel/locking/mutex-debug.h
90026+++ b/kernel/locking/mutex-debug.h
90027@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90028 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90029 extern void debug_mutex_add_waiter(struct mutex *lock,
90030 struct mutex_waiter *waiter,
90031- struct thread_info *ti);
90032+ struct task_struct *task);
90033 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90034- struct thread_info *ti);
90035+ struct task_struct *task);
90036 extern void debug_mutex_unlock(struct mutex *lock);
90037 extern void debug_mutex_init(struct mutex *lock, const char *name,
90038 struct lock_class_key *key);
90039diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90040index 4541951..39fe90a 100644
90041--- a/kernel/locking/mutex.c
90042+++ b/kernel/locking/mutex.c
90043@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90044 goto skip_wait;
90045
90046 debug_mutex_lock_common(lock, &waiter);
90047- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90048+ debug_mutex_add_waiter(lock, &waiter, task);
90049
90050 /* add waiting tasks to the end of the waitqueue (FIFO): */
90051 list_add_tail(&waiter.list, &lock->wait_list);
90052@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90053 schedule_preempt_disabled();
90054 spin_lock_mutex(&lock->wait_lock, flags);
90055 }
90056- mutex_remove_waiter(lock, &waiter, current_thread_info());
90057+ mutex_remove_waiter(lock, &waiter, task);
90058 /* set it to 0 if there are no waiters left: */
90059 if (likely(list_empty(&lock->wait_list)))
90060 atomic_set(&lock->count, 0);
90061@@ -606,7 +606,7 @@ skip_wait:
90062 return 0;
90063
90064 err:
90065- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90066+ mutex_remove_waiter(lock, &waiter, task);
90067 spin_unlock_mutex(&lock->wait_lock, flags);
90068 debug_mutex_free_waiter(&waiter);
90069 mutex_release(&lock->dep_map, 1, ip);
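
Note: the mutex-debug hunks above thread the task_struct straight through the debug hooks instead of a thread_info. A minimal before/after sketch of the calling convention (illustrative only, not part of the patch):

	/* before: the hooks took a thread_info and dereferenced ti->task */
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
	/* after: the task itself is passed, dropping the indirection */
	debug_mutex_add_waiter(lock, &waiter, task);
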
90070diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90071index 1d96dd0..994ff19 100644
90072--- a/kernel/locking/rtmutex-tester.c
90073+++ b/kernel/locking/rtmutex-tester.c
90074@@ -22,7 +22,7 @@
90075 #define MAX_RT_TEST_MUTEXES 8
90076
90077 static spinlock_t rttest_lock;
90078-static atomic_t rttest_event;
90079+static atomic_unchecked_t rttest_event;
90080
90081 struct test_thread_data {
90082 int opcode;
90083@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90084
90085 case RTTEST_LOCKCONT:
90086 td->mutexes[td->opdata] = 1;
90087- td->event = atomic_add_return(1, &rttest_event);
90088+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90089 return 0;
90090
90091 case RTTEST_RESET:
90092@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90093 return 0;
90094
90095 case RTTEST_RESETEVENT:
90096- atomic_set(&rttest_event, 0);
90097+ atomic_set_unchecked(&rttest_event, 0);
90098 return 0;
90099
90100 default:
90101@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90102 return ret;
90103
90104 td->mutexes[id] = 1;
90105- td->event = atomic_add_return(1, &rttest_event);
90106+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90107 rt_mutex_lock(&mutexes[id]);
90108- td->event = atomic_add_return(1, &rttest_event);
90109+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90110 td->mutexes[id] = 4;
90111 return 0;
90112
90113@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90114 return ret;
90115
90116 td->mutexes[id] = 1;
90117- td->event = atomic_add_return(1, &rttest_event);
90118+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90119 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90120- td->event = atomic_add_return(1, &rttest_event);
90121+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90122 td->mutexes[id] = ret ? 0 : 4;
90123 return ret ? -EINTR : 0;
90124
90125@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90126 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90127 return ret;
90128
90129- td->event = atomic_add_return(1, &rttest_event);
90130+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90131 rt_mutex_unlock(&mutexes[id]);
90132- td->event = atomic_add_return(1, &rttest_event);
90133+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90134 td->mutexes[id] = 0;
90135 return 0;
90136
90137@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90138 break;
90139
90140 td->mutexes[dat] = 2;
90141- td->event = atomic_add_return(1, &rttest_event);
90142+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90143 break;
90144
90145 default:
90146@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90147 return;
90148
90149 td->mutexes[dat] = 3;
90150- td->event = atomic_add_return(1, &rttest_event);
90151+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90152 break;
90153
90154 case RTTEST_LOCKNOWAIT:
90155@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90156 return;
90157
90158 td->mutexes[dat] = 1;
90159- td->event = atomic_add_return(1, &rttest_event);
90160+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90161 return;
90162
90163 default:
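
Note: rttest_event above is a pure event counter whose value only orders test steps, so wrap-around is harmless. Under PAX_REFCOUNT, plain atomic_t operations trap on overflow; the _unchecked variants opt such counters out. A rough sketch of the type as assumed here (illustrative; the real definition is per-architecture):

	/* same layout as atomic_t, but its ops compile without the
	 * PAX_REFCOUNT overflow trap */
	typedef struct {
		int counter;
	} atomic_unchecked_t;
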
90164diff --git a/kernel/module.c b/kernel/module.c
90165index d856e96..b82225c 100644
90166--- a/kernel/module.c
90167+++ b/kernel/module.c
90168@@ -59,6 +59,7 @@
90169 #include <linux/jump_label.h>
90170 #include <linux/pfn.h>
90171 #include <linux/bsearch.h>
90172+#include <linux/grsecurity.h>
90173 #include <uapi/linux/module.h>
90174 #include "module-internal.h"
90175
90176@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90177
90178 /* Bounds of module allocation, for speeding __module_address.
90179 * Protected by module_mutex. */
90180-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90181+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90182+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90183
90184 int register_module_notifier(struct notifier_block *nb)
90185 {
90186@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90187 return true;
90188
90189 list_for_each_entry_rcu(mod, &modules, list) {
90190- struct symsearch arr[] = {
90191+ struct symsearch modarr[] = {
90192 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90193 NOT_GPL_ONLY, false },
90194 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90195@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90196 if (mod->state == MODULE_STATE_UNFORMED)
90197 continue;
90198
90199- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90200+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90201 return true;
90202 }
90203 return false;
90204@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90205 if (!pcpusec->sh_size)
90206 return 0;
90207
90208- if (align > PAGE_SIZE) {
90209+ if (align-1 >= PAGE_SIZE) {
90210 pr_warn("%s: per-cpu alignment %li > %li\n",
90211 mod->name, align, PAGE_SIZE);
90212 align = PAGE_SIZE;
90213@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90214 static ssize_t show_coresize(struct module_attribute *mattr,
90215 struct module_kobject *mk, char *buffer)
90216 {
90217- return sprintf(buffer, "%u\n", mk->mod->core_size);
90218+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90219 }
90220
90221 static struct module_attribute modinfo_coresize =
90222@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90223 static ssize_t show_initsize(struct module_attribute *mattr,
90224 struct module_kobject *mk, char *buffer)
90225 {
90226- return sprintf(buffer, "%u\n", mk->mod->init_size);
90227+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90228 }
90229
90230 static struct module_attribute modinfo_initsize =
90231@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90232 goto bad_version;
90233 }
90234
90235+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90236+ /*
90237+ * avoid potentially printing gibberish on attempted load
90238+ * of a module randomized with a different seed
90239+ */
90240+ pr_warn("no symbol version for %s\n", symname);
90241+#else
90242 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90243+#endif
90244 return 0;
90245
90246 bad_version:
90247+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90248+ /*
90249+ * avoid potentially printing gibberish on attempted load
90250+ * of a module randomized with a different seed
90251+ */
90252+ pr_warn("attempted module disagrees about version of symbol %s\n",
90253+ symname);
90254+#else
90255 pr_warn("%s: disagrees about version of symbol %s\n",
90256 mod->name, symname);
90257+#endif
90258 return 0;
90259 }
90260
90261@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
90262 */
90263 #ifdef CONFIG_SYSFS
90264
90265-#ifdef CONFIG_KALLSYMS
90266+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90267 static inline bool sect_empty(const Elf_Shdr *sect)
90268 {
90269 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90270@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90271 {
90272 unsigned int notes, loaded, i;
90273 struct module_notes_attrs *notes_attrs;
90274- struct bin_attribute *nattr;
90275+ bin_attribute_no_const *nattr;
90276
90277 /* failed to create section attributes, so can't create notes */
90278 if (!mod->sect_attrs)
90279@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
90280 static int module_add_modinfo_attrs(struct module *mod)
90281 {
90282 struct module_attribute *attr;
90283- struct module_attribute *temp_attr;
90284+ module_attribute_no_const *temp_attr;
90285 int error = 0;
90286 int i;
90287
90288@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
90289
90290 static void unset_module_core_ro_nx(struct module *mod)
90291 {
90292- set_page_attributes(mod->module_core + mod->core_text_size,
90293- mod->module_core + mod->core_size,
90294+ set_page_attributes(mod->module_core_rw,
90295+ mod->module_core_rw + mod->core_size_rw,
90296 set_memory_x);
90297- set_page_attributes(mod->module_core,
90298- mod->module_core + mod->core_ro_size,
90299+ set_page_attributes(mod->module_core_rx,
90300+ mod->module_core_rx + mod->core_size_rx,
90301 set_memory_rw);
90302 }
90303
90304 static void unset_module_init_ro_nx(struct module *mod)
90305 {
90306- set_page_attributes(mod->module_init + mod->init_text_size,
90307- mod->module_init + mod->init_size,
90308+ set_page_attributes(mod->module_init_rw,
90309+ mod->module_init_rw + mod->init_size_rw,
90310 set_memory_x);
90311- set_page_attributes(mod->module_init,
90312- mod->module_init + mod->init_ro_size,
90313+ set_page_attributes(mod->module_init_rx,
90314+ mod->module_init_rx + mod->init_size_rx,
90315 set_memory_rw);
90316 }
90317
90318@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
90319 list_for_each_entry_rcu(mod, &modules, list) {
90320 if (mod->state == MODULE_STATE_UNFORMED)
90321 continue;
90322- if ((mod->module_core) && (mod->core_text_size)) {
90323- set_page_attributes(mod->module_core,
90324- mod->module_core + mod->core_text_size,
90325+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90326+ set_page_attributes(mod->module_core_rx,
90327+ mod->module_core_rx + mod->core_size_rx,
90328 set_memory_rw);
90329 }
90330- if ((mod->module_init) && (mod->init_text_size)) {
90331- set_page_attributes(mod->module_init,
90332- mod->module_init + mod->init_text_size,
90333+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90334+ set_page_attributes(mod->module_init_rx,
90335+ mod->module_init_rx + mod->init_size_rx,
90336 set_memory_rw);
90337 }
90338 }
90339@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
90340 list_for_each_entry_rcu(mod, &modules, list) {
90341 if (mod->state == MODULE_STATE_UNFORMED)
90342 continue;
90343- if ((mod->module_core) && (mod->core_text_size)) {
90344- set_page_attributes(mod->module_core,
90345- mod->module_core + mod->core_text_size,
90346+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90347+ set_page_attributes(mod->module_core_rx,
90348+ mod->module_core_rx + mod->core_size_rx,
90349 set_memory_ro);
90350 }
90351- if ((mod->module_init) && (mod->init_text_size)) {
90352- set_page_attributes(mod->module_init,
90353- mod->module_init + mod->init_text_size,
90354+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90355+ set_page_attributes(mod->module_init_rx,
90356+ mod->module_init_rx + mod->init_size_rx,
90357 set_memory_ro);
90358 }
90359 }
90360@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
90361 #else
90362 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
90363 static void unset_module_core_ro_nx(struct module *mod) { }
90364-static void unset_module_init_ro_nx(struct module *mod) { }
90365+static void unset_module_init_ro_nx(struct module *mod)
90366+{
90367+
90368+#ifdef CONFIG_PAX_KERNEXEC
90369+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90370+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90371+#endif
90372+
90373+}
90374 #endif
90375
90376 void __weak module_memfree(void *module_region)
90377@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
90378 /* This may be NULL, but that's OK */
90379 unset_module_init_ro_nx(mod);
90380 module_arch_freeing_init(mod);
90381- module_memfree(mod->module_init);
90382+ module_memfree(mod->module_init_rw);
90383+ module_memfree_exec(mod->module_init_rx);
90384 kfree(mod->args);
90385 percpu_modfree(mod);
90386
90387 /* Free lock-classes: */
90388- lockdep_free_key_range(mod->module_core, mod->core_size);
90389+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90390+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90391
90392 /* Finally, free the core (containing the module structure) */
90393 unset_module_core_ro_nx(mod);
90394- module_memfree(mod->module_core);
90395+ module_memfree_exec(mod->module_core_rx);
90396+ module_memfree(mod->module_core_rw);
90397
90398 #ifdef CONFIG_MPU
90399 update_protections(current->mm);
90400@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90401 int ret = 0;
90402 const struct kernel_symbol *ksym;
90403
90404+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90405+ int is_fs_load = 0;
90406+ int register_filesystem_found = 0;
90407+ char *p;
90408+
90409+ p = strstr(mod->args, "grsec_modharden_fs");
90410+ if (p) {
90411+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90412+ /* copy \0 as well */
90413+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90414+ is_fs_load = 1;
90415+ }
90416+#endif
90417+
90418 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90419 const char *name = info->strtab + sym[i].st_name;
90420
90421+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90422+ /* it's a real shame this will never get ripped and copied
90423+ upstream! ;(
90424+ */
90425+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90426+ register_filesystem_found = 1;
90427+#endif
90428+
90429 switch (sym[i].st_shndx) {
90430 case SHN_COMMON:
90431 /* Ignore common symbols */
90432@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90433 ksym = resolve_symbol_wait(mod, info, name);
90434 /* Ok if resolved. */
90435 if (ksym && !IS_ERR(ksym)) {
90436+ pax_open_kernel();
90437 sym[i].st_value = ksym->value;
90438+ pax_close_kernel();
90439 break;
90440 }
90441
90442@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90443 secbase = (unsigned long)mod_percpu(mod);
90444 else
90445 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90446+ pax_open_kernel();
90447 sym[i].st_value += secbase;
90448+ pax_close_kernel();
90449 break;
90450 }
90451 }
90452
90453+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90454+ if (is_fs_load && !register_filesystem_found) {
90455+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
90456+ ret = -EPERM;
90457+ }
90458+#endif
90459+
90460 return ret;
90461 }
90462
90463@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90464 || s->sh_entsize != ~0UL
90465 || strstarts(sname, ".init"))
90466 continue;
90467- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90468+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90469+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90470+ else
90471+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90472 pr_debug("\t%s\n", sname);
90473 }
90474- switch (m) {
90475- case 0: /* executable */
90476- mod->core_size = debug_align(mod->core_size);
90477- mod->core_text_size = mod->core_size;
90478- break;
90479- case 1: /* RO: text and ro-data */
90480- mod->core_size = debug_align(mod->core_size);
90481- mod->core_ro_size = mod->core_size;
90482- break;
90483- case 3: /* whole core */
90484- mod->core_size = debug_align(mod->core_size);
90485- break;
90486- }
90487 }
90488
90489 pr_debug("Init section allocation order:\n");
90490@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90491 || s->sh_entsize != ~0UL
90492 || !strstarts(sname, ".init"))
90493 continue;
90494- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90495- | INIT_OFFSET_MASK);
90496+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90497+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90498+ else
90499+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90500+ s->sh_entsize |= INIT_OFFSET_MASK;
90501 pr_debug("\t%s\n", sname);
90502 }
90503- switch (m) {
90504- case 0: /* executable */
90505- mod->init_size = debug_align(mod->init_size);
90506- mod->init_text_size = mod->init_size;
90507- break;
90508- case 1: /* RO: text and ro-data */
90509- mod->init_size = debug_align(mod->init_size);
90510- mod->init_ro_size = mod->init_size;
90511- break;
90512- case 3: /* whole init */
90513- mod->init_size = debug_align(mod->init_size);
90514- break;
90515- }
90516 }
90517 }
90518
90519@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90520
90521 /* Put symbol section at end of init part of module. */
90522 symsect->sh_flags |= SHF_ALLOC;
90523- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90524+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90525 info->index.sym) | INIT_OFFSET_MASK;
90526 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90527
90528@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90529 }
90530
90531 /* Append room for core symbols at end of core part. */
90532- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90533- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90534- mod->core_size += strtab_size;
90535+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90536+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90537+ mod->core_size_rx += strtab_size;
90538
90539 /* Put string table section at end of init part of module. */
90540 strsect->sh_flags |= SHF_ALLOC;
90541- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90542+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90543 info->index.str) | INIT_OFFSET_MASK;
90544 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90545 }
90546@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90547 /* Make sure we get permanent strtab: don't use info->strtab. */
90548 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90549
90550+ pax_open_kernel();
90551+
90552 /* Set types up while we still have access to sections. */
90553 for (i = 0; i < mod->num_symtab; i++)
90554 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90555
90556- mod->core_symtab = dst = mod->module_core + info->symoffs;
90557- mod->core_strtab = s = mod->module_core + info->stroffs;
90558+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90559+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90560 src = mod->symtab;
90561 for (ndst = i = 0; i < mod->num_symtab; i++) {
90562 if (i == 0 ||
90563@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90564 }
90565 }
90566 mod->core_num_syms = ndst;
90567+
90568+ pax_close_kernel();
90569 }
90570 #else
90571 static inline void layout_symtab(struct module *mod, struct load_info *info)
90572@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
90573 return vmalloc_exec(size);
90574 }
90575
90576-static void *module_alloc_update_bounds(unsigned long size)
90577+static void *module_alloc_update_bounds_rw(unsigned long size)
90578 {
90579 void *ret = module_alloc(size);
90580
90581 if (ret) {
90582 mutex_lock(&module_mutex);
90583 /* Update module bounds. */
90584- if ((unsigned long)ret < module_addr_min)
90585- module_addr_min = (unsigned long)ret;
90586- if ((unsigned long)ret + size > module_addr_max)
90587- module_addr_max = (unsigned long)ret + size;
90588+ if ((unsigned long)ret < module_addr_min_rw)
90589+ module_addr_min_rw = (unsigned long)ret;
90590+ if ((unsigned long)ret + size > module_addr_max_rw)
90591+ module_addr_max_rw = (unsigned long)ret + size;
90592+ mutex_unlock(&module_mutex);
90593+ }
90594+ return ret;
90595+}
90596+
90597+static void *module_alloc_update_bounds_rx(unsigned long size)
90598+{
90599+ void *ret = module_alloc_exec(size);
90600+
90601+ if (ret) {
90602+ mutex_lock(&module_mutex);
90603+ /* Update module bounds. */
90604+ if ((unsigned long)ret < module_addr_min_rx)
90605+ module_addr_min_rx = (unsigned long)ret;
90606+ if ((unsigned long)ret + size > module_addr_max_rx)
90607+ module_addr_max_rx = (unsigned long)ret + size;
90608 mutex_unlock(&module_mutex);
90609 }
90610 return ret;
90611@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90612 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90613
90614 if (info->index.sym == 0) {
90615+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90616+ /*
90617+ * avoid potentially printing gibberish on attempted load
90618+ * of a module randomized with a different seed
90619+ */
90620+ pr_warn("module has no symbols (stripped?)\n");
90621+#else
90622 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90623+#endif
90624 return ERR_PTR(-ENOEXEC);
90625 }
90626
90627@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90628 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90629 {
90630 const char *modmagic = get_modinfo(info, "vermagic");
90631+ const char *license = get_modinfo(info, "license");
90632 int err;
90633
90634+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90635+ if (!license || !license_is_gpl_compatible(license))
90636+ return -ENOEXEC;
90637+#endif
90638+
90639 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90640 modmagic = NULL;
90641
90642@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90643 }
90644
90645 /* Set up license info based on the info section */
90646- set_license(mod, get_modinfo(info, "license"));
90647+ set_license(mod, license);
90648
90649 return 0;
90650 }
90651@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
90652 void *ptr;
90653
90654 /* Do the allocs. */
90655- ptr = module_alloc_update_bounds(mod->core_size);
90656+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90657 /*
90658 * The pointer to this block is stored in the module structure
90659 * which is inside the block. Just mark it as not being a
90660@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
90661 if (!ptr)
90662 return -ENOMEM;
90663
90664- memset(ptr, 0, mod->core_size);
90665- mod->module_core = ptr;
90666+ memset(ptr, 0, mod->core_size_rw);
90667+ mod->module_core_rw = ptr;
90668
90669- if (mod->init_size) {
90670- ptr = module_alloc_update_bounds(mod->init_size);
90671+ if (mod->init_size_rw) {
90672+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90673 /*
90674 * The pointer to this block is stored in the module structure
90675 * which is inside the block. This block doesn't need to be
90676@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
90677 */
90678 kmemleak_ignore(ptr);
90679 if (!ptr) {
90680- module_memfree(mod->module_core);
90681+ module_memfree(mod->module_core_rw);
90682 return -ENOMEM;
90683 }
90684- memset(ptr, 0, mod->init_size);
90685- mod->module_init = ptr;
90686+ memset(ptr, 0, mod->init_size_rw);
90687+ mod->module_init_rw = ptr;
90688 } else
90689- mod->module_init = NULL;
90690+ mod->module_init_rw = NULL;
90691+
90692+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90693+ kmemleak_not_leak(ptr);
90694+ if (!ptr) {
90695+ if (mod->module_init_rw)
90696+ module_memfree(mod->module_init_rw);
90697+ module_memfree(mod->module_core_rw);
90698+ return -ENOMEM;
90699+ }
90700+
90701+ pax_open_kernel();
90702+ memset(ptr, 0, mod->core_size_rx);
90703+ pax_close_kernel();
90704+ mod->module_core_rx = ptr;
90705+
90706+ if (mod->init_size_rx) {
90707+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90708+ kmemleak_ignore(ptr);
90709+ if (!ptr && mod->init_size_rx) {
90710+ module_memfree_exec(mod->module_core_rx);
90711+ if (mod->module_init_rw)
90712+ module_memfree(mod->module_init_rw);
90713+ module_memfree(mod->module_core_rw);
90714+ return -ENOMEM;
90715+ }
90716+
90717+ pax_open_kernel();
90718+ memset(ptr, 0, mod->init_size_rx);
90719+ pax_close_kernel();
90720+ mod->module_init_rx = ptr;
90721+ } else
90722+ mod->module_init_rx = NULL;
90723
90724 /* Transfer each section which specifies SHF_ALLOC */
90725 pr_debug("final section addresses:\n");
90726@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
90727 if (!(shdr->sh_flags & SHF_ALLOC))
90728 continue;
90729
90730- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90731- dest = mod->module_init
90732- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90733- else
90734- dest = mod->module_core + shdr->sh_entsize;
90735+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90736+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90737+ dest = mod->module_init_rw
90738+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90739+ else
90740+ dest = mod->module_init_rx
90741+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90742+ } else {
90743+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90744+ dest = mod->module_core_rw + shdr->sh_entsize;
90745+ else
90746+ dest = mod->module_core_rx + shdr->sh_entsize;
90747+ }
90748+
90749+ if (shdr->sh_type != SHT_NOBITS) {
90750+
90751+#ifdef CONFIG_PAX_KERNEXEC
90752+#ifdef CONFIG_X86_64
90753+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
90754+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
90755+#endif
90756+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
90757+ pax_open_kernel();
90758+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90759+ pax_close_kernel();
90760+ } else
90761+#endif
90762
90763- if (shdr->sh_type != SHT_NOBITS)
90764 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90765+ }
90766 /* Update sh_addr to point to copy in image. */
90767- shdr->sh_addr = (unsigned long)dest;
90768+
90769+#ifdef CONFIG_PAX_KERNEXEC
90770+ if (shdr->sh_flags & SHF_EXECINSTR)
90771+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
90772+ else
90773+#endif
90774+
90775+ shdr->sh_addr = (unsigned long)dest;
90776 pr_debug("\t0x%lx %s\n",
90777 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
90778 }
90779@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
90780 * Do it before processing of module parameters, so the module
90781 * can provide parameter accessor functions of its own.
90782 */
90783- if (mod->module_init)
90784- flush_icache_range((unsigned long)mod->module_init,
90785- (unsigned long)mod->module_init
90786- + mod->init_size);
90787- flush_icache_range((unsigned long)mod->module_core,
90788- (unsigned long)mod->module_core + mod->core_size);
90789+ if (mod->module_init_rx)
90790+ flush_icache_range((unsigned long)mod->module_init_rx,
90791+ (unsigned long)mod->module_init_rx
90792+ + mod->init_size_rx);
90793+ flush_icache_range((unsigned long)mod->module_core_rx,
90794+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
90795
90796 set_fs(old_fs);
90797 }
90798@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
90799 {
90800 percpu_modfree(mod);
90801 module_arch_freeing_init(mod);
90802- module_memfree(mod->module_init);
90803- module_memfree(mod->module_core);
90804+ module_memfree_exec(mod->module_init_rx);
90805+ module_memfree_exec(mod->module_core_rx);
90806+ module_memfree(mod->module_init_rw);
90807+ module_memfree(mod->module_core_rw);
90808 }
90809
90810 int __weak module_finalize(const Elf_Ehdr *hdr,
90811@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
90812 static int post_relocation(struct module *mod, const struct load_info *info)
90813 {
90814 /* Sort exception table now relocations are done. */
90815+ pax_open_kernel();
90816 sort_extable(mod->extable, mod->extable + mod->num_exentries);
90817+ pax_close_kernel();
90818
90819 /* Copy relocated percpu area over. */
90820 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
90821@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
90822 /* For freeing module_init on success, in case kallsyms traversing */
90823 struct mod_initfree {
90824 struct rcu_head rcu;
90825- void *module_init;
90826+ void *module_init_rw;
90827+ void *module_init_rx;
90828 };
90829
90830 static void do_free_init(struct rcu_head *head)
90831 {
90832 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
90833- module_memfree(m->module_init);
90834+ module_memfree(m->module_init_rw);
90835+ module_memfree_exec(m->module_init_rx);
90836 kfree(m);
90837 }
90838
90839@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
90840 ret = -ENOMEM;
90841 goto fail;
90842 }
90843- freeinit->module_init = mod->module_init;
90844+ freeinit->module_init_rw = mod->module_init_rw;
90845+ freeinit->module_init_rx = mod->module_init_rx;
90846
90847 /*
90848 * We want to find out whether @mod uses async during init. Clear
90849@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
90850 #endif
90851 unset_module_init_ro_nx(mod);
90852 module_arch_freeing_init(mod);
90853- mod->module_init = NULL;
90854- mod->init_size = 0;
90855- mod->init_ro_size = 0;
90856- mod->init_text_size = 0;
90857+ mod->module_init_rw = NULL;
90858+ mod->module_init_rx = NULL;
90859+ mod->init_size_rw = 0;
90860+ mod->init_size_rx = 0;
90861 /*
90862 * We want to free module_init, but be aware that kallsyms may be
90863 * walking this with preempt disabled. In all the failure paths,
90864@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
90865 module_bug_finalize(info->hdr, info->sechdrs, mod);
90866
90867 /* Set RO and NX regions for core */
90868- set_section_ro_nx(mod->module_core,
90869- mod->core_text_size,
90870- mod->core_ro_size,
90871- mod->core_size);
90872+ set_section_ro_nx(mod->module_core_rx,
90873+ mod->core_size_rx,
90874+ mod->core_size_rx,
90875+ mod->core_size_rx);
90876
90877 /* Set RO and NX regions for init */
90878- set_section_ro_nx(mod->module_init,
90879- mod->init_text_size,
90880- mod->init_ro_size,
90881- mod->init_size);
90882+ set_section_ro_nx(mod->module_init_rx,
90883+ mod->init_size_rx,
90884+ mod->init_size_rx,
90885+ mod->init_size_rx);
90886
90887 /* Mark state as coming so strong_try_module_get() ignores us,
90888 * but kallsyms etc. can see us. */
90889@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
90890 if (err)
90891 goto free_unload;
90892
90893+ /* Now copy in args */
90894+ mod->args = strndup_user(uargs, ~0UL >> 1);
90895+ if (IS_ERR(mod->args)) {
90896+ err = PTR_ERR(mod->args);
90897+ goto free_unload;
90898+ }
90899+
90900 /* Set up MODINFO_ATTR fields */
90901 setup_modinfo(mod, info);
90902
90903+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90904+ {
90905+ char *p, *p2;
90906+
90907+ if (strstr(mod->args, "grsec_modharden_netdev")) {
90908+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
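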
90909+ err = -EPERM;
90910+ goto free_modinfo;
90911+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
90912+ p += sizeof("grsec_modharden_normal") - 1;
90913+ p2 = strstr(p, "_");
90914+ if (p2) {
90915+ *p2 = '\0';
90916+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
90917+ *p2 = '_';
90918+ }
90919+ err = -EPERM;
90920+ goto free_modinfo;
90921+ }
90922+ }
90923+#endif
90924+
90925 /* Fix up syms, so that st_value is a pointer to location. */
90926 err = simplify_symbols(mod, info);
90927 if (err < 0)
90928@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
90929
90930 flush_module_icache(mod);
90931
90932- /* Now copy in args */
90933- mod->args = strndup_user(uargs, ~0UL >> 1);
90934- if (IS_ERR(mod->args)) {
90935- err = PTR_ERR(mod->args);
90936- goto free_arch_cleanup;
90937- }
90938-
90939 dynamic_debug_setup(info->debug, info->num_debug);
90940
90941 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
90942@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
90943 ddebug_cleanup:
90944 dynamic_debug_remove(info->debug);
90945 synchronize_sched();
90946- kfree(mod->args);
90947- free_arch_cleanup:
90948 module_arch_cleanup(mod);
90949 free_modinfo:
90950 free_modinfo(mod);
90951+ kfree(mod->args);
90952 free_unload:
90953 module_unload_free(mod);
90954 unlink_mod:
90955@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
90956 unsigned long nextval;
90957
90958 /* At worst, next value is at end of module */
90959- if (within_module_init(addr, mod))
90960- nextval = (unsigned long)mod->module_init+mod->init_text_size;
90961+ if (within_module_init_rx(addr, mod))
90962+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
90963+ else if (within_module_init_rw(addr, mod))
90964+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
90965+ else if (within_module_core_rx(addr, mod))
90966+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
90967+ else if (within_module_core_rw(addr, mod))
90968+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
90969 else
90970- nextval = (unsigned long)mod->module_core+mod->core_text_size;
90971+ return NULL;
90972
90973 /* Scan for closest preceding symbol, and next symbol. (ELF
90974 starts real symbols at 1). */
90975@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
90976 return 0;
90977
90978 seq_printf(m, "%s %u",
90979- mod->name, mod->init_size + mod->core_size);
90980+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
90981 print_unload_info(m, mod);
90982
90983 /* Informative for users. */
90984@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
90985 mod->state == MODULE_STATE_COMING ? "Loading" :
90986 "Live");
90987 /* Used by oprofile and other similar tools. */
90988- seq_printf(m, " 0x%pK", mod->module_core);
90989+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
90990
90991 /* Taints info */
90992 if (mod->taints)
90993@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
90994
90995 static int __init proc_modules_init(void)
90996 {
90997+#ifndef CONFIG_GRKERNSEC_HIDESYM
90998+#ifdef CONFIG_GRKERNSEC_PROC_USER
90999+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91000+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91001+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91002+#else
91003 proc_create("modules", 0, NULL, &proc_modules_operations);
91004+#endif
91005+#else
91006+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91007+#endif
91008 return 0;
91009 }
91010 module_init(proc_modules_init);
91011@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
91012 {
91013 struct module *mod;
91014
91015- if (addr < module_addr_min || addr > module_addr_max)
91016+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91017+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91018 return NULL;
91019
91020 list_for_each_entry_rcu(mod, &modules, list) {
91021@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
91022 */
91023 struct module *__module_text_address(unsigned long addr)
91024 {
91025- struct module *mod = __module_address(addr);
91026+ struct module *mod;
91027+
91028+#ifdef CONFIG_X86_32
91029+ addr = ktla_ktva(addr);
91030+#endif
91031+
91032+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91033+ return NULL;
91034+
91035+ mod = __module_address(addr);
91036+
91037 if (mod) {
91038 /* Make sure it's within the text section. */
91039- if (!within(addr, mod->module_init, mod->init_text_size)
91040- && !within(addr, mod->module_core, mod->core_text_size))
91041+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91042 mod = NULL;
91043 }
91044 return mod;
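
Note: taken together, the kernel/module.c hunks replace the single module region with a pair of regions so W^X holds for a module's entire lifetime. A comment-only sketch of the resulting layout (illustrative, not part of the patch):

	/*
	 *   module_core_rx / core_size_rx : text + rodata, allocated with
	 *       module_alloc_exec(), mapped read-execute, written only inside
	 *       pax_open_kernel()/pax_close_kernel() windows;
	 *   module_core_rw / core_size_rw : data + bss, allocated with
	 *       module_alloc(), mapped read-write and non-executable.
	 *
	 * module_init_rx / module_init_rw mirror this for .init sections
	 * and are freed separately once init completes.
	 */
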
91045diff --git a/kernel/notifier.c b/kernel/notifier.c
91046index 4803da6..1c5eea6 100644
91047--- a/kernel/notifier.c
91048+++ b/kernel/notifier.c
91049@@ -5,6 +5,7 @@
91050 #include <linux/rcupdate.h>
91051 #include <linux/vmalloc.h>
91052 #include <linux/reboot.h>
91053+#include <linux/mm.h>
91054
91055 /*
91056 * Notifier list for kernel code which wants to be called
91057@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91058 while ((*nl) != NULL) {
91059 if (n->priority > (*nl)->priority)
91060 break;
91061- nl = &((*nl)->next);
91062+ nl = (struct notifier_block **)&((*nl)->next);
91063 }
91064- n->next = *nl;
91065+ pax_open_kernel();
91066+ *(const void **)&n->next = *nl;
91067 rcu_assign_pointer(*nl, n);
91068+ pax_close_kernel();
91069 return 0;
91070 }
91071
91072@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91073 return 0;
91074 if (n->priority > (*nl)->priority)
91075 break;
91076- nl = &((*nl)->next);
91077+ nl = (struct notifier_block **)&((*nl)->next);
91078 }
91079- n->next = *nl;
91080+ pax_open_kernel();
91081+ *(const void **)&n->next = *nl;
91082 rcu_assign_pointer(*nl, n);
91083+ pax_close_kernel();
91084 return 0;
91085 }
91086
91087@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91088 {
91089 while ((*nl) != NULL) {
91090 if ((*nl) == n) {
91091+ pax_open_kernel();
91092 rcu_assign_pointer(*nl, n->next);
91093+ pax_close_kernel();
91094 return 0;
91095 }
91096- nl = &((*nl)->next);
91097+ nl = (struct notifier_block **)&((*nl)->next);
91098 }
91099 return -ENOENT;
91100 }
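
Note: with constification, notifier_block.next is const at rest, so the list surgery above is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86, by toggling CR0.WP). Distilled from the hunk (illustrative):

	pax_open_kernel();
	*(const void **)&n->next = *nl;	/* write through the const qualifier */
	rcu_assign_pointer(*nl, n);
	pax_close_kernel();
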
91101diff --git a/kernel/padata.c b/kernel/padata.c
91102index 161402f..598814c 100644
91103--- a/kernel/padata.c
91104+++ b/kernel/padata.c
91105@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91106 * seq_nr mod. number of cpus in use.
91107 */
91108
91109- seq_nr = atomic_inc_return(&pd->seq_nr);
91110+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91111 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91112
91113 return padata_index_to_cpu(pd, cpu_index);
91114@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91115 padata_init_pqueues(pd);
91116 padata_init_squeues(pd);
91117 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91118- atomic_set(&pd->seq_nr, -1);
91119+ atomic_set_unchecked(&pd->seq_nr, -1);
91120 atomic_set(&pd->reorder_objects, 0);
91121 atomic_set(&pd->refcnt, 0);
91122 pd->pinst = pinst;
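
Note: pd->seq_nr only feeds the modulo in padata_cpu_hash(), so wrap-around is harmless by design; the hunk trades the overflow-checked atomic_t for the _unchecked variant rather than risk a false-positive PAX_REFCOUNT trap. The distilled pattern (illustrative):

	seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);	/* may wrap: fine */
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
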
91123diff --git a/kernel/panic.c b/kernel/panic.c
91124index 4d8d6f9..97b9b9c 100644
91125--- a/kernel/panic.c
91126+++ b/kernel/panic.c
91127@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91128 /*
91129 * Stop ourself in panic -- architecture code may override this
91130 */
91131-void __weak panic_smp_self_stop(void)
91132+void __weak __noreturn panic_smp_self_stop(void)
91133 {
91134 while (1)
91135 cpu_relax();
91136@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91137 disable_trace_on_warning();
91138
91139 pr_warn("------------[ cut here ]------------\n");
91140- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91141+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91142 raw_smp_processor_id(), current->pid, file, line, caller);
91143
91144 if (args)
91145@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91146 */
91147 __visible void __stack_chk_fail(void)
91148 {
91149- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91150+ dump_stack();
91151+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91152 __builtin_return_address(0));
91153 }
91154 EXPORT_SYMBOL(__stack_chk_fail);
91155diff --git a/kernel/pid.c b/kernel/pid.c
91156index cd36a5e..11f185d 100644
91157--- a/kernel/pid.c
91158+++ b/kernel/pid.c
91159@@ -33,6 +33,7 @@
91160 #include <linux/rculist.h>
91161 #include <linux/bootmem.h>
91162 #include <linux/hash.h>
91163+#include <linux/security.h>
91164 #include <linux/pid_namespace.h>
91165 #include <linux/init_task.h>
91166 #include <linux/syscalls.h>
91167@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91168
91169 int pid_max = PID_MAX_DEFAULT;
91170
91171-#define RESERVED_PIDS 300
91172+#define RESERVED_PIDS 500
91173
91174 int pid_max_min = RESERVED_PIDS + 1;
91175 int pid_max_max = PID_MAX_LIMIT;
91176@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91177 */
91178 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91179 {
91180+ struct task_struct *task;
91181+
91182 rcu_lockdep_assert(rcu_read_lock_held(),
91183 "find_task_by_pid_ns() needs rcu_read_lock()"
91184 " protection");
91185- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91186+
91187+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91188+
91189+ if (gr_pid_is_chrooted(task))
91190+ return NULL;
91191+
91192+ return task;
91193 }
91194
91195 struct task_struct *find_task_by_vpid(pid_t vnr)
91196@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91197 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91198 }
91199
91200+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91201+{
91202+ rcu_lockdep_assert(rcu_read_lock_held(),
91203+ "find_task_by_pid_ns() needs rcu_read_lock()"
91204+ " protection");
91205+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91206+}
91207+
91208 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91209 {
91210 struct pid *pid;
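
Note: the find_task_by_pid_ns() hunk makes PIDs outside a chroot invisible to chrooted callers, while the new find_task_by_vpid_unrestricted() keeps the unfiltered lookup for in-kernel users that need it. A caller's-eye sketch (illustrative, not part of the patch):

	rcu_read_lock();
	task = find_task_by_vpid(nr);	/* NULL if @nr lives outside the caller's chroot */
	rcu_read_unlock();
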
91211diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91212index a65ba13..f600dbb 100644
91213--- a/kernel/pid_namespace.c
91214+++ b/kernel/pid_namespace.c
91215@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91216 void __user *buffer, size_t *lenp, loff_t *ppos)
91217 {
91218 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91219- struct ctl_table tmp = *table;
91220+ ctl_table_no_const tmp = *table;
91221
91222 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91223 return -EPERM;
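
Note: the tmp copy must stay writable on the stack even though constification makes struct ctl_table const at file scope, hence the _no_const type. Assumed rough shape (illustrative; the real typedef comes from the constify plugin support headers):

	typedef struct ctl_table __no_const ctl_table_no_const;	/* writable-copy type */
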
91224diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91225index 48b28d3..c63ccaf 100644
91226--- a/kernel/power/Kconfig
91227+++ b/kernel/power/Kconfig
91228@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91229 config HIBERNATION
91230 bool "Hibernation (aka 'suspend to disk')"
91231 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91232+ depends on !GRKERNSEC_KMEM
91233+ depends on !PAX_MEMORY_SANITIZE
91234 select HIBERNATE_CALLBACKS
91235 select LZO_COMPRESS
91236 select LZO_DECOMPRESS
91237diff --git a/kernel/power/process.c b/kernel/power/process.c
91238index 5a6ec86..3a8c884 100644
91239--- a/kernel/power/process.c
91240+++ b/kernel/power/process.c
91241@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91242 unsigned int elapsed_msecs;
91243 bool wakeup = false;
91244 int sleep_usecs = USEC_PER_MSEC;
91245+ bool timedout = false;
91246
91247 do_gettimeofday(&start);
91248
91249@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91250
91251 while (true) {
91252 todo = 0;
91253+ if (time_after(jiffies, end_time))
91254+ timedout = true;
91255 read_lock(&tasklist_lock);
91256 for_each_process_thread(g, p) {
91257 if (p == current || !freeze_task(p))
91258 continue;
91259
91260- if (!freezer_should_skip(p))
91261+ if (!freezer_should_skip(p)) {
91262 todo++;
91263+ if (timedout) {
91264+ printk(KERN_ERR "Task refusing to freeze:\n");
91265+ sched_show_task(p);
91266+ }
91267+ }
91268 }
91269 read_unlock(&tasklist_lock);
91270
91271@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91272 todo += wq_busy;
91273 }
91274
91275- if (!todo || time_after(jiffies, end_time))
91276+ if (!todo || timedout)
91277 break;
91278
91279 if (pm_wakeup_pending()) {
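
Note: the freezer change samples the deadline once per scan pass, so the pass that decides to give up is also the one that names the offenders. Distilled control flow (illustrative, not part of the patch):

	if (time_after(jiffies, end_time))
		timedout = true;
	/* ... per-task scan ... */
	if (!freezer_should_skip(p)) {
		todo++;
		if (timedout)
			sched_show_task(p);	/* dump the stack of the task refusing to freeze */
	}
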
91280diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91281index 2cdd353..7df1786 100644
91282--- a/kernel/printk/printk.c
91283+++ b/kernel/printk/printk.c
91284@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91285 if (from_file && type != SYSLOG_ACTION_OPEN)
91286 return 0;
91287
91288+#ifdef CONFIG_GRKERNSEC_DMESG
91289+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91290+ return -EPERM;
91291+#endif
91292+
91293 if (syslog_action_restricted(type)) {
91294 if (capable(CAP_SYSLOG))
91295 return 0;
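
Note: with GRKERNSEC_DMESG enabled, syslog access is refused up front for unprivileged callers; capable_nolog() is grsecurity's non-logging variant of capable(), used here so this expected-to-fail probe does not spam the audit log. The added gate, restated (illustrative):

	if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
		return -EPERM;	/* dmesg restricted to CAP_SYSLOG / CAP_SYS_ADMIN holders */
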
91296diff --git a/kernel/profile.c b/kernel/profile.c
91297index 54bf5ba..df6e0a2 100644
91298--- a/kernel/profile.c
91299+++ b/kernel/profile.c
91300@@ -37,7 +37,7 @@ struct profile_hit {
91301 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91302 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91303
91304-static atomic_t *prof_buffer;
91305+static atomic_unchecked_t *prof_buffer;
91306 static unsigned long prof_len, prof_shift;
91307
91308 int prof_on __read_mostly;
91309@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91310 hits[i].pc = 0;
91311 continue;
91312 }
91313- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91314+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91315 hits[i].hits = hits[i].pc = 0;
91316 }
91317 }
91318@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91319 * Add the current hit(s) and flush the write-queue out
91320 * to the global buffer:
91321 */
91322- atomic_add(nr_hits, &prof_buffer[pc]);
91323+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91324 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91325- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91326+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91327 hits[i].pc = hits[i].hits = 0;
91328 }
91329 out:
91330@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91331 {
91332 unsigned long pc;
91333 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91334- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91335+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91336 }
91337 #endif /* !CONFIG_SMP */
91338
91339@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91340 return -EFAULT;
91341 buf++; p++; count--; read++;
91342 }
91343- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91344+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91345 if (copy_to_user(buf, (void *)pnt, count))
91346 return -EFAULT;
91347 read += count;
91348@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91349 }
91350 #endif
91351 profile_discard_flip_buffers();
91352- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91353+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91354 return count;
91355 }
91356
91357diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91358index 1eb9d90..d40d21e 100644
91359--- a/kernel/ptrace.c
91360+++ b/kernel/ptrace.c
91361@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91362 if (seize)
91363 flags |= PT_SEIZED;
91364 rcu_read_lock();
91365- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91366+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91367 flags |= PT_PTRACE_CAP;
91368 rcu_read_unlock();
91369 task->ptrace = flags;
91370@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91371 break;
91372 return -EIO;
91373 }
91374- if (copy_to_user(dst, buf, retval))
91375+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91376 return -EFAULT;
91377 copied += retval;
91378 src += retval;
91379@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
91380 bool seized = child->ptrace & PT_SEIZED;
91381 int ret = -EIO;
91382 siginfo_t siginfo, *si;
91383- void __user *datavp = (void __user *) data;
91384+ void __user *datavp = (__force void __user *) data;
91385 unsigned long __user *datalp = datavp;
91386 unsigned long flags;
91387
91388@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91389 goto out;
91390 }
91391
91392+ if (gr_handle_ptrace(child, request)) {
91393+ ret = -EPERM;
91394+ goto out_put_task_struct;
91395+ }
91396+
91397 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91398 ret = ptrace_attach(child, request, addr, data);
91399 /*
91400 * Some architectures need to do book-keeping after
91401 * a ptrace attach.
91402 */
91403- if (!ret)
91404+ if (!ret) {
91405 arch_ptrace_attach(child);
91406+ gr_audit_ptrace(child);
91407+ }
91408 goto out_put_task_struct;
91409 }
91410
91411@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91412 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91413 if (copied != sizeof(tmp))
91414 return -EIO;
91415- return put_user(tmp, (unsigned long __user *)data);
91416+ return put_user(tmp, (__force unsigned long __user *)data);
91417 }
91418
91419 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91420@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91421 }
91422
91423 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91424- compat_long_t, addr, compat_long_t, data)
91425+ compat_ulong_t, addr, compat_ulong_t, data)
91426 {
91427 struct task_struct *child;
91428 long ret;
91429@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91430 goto out;
91431 }
91432
91433+ if (gr_handle_ptrace(child, request)) {
91434+ ret = -EPERM;
91435+ goto out_put_task_struct;
91436+ }
91437+
91438 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91439 ret = ptrace_attach(child, request, addr, data);
91440 /*
91441 * Some architectures need to do book-keeping after
91442 * a ptrace attach.
91443 */
91444- if (!ret)
91445+ if (!ret) {
91446 arch_ptrace_attach(child);
91447+ gr_audit_ptrace(child);
91448+ }
91449 goto out_put_task_struct;
91450 }
91451
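
Besides the gr_handle_ptrace()/gr_audit_ptrace() policy hooks, the ptrace_readdata hunk adds a defensive bound: retval comes back from access_process_vm() and should never exceed the on-stack chunk, but the extra comparison makes the copy_to_user() length provably bounded. The COMPAT_SYSCALL_DEFINE4 change likewise switches addr/data to compat_ulong_t so 32-bit values are zero- rather than sign-extended. A compilable sketch of the bounds pattern, with hypothetical helpers standing in for the kernel primitives:

    #include <string.h>
    #include <stddef.h>

    /* stands in for access_process_vm(); returns bytes actually read */
    static size_t fetch_remote(char *buf, size_t len, const char *src)
    {
        memcpy(buf, src, len);
        return len;
    }

    static long read_chunk(char *dst, const char *src, size_t want)
    {
        char buf[128];
        size_t retval = fetch_remote(buf,
                                     want < sizeof(buf) ? want : sizeof(buf),
                                     src);

        /* as in the patch: never copy more than the local buffer,
         * even if the producer misbehaves */
        if (retval > sizeof(buf))
            return -1;
        memcpy(dst, buf, retval);   /* stands in for copy_to_user() */
        return (long)retval;
    }
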
91452diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91453index 4d559ba..053da37 100644
91454--- a/kernel/rcu/rcutorture.c
91455+++ b/kernel/rcu/rcutorture.c
91456@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91457 rcu_torture_count) = { 0 };
91458 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91459 rcu_torture_batch) = { 0 };
91460-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91461-static atomic_t n_rcu_torture_alloc;
91462-static atomic_t n_rcu_torture_alloc_fail;
91463-static atomic_t n_rcu_torture_free;
91464-static atomic_t n_rcu_torture_mberror;
91465-static atomic_t n_rcu_torture_error;
91466+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91467+static atomic_unchecked_t n_rcu_torture_alloc;
91468+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91469+static atomic_unchecked_t n_rcu_torture_free;
91470+static atomic_unchecked_t n_rcu_torture_mberror;
91471+static atomic_unchecked_t n_rcu_torture_error;
91472 static long n_rcu_torture_barrier_error;
91473 static long n_rcu_torture_boost_ktrerror;
91474 static long n_rcu_torture_boost_rterror;
91475@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
91476 static long n_rcu_torture_timers;
91477 static long n_barrier_attempts;
91478 static long n_barrier_successes;
91479-static atomic_long_t n_cbfloods;
91480+static atomic_long_unchecked_t n_cbfloods;
91481 static struct list_head rcu_torture_removed;
91482
91483 static int rcu_torture_writer_state;
91484@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
91485
91486 spin_lock_bh(&rcu_torture_lock);
91487 if (list_empty(&rcu_torture_freelist)) {
91488- atomic_inc(&n_rcu_torture_alloc_fail);
91489+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91490 spin_unlock_bh(&rcu_torture_lock);
91491 return NULL;
91492 }
91493- atomic_inc(&n_rcu_torture_alloc);
91494+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91495 p = rcu_torture_freelist.next;
91496 list_del_init(p);
91497 spin_unlock_bh(&rcu_torture_lock);
91498@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
91499 static void
91500 rcu_torture_free(struct rcu_torture *p)
91501 {
91502- atomic_inc(&n_rcu_torture_free);
91503+ atomic_inc_unchecked(&n_rcu_torture_free);
91504 spin_lock_bh(&rcu_torture_lock);
91505 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91506 spin_unlock_bh(&rcu_torture_lock);
91507@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91508 i = rp->rtort_pipe_count;
91509 if (i > RCU_TORTURE_PIPE_LEN)
91510 i = RCU_TORTURE_PIPE_LEN;
91511- atomic_inc(&rcu_torture_wcount[i]);
91512+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91513 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91514 rp->rtort_mbtest = 0;
91515 return true;
91516@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
91517 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
91518 do {
91519 schedule_timeout_interruptible(cbflood_inter_holdoff);
91520- atomic_long_inc(&n_cbfloods);
91521+ atomic_long_inc_unchecked(&n_cbfloods);
91522 WARN_ON(signal_pending(current));
91523 for (i = 0; i < cbflood_n_burst; i++) {
91524 for (j = 0; j < cbflood_n_per_burst; j++) {
91525@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
91526 i = old_rp->rtort_pipe_count;
91527 if (i > RCU_TORTURE_PIPE_LEN)
91528 i = RCU_TORTURE_PIPE_LEN;
91529- atomic_inc(&rcu_torture_wcount[i]);
91530+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91531 old_rp->rtort_pipe_count++;
91532 switch (synctype[torture_random(&rand) % nsynctypes]) {
91533 case RTWS_DEF_FREE:
91534@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
91535 return;
91536 }
91537 if (p->rtort_mbtest == 0)
91538- atomic_inc(&n_rcu_torture_mberror);
91539+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91540 spin_lock(&rand_lock);
91541 cur_ops->read_delay(&rand);
91542 n_rcu_torture_timers++;
91543@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
91544 continue;
91545 }
91546 if (p->rtort_mbtest == 0)
91547- atomic_inc(&n_rcu_torture_mberror);
91548+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91549 cur_ops->read_delay(&rand);
91550 preempt_disable();
91551 pipe_count = p->rtort_pipe_count;
91552@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
91553 rcu_torture_current,
91554 rcu_torture_current_version,
91555 list_empty(&rcu_torture_freelist),
91556- atomic_read(&n_rcu_torture_alloc),
91557- atomic_read(&n_rcu_torture_alloc_fail),
91558- atomic_read(&n_rcu_torture_free));
91559+ atomic_read_unchecked(&n_rcu_torture_alloc),
91560+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91561+ atomic_read_unchecked(&n_rcu_torture_free));
91562 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
91563- atomic_read(&n_rcu_torture_mberror),
91564+ atomic_read_unchecked(&n_rcu_torture_mberror),
91565 n_rcu_torture_boost_ktrerror,
91566 n_rcu_torture_boost_rterror);
91567 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
91568@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
91569 n_barrier_successes,
91570 n_barrier_attempts,
91571 n_rcu_torture_barrier_error);
91572- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
91573+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
91574
91575 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91576- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91577+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91578 n_rcu_torture_barrier_error != 0 ||
91579 n_rcu_torture_boost_ktrerror != 0 ||
91580 n_rcu_torture_boost_rterror != 0 ||
91581 n_rcu_torture_boost_failure != 0 ||
91582 i > 1) {
91583 pr_cont("%s", "!!! ");
91584- atomic_inc(&n_rcu_torture_error);
91585+ atomic_inc_unchecked(&n_rcu_torture_error);
91586 WARN_ON_ONCE(1);
91587 }
91588 pr_cont("Reader Pipe: ");
91589@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
91590 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91591 pr_cont("Free-Block Circulation: ");
91592 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91593- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
91594+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
91595 }
91596 pr_cont("\n");
91597
91598@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
91599
91600 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91601
91602- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91603+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91604 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91605 else if (torture_onoff_failures())
91606 rcu_torture_print_module_parms(cur_ops,
91607@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
91608
91609 rcu_torture_current = NULL;
91610 rcu_torture_current_version = 0;
91611- atomic_set(&n_rcu_torture_alloc, 0);
91612- atomic_set(&n_rcu_torture_alloc_fail, 0);
91613- atomic_set(&n_rcu_torture_free, 0);
91614- atomic_set(&n_rcu_torture_mberror, 0);
91615- atomic_set(&n_rcu_torture_error, 0);
91616+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91617+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91618+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91619+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91620+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91621 n_rcu_torture_barrier_error = 0;
91622 n_rcu_torture_boost_ktrerror = 0;
91623 n_rcu_torture_boost_rterror = 0;
91624 n_rcu_torture_boost_failure = 0;
91625 n_rcu_torture_boosts = 0;
91626 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91627- atomic_set(&rcu_torture_wcount[i], 0);
91628+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91629 for_each_possible_cpu(cpu) {
91630 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91631 per_cpu(rcu_torture_count, cpu)[i] = 0;
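
Under PaX REFCOUNT, the ordinary atomic_inc() family detects and traps reference-counter overflow; counters such as these torture-test statistics may legitimately grow without bound, so the patch opts them out via the *_unchecked variants, which keep the pre-REFCOUNT wrapping behaviour. A conceptual userspace model only (the real primitives are per-architecture assembly):

    #include <limits.h>
    #include <stdlib.h>

    typedef struct { volatile int counter; } atomic_unchecked_t;

    /* models the REFCOUNT-protected increment: trap instead of wrap */
    static void atomic_inc_checked(volatile int *v)
    {
        if (*v == INT_MAX)
            abort();
        (*v)++;
    }

    /* the opted-out variant: plain increment, wrap tolerated by design */
    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        v->counter++;
    }
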
91632diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91633index 0db5649..e6ec167 100644
91634--- a/kernel/rcu/tiny.c
91635+++ b/kernel/rcu/tiny.c
91636@@ -42,7 +42,7 @@
91637 /* Forward declarations for tiny_plugin.h. */
91638 struct rcu_ctrlblk;
91639 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91640-static void rcu_process_callbacks(struct softirq_action *unused);
91641+static void rcu_process_callbacks(void);
91642 static void __call_rcu(struct rcu_head *head,
91643 void (*func)(struct rcu_head *rcu),
91644 struct rcu_ctrlblk *rcp);
91645@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91646 false));
91647 }
91648
91649-static void rcu_process_callbacks(struct softirq_action *unused)
91650+static __latent_entropy void rcu_process_callbacks(void)
91651 {
91652 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91653 __rcu_process_callbacks(&rcu_bh_ctrlblk);
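
Two independent hardening changes meet in this hunk: softirq handlers lose their never-used struct softirq_action * argument, giving every handler the same narrow void (*)(void) type, and __latent_entropy (a grsecurity GCC-plugin attribute) instruments the handler to stir per-boot entropy as it runs. A sketch assuming that shape, with the attribute modelled as a no-op:

    #define __latent_entropy   /* plugin-instrumented in the real kernel */

    typedef void (*softirq_handler_t)(void);  /* was (struct softirq_action *) */

    static softirq_handler_t softirq_vec[10];

    static void open_softirq_sketch(int nr, softirq_handler_t fn)
    {
        softirq_vec[nr] = fn;
    }

    static __latent_entropy void rcu_process_callbacks_sketch(void)
    {
        /* handler body; the old argument carried no information */
    }
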
91654diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91655index 858c565..7efd915 100644
91656--- a/kernel/rcu/tiny_plugin.h
91657+++ b/kernel/rcu/tiny_plugin.h
91658@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91659 dump_stack();
91660 }
91661 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91662- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91663+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91664 3 * rcu_jiffies_till_stall_check() + 3;
91665 else if (ULONG_CMP_GE(j, js))
91666- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91667+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91668 }
91669
91670 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91671 {
91672 rcp->ticks_this_gp = 0;
91673 rcp->gp_start = jiffies;
91674- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91675+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91676 }
91677
91678 static void check_cpu_stalls(void)
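
ACCESS_ONCE_RW exists because the grsecurity tree re-defines ACCESS_ONCE to go through a const-qualified volatile pointer, so accidental writes through it fail to compile; deliberate writes, as in these stall-timer updates, use the RW variant. A userspace model, assuming the definitions follow the upstream ACCESS_ONCE shape:

    #define ACCESS_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x)  (*(volatile __typeof__(x) *)&(x))

    static unsigned long jiffies_stall;

    static unsigned long touch_stall(unsigned long j)
    {
        ACCESS_ONCE_RW(jiffies_stall) = j;   /* write: RW variant only */
        return ACCESS_ONCE(jiffies_stall);   /* read: const variant */
    }
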
91679diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91680index 7680fc2..b8e9161 100644
91681--- a/kernel/rcu/tree.c
91682+++ b/kernel/rcu/tree.c
91683@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
91684 */
91685 rdtp = this_cpu_ptr(&rcu_dynticks);
91686 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91687- atomic_add(2, &rdtp->dynticks); /* QS. */
91688+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91689 smp_mb__after_atomic(); /* Later stuff after QS. */
91690 break;
91691 }
91692@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
91693 rcu_prepare_for_idle();
91694 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91695 smp_mb__before_atomic(); /* See above. */
91696- atomic_inc(&rdtp->dynticks);
91697+ atomic_inc_unchecked(&rdtp->dynticks);
91698 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91699- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91700+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91701 rcu_dynticks_task_enter();
91702
91703 /*
91704@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
91705
91706 rcu_dynticks_task_exit();
91707 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91708- atomic_inc(&rdtp->dynticks);
91709+ atomic_inc_unchecked(&rdtp->dynticks);
91710 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91711 smp_mb__after_atomic(); /* See above. */
91712- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91713+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91714 rcu_cleanup_after_idle();
91715 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91716 if (!user && !is_idle_task(current)) {
91717@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
91718 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91719
91720 if (rdtp->dynticks_nmi_nesting == 0 &&
91721- (atomic_read(&rdtp->dynticks) & 0x1))
91722+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91723 return;
91724 rdtp->dynticks_nmi_nesting++;
91725 smp_mb__before_atomic(); /* Force delay from prior write. */
91726- atomic_inc(&rdtp->dynticks);
91727+ atomic_inc_unchecked(&rdtp->dynticks);
91728 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91729 smp_mb__after_atomic(); /* See above. */
91730- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91731+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91732 }
91733
91734 /**
91735@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
91736 return;
91737 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91738 smp_mb__before_atomic(); /* See above. */
91739- atomic_inc(&rdtp->dynticks);
91740+ atomic_inc_unchecked(&rdtp->dynticks);
91741 smp_mb__after_atomic(); /* Force delay to next write. */
91742- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91743+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91744 }
91745
91746 /**
91747@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
91748 */
91749 bool notrace __rcu_is_watching(void)
91750 {
91751- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91752+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91753 }
91754
91755 /**
91756@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91757 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91758 bool *isidle, unsigned long *maxj)
91759 {
91760- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91761+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91762 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91763 if ((rdp->dynticks_snap & 0x1) == 0) {
91764 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91765@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91766 int *rcrmp;
91767 unsigned int snap;
91768
91769- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91770+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91771 snap = (unsigned int)rdp->dynticks_snap;
91772
91773 /*
91774@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91775 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91776 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91777 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91778- ACCESS_ONCE(rdp->cond_resched_completed) =
91779+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
91780 ACCESS_ONCE(rdp->mynode->completed);
91781 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
91782- ACCESS_ONCE(*rcrmp) =
91783+ ACCESS_ONCE_RW(*rcrmp) =
91784 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
91785 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
91786 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
91787@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
91788 rsp->gp_start = j;
91789 smp_wmb(); /* Record start time before stall time. */
91790 j1 = rcu_jiffies_till_stall_check();
91791- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
91792+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
91793 rsp->jiffies_resched = j + j1 / 2;
91794 }
91795
91796@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
91797 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91798 return;
91799 }
91800- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91801+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91802 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91803
91804 /*
91805@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
91806
91807 raw_spin_lock_irqsave(&rnp->lock, flags);
91808 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
91809- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
91810+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
91811 3 * rcu_jiffies_till_stall_check() + 3;
91812 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91813
91814@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
91815 struct rcu_state *rsp;
91816
91817 for_each_rcu_flavor(rsp)
91818- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91819+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91820 }
91821
91822 /*
91823@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
91824 raw_spin_unlock_irq(&rnp->lock);
91825 return 0;
91826 }
91827- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91828+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91829
91830 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
91831 /*
91832@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
91833 rdp = this_cpu_ptr(rsp->rda);
91834 rcu_preempt_check_blocked_tasks(rnp);
91835 rnp->qsmask = rnp->qsmaskinit;
91836- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
91837+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
91838 WARN_ON_ONCE(rnp->completed != rsp->completed);
91839- ACCESS_ONCE(rnp->completed) = rsp->completed;
91840+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
91841 if (rnp == rdp->mynode)
91842 (void)__note_gp_changes(rsp, rnp, rdp);
91843 rcu_preempt_boost_start_gp(rnp);
91844@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
91845 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
91846 raw_spin_lock_irq(&rnp->lock);
91847 smp_mb__after_unlock_lock();
91848- ACCESS_ONCE(rsp->gp_flags) =
91849+ ACCESS_ONCE_RW(rsp->gp_flags) =
91850 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
91851 raw_spin_unlock_irq(&rnp->lock);
91852 }
91853@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91854 rcu_for_each_node_breadth_first(rsp, rnp) {
91855 raw_spin_lock_irq(&rnp->lock);
91856 smp_mb__after_unlock_lock();
91857- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
91858+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
91859 rdp = this_cpu_ptr(rsp->rda);
91860 if (rnp == rdp->mynode)
91861 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
91862@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91863 rcu_nocb_gp_set(rnp, nocb);
91864
91865 /* Declare grace period done. */
91866- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
91867+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
91868 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
91869 rsp->fqs_state = RCU_GP_IDLE;
91870 rdp = this_cpu_ptr(rsp->rda);
91871 /* Advance CBs to reduce false positives below. */
91872 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
91873 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
91874- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91875+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91876 trace_rcu_grace_period(rsp->name,
91877 ACCESS_ONCE(rsp->gpnum),
91878 TPS("newreq"));
91879@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
91880 */
91881 return false;
91882 }
91883- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91884+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91885 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
91886 TPS("newreq"));
91887
91888@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
91889 rsp->qlen += rdp->qlen;
91890 rdp->n_cbs_orphaned += rdp->qlen;
91891 rdp->qlen_lazy = 0;
91892- ACCESS_ONCE(rdp->qlen) = 0;
91893+ ACCESS_ONCE_RW(rdp->qlen) = 0;
91894 }
91895
91896 /*
91897@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
91898 }
91899 smp_mb(); /* List handling before counting for rcu_barrier(). */
91900 rdp->qlen_lazy -= count_lazy;
91901- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
91902+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
91903 rdp->n_cbs_invoked += count;
91904
91905 /* Reinstate batch limit if we have worked down the excess. */
91906@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
91907 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
91908 return; /* Someone beat us to it. */
91909 }
91910- ACCESS_ONCE(rsp->gp_flags) =
91911+ ACCESS_ONCE_RW(rsp->gp_flags) =
91912 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
91913 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
91914 rcu_gp_kthread_wake(rsp);
91915@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
91916 /*
91917 * Do RCU core processing for the current CPU.
91918 */
91919-static void rcu_process_callbacks(struct softirq_action *unused)
91920+static void rcu_process_callbacks(void)
91921 {
91922 struct rcu_state *rsp;
91923
91924@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
91925 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
91926 if (debug_rcu_head_queue(head)) {
91927 /* Probable double call_rcu(), so leak the callback. */
91928- ACCESS_ONCE(head->func) = rcu_leak_callback;
91929+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
91930 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
91931 return;
91932 }
91933@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
91934 local_irq_restore(flags);
91935 return;
91936 }
91937- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
91938+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
91939 if (lazy)
91940 rdp->qlen_lazy++;
91941 else
91942@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
91943 * counter wrap on a 32-bit system. Quite a few more CPUs would of
91944 * course be required on a 64-bit system.
91945 */
91946- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
91947+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
91948 (ulong)atomic_long_read(&rsp->expedited_done) +
91949 ULONG_MAX / 8)) {
91950 synchronize_sched();
91951- atomic_long_inc(&rsp->expedited_wrap);
91952+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
91953 return;
91954 }
91955
91956@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
91957 * Take a ticket. Note that atomic_inc_return() implies a
91958 * full memory barrier.
91959 */
91960- snap = atomic_long_inc_return(&rsp->expedited_start);
91961+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
91962 firstsnap = snap;
91963 if (!try_get_online_cpus()) {
91964 /* CPU hotplug operation in flight, fall back to normal GP. */
91965 wait_rcu_gp(call_rcu_sched);
91966- atomic_long_inc(&rsp->expedited_normal);
91967+ atomic_long_inc_unchecked(&rsp->expedited_normal);
91968 return;
91969 }
91970 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
91971@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
91972 for_each_cpu(cpu, cm) {
91973 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
91974
91975- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
91976+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
91977 cpumask_clear_cpu(cpu, cm);
91978 }
91979 if (cpumask_weight(cm) == 0)
91980@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
91981 synchronize_sched_expedited_cpu_stop,
91982 NULL) == -EAGAIN) {
91983 put_online_cpus();
91984- atomic_long_inc(&rsp->expedited_tryfail);
91985+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
91986
91987 /* Check to see if someone else did our work for us. */
91988 s = atomic_long_read(&rsp->expedited_done);
91989 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
91990 /* ensure test happens before caller kfree */
91991 smp_mb__before_atomic(); /* ^^^ */
91992- atomic_long_inc(&rsp->expedited_workdone1);
91993+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
91994 free_cpumask_var(cm);
91995 return;
91996 }
91997@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
91998 udelay(trycount * num_online_cpus());
91999 } else {
92000 wait_rcu_gp(call_rcu_sched);
92001- atomic_long_inc(&rsp->expedited_normal);
92002+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92003 free_cpumask_var(cm);
92004 return;
92005 }
92006@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
92007 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92008 /* ensure test happens before caller kfree */
92009 smp_mb__before_atomic(); /* ^^^ */
92010- atomic_long_inc(&rsp->expedited_workdone2);
92011+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92012 free_cpumask_var(cm);
92013 return;
92014 }
92015@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
92016 if (!try_get_online_cpus()) {
92017 /* CPU hotplug operation in flight, use normal GP. */
92018 wait_rcu_gp(call_rcu_sched);
92019- atomic_long_inc(&rsp->expedited_normal);
92020+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92021 free_cpumask_var(cm);
92022 return;
92023 }
92024- snap = atomic_long_read(&rsp->expedited_start);
92025+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92026 smp_mb(); /* ensure read is before try_stop_cpus(). */
92027 }
92028- atomic_long_inc(&rsp->expedited_stoppedcpus);
92029+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92030
92031 all_cpus_idle:
92032 free_cpumask_var(cm);
92033@@ -3072,16 +3072,16 @@ all_cpus_idle:
92034 * than we did already did their update.
92035 */
92036 do {
92037- atomic_long_inc(&rsp->expedited_done_tries);
92038+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92039 s = atomic_long_read(&rsp->expedited_done);
92040 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92041 /* ensure test happens before caller kfree */
92042 smp_mb__before_atomic(); /* ^^^ */
92043- atomic_long_inc(&rsp->expedited_done_lost);
92044+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92045 break;
92046 }
92047 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92048- atomic_long_inc(&rsp->expedited_done_exit);
92049+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92050
92051 put_online_cpus();
92052 }
92053@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92054 * ACCESS_ONCE() to prevent the compiler from speculating
92055 * the increment to precede the early-exit check.
92056 */
92057- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92058+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92059 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92060 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92061 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92062@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92063
92064 /* Increment ->n_barrier_done to prevent duplicate work. */
92065 smp_mb(); /* Keep increment after above mechanism. */
92066- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92067+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92068 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92069 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92070 smp_mb(); /* Keep increment before caller's subsequent code. */
92071@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92072 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92073 init_callback_list(rdp);
92074 rdp->qlen_lazy = 0;
92075- ACCESS_ONCE(rdp->qlen) = 0;
92076+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92077 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92078 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92079- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92080+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92081 rdp->cpu = cpu;
92082 rdp->rsp = rsp;
92083 rcu_boot_init_nocb_percpu_data(rdp);
92084@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92085 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92086 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92087 rcu_sysidle_init_percpu_data(rdp->dynticks);
92088- atomic_set(&rdp->dynticks->dynticks,
92089- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92090+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92091+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92092 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92093
92094 /* Add CPU to rcu_node bitmasks. */
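
Most of the tree.c conversions are mechanical, but note the dynticks snapshots: atomic_add_return(0, ...) is used purely as a read that carries full-barrier ordering, and the _unchecked variant must preserve exactly that. A sketch of the add-zero idiom using GCC builtins in place of the per-arch kernel primitives:

    typedef struct { int counter; } atomic_unchecked_t;

    static int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
    {
        return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
    }

    static int dynticks_snap(atomic_unchecked_t *dynticks)
    {
        /* adding 0 leaves the value alone but orders like a full RMW */
        return atomic_add_return_unchecked(0, dynticks);
    }
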
92095diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92096index 8e7b184..9c55768 100644
92097--- a/kernel/rcu/tree.h
92098+++ b/kernel/rcu/tree.h
92099@@ -87,11 +87,11 @@ struct rcu_dynticks {
92100 long long dynticks_nesting; /* Track irq/process nesting level. */
92101 /* Process level is worth LLONG_MAX/2. */
92102 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92103- atomic_t dynticks; /* Even value for idle, else odd. */
92104+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92105 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92106 long long dynticks_idle_nesting;
92107 /* irq/process nesting level from idle. */
92108- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92109+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92110 /* "Idle" excludes userspace execution. */
92111 unsigned long dynticks_idle_jiffies;
92112 /* End of last non-NMI non-idle period. */
92113@@ -466,17 +466,17 @@ struct rcu_state {
92114 /* _rcu_barrier(). */
92115 /* End of fields guarded by barrier_mutex. */
92116
92117- atomic_long_t expedited_start; /* Starting ticket. */
92118- atomic_long_t expedited_done; /* Done ticket. */
92119- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92120- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92121- atomic_long_t expedited_workdone1; /* # done by others #1. */
92122- atomic_long_t expedited_workdone2; /* # done by others #2. */
92123- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92124- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92125- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92126- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92127- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92128+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92129+ atomic_long_t expedited_done; /* Done ticket. */
92130+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92131+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92132+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92133+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92134+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92135+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92136+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92137+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92138+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92139
92140 unsigned long jiffies_force_qs; /* Time at which to invoke */
92141 /* force_quiescent_state(). */
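
The conversion here is deliberately selective: every expedited statistic becomes unchecked, yet expedited_done stays a plain atomic_long_t, presumably because it is advanced with atomic_long_cmpxchg() rather than a bare increment. Wraparound is harmless for these tickets either way, since comparisons use modular arithmetic; the ULONG_CMP_GE() idiom seen in synchronize_sched_expedited() is, as defined upstream:

    #include <limits.h>

    /* wrap-safe "a >= b" for free-running counters, jiffies-style */
    #define ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (a) - (b))

    static int ticket_done(unsigned long done, unsigned long snap)
    {
        return ULONG_CMP_GE(done, snap);
    }
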
92142diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92143index 3ec85cb..3687925 100644
92144--- a/kernel/rcu/tree_plugin.h
92145+++ b/kernel/rcu/tree_plugin.h
92146@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92147 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92148 {
92149 return !rcu_preempted_readers_exp(rnp) &&
92150- ACCESS_ONCE(rnp->expmask) == 0;
92151+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92152 }
92153
92154 /*
92155@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
92156
92157 /* Clean up and exit. */
92158 smp_mb(); /* ensure expedited GP seen before counter increment. */
92159- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92160+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92161 sync_rcu_preempt_exp_count + 1;
92162 unlock_mb_ret:
92163 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92164@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92165 free_cpumask_var(cm);
92166 }
92167
92168-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92169+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92170 .store = &rcu_cpu_kthread_task,
92171 .thread_should_run = rcu_cpu_kthread_should_run,
92172 .thread_fn = rcu_cpu_kthread,
92173@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92174 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92175 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92176 cpu, ticks_value, ticks_title,
92177- atomic_read(&rdtp->dynticks) & 0xfff,
92178+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92179 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92180 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92181 fast_no_hz);
92182@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92183 return;
92184 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92185 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92186- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92187+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92188 wake_up(&rdp_leader->nocb_wq);
92189 }
92190 }
92191@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92192
92193 /* Enqueue the callback on the nocb list and update counts. */
92194 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92195- ACCESS_ONCE(*old_rhpp) = rhp;
92196+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92197 atomic_long_add(rhcount, &rdp->nocb_q_count);
92198 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92199 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92200@@ -2286,7 +2286,7 @@ wait_again:
92201 continue; /* No CBs here, try next follower. */
92202
92203 /* Move callbacks to wait-for-GP list, which is empty. */
92204- ACCESS_ONCE(rdp->nocb_head) = NULL;
92205+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92206 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92207 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92208 rdp->nocb_gp_count_lazy =
92209@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
92210 list = ACCESS_ONCE(rdp->nocb_follower_head);
92211 BUG_ON(!list);
92212 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92213- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92214+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92215 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92216 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92217 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92218@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
92219 list = next;
92220 }
92221 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92222- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92223- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
92224+ ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92225+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
92226 rdp->nocb_p_count_lazy - cl;
92227 rdp->n_nocbs_invoked += c;
92228 }
92229@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92230 if (!rcu_nocb_need_deferred_wakeup(rdp))
92231 return;
92232 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92233- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92234+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92235 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92236 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92237 }
92238@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92239 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92240 "rcuo%c/%d", rsp->abbr, cpu);
92241 BUG_ON(IS_ERR(t));
92242- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
92243+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
92244 }
92245
92246 /*
92247@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
92248
92249 /* Record start of fully idle period. */
92250 j = jiffies;
92251- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92252+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92253 smp_mb__before_atomic();
92254- atomic_inc(&rdtp->dynticks_idle);
92255+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92256 smp_mb__after_atomic();
92257- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92258+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92259 }
92260
92261 /*
92262@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
92263
92264 /* Record end of idle period. */
92265 smp_mb__before_atomic();
92266- atomic_inc(&rdtp->dynticks_idle);
92267+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92268 smp_mb__after_atomic();
92269- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92270+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92271
92272 /*
92273 * If we are the timekeeping CPU, we are permitted to be non-idle
92274@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92275 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92276
92277 /* Pick up current idle and NMI-nesting counter and check. */
92278- cur = atomic_read(&rdtp->dynticks_idle);
92279+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92280 if (cur & 0x1) {
92281 *isidle = false; /* We are not idle! */
92282 return;
92283@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
92284 case RCU_SYSIDLE_NOT:
92285
92286 /* First time all are idle, so note a short idle period. */
92287- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92288+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92289 break;
92290
92291 case RCU_SYSIDLE_SHORT:
92292@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
92293 {
92294 smp_mb();
92295 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92296- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92297+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92298 }
92299
92300 /*
92301@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92302 smp_mb(); /* grace period precedes setting inuse. */
92303
92304 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92305- ACCESS_ONCE(rshp->inuse) = 0;
92306+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92307 }
92308
92309 /*
92310@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
92311 static void rcu_dynticks_task_enter(void)
92312 {
92313 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92314- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
92315+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
92316 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92317 }
92318
92319@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
92320 static void rcu_dynticks_task_exit(void)
92321 {
92322 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92323- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
92324+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
92325 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92326 }
92327diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92328index 5cdc62e..cc52e88 100644
92329--- a/kernel/rcu/tree_trace.c
92330+++ b/kernel/rcu/tree_trace.c
92331@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92332 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92333 rdp->passed_quiesce, rdp->qs_pending);
92334 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92335- atomic_read(&rdp->dynticks->dynticks),
92336+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92337 rdp->dynticks->dynticks_nesting,
92338 rdp->dynticks->dynticks_nmi_nesting,
92339 rdp->dynticks_fqs);
92340@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92341 struct rcu_state *rsp = (struct rcu_state *)m->private;
92342
92343 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92344- atomic_long_read(&rsp->expedited_start),
92345+ atomic_long_read_unchecked(&rsp->expedited_start),
92346 atomic_long_read(&rsp->expedited_done),
92347- atomic_long_read(&rsp->expedited_wrap),
92348- atomic_long_read(&rsp->expedited_tryfail),
92349- atomic_long_read(&rsp->expedited_workdone1),
92350- atomic_long_read(&rsp->expedited_workdone2),
92351- atomic_long_read(&rsp->expedited_normal),
92352- atomic_long_read(&rsp->expedited_stoppedcpus),
92353- atomic_long_read(&rsp->expedited_done_tries),
92354- atomic_long_read(&rsp->expedited_done_lost),
92355- atomic_long_read(&rsp->expedited_done_exit));
92356+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92357+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92358+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92359+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92360+ atomic_long_read_unchecked(&rsp->expedited_normal),
92361+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92362+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92363+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92364+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92365 return 0;
92366 }
92367
92368diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92369index e0d31a3..f4dafe3 100644
92370--- a/kernel/rcu/update.c
92371+++ b/kernel/rcu/update.c
92372@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
92373 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92374 */
92375 if (till_stall_check < 3) {
92376- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92377+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92378 till_stall_check = 3;
92379 } else if (till_stall_check > 300) {
92380- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92381+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92382 till_stall_check = 300;
92383 }
92384 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92385@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
92386 !ACCESS_ONCE(t->on_rq) ||
92387 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
92388 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
92389- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
92390+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
92391 list_del_init(&t->rcu_tasks_holdout_list);
92392 put_task_struct(t);
92393 return;
92394@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
92395 !is_idle_task(t)) {
92396 get_task_struct(t);
92397 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
92398- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
92399+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
92400 list_add(&t->rcu_tasks_holdout_list,
92401 &rcu_tasks_holdouts);
92402 }
92403@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
92404 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
92405 BUG_ON(IS_ERR(t));
92406 smp_mb(); /* Ensure others see full kthread. */
92407- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
92408+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
92409 mutex_unlock(&rcu_tasks_kthread_mutex);
92410 }
92411
92412diff --git a/kernel/resource.c b/kernel/resource.c
92413index 0bcebff..e7cd5b2 100644
92414--- a/kernel/resource.c
92415+++ b/kernel/resource.c
92416@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92417
92418 static int __init ioresources_init(void)
92419 {
92420+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92421+#ifdef CONFIG_GRKERNSEC_PROC_USER
92422+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92423+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92424+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92425+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92426+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92427+#endif
92428+#else
92429 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92430 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92431+#endif
92432 return 0;
92433 }
92434 __initcall(ioresources_init);
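
With GRKERNSEC_PROC_ADD, /proc/ioports and /proc/iomem stop being world-readable (a mode of 0 is treated by procfs as 0444) and become readable by root only, or by root plus a configured group. The mode selection, sketched with the Kconfig options modelled as plain macros:

    #include <sys/stat.h>

    static unsigned int proc_io_mode(void)
    {
    #if defined(GRKERNSEC_PROC_USER)
        return S_IRUSR;                 /* root only */
    #elif defined(GRKERNSEC_PROC_USERGROUP)
        return S_IRUSR | S_IRGRP;       /* root plus the proc group */
    #else
        return 0;                       /* procfs default: 0444 */
    #endif
    }
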
92435diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92436index eae160d..c9aa22e 100644
92437--- a/kernel/sched/auto_group.c
92438+++ b/kernel/sched/auto_group.c
92439@@ -11,7 +11,7 @@
92440
92441 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92442 static struct autogroup autogroup_default;
92443-static atomic_t autogroup_seq_nr;
92444+static atomic_unchecked_t autogroup_seq_nr;
92445
92446 void __init autogroup_init(struct task_struct *init_task)
92447 {
92448@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92449
92450 kref_init(&ag->kref);
92451 init_rwsem(&ag->lock);
92452- ag->id = atomic_inc_return(&autogroup_seq_nr);
92453+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92454 ag->tg = tg;
92455 #ifdef CONFIG_RT_GROUP_SCHED
92456 /*
92457diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92458index 607f852..486bc87 100644
92459--- a/kernel/sched/completion.c
92460+++ b/kernel/sched/completion.c
92461@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92462 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92463 * or number of jiffies left till timeout) if completed.
92464 */
92465-long __sched
92466+long __sched __intentional_overflow(-1)
92467 wait_for_completion_interruptible_timeout(struct completion *x,
92468 unsigned long timeout)
92469 {
92470@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92471 *
92472 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92473 */
92474-int __sched wait_for_completion_killable(struct completion *x)
92475+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92476 {
92477 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92478 if (t == -ERESTARTSYS)
92479@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92480 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92481 * or number of jiffies left till timeout) if completed.
92482 */
92483-long __sched
92484+long __sched __intentional_overflow(-1)
92485 wait_for_completion_killable_timeout(struct completion *x,
92486 unsigned long timeout)
92487 {
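
__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin: these timeout helpers legitimately return either a jiffies count or a negative errno in the same long, so overflow instrumentation on the return path is suppressed. Modelled here with the annotation as an empty macro:

    /* expands to a plugin attribute in the hardened build; a no-op here */
    #define __intentional_overflow(...)

    #define ERESTARTSYS 512

    /* mixes a count and a negative errno in one long, by design */
    static long __intentional_overflow(-1)
    wait_sketch(long timeout, int interrupted)
    {
        return interrupted ? -ERESTARTSYS : timeout;
    }
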
92488diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92489index 44dfc8b..56d160d 100644
92490--- a/kernel/sched/core.c
92491+++ b/kernel/sched/core.c
92492@@ -1902,7 +1902,7 @@ void set_numabalancing_state(bool enabled)
92493 int sysctl_numa_balancing(struct ctl_table *table, int write,
92494 void __user *buffer, size_t *lenp, loff_t *ppos)
92495 {
92496- struct ctl_table t;
92497+ ctl_table_no_const t;
92498 int err;
92499 int state = numabalancing_enabled;
92500
92501@@ -2352,8 +2352,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92502 next->active_mm = oldmm;
92503 atomic_inc(&oldmm->mm_count);
92504 enter_lazy_tlb(oldmm, next);
92505- } else
92506+ } else {
92507 switch_mm(oldmm, mm, next);
92508+ populate_stack();
92509+ }
92510
92511 if (!prev->mm) {
92512 prev->active_mm = NULL;
92513@@ -3152,6 +3154,8 @@ int can_nice(const struct task_struct *p, const int nice)
92514 /* convert nice value [19,-20] to rlimit style value [1,40] */
92515 int nice_rlim = nice_to_rlimit(nice);
92516
92517+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92518+
92519 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92520 capable(CAP_SYS_NICE));
92521 }
92522@@ -3178,7 +3182,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92523 nice = task_nice(current) + increment;
92524
92525 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92526- if (increment < 0 && !can_nice(current, nice))
92527+ if (increment < 0 && (!can_nice(current, nice) ||
92528+ gr_handle_chroot_nice()))
92529 return -EPERM;
92530
92531 retval = security_task_setnice(current, nice);
92532@@ -3473,6 +3478,7 @@ recheck:
92533 if (policy != p->policy && !rlim_rtprio)
92534 return -EPERM;
92535
92536+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92537 /* can't increase priority */
92538 if (attr->sched_priority > p->rt_priority &&
92539 attr->sched_priority > rlim_rtprio)
92540@@ -4973,6 +4979,7 @@ void idle_task_exit(void)
92541
92542 if (mm != &init_mm) {
92543 switch_mm(mm, &init_mm, current);
92544+ populate_stack();
92545 finish_arch_post_lock_switch();
92546 }
92547 mmdrop(mm);
92548@@ -5068,7 +5075,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92549
92550 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92551
92552-static struct ctl_table sd_ctl_dir[] = {
92553+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92554 {
92555 .procname = "sched_domain",
92556 .mode = 0555,
92557@@ -5085,17 +5092,17 @@ static struct ctl_table sd_ctl_root[] = {
92558 {}
92559 };
92560
92561-static struct ctl_table *sd_alloc_ctl_entry(int n)
92562+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92563 {
92564- struct ctl_table *entry =
92565+ ctl_table_no_const *entry =
92566 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92567
92568 return entry;
92569 }
92570
92571-static void sd_free_ctl_entry(struct ctl_table **tablep)
92572+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92573 {
92574- struct ctl_table *entry;
92575+ ctl_table_no_const *entry;
92576
92577 /*
92578 * In the intermediate directories, both the child directory and
92579@@ -5103,22 +5110,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92580 * will always be set. In the lowest directory the names are
92581 * static strings and all have proc handlers.
92582 */
92583- for (entry = *tablep; entry->mode; entry++) {
92584- if (entry->child)
92585- sd_free_ctl_entry(&entry->child);
92586+ for (entry = tablep; entry->mode; entry++) {
92587+ if (entry->child) {
92588+ sd_free_ctl_entry(entry->child);
92589+ pax_open_kernel();
92590+ entry->child = NULL;
92591+ pax_close_kernel();
92592+ }
92593 if (entry->proc_handler == NULL)
92594 kfree(entry->procname);
92595 }
92596
92597- kfree(*tablep);
92598- *tablep = NULL;
92599+ kfree(tablep);
92600 }
92601
92602 static int min_load_idx = 0;
92603 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92604
92605 static void
92606-set_table_entry(struct ctl_table *entry,
92607+set_table_entry(ctl_table_no_const *entry,
92608 const char *procname, void *data, int maxlen,
92609 umode_t mode, proc_handler *proc_handler,
92610 bool load_idx)
92611@@ -5138,7 +5148,7 @@ set_table_entry(struct ctl_table *entry,
92612 static struct ctl_table *
92613 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92614 {
92615- struct ctl_table *table = sd_alloc_ctl_entry(14);
92616+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92617
92618 if (table == NULL)
92619 return NULL;
92620@@ -5176,9 +5186,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92621 return table;
92622 }
92623
92624-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92625+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92626 {
92627- struct ctl_table *entry, *table;
92628+ ctl_table_no_const *entry, *table;
92629 struct sched_domain *sd;
92630 int domain_num = 0, i;
92631 char buf[32];
92632@@ -5205,11 +5215,13 @@ static struct ctl_table_header *sd_sysctl_header;
92633 static void register_sched_domain_sysctl(void)
92634 {
92635 int i, cpu_num = num_possible_cpus();
92636- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92637+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92638 char buf[32];
92639
92640 WARN_ON(sd_ctl_dir[0].child);
92641+ pax_open_kernel();
92642 sd_ctl_dir[0].child = entry;
92643+ pax_close_kernel();
92644
92645 if (entry == NULL)
92646 return;
92647@@ -5232,8 +5244,12 @@ static void unregister_sched_domain_sysctl(void)
92648 if (sd_sysctl_header)
92649 unregister_sysctl_table(sd_sysctl_header);
92650 sd_sysctl_header = NULL;
92651- if (sd_ctl_dir[0].child)
92652- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92653+ if (sd_ctl_dir[0].child) {
92654+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92655+ pax_open_kernel();
92656+ sd_ctl_dir[0].child = NULL;
92657+ pax_close_kernel();
92658+ }
92659 }
92660 #else
92661 static void register_sched_domain_sysctl(void)
92662diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
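
The sysctl rework shows the constify pattern end to end: the static sd_ctl_dir root moves to __read_only memory, heap-built tables are threaded through as ctl_table_no_const so they stay writable, and the two legitimate writes to the root (hooking and clearing the child pointer) are bracketed by pax_open_kernel()/pax_close_kernel(). A conceptual model of that write window; the real implementation is per-architecture (CR0.WP toggling on x86):

    struct ctl_entry { const char *procname; void *child; };

    /* conceptual stubs: lift and restore write protection on rodata */
    static void pax_open_kernel(void)  { }
    static void pax_close_kernel(void) { }

    static struct ctl_entry sd_ctl_root[1];   /* __read_only in the patch */

    static void set_child(void *table)
    {
        pax_open_kernel();
        sd_ctl_root[0].child = table;   /* the only write to the root */
        pax_close_kernel();
    }
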
92663index fe331fc..29d620e 100644
92664--- a/kernel/sched/fair.c
92665+++ b/kernel/sched/fair.c
92666@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92667
92668 static void reset_ptenuma_scan(struct task_struct *p)
92669 {
92670- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92671+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92672 p->mm->numa_scan_offset = 0;
92673 }
92674
92675@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92676 * run_rebalance_domains is triggered when needed from the scheduler tick.
92677 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92678 */
92679-static void run_rebalance_domains(struct softirq_action *h)
92680+static __latent_entropy void run_rebalance_domains(void)
92681 {
92682 struct rq *this_rq = this_rq();
92683 enum cpu_idle_type idle = this_rq->idle_balance ?
92684diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92685index 9a2a45c..bb91ace 100644
92686--- a/kernel/sched/sched.h
92687+++ b/kernel/sched/sched.h
92688@@ -1182,7 +1182,7 @@ struct sched_class {
92689 #ifdef CONFIG_FAIR_GROUP_SCHED
92690 void (*task_move_group) (struct task_struct *p, int on_rq);
92691 #endif
92692-};
92693+} __do_const;
92694
92695 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92696 {
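
__do_const is the constify plugin's marker for ops-style structures: a type holding only function pointers is forced const at every instance, so sched_class tables end up in read-only memory. Approximated below with an explicit const instance:

    #define __do_const   /* enforced by the constify plugin in the real build */

    struct sched_class_sketch {
        void (*enqueue)(void);
        void (*dequeue)(void);
    } __do_const;

    static void noop(void) { }

    /* instances become immutable function-pointer tables */
    static const struct sched_class_sketch fair_class_sketch = {
        .enqueue = noop,
        .dequeue = noop,
    };
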
92697diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92698index 4ef9687..4f44028 100644
92699--- a/kernel/seccomp.c
92700+++ b/kernel/seccomp.c
92701@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
92702
92703 switch (action) {
92704 case SECCOMP_RET_ERRNO:
92705- /* Set the low-order 16-bits as a errno. */
92706+ /* Set low-order bits as an errno, capped at MAX_ERRNO. */
92707+ if (data > MAX_ERRNO)
92708+ data = MAX_ERRNO;
92709 syscall_set_return_value(current, task_pt_regs(current),
92710 -data, 0);
92711 goto skip;
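
SECCOMP_RET_ERRNO carries 16 bits of filter-chosen data, but Linux only treats values 1..4095 as errnos; without the cap a filter could make a syscall return, say, -65535, which callers using the -1..-4095 convention would not recognise as an error. The same MAX_ERRNO cap was also adopted upstream. The clamp, in isolation:

    #include <stdint.h>

    #define MAX_ERRNO 4095   /* largest value the ABI treats as an errno */

    static long seccomp_errno_result(uint32_t data)
    {
        if (data > MAX_ERRNO)
            data = MAX_ERRNO;
        return -(long)data;
    }
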
92712diff --git a/kernel/signal.c b/kernel/signal.c
92713index 16a30529..25ad033 100644
92714--- a/kernel/signal.c
92715+++ b/kernel/signal.c
92716@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92717
92718 int print_fatal_signals __read_mostly;
92719
92720-static void __user *sig_handler(struct task_struct *t, int sig)
92721+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92722 {
92723 return t->sighand->action[sig - 1].sa.sa_handler;
92724 }
92725
92726-static int sig_handler_ignored(void __user *handler, int sig)
92727+static int sig_handler_ignored(__sighandler_t handler, int sig)
92728 {
92729 /* Is it explicitly or implicitly ignored? */
92730 return handler == SIG_IGN ||
92731@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92732
92733 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92734 {
92735- void __user *handler;
92736+ __sighandler_t handler;
92737
92738 handler = sig_handler(t, sig);
92739
92740@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92741 atomic_inc(&user->sigpending);
92742 rcu_read_unlock();
92743
92744+ if (!override_rlimit)
92745+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92746+
92747 if (override_rlimit ||
92748 atomic_read(&user->sigpending) <=
92749 task_rlimit(t, RLIMIT_SIGPENDING)) {
92750@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92751
92752 int unhandled_signal(struct task_struct *tsk, int sig)
92753 {
92754- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92755+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92756 if (is_global_init(tsk))
92757 return 1;
92758 if (handler != SIG_IGN && handler != SIG_DFL)
92759@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92760 }
92761 }
92762
92763+ /* allow glibc communication via tgkill to other threads in our
92764+ thread group */
92765+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92766+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92767+ && gr_handle_signal(t, sig))
92768+ return -EPERM;
92769+
92770 return security_task_kill(t, info, sig, 0);
92771 }
92772
92773@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92774 return send_signal(sig, info, p, 1);
92775 }
92776
92777-static int
92778+int
92779 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92780 {
92781 return send_signal(sig, info, t, 0);
92782@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92783 unsigned long int flags;
92784 int ret, blocked, ignored;
92785 struct k_sigaction *action;
92786+ int is_unhandled = 0;
92787
92788 spin_lock_irqsave(&t->sighand->siglock, flags);
92789 action = &t->sighand->action[sig-1];
92790@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92791 }
92792 if (action->sa.sa_handler == SIG_DFL)
92793 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92794+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92795+ is_unhandled = 1;
92796 ret = specific_send_sig_info(sig, info, t);
92797 spin_unlock_irqrestore(&t->sighand->siglock, flags);
92798
92799+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
92800+ normal operation */
92801+ if (is_unhandled) {
92802+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
92803+ gr_handle_crash(t, sig);
92804+ }
92805+
92806 return ret;
92807 }
92808
92809@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92810 ret = check_kill_permission(sig, info, p);
92811 rcu_read_unlock();
92812
92813- if (!ret && sig)
92814+ if (!ret && sig) {
92815 ret = do_send_sig_info(sig, info, p, true);
92816+ if (!ret)
92817+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
92818+ }
92819
92820 return ret;
92821 }
92822@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
92823 int error = -ESRCH;
92824
92825 rcu_read_lock();
92826- p = find_task_by_vpid(pid);
92827+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
92828+ /* allow glibc communication via tgkill to other threads in our
92829+ thread group */
92830+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
92831+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
92832+ p = find_task_by_vpid_unrestricted(pid);
92833+ else
92834+#endif
92835+ p = find_task_by_vpid(pid);
92836 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
92837 error = check_kill_permission(sig, info, p);
92838 /*
92839@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
92840 }
92841 seg = get_fs();
92842 set_fs(KERNEL_DS);
92843- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
92844- (stack_t __force __user *) &uoss,
92845+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
92846+ (stack_t __force_user *) &uoss,
92847 compat_user_stack_pointer());
92848 set_fs(seg);
92849 if (ret >= 0 && uoss_ptr) {
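
[Annotation — not part of the patch] Several of the signal.c hunks retype the handler helpers from `void __user *` to `__sighandler_t`, so the compiler type-checks comparisons against SIG_IGN/SIG_DFL instead of accepting any pointer. A userspace analogue of the retyped helper (the typedef stands in for the kernel's `__sighandler_t`):

#include <signal.h>
#include <stdio.h>

typedef void (*sig_handler_fn)(int);   /* stand-in for kernel __sighandler_t */

static int handler_ignored(sig_handler_fn handler, int sig)
{
        return handler == SIG_IGN ||
               (handler == SIG_DFL && sig == SIGCHLD);  /* SIGCHLD default-ignores */
}

int main(void)
{
        printf("%d %d\n", handler_ignored(SIG_IGN, SIGTERM),
                          handler_ignored(SIG_DFL, SIGTERM));  /* prints: 1 0 */
        return 0;
}
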
92850diff --git a/kernel/smpboot.c b/kernel/smpboot.c
92851index 40190f2..8861d40 100644
92852--- a/kernel/smpboot.c
92853+++ b/kernel/smpboot.c
92854@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
92855 }
92856 smpboot_unpark_thread(plug_thread, cpu);
92857 }
92858- list_add(&plug_thread->list, &hotplug_threads);
92859+ pax_list_add(&plug_thread->list, &hotplug_threads);
92860 out:
92861 mutex_unlock(&smpboot_threads_lock);
92862 put_online_cpus();
92863@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
92864 {
92865 get_online_cpus();
92866 mutex_lock(&smpboot_threads_lock);
92867- list_del(&plug_thread->list);
92868+ pax_list_del(&plug_thread->list);
92869 smpboot_destroy_threads(plug_thread);
92870 mutex_unlock(&smpboot_threads_lock);
92871 put_online_cpus();
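
[Annotation — not part of the patch] `pax_list_add`/`pax_list_del` update list nodes that PaX places in read-only memory: the helpers briefly open a kernel write window, perform the ordinary list operation, then seal the memory again. A userspace sketch of that write-window idea using mprotect (an analogy only; the kernel mechanism differs):

#include <sys/mman.h>
#include <string.h>
#include <stdio.h>

static char *page;

static void guarded_write(const char *s)
{
        mprotect(page, 4096, PROT_READ | PROT_WRITE);  /* "open" the window */
        strncpy(page, s, 4095);
        mprotect(page, 4096, PROT_READ);               /* "close" it again */
}

int main(void)
{
        page = mmap(NULL, 4096, PROT_READ,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
                return 1;
        guarded_write("updated under a brief RW window");
        puts(page);
        return 0;
}
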
92872diff --git a/kernel/softirq.c b/kernel/softirq.c
92873index c497fcd..e8f90a9 100644
92874--- a/kernel/softirq.c
92875+++ b/kernel/softirq.c
92876@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
92877 EXPORT_SYMBOL(irq_stat);
92878 #endif
92879
92880-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
92881+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
92882
92883 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
92884
92885@@ -266,7 +266,7 @@ restart:
92886 kstat_incr_softirqs_this_cpu(vec_nr);
92887
92888 trace_softirq_entry(vec_nr);
92889- h->action(h);
92890+ h->action();
92891 trace_softirq_exit(vec_nr);
92892 if (unlikely(prev_count != preempt_count())) {
92893 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
92894@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
92895 or_softirq_pending(1UL << nr);
92896 }
92897
92898-void open_softirq(int nr, void (*action)(struct softirq_action *))
92899+void __init open_softirq(int nr, void (*action)(void))
92900 {
92901 softirq_vec[nr].action = action;
92902 }
92903@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
92904 }
92905 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
92906
92907-static void tasklet_action(struct softirq_action *a)
92908+static void tasklet_action(void)
92909 {
92910 struct tasklet_struct *list;
92911
92912@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
92913 }
92914 }
92915
92916-static void tasklet_hi_action(struct softirq_action *a)
92917+static __latent_entropy void tasklet_hi_action(void)
92918 {
92919 struct tasklet_struct *list;
92920
92921@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
92922 .notifier_call = cpu_callback
92923 };
92924
92925-static struct smp_hotplug_thread softirq_threads = {
92926+static struct smp_hotplug_thread softirq_threads __read_only = {
92927 .store = &ksoftirqd,
92928 .thread_should_run = ksoftirqd_should_run,
92929 .thread_fn = run_ksoftirqd,
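
[Annotation — not part of the patch] The softirq.c hunks make the handler vector read-only and page-aligned, restrict registration to `__init` time, and drop the `struct softirq_action *` parameter that handlers never used. A compilable sketch of the reworked registration/dispatch shape (toy sizes and names):

#include <stdio.h>

#define NR_SOFTIRQS 2

struct softirq_action { void (*action)(void); };

/* Populated once at init, treated as read-only afterwards. */
static struct softirq_action softirq_vec[NR_SOFTIRQS];

static void open_softirq(int nr, void (*action)(void))
{
        softirq_vec[nr].action = action;
}

static void tasklet_action(void) { puts("tasklet"); }

int main(void)
{
        open_softirq(0, tasklet_action);
        softirq_vec[0].action();   /* h->action() instead of h->action(h) */
        return 0;
}
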
92930diff --git a/kernel/sys.c b/kernel/sys.c
92931index ea9c881..2194af5 100644
92932--- a/kernel/sys.c
92933+++ b/kernel/sys.c
92934@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
92935 error = -EACCES;
92936 goto out;
92937 }
92938+
92939+ if (gr_handle_chroot_setpriority(p, niceval)) {
92940+ error = -EACCES;
92941+ goto out;
92942+ }
92943+
92944 no_nice = security_task_setnice(p, niceval);
92945 if (no_nice) {
92946 error = no_nice;
92947@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
92948 goto error;
92949 }
92950
92951+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
92952+ goto error;
92953+
92954+ if (!gid_eq(new->gid, old->gid)) {
92955+ /* make sure we generate a learn log for what will
92956+ end up being a role transition after a full-learning
92957+ policy is generated
92958+ CAP_SETGID is required to perform a transition
92959+ we may not log a CAP_SETGID check above, e.g.
92960+ in the case where new rgid = old egid
92961+ */
92962+ gr_learn_cap(current, new, CAP_SETGID);
92963+ }
92964+
92965 if (rgid != (gid_t) -1 ||
92966 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
92967 new->sgid = new->egid;
92968@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
92969 old = current_cred();
92970
92971 retval = -EPERM;
92972+
92973+ if (gr_check_group_change(kgid, kgid, kgid))
92974+ goto error;
92975+
92976 if (ns_capable(old->user_ns, CAP_SETGID))
92977 new->gid = new->egid = new->sgid = new->fsgid = kgid;
92978 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
92979@@ -411,7 +435,7 @@ error:
92980 /*
92981 * change the user struct in a credentials set to match the new UID
92982 */
92983-static int set_user(struct cred *new)
92984+int set_user(struct cred *new)
92985 {
92986 struct user_struct *new_user;
92987
92988@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
92989 goto error;
92990 }
92991
92992+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
92993+ goto error;
92994+
92995 if (!uid_eq(new->uid, old->uid)) {
92996+ /* make sure we generate a learn log for what will
92997+ end up being a role transition after a full-learning
92998+ policy is generated
92999+ CAP_SETUID is required to perform a transition
93000+ we may not log a CAP_SETUID check above, e.g.
93001+ in the case where new ruid = old euid
93002+ */
93003+ gr_learn_cap(current, new, CAP_SETUID);
93004 retval = set_user(new);
93005 if (retval < 0)
93006 goto error;
93007@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93008 old = current_cred();
93009
93010 retval = -EPERM;
93011+
93012+ if (gr_check_crash_uid(kuid))
93013+ goto error;
93014+ if (gr_check_user_change(kuid, kuid, kuid))
93015+ goto error;
93016+
93017 if (ns_capable(old->user_ns, CAP_SETUID)) {
93018 new->suid = new->uid = kuid;
93019 if (!uid_eq(kuid, old->uid)) {
93020@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93021 goto error;
93022 }
93023
93024+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93025+ goto error;
93026+
93027 if (ruid != (uid_t) -1) {
93028 new->uid = kruid;
93029 if (!uid_eq(kruid, old->uid)) {
93030@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93031 goto error;
93032 }
93033
93034+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93035+ goto error;
93036+
93037 if (rgid != (gid_t) -1)
93038 new->gid = krgid;
93039 if (egid != (gid_t) -1)
93040@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93041 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93042 ns_capable(old->user_ns, CAP_SETUID)) {
93043 if (!uid_eq(kuid, old->fsuid)) {
93044+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93045+ goto error;
93046+
93047 new->fsuid = kuid;
93048 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93049 goto change_okay;
93050 }
93051 }
93052
93053+error:
93054 abort_creds(new);
93055 return old_fsuid;
93056
93057@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93058 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93059 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93060 ns_capable(old->user_ns, CAP_SETGID)) {
93061+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93062+ goto error;
93063+
93064 if (!gid_eq(kgid, old->fsgid)) {
93065 new->fsgid = kgid;
93066 goto change_okay;
93067 }
93068 }
93069
93070+error:
93071 abort_creds(new);
93072 return old_fsgid;
93073
93074@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93075 return -EFAULT;
93076
93077 down_read(&uts_sem);
93078- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93079+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93080 __OLD_UTS_LEN);
93081 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93082- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93083+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93084 __OLD_UTS_LEN);
93085 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93086- error |= __copy_to_user(&name->release, &utsname()->release,
93087+ error |= __copy_to_user(name->release, &utsname()->release,
93088 __OLD_UTS_LEN);
93089 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93090- error |= __copy_to_user(&name->version, &utsname()->version,
93091+ error |= __copy_to_user(name->version, &utsname()->version,
93092 __OLD_UTS_LEN);
93093 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93094- error |= __copy_to_user(&name->machine, &utsname()->machine,
93095+ error |= __copy_to_user(name->machine, &utsname()->machine,
93096 __OLD_UTS_LEN);
93097 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93098 up_read(&uts_sem);
93099@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93100 */
93101 new_rlim->rlim_cur = 1;
93102 }
93103+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93104+ is changed to a lower value. Since tasks can be created by the same
93105+ user in between this limit change and an execve by this task, force
93106+ a recheck only for this task by setting PF_NPROC_EXCEEDED
93107+ */
93108+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93109+ tsk->flags |= PF_NPROC_EXCEEDED;
93110 }
93111 if (!retval) {
93112 if (old_rlim)
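
[Annotation — not part of the patch] The kernel/sys.c hunks all follow one placement rule: a grsecurity policy check (`gr_check_user_change`, `gr_check_group_change`) runs before any credential field is committed, so a denial aborts the whole transition with the creds untouched. A toy sketch of that hook placement (the policy rule here is invented for illustration):

#include <stdio.h>
#include <sys/types.h>

static int policy_denies(uid_t new_uid)
{
        return new_uid == 0;   /* toy rule: deny transitions to uid 0 */
}

static int do_setuid(uid_t *cur, uid_t new_uid)
{
        if (policy_denies(new_uid))
                return -1;     /* "goto error": creds left untouched */
        *cur = new_uid;
        return 0;
}

int main(void)
{
        uid_t uid = 1000;
        printf("%d uid=%u\n", do_setuid(&uid, 0), (unsigned)uid);     /* denied */
        printf("%d uid=%u\n", do_setuid(&uid, 1001), (unsigned)uid);  /* allowed */
        return 0;
}
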
93113diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93114index 88ea2d6..88acc77 100644
93115--- a/kernel/sysctl.c
93116+++ b/kernel/sysctl.c
93117@@ -94,7 +94,6 @@
93118
93119
93120 #if defined(CONFIG_SYSCTL)
93121-
93122 /* External variables not in a header file. */
93123 extern int max_threads;
93124 extern int suid_dumpable;
93125@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93126
93127 /* Constants used for minimum and maximum */
93128 #ifdef CONFIG_LOCKUP_DETECTOR
93129-static int sixty = 60;
93130+static int sixty __read_only = 60;
93131 #endif
93132
93133-static int __maybe_unused neg_one = -1;
93134+static int __maybe_unused neg_one __read_only = -1;
93135
93136-static int zero;
93137-static int __maybe_unused one = 1;
93138-static int __maybe_unused two = 2;
93139-static int __maybe_unused four = 4;
93140-static unsigned long one_ul = 1;
93141-static int one_hundred = 100;
93142+static int zero __read_only = 0;
93143+static int __maybe_unused one __read_only = 1;
93144+static int __maybe_unused two __read_only = 2;
93145+static int __maybe_unused three __read_only = 3;
93146+static int __maybe_unused four __read_only = 4;
93147+static unsigned long one_ul __read_only = 1;
93148+static int one_hundred __read_only = 100;
93149 #ifdef CONFIG_PRINTK
93150-static int ten_thousand = 10000;
93151+static int ten_thousand __read_only = 10000;
93152 #endif
93153
93154 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93155@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93156 void __user *buffer, size_t *lenp, loff_t *ppos);
93157 #endif
93158
93159-#ifdef CONFIG_PRINTK
93160 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93161 void __user *buffer, size_t *lenp, loff_t *ppos);
93162-#endif
93163
93164 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93165 void __user *buffer, size_t *lenp, loff_t *ppos);
93166@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93167
93168 #endif
93169
93170+extern struct ctl_table grsecurity_table[];
93171+
93172 static struct ctl_table kern_table[];
93173 static struct ctl_table vm_table[];
93174 static struct ctl_table fs_table[];
93175@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93176 int sysctl_legacy_va_layout;
93177 #endif
93178
93179+#ifdef CONFIG_PAX_SOFTMODE
93180+static struct ctl_table pax_table[] = {
93181+ {
93182+ .procname = "softmode",
93183+ .data = &pax_softmode,
93184+ .maxlen = sizeof(unsigned int),
93185+ .mode = 0600,
93186+ .proc_handler = &proc_dointvec,
93187+ },
93188+
93189+ { }
93190+};
93191+#endif
93192+
93193 /* The default sysctl tables: */
93194
93195 static struct ctl_table sysctl_base_table[] = {
93196@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93197 #endif
93198
93199 static struct ctl_table kern_table[] = {
93200+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93201+ {
93202+ .procname = "grsecurity",
93203+ .mode = 0500,
93204+ .child = grsecurity_table,
93205+ },
93206+#endif
93207+
93208+#ifdef CONFIG_PAX_SOFTMODE
93209+ {
93210+ .procname = "pax",
93211+ .mode = 0500,
93212+ .child = pax_table,
93213+ },
93214+#endif
93215+
93216 {
93217 .procname = "sched_child_runs_first",
93218 .data = &sysctl_sched_child_runs_first,
93219@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93220 .data = &modprobe_path,
93221 .maxlen = KMOD_PATH_LEN,
93222 .mode = 0644,
93223- .proc_handler = proc_dostring,
93224+ .proc_handler = proc_dostring_modpriv,
93225 },
93226 {
93227 .procname = "modules_disabled",
93228@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93229 .extra1 = &zero,
93230 .extra2 = &one,
93231 },
93232+#endif
93233 {
93234 .procname = "kptr_restrict",
93235 .data = &kptr_restrict,
93236 .maxlen = sizeof(int),
93237 .mode = 0644,
93238 .proc_handler = proc_dointvec_minmax_sysadmin,
93239+#ifdef CONFIG_GRKERNSEC_HIDESYM
93240+ .extra1 = &two,
93241+#else
93242 .extra1 = &zero,
93243+#endif
93244 .extra2 = &two,
93245 },
93246-#endif
93247 {
93248 .procname = "ngroups_max",
93249 .data = &ngroups_max,
93250@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93251 */
93252 {
93253 .procname = "perf_event_paranoid",
93254- .data = &sysctl_perf_event_paranoid,
93255- .maxlen = sizeof(sysctl_perf_event_paranoid),
93256+ .data = &sysctl_perf_event_legitimately_concerned,
93257+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93258 .mode = 0644,
93259- .proc_handler = proc_dointvec,
93260+ /* go ahead, be a hero */
93261+ .proc_handler = proc_dointvec_minmax_sysadmin,
93262+ .extra1 = &neg_one,
93263+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93264+ .extra2 = &three,
93265+#else
93266+ .extra2 = &two,
93267+#endif
93268 },
93269 {
93270 .procname = "perf_event_mlock_kb",
93271@@ -1340,6 +1381,13 @@ static struct ctl_table vm_table[] = {
93272 .proc_handler = proc_dointvec_minmax,
93273 .extra1 = &zero,
93274 },
93275+ {
93276+ .procname = "heap_stack_gap",
93277+ .data = &sysctl_heap_stack_gap,
93278+ .maxlen = sizeof(sysctl_heap_stack_gap),
93279+ .mode = 0644,
93280+ .proc_handler = proc_doulongvec_minmax,
93281+ },
93282 #else
93283 {
93284 .procname = "nr_trim_pages",
93285@@ -1822,6 +1870,16 @@ int proc_dostring(struct ctl_table *table, int write,
93286 (char __user *)buffer, lenp, ppos);
93287 }
93288
93289+int proc_dostring_modpriv(struct ctl_table *table, int write,
93290+ void __user *buffer, size_t *lenp, loff_t *ppos)
93291+{
93292+ if (write && !capable(CAP_SYS_MODULE))
93293+ return -EPERM;
93294+
93295+ return _proc_do_string(table->data, table->maxlen, write,
93296+ buffer, lenp, ppos);
93297+}
93298+
93299 static size_t proc_skip_spaces(char **buf)
93300 {
93301 size_t ret;
93302@@ -1927,6 +1985,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93303 len = strlen(tmp);
93304 if (len > *size)
93305 len = *size;
93306+ if (len > sizeof(tmp))
93307+ len = sizeof(tmp);
93308 if (copy_to_user(*buf, tmp, len))
93309 return -EFAULT;
93310 *size -= len;
93311@@ -2104,7 +2164,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93312 static int proc_taint(struct ctl_table *table, int write,
93313 void __user *buffer, size_t *lenp, loff_t *ppos)
93314 {
93315- struct ctl_table t;
93316+ ctl_table_no_const t;
93317 unsigned long tmptaint = get_taint();
93318 int err;
93319
93320@@ -2132,7 +2192,6 @@ static int proc_taint(struct ctl_table *table, int write,
93321 return err;
93322 }
93323
93324-#ifdef CONFIG_PRINTK
93325 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93326 void __user *buffer, size_t *lenp, loff_t *ppos)
93327 {
93328@@ -2141,7 +2200,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93329
93330 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93331 }
93332-#endif
93333
93334 struct do_proc_dointvec_minmax_conv_param {
93335 int *min;
93336@@ -2701,6 +2759,12 @@ int proc_dostring(struct ctl_table *table, int write,
93337 return -ENOSYS;
93338 }
93339
93340+int proc_dostring_modpriv(struct ctl_table *table, int write,
93341+ void __user *buffer, size_t *lenp, loff_t *ppos)
93342+{
93343+ return -ENOSYS;
93344+}
93345+
93346 int proc_dointvec(struct ctl_table *table, int write,
93347 void __user *buffer, size_t *lenp, loff_t *ppos)
93348 {
93349@@ -2757,5 +2821,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93350 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93351 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93352 EXPORT_SYMBOL(proc_dostring);
93353+EXPORT_SYMBOL(proc_dostring_modpriv);
93354 EXPORT_SYMBOL(proc_doulongvec_minmax);
93355 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
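
[Annotation — not part of the patch] The new `proc_dostring_modpriv` handler behaves like `proc_dostring` for reads but refuses writes from callers lacking CAP_SYS_MODULE, which is why the `modprobe` sysctl is switched over to it. A userspace analogue of the write guard (the capability check is stubbed with a flag):

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int capable_sys_module;   /* stand-in for capable(CAP_SYS_MODULE) */

static int dostring_modpriv(char *data, size_t maxlen, int write, const char *buf)
{
        if (write && !capable_sys_module)
                return -EPERM;
        if (write)
                snprintf(data, maxlen, "%s", buf);
        return 0;
}

int main(void)
{
        char path[64] = "/sbin/modprobe";
        printf("%d\n", dostring_modpriv(path, sizeof(path), 1, "/tmp/evil")); /* -EPERM */
        capable_sys_module = 1;
        printf("%d %s\n", dostring_modpriv(path, sizeof(path), 1, "/sbin/mp"), path);
        return 0;
}
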
93356diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93357index 670fff8..a247812 100644
93358--- a/kernel/taskstats.c
93359+++ b/kernel/taskstats.c
93360@@ -28,9 +28,12 @@
93361 #include <linux/fs.h>
93362 #include <linux/file.h>
93363 #include <linux/pid_namespace.h>
93364+#include <linux/grsecurity.h>
93365 #include <net/genetlink.h>
93366 #include <linux/atomic.h>
93367
93368+extern int gr_is_taskstats_denied(int pid);
93369+
93370 /*
93371 * Maximum length of a cpumask that can be specified in
93372 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93373@@ -576,6 +579,9 @@ err:
93374
93375 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93376 {
93377+ if (gr_is_taskstats_denied(current->pid))
93378+ return -EACCES;
93379+
93380 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93381 return cmd_attr_register_cpumask(info);
93382 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93383diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93384index a7077d3..dd48a49 100644
93385--- a/kernel/time/alarmtimer.c
93386+++ b/kernel/time/alarmtimer.c
93387@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93388 struct platform_device *pdev;
93389 int error = 0;
93390 int i;
93391- struct k_clock alarm_clock = {
93392+ static struct k_clock alarm_clock = {
93393 .clock_getres = alarm_clock_getres,
93394 .clock_get = alarm_clock_get,
93395 .timer_create = alarm_timer_create,
93396diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93397index d8c724c..6b331a4 100644
93398--- a/kernel/time/hrtimer.c
93399+++ b/kernel/time/hrtimer.c
93400@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93401 local_irq_restore(flags);
93402 }
93403
93404-static void run_hrtimer_softirq(struct softirq_action *h)
93405+static __latent_entropy void run_hrtimer_softirq(void)
93406 {
93407 hrtimer_peek_ahead_timers();
93408 }
93409diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93410index a16b678..8c5bd9d 100644
93411--- a/kernel/time/posix-cpu-timers.c
93412+++ b/kernel/time/posix-cpu-timers.c
93413@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
93414
93415 static __init int init_posix_cpu_timers(void)
93416 {
93417- struct k_clock process = {
93418+ static struct k_clock process = {
93419 .clock_getres = process_cpu_clock_getres,
93420 .clock_get = process_cpu_clock_get,
93421 .timer_create = process_cpu_timer_create,
93422 .nsleep = process_cpu_nsleep,
93423 .nsleep_restart = process_cpu_nsleep_restart,
93424 };
93425- struct k_clock thread = {
93426+ static struct k_clock thread = {
93427 .clock_getres = thread_cpu_clock_getres,
93428 .clock_get = thread_cpu_clock_get,
93429 .timer_create = thread_cpu_timer_create,
93430diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93431index 31ea01f..7fc61ef 100644
93432--- a/kernel/time/posix-timers.c
93433+++ b/kernel/time/posix-timers.c
93434@@ -43,6 +43,7 @@
93435 #include <linux/hash.h>
93436 #include <linux/posix-clock.h>
93437 #include <linux/posix-timers.h>
93438+#include <linux/grsecurity.h>
93439 #include <linux/syscalls.h>
93440 #include <linux/wait.h>
93441 #include <linux/workqueue.h>
93442@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93443 * which we beg off on and pass to do_sys_settimeofday().
93444 */
93445
93446-static struct k_clock posix_clocks[MAX_CLOCKS];
93447+static struct k_clock *posix_clocks[MAX_CLOCKS];
93448
93449 /*
93450 * These ones are defined below.
93451@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93452 */
93453 static __init int init_posix_timers(void)
93454 {
93455- struct k_clock clock_realtime = {
93456+ static struct k_clock clock_realtime = {
93457 .clock_getres = hrtimer_get_res,
93458 .clock_get = posix_clock_realtime_get,
93459 .clock_set = posix_clock_realtime_set,
93460@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93461 .timer_get = common_timer_get,
93462 .timer_del = common_timer_del,
93463 };
93464- struct k_clock clock_monotonic = {
93465+ static struct k_clock clock_monotonic = {
93466 .clock_getres = hrtimer_get_res,
93467 .clock_get = posix_ktime_get_ts,
93468 .nsleep = common_nsleep,
93469@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93470 .timer_get = common_timer_get,
93471 .timer_del = common_timer_del,
93472 };
93473- struct k_clock clock_monotonic_raw = {
93474+ static struct k_clock clock_monotonic_raw = {
93475 .clock_getres = hrtimer_get_res,
93476 .clock_get = posix_get_monotonic_raw,
93477 };
93478- struct k_clock clock_realtime_coarse = {
93479+ static struct k_clock clock_realtime_coarse = {
93480 .clock_getres = posix_get_coarse_res,
93481 .clock_get = posix_get_realtime_coarse,
93482 };
93483- struct k_clock clock_monotonic_coarse = {
93484+ static struct k_clock clock_monotonic_coarse = {
93485 .clock_getres = posix_get_coarse_res,
93486 .clock_get = posix_get_monotonic_coarse,
93487 };
93488- struct k_clock clock_tai = {
93489+ static struct k_clock clock_tai = {
93490 .clock_getres = hrtimer_get_res,
93491 .clock_get = posix_get_tai,
93492 .nsleep = common_nsleep,
93493@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93494 .timer_get = common_timer_get,
93495 .timer_del = common_timer_del,
93496 };
93497- struct k_clock clock_boottime = {
93498+ static struct k_clock clock_boottime = {
93499 .clock_getres = hrtimer_get_res,
93500 .clock_get = posix_get_boottime,
93501 .nsleep = common_nsleep,
93502@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93503 return;
93504 }
93505
93506- posix_clocks[clock_id] = *new_clock;
93507+ posix_clocks[clock_id] = new_clock;
93508 }
93509 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93510
93511@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93512 return (id & CLOCKFD_MASK) == CLOCKFD ?
93513 &clock_posix_dynamic : &clock_posix_cpu;
93514
93515- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93516+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93517 return NULL;
93518- return &posix_clocks[id];
93519+ return posix_clocks[id];
93520 }
93521
93522 static int common_timer_create(struct k_itimer *new_timer)
93523@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93524 struct k_clock *kc = clockid_to_kclock(which_clock);
93525 struct k_itimer *new_timer;
93526 int error, new_timer_id;
93527- sigevent_t event;
93528+ sigevent_t event = { };
93529 int it_id_set = IT_ID_NOT_SET;
93530
93531 if (!kc)
93532@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93533 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93534 return -EFAULT;
93535
93536+ /* only the CLOCK_REALTIME clock can be set, all other clocks
93537+ have their clock_set fptr set to a nosettime dummy function
93538+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93539+ call common_clock_set, which calls do_sys_settimeofday, which
93540+ we hook
93541+ */
93542+
93543 return kc->clock_set(which_clock, &new_tp);
93544 }
93545
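
[Annotation — not part of the patch] The posix-timers rework turns `posix_clocks[]` from an array of structs (writable at runtime) into an array of pointers to statically allocated `k_clock` objects, so lookup must check for an unregistered slot before dereferencing. A compilable sketch of the lookup shape (types trimmed to one member):

#include <stdio.h>
#include <stddef.h>

#define MAX_CLOCKS 4

struct k_clock { int (*clock_getres)(void); };

static int realtime_getres(void) { return 1; }
static struct k_clock clock_realtime = { .clock_getres = realtime_getres };

static struct k_clock *posix_clocks[MAX_CLOCKS];

static struct k_clock *clockid_to_kclock(int id)
{
        if (id < 0 || id >= MAX_CLOCKS || !posix_clocks[id] ||
            !posix_clocks[id]->clock_getres)
                return NULL;
        return posix_clocks[id];
}

int main(void)
{
        posix_clocks[0] = &clock_realtime;   /* posix_timers_register_clock() */
        printf("%p %p\n", (void *)clockid_to_kclock(0),
                          (void *)clockid_to_kclock(1));  /* valid, NULL */
        return 0;
}
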
93546diff --git a/kernel/time/time.c b/kernel/time/time.c
93547index 2c85b77..6530536 100644
93548--- a/kernel/time/time.c
93549+++ b/kernel/time/time.c
93550@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93551 return error;
93552
93553 if (tz) {
93554+ /* we log in do_settimeofday called below, so don't log twice
93555+ */
93556+ if (!tv)
93557+ gr_log_timechange();
93558+
93559 sys_tz = *tz;
93560 update_vsyscall_tz();
93561 if (firsttime) {
93562diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93563index 6a93185..288c331 100644
93564--- a/kernel/time/timekeeping.c
93565+++ b/kernel/time/timekeeping.c
93566@@ -15,6 +15,7 @@
93567 #include <linux/init.h>
93568 #include <linux/mm.h>
93569 #include <linux/sched.h>
93570+#include <linux/grsecurity.h>
93571 #include <linux/syscore_ops.h>
93572 #include <linux/clocksource.h>
93573 #include <linux/jiffies.h>
93574@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
93575 if (!timespec64_valid_strict(ts))
93576 return -EINVAL;
93577
93578+ gr_log_timechange();
93579+
93580 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93581 write_seqcount_begin(&tk_core.seq);
93582
93583diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93584index 2d3f5c5..7ed7dc5 100644
93585--- a/kernel/time/timer.c
93586+++ b/kernel/time/timer.c
93587@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
93588 /*
93589 * This function runs timers and the timer-tq in bottom half context.
93590 */
93591-static void run_timer_softirq(struct softirq_action *h)
93592+static __latent_entropy void run_timer_softirq(void)
93593 {
93594 struct tvec_base *base = __this_cpu_read(tvec_bases);
93595
93596@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
93597 *
93598 * In all cases the return value is guaranteed to be non-negative.
93599 */
93600-signed long __sched schedule_timeout(signed long timeout)
93601+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93602 {
93603 struct timer_list timer;
93604 unsigned long expire;
93605diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93606index 61ed862..3b52c65 100644
93607--- a/kernel/time/timer_list.c
93608+++ b/kernel/time/timer_list.c
93609@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93610
93611 static void print_name_offset(struct seq_file *m, void *sym)
93612 {
93613+#ifdef CONFIG_GRKERNSEC_HIDESYM
93614+ SEQ_printf(m, "<%p>", NULL);
93615+#else
93616 char symname[KSYM_NAME_LEN];
93617
93618 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93619 SEQ_printf(m, "<%pK>", sym);
93620 else
93621 SEQ_printf(m, "%s", symname);
93622+#endif
93623 }
93624
93625 static void
93626@@ -119,7 +123,11 @@ next_one:
93627 static void
93628 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93629 {
93630+#ifdef CONFIG_GRKERNSEC_HIDESYM
93631+ SEQ_printf(m, " .base: %p\n", NULL);
93632+#else
93633 SEQ_printf(m, " .base: %pK\n", base);
93634+#endif
93635 SEQ_printf(m, " .index: %d\n",
93636 base->index);
93637 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93638@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93639 {
93640 struct proc_dir_entry *pe;
93641
93642+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93643+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93644+#else
93645 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93646+#endif
93647 if (!pe)
93648 return -ENOMEM;
93649 return 0;
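
[Annotation — not part of the patch] Under CONFIG_GRKERNSEC_HIDESYM the timer_list helpers print a NULL pointer instead of resolving a symbol name or exposing the raw kernel address. A minimal sketch of the two code paths (toggle the macro to compare output):

#include <stdio.h>

#define HIDESYM 1   /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

static void print_name_offset(unsigned long addr)
{
#if HIDESYM
        printf("<%p>", (void *)0);      /* never expose the address */
#else
        printf("<%p>", (void *)addr);   /* or a resolved symbol name */
#endif
}

int main(void)
{
        print_name_offset(0xffffffff81000000UL);
        putchar('\n');
        return 0;
}
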
93650diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93651index 1fb08f2..ca4bb1e 100644
93652--- a/kernel/time/timer_stats.c
93653+++ b/kernel/time/timer_stats.c
93654@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93655 static unsigned long nr_entries;
93656 static struct entry entries[MAX_ENTRIES];
93657
93658-static atomic_t overflow_count;
93659+static atomic_unchecked_t overflow_count;
93660
93661 /*
93662 * The entries are in a hash-table, for fast lookup:
93663@@ -140,7 +140,7 @@ static void reset_entries(void)
93664 nr_entries = 0;
93665 memset(entries, 0, sizeof(entries));
93666 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93667- atomic_set(&overflow_count, 0);
93668+ atomic_set_unchecked(&overflow_count, 0);
93669 }
93670
93671 static struct entry *alloc_entry(void)
93672@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93673 if (likely(entry))
93674 entry->count++;
93675 else
93676- atomic_inc(&overflow_count);
93677+ atomic_inc_unchecked(&overflow_count);
93678
93679 out_unlock:
93680 raw_spin_unlock_irqrestore(lock, flags);
93681@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93682
93683 static void print_name_offset(struct seq_file *m, unsigned long addr)
93684 {
93685+#ifdef CONFIG_GRKERNSEC_HIDESYM
93686+ seq_printf(m, "<%p>", NULL);
93687+#else
93688 char symname[KSYM_NAME_LEN];
93689
93690 if (lookup_symbol_name(addr, symname) < 0)
93691- seq_printf(m, "<%p>", (void *)addr);
93692+ seq_printf(m, "<%pK>", (void *)addr);
93693 else
93694 seq_printf(m, "%s", symname);
93695+#endif
93696 }
93697
93698 static int tstats_show(struct seq_file *m, void *v)
93699@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93700
93701 seq_puts(m, "Timer Stats Version: v0.3\n");
93702 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93703- if (atomic_read(&overflow_count))
93704- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93705+ if (atomic_read_unchecked(&overflow_count))
93706+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93707 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93708
93709 for (i = 0; i < nr_entries; i++) {
93710@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93711 {
93712 struct proc_dir_entry *pe;
93713
93714+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93715+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93716+#else
93717 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93718+#endif
93719 if (!pe)
93720 return -ENOMEM;
93721 return 0;
93722diff --git a/kernel/torture.c b/kernel/torture.c
93723index dd70993..0bf694b 100644
93724--- a/kernel/torture.c
93725+++ b/kernel/torture.c
93726@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93727 mutex_lock(&fullstop_mutex);
93728 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93729 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93730- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93731+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93732 } else {
93733 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93734 }
93735@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
93736 if (!torture_must_stop()) {
93737 if (stutter > 1) {
93738 schedule_timeout_interruptible(stutter - 1);
93739- ACCESS_ONCE(stutter_pause_test) = 2;
93740+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93741 }
93742 schedule_timeout_interruptible(1);
93743- ACCESS_ONCE(stutter_pause_test) = 1;
93744+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93745 }
93746 if (!torture_must_stop())
93747 schedule_timeout_interruptible(stutter);
93748- ACCESS_ONCE(stutter_pause_test) = 0;
93749+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93750 torture_shutdown_absorb("torture_stutter");
93751 } while (!torture_must_stop());
93752 torture_kthread_stopping("torture_stutter");
93753@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
93754 schedule_timeout_uninterruptible(10);
93755 return true;
93756 }
93757- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93758+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93759 mutex_unlock(&fullstop_mutex);
93760 torture_shutdown_cleanup();
93761 torture_shuffle_cleanup();
93762diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93763index 483cecf..ac46091 100644
93764--- a/kernel/trace/blktrace.c
93765+++ b/kernel/trace/blktrace.c
93766@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93767 struct blk_trace *bt = filp->private_data;
93768 char buf[16];
93769
93770- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93771+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93772
93773 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93774 }
93775@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93776 return 1;
93777
93778 bt = buf->chan->private_data;
93779- atomic_inc(&bt->dropped);
93780+ atomic_inc_unchecked(&bt->dropped);
93781 return 0;
93782 }
93783
93784@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93785
93786 bt->dir = dir;
93787 bt->dev = dev;
93788- atomic_set(&bt->dropped, 0);
93789+ atomic_set_unchecked(&bt->dropped, 0);
93790 INIT_LIST_HEAD(&bt->running_list);
93791
93792 ret = -EIO;
93793diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93794index af5bffd..57664b8 100644
93795--- a/kernel/trace/ftrace.c
93796+++ b/kernel/trace/ftrace.c
93797@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
93798 if (unlikely(ftrace_disabled))
93799 return 0;
93800
93801+ ret = ftrace_arch_code_modify_prepare();
93802+ FTRACE_WARN_ON(ret);
93803+ if (ret)
93804+ return 0;
93805+
93806 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
93807+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
93808 if (ret) {
93809 ftrace_bug(ret, rec);
93810- return 0;
93811 }
93812- return 1;
93813+ return ret ? 0 : 1;
93814 }
93815
93816 /*
93817@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
93818 if (!count)
93819 return 0;
93820
93821+ pax_open_kernel();
93822 sort(start, count, sizeof(*start),
93823 ftrace_cmp_ips, ftrace_swap_ips);
93824+ pax_close_kernel();
93825
93826 start_pg = ftrace_allocate_pages(count);
93827 if (!start_pg)
93828@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
93829
93830 if (t->ret_stack == NULL) {
93831 atomic_set(&t->tracing_graph_pause, 0);
93832- atomic_set(&t->trace_overrun, 0);
93833+ atomic_set_unchecked(&t->trace_overrun, 0);
93834 t->curr_ret_stack = -1;
93835 /* Make sure the tasks see the -1 first: */
93836 smp_wmb();
93837@@ -5876,7 +5883,7 @@ static void
93838 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
93839 {
93840 atomic_set(&t->tracing_graph_pause, 0);
93841- atomic_set(&t->trace_overrun, 0);
93842+ atomic_set_unchecked(&t->trace_overrun, 0);
93843 t->ftrace_timestamp = 0;
93844 /* make curr_ret_stack visible before we add the ret_stack */
93845 smp_wmb();
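
[Annotation — not part of the patch] The `ftrace_code_disable` hunk brackets the nop-patching with the arch prepare/post-process hooks (which open and reseal the kernel text under PaX) and folds the error path into a single return. A control-flow sketch of the patched logic, with the arch hooks stubbed:

#include <stdio.h>

static int ftrace_arch_code_modify_prepare(void)      { return 0; }
static int ftrace_make_nop(void)                      { return 0; }
static int ftrace_arch_code_modify_post_process(void) { return 0; }

static int ftrace_code_disable(void)
{
        int ret = ftrace_arch_code_modify_prepare();
        if (ret)
                return 0;                 /* could not open a write window */
        ret = ftrace_make_nop();
        ftrace_arch_code_modify_post_process();
        return ret ? 0 : 1;               /* 1 on success, as before */
}

int main(void)
{
        printf("%d\n", ftrace_code_disable());   /* prints: 1 */
        return 0;
}
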
93846diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
93847index d2e151c..b68c835 100644
93848--- a/kernel/trace/ring_buffer.c
93849+++ b/kernel/trace/ring_buffer.c
93850@@ -350,9 +350,9 @@ struct buffer_data_page {
93851 */
93852 struct buffer_page {
93853 struct list_head list; /* list of buffer pages */
93854- local_t write; /* index for next write */
93855+ local_unchecked_t write; /* index for next write */
93856 unsigned read; /* index for next read */
93857- local_t entries; /* entries on this page */
93858+ local_unchecked_t entries; /* entries on this page */
93859 unsigned long real_end; /* real end of data */
93860 struct buffer_data_page *page; /* Actual data page */
93861 };
93862@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
93863 unsigned long last_overrun;
93864 local_t entries_bytes;
93865 local_t entries;
93866- local_t overrun;
93867- local_t commit_overrun;
93868+ local_unchecked_t overrun;
93869+ local_unchecked_t commit_overrun;
93870 local_t dropped_events;
93871 local_t committing;
93872 local_t commits;
93873@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93874 *
93875 * We add a counter to the write field to denote this.
93876 */
93877- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
93878- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
93879+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
93880+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
93881
93882 /*
93883 * Just make sure we have seen our old_write and synchronize
93884@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93885 * cmpxchg to only update if an interrupt did not already
93886 * do it for us. If the cmpxchg fails, we don't care.
93887 */
93888- (void)local_cmpxchg(&next_page->write, old_write, val);
93889- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
93890+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
93891+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
93892
93893 /*
93894 * No need to worry about races with clearing out the commit.
93895@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
93896
93897 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
93898 {
93899- return local_read(&bpage->entries) & RB_WRITE_MASK;
93900+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
93901 }
93902
93903 static inline unsigned long rb_page_write(struct buffer_page *bpage)
93904 {
93905- return local_read(&bpage->write) & RB_WRITE_MASK;
93906+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
93907 }
93908
93909 static int
93910@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
93911 * bytes consumed in ring buffer from here.
93912 * Increment overrun to account for the lost events.
93913 */
93914- local_add(page_entries, &cpu_buffer->overrun);
93915+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
93916 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
93917 }
93918
93919@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
93920 * it is our responsibility to update
93921 * the counters.
93922 */
93923- local_add(entries, &cpu_buffer->overrun);
93924+ local_add_unchecked(entries, &cpu_buffer->overrun);
93925 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
93926
93927 /*
93928@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93929 if (tail == BUF_PAGE_SIZE)
93930 tail_page->real_end = 0;
93931
93932- local_sub(length, &tail_page->write);
93933+ local_sub_unchecked(length, &tail_page->write);
93934 return;
93935 }
93936
93937@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93938 rb_event_set_padding(event);
93939
93940 /* Set the write back to the previous setting */
93941- local_sub(length, &tail_page->write);
93942+ local_sub_unchecked(length, &tail_page->write);
93943 return;
93944 }
93945
93946@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93947
93948 /* Set write to end of buffer */
93949 length = (tail + length) - BUF_PAGE_SIZE;
93950- local_sub(length, &tail_page->write);
93951+ local_sub_unchecked(length, &tail_page->write);
93952 }
93953
93954 /*
93955@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
93956 * about it.
93957 */
93958 if (unlikely(next_page == commit_page)) {
93959- local_inc(&cpu_buffer->commit_overrun);
93960+ local_inc_unchecked(&cpu_buffer->commit_overrun);
93961 goto out_reset;
93962 }
93963
93964@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
93965 cpu_buffer->tail_page) &&
93966 (cpu_buffer->commit_page ==
93967 cpu_buffer->reader_page))) {
93968- local_inc(&cpu_buffer->commit_overrun);
93969+ local_inc_unchecked(&cpu_buffer->commit_overrun);
93970 goto out_reset;
93971 }
93972 }
93973@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
93974 length += RB_LEN_TIME_EXTEND;
93975
93976 tail_page = cpu_buffer->tail_page;
93977- write = local_add_return(length, &tail_page->write);
93978+ write = local_add_return_unchecked(length, &tail_page->write);
93979
93980 /* set write to only the index of the write */
93981 write &= RB_WRITE_MASK;
93982@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
93983 kmemcheck_annotate_bitfield(event, bitfield);
93984 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
93985
93986- local_inc(&tail_page->entries);
93987+ local_inc_unchecked(&tail_page->entries);
93988
93989 /*
93990 * If this is the first commit on the page, then update
93991@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
93992
93993 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
93994 unsigned long write_mask =
93995- local_read(&bpage->write) & ~RB_WRITE_MASK;
93996+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
93997 unsigned long event_length = rb_event_length(event);
93998 /*
93999 * This is on the tail page. It is possible that
94000@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94001 */
94002 old_index += write_mask;
94003 new_index += write_mask;
94004- index = local_cmpxchg(&bpage->write, old_index, new_index);
94005+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94006 if (index == old_index) {
94007 /* update counters */
94008 local_sub(event_length, &cpu_buffer->entries_bytes);
94009@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94010
94011 /* Do the likely case first */
94012 if (likely(bpage->page == (void *)addr)) {
94013- local_dec(&bpage->entries);
94014+ local_dec_unchecked(&bpage->entries);
94015 return;
94016 }
94017
94018@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94019 start = bpage;
94020 do {
94021 if (bpage->page == (void *)addr) {
94022- local_dec(&bpage->entries);
94023+ local_dec_unchecked(&bpage->entries);
94024 return;
94025 }
94026 rb_inc_page(cpu_buffer, &bpage);
94027@@ -3200,7 +3200,7 @@ static inline unsigned long
94028 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94029 {
94030 return local_read(&cpu_buffer->entries) -
94031- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94032+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94033 }
94034
94035 /**
94036@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94037 return 0;
94038
94039 cpu_buffer = buffer->buffers[cpu];
94040- ret = local_read(&cpu_buffer->overrun);
94041+ ret = local_read_unchecked(&cpu_buffer->overrun);
94042
94043 return ret;
94044 }
94045@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94046 return 0;
94047
94048 cpu_buffer = buffer->buffers[cpu];
94049- ret = local_read(&cpu_buffer->commit_overrun);
94050+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94051
94052 return ret;
94053 }
94054@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94055 /* if you care about this being correct, lock the buffer */
94056 for_each_buffer_cpu(buffer, cpu) {
94057 cpu_buffer = buffer->buffers[cpu];
94058- overruns += local_read(&cpu_buffer->overrun);
94059+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94060 }
94061
94062 return overruns;
94063@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94064 /*
94065 * Reset the reader page to size zero.
94066 */
94067- local_set(&cpu_buffer->reader_page->write, 0);
94068- local_set(&cpu_buffer->reader_page->entries, 0);
94069+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94070+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94071 local_set(&cpu_buffer->reader_page->page->commit, 0);
94072 cpu_buffer->reader_page->real_end = 0;
94073
94074@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94075 * want to compare with the last_overrun.
94076 */
94077 smp_mb();
94078- overwrite = local_read(&(cpu_buffer->overrun));
94079+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94080
94081 /*
94082 * Here's the tricky part.
94083@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94084
94085 cpu_buffer->head_page
94086 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94087- local_set(&cpu_buffer->head_page->write, 0);
94088- local_set(&cpu_buffer->head_page->entries, 0);
94089+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94090+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94091 local_set(&cpu_buffer->head_page->page->commit, 0);
94092
94093 cpu_buffer->head_page->read = 0;
94094@@ -4186,14 +4186,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94095
94096 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94097 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94098- local_set(&cpu_buffer->reader_page->write, 0);
94099- local_set(&cpu_buffer->reader_page->entries, 0);
94100+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94101+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94102 local_set(&cpu_buffer->reader_page->page->commit, 0);
94103 cpu_buffer->reader_page->read = 0;
94104
94105 local_set(&cpu_buffer->entries_bytes, 0);
94106- local_set(&cpu_buffer->overrun, 0);
94107- local_set(&cpu_buffer->commit_overrun, 0);
94108+ local_set_unchecked(&cpu_buffer->overrun, 0);
94109+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94110 local_set(&cpu_buffer->dropped_events, 0);
94111 local_set(&cpu_buffer->entries, 0);
94112 local_set(&cpu_buffer->committing, 0);
94113@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94114 rb_init_page(bpage);
94115 bpage = reader->page;
94116 reader->page = *data_page;
94117- local_set(&reader->write, 0);
94118- local_set(&reader->entries, 0);
94119+ local_set_unchecked(&reader->write, 0);
94120+ local_set_unchecked(&reader->entries, 0);
94121 reader->read = 0;
94122 *data_page = bpage;
94123
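
[Annotation — not part of the patch] The many `local_t` -> `local_unchecked_t` and `atomic_t` -> `atomic_unchecked_t` conversions in the tracing code opt pure statistics counters (write indices, overrun/dropped counts) out of PaX's REFCOUNT overflow detection: refcount-style atomics trap rather than wrap, while these counters may legitimately wrap. The distinction, illustrated in plain C:

#include <stdio.h>
#include <limits.h>

static int checked_inc(int v)        /* refcount-style: refuse to wrap */
{
        return v == INT_MAX ? v : v + 1;
}

static unsigned int unchecked_inc(unsigned int v)  /* statistic: wrap is fine */
{
        return v + 1;
}

int main(void)
{
        printf("%d\n", checked_inc(INT_MAX));     /* saturates at INT_MAX */
        printf("%u\n", unchecked_inc(UINT_MAX));  /* wraps to 0 */
        return 0;
}
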
94124diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94125index 361a827..6a319a3 100644
94126--- a/kernel/trace/trace.c
94127+++ b/kernel/trace/trace.c
94128@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94129 return 0;
94130 }
94131
94132-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94133+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94134 {
94135 /* do nothing if flag is already set */
94136 if (!!(trace_flags & mask) == !!enabled)
94137diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94138index 8de48ba..3e5b4fa 100644
94139--- a/kernel/trace/trace.h
94140+++ b/kernel/trace/trace.h
94141@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94142 void trace_printk_init_buffers(void);
94143 void trace_printk_start_comm(void);
94144 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94145-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94146+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94147
94148 /*
94149 * Normal trace_printk() and friends allocates special buffers
94150diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94151index 57b67b1..66082a9 100644
94152--- a/kernel/trace/trace_clock.c
94153+++ b/kernel/trace/trace_clock.c
94154@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94155 return now;
94156 }
94157
94158-static atomic64_t trace_counter;
94159+static atomic64_unchecked_t trace_counter;
94160
94161 /*
94162 * trace_clock_counter(): simply an atomic counter.
94163@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94164 */
94165 u64 notrace trace_clock_counter(void)
94166 {
94167- return atomic64_add_return(1, &trace_counter);
94168+ return atomic64_inc_return_unchecked(&trace_counter);
94169 }
94170diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94171index b03a0ea..2df3168 100644
94172--- a/kernel/trace/trace_events.c
94173+++ b/kernel/trace/trace_events.c
94174@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94175 return 0;
94176 }
94177
94178-struct ftrace_module_file_ops;
94179 static void __add_event_to_tracers(struct ftrace_event_call *call);
94180
94181 /* Add an additional event_call dynamically */
94182diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94183index ba47600..d0e47fa 100644
94184--- a/kernel/trace/trace_functions_graph.c
94185+++ b/kernel/trace/trace_functions_graph.c
94186@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94187
94188 /* The return trace stack is full */
94189 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94190- atomic_inc(&current->trace_overrun);
94191+ atomic_inc_unchecked(&current->trace_overrun);
94192 return -EBUSY;
94193 }
94194
94195@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
94196 *ret = current->ret_stack[index].ret;
94197 trace->func = current->ret_stack[index].func;
94198 trace->calltime = current->ret_stack[index].calltime;
94199- trace->overrun = atomic_read(&current->trace_overrun);
94200+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
94201 trace->depth = index;
94202 }
94203
94204diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94205index 7a9ba62..2e0e4a1 100644
94206--- a/kernel/trace/trace_mmiotrace.c
94207+++ b/kernel/trace/trace_mmiotrace.c
94208@@ -24,7 +24,7 @@ struct header_iter {
94209 static struct trace_array *mmio_trace_array;
94210 static bool overrun_detected;
94211 static unsigned long prev_overruns;
94212-static atomic_t dropped_count;
94213+static atomic_unchecked_t dropped_count;
94214
94215 static void mmio_reset_data(struct trace_array *tr)
94216 {
94217@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
94218
94219 static unsigned long count_overruns(struct trace_iterator *iter)
94220 {
94221- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94222+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94223 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94224
94225 if (over > prev_overruns)
94226@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94227 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94228 sizeof(*entry), 0, pc);
94229 if (!event) {
94230- atomic_inc(&dropped_count);
94231+ atomic_inc_unchecked(&dropped_count);
94232 return;
94233 }
94234 entry = ring_buffer_event_data(event);
94235@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94236 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94237 sizeof(*entry), 0, pc);
94238 if (!event) {
94239- atomic_inc(&dropped_count);
94240+ atomic_inc_unchecked(&dropped_count);
94241 return;
94242 }
94243 entry = ring_buffer_event_data(event);
94244diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94245index b77b9a6..82f19bd 100644
94246--- a/kernel/trace/trace_output.c
94247+++ b/kernel/trace/trace_output.c
94248@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
94249 goto out;
94250 }
94251
94252+ pax_open_kernel();
94253 if (event->funcs->trace == NULL)
94254- event->funcs->trace = trace_nop_print;
94255+ *(void **)&event->funcs->trace = trace_nop_print;
94256 if (event->funcs->raw == NULL)
94257- event->funcs->raw = trace_nop_print;
94258+ *(void **)&event->funcs->raw = trace_nop_print;
94259 if (event->funcs->hex == NULL)
94260- event->funcs->hex = trace_nop_print;
94261+ *(void **)&event->funcs->hex = trace_nop_print;
94262 if (event->funcs->binary == NULL)
94263- event->funcs->binary = trace_nop_print;
94264+ *(void **)&event->funcs->binary = trace_nop_print;
94265+ pax_close_kernel();
94266
94267 key = event->type & (EVENT_HASHSIZE - 1);
94268
94269diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94270index f8b45d8..70ff6c8 100644
94271--- a/kernel/trace/trace_seq.c
94272+++ b/kernel/trace/trace_seq.c
94273@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94274 return 0;
94275 }
94276
94277- seq_buf_path(&s->seq, path, "\n");
94278+ seq_buf_path(&s->seq, path, "\n\\");
94279
94280 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
94281 s->seq.len = save_len;
94282diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94283index 16eddb3..758b308 100644
94284--- a/kernel/trace/trace_stack.c
94285+++ b/kernel/trace/trace_stack.c
94286@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94287 return;
94288
94289 /* we do not handle interrupt stacks yet */
94290- if (!object_is_on_stack(stack))
94291+ if (!object_starts_on_stack(stack))
94292 return;
94293
94294 local_irq_save(flags);
94295diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94296index c6ee36f..78513f3 100644
94297--- a/kernel/trace/trace_syscalls.c
94298+++ b/kernel/trace/trace_syscalls.c
94299@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94300 int num;
94301
94302 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94303+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94304+ return -EINVAL;
94305
94306 mutex_lock(&syscall_trace_lock);
94307 if (!sys_perf_refcount_enter)
94308@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94309 int num;
94310
94311 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94312+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94313+ return;
94314
94315 mutex_lock(&syscall_trace_lock);
94316 sys_perf_refcount_enter--;
94317@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94318 int num;
94319
94320 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94321+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94322+ return -EINVAL;
94323
94324 mutex_lock(&syscall_trace_lock);
94325 if (!sys_perf_refcount_exit)
94326@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94327 int num;
94328
94329 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94330+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94331+ return;
94332
94333 mutex_lock(&syscall_trace_lock);
94334 sys_perf_refcount_exit--;
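
[Annotation — not part of the patch] The four trace_syscalls.c hunks add the same guard: validate the metadata-supplied syscall number before it indexes the per-syscall enable bitmaps. A compilable sketch of the bounds-check-before-index pattern (toy table size):

#include <stdio.h>
#include <errno.h>

#define NR_SYSCALLS 440

static unsigned char enabled[NR_SYSCALLS];

static int perf_sysenter_enable(int num)
{
        if (num < 0 || num >= NR_SYSCALLS)   /* WARN_ON_ONCE(...) in the hunk */
                return -EINVAL;
        enabled[num] = 1;
        return 0;
}

int main(void)
{
        printf("%d\n", perf_sysenter_enable(1));    /* ok */
        printf("%d\n", perf_sysenter_enable(-1));   /* rejected */
        return 0;
}
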
94335diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94336index 4109f83..fe1f830 100644
94337--- a/kernel/user_namespace.c
94338+++ b/kernel/user_namespace.c
94339@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
94340 !kgid_has_mapping(parent_ns, group))
94341 return -EPERM;
94342
94343+#ifdef CONFIG_GRKERNSEC
94344+ /*
94345+ * This doesn't really inspire confidence:
94346+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94347+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94348+ * Increases kernel attack surface in areas developers
94349+ * previously cared little about ("low importance due
94350+ * to requiring "root" capability")
94351+ * To be removed when this code receives *proper* review
94352+ */
94353+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94354+ !capable(CAP_SETGID))
94355+ return -EPERM;
94356+#endif
94357+
94358 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94359 if (!ns)
94360 return -ENOMEM;
94361@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
94362 if (atomic_read(&current->mm->mm_users) > 1)
94363 return -EINVAL;
94364
94365- if (current->fs->users != 1)
94366+ if (atomic_read(&current->fs->users) != 1)
94367 return -EINVAL;
94368
94369 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
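
The new block requires CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID for user namespace creation when CONFIG_GRKERNSEC is set, closing off the unprivileged-userns attack surface discussed in the linked threads. A small probe for the resulting behaviour, assuming a kernel built with this option (there, the call should fail with EPERM for an unprivileged process):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
    if (unshare(CLONE_NEWUSER) == -1)
        printf("unshare(CLONE_NEWUSER): %s\n", strerror(errno));
    else
        printf("user namespace created\n");
    return 0;
}
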
94370diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94371index c8eac43..4b5f08f 100644
94372--- a/kernel/utsname_sysctl.c
94373+++ b/kernel/utsname_sysctl.c
94374@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94375 static int proc_do_uts_string(struct ctl_table *table, int write,
94376 void __user *buffer, size_t *lenp, loff_t *ppos)
94377 {
94378- struct ctl_table uts_table;
94379+ ctl_table_no_const uts_table;
94380 int r;
94381 memcpy(&uts_table, table, sizeof(uts_table));
94382 uts_table.data = get_uts(table, write);
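
ctl_table_no_const is the writable twin of struct ctl_table that grsecurity introduces for handlers that need to mutate the table: the registered table itself is constified, so the handler edits a stack copy instead. The same idiom reappears in the mm/hugetlb.c hunks further down. In miniature, with illustrative stand-in types:

struct ctl_entry { void *data; int maxlen; };
typedef struct ctl_entry ctl_entry_no_const;   /* same layout, writable */

static int handler(const struct ctl_entry *table)
{
    static unsigned long value;
    ctl_entry_no_const tmp = *table;   /* mutate the copy, not the original */

    tmp.data = &value;
    tmp.maxlen = sizeof(value);
    /* ... hand &tmp, never table, to the generic proc helper ... */
    return 0;
}
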
94383diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94384index 70bf118..4be3c37 100644
94385--- a/kernel/watchdog.c
94386+++ b/kernel/watchdog.c
94387@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94388 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94389 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94390
94391-static struct smp_hotplug_thread watchdog_threads = {
94392+static struct smp_hotplug_thread watchdog_threads __read_only = {
94393 .store = &softlockup_watchdog,
94394 .thread_should_run = watchdog_should_run,
94395 .thread_fn = watchdog,
94396diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94397index 82d0c8d..37f4222 100644
94398--- a/kernel/workqueue.c
94399+++ b/kernel/workqueue.c
94400@@ -4565,7 +4565,7 @@ static void rebind_workers(struct worker_pool *pool)
94401 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94402 worker_flags |= WORKER_REBOUND;
94403 worker_flags &= ~WORKER_UNBOUND;
94404- ACCESS_ONCE(worker->flags) = worker_flags;
94405+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94406 }
94407
94408 spin_unlock_irq(&pool->lock);
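
ACCESS_ONCE_RW exists because the PaX patch redefines ACCESS_ONCE() to yield a const-qualified lvalue, turning accidental writes through it into compile errors; stores like this one (and the lib/average.c one below) must opt in explicitly. A sketch of that split, with macro bodies that are illustrative but modelled on the PaX definitions:

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned int flags;

static void demo(void)
{
    unsigned int f = ACCESS_ONCE(flags);   /* read: fine */
    ACCESS_ONCE_RW(flags) = f | 0x1;       /* write: must be explicit */
    /* ACCESS_ONCE(flags) = 0;  would not compile: assignment to const */
}
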
94409diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94410index 5f2ce61..85a0b1b 100644
94411--- a/lib/Kconfig.debug
94412+++ b/lib/Kconfig.debug
94413@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
94414
94415 config DEBUG_WW_MUTEX_SLOWPATH
94416 bool "Wait/wound mutex debugging: Slowpath testing"
94417- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94418+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94419 select DEBUG_LOCK_ALLOC
94420 select DEBUG_SPINLOCK
94421 select DEBUG_MUTEXES
94422@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94423
94424 config DEBUG_LOCK_ALLOC
94425 bool "Lock debugging: detect incorrect freeing of live locks"
94426- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94427+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94428 select DEBUG_SPINLOCK
94429 select DEBUG_MUTEXES
94430 select LOCKDEP
94431@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
94432
94433 config PROVE_LOCKING
94434 bool "Lock debugging: prove locking correctness"
94435- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94436+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94437 select LOCKDEP
94438 select DEBUG_SPINLOCK
94439 select DEBUG_MUTEXES
94440@@ -992,7 +992,7 @@ config LOCKDEP
94441
94442 config LOCK_STAT
94443 bool "Lock usage statistics"
94444- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94445+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94446 select LOCKDEP
94447 select DEBUG_SPINLOCK
94448 select DEBUG_MUTEXES
94449@@ -1453,6 +1453,7 @@ config LATENCYTOP
94450 depends on DEBUG_KERNEL
94451 depends on STACKTRACE_SUPPORT
94452 depends on PROC_FS
94453+ depends on !GRKERNSEC_HIDESYM
94454 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94455 select KALLSYMS
94456 select KALLSYMS_ALL
94457@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94458 config DEBUG_STRICT_USER_COPY_CHECKS
94459 bool "Strict user copy size checks"
94460 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94461- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94462+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94463 help
94464 Enabling this option turns a certain set of sanity checks for user
94465 copy operations into compile time failures.
94466@@ -1597,7 +1598,7 @@ endmenu # runtime tests
94467
94468 config PROVIDE_OHCI1394_DMA_INIT
94469 bool "Remote debugging over FireWire early on boot"
94470- depends on PCI && X86
94471+ depends on PCI && X86 && !GRKERNSEC
94472 help
94473 If you want to debug problems which hang or crash the kernel early
94474 on boot and the crashing machine has a FireWire port, you can use
94475diff --git a/lib/Makefile b/lib/Makefile
94476index 3c3b30b..ca29102 100644
94477--- a/lib/Makefile
94478+++ b/lib/Makefile
94479@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94480 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94481 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94482 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94483-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94484+obj-y += list_debug.o
94485 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94486
94487 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94488diff --git a/lib/average.c b/lib/average.c
94489index 114d1be..ab0350c 100644
94490--- a/lib/average.c
94491+++ b/lib/average.c
94492@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94493 {
94494 unsigned long internal = ACCESS_ONCE(avg->internal);
94495
94496- ACCESS_ONCE(avg->internal) = internal ?
94497+ ACCESS_ONCE_RW(avg->internal) = internal ?
94498 (((internal << avg->weight) - internal) +
94499 (val << avg->factor)) >> avg->weight :
94500 (val << avg->factor);
94501diff --git a/lib/bitmap.c b/lib/bitmap.c
94502index 324ea9e..46b1ae2 100644
94503--- a/lib/bitmap.c
94504+++ b/lib/bitmap.c
94505@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
94506 }
94507 EXPORT_SYMBOL(__bitmap_subset);
94508
94509-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94510+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94511 {
94512 unsigned int k, lim = bits/BITS_PER_LONG;
94513 int w = 0;
94514@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94515 {
94516 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94517 u32 chunk;
94518- const char __user __force *ubuf = (const char __user __force *)buf;
94519+ const char __user *ubuf = (const char __force_user *)buf;
94520
94521 bitmap_zero(maskp, nmaskbits);
94522
94523@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
94524 {
94525 if (!access_ok(VERIFY_READ, ubuf, ulen))
94526 return -EFAULT;
94527- return __bitmap_parse((const char __force *)ubuf,
94528+ return __bitmap_parse((const char __force_kernel *)ubuf,
94529 ulen, 1, maskp, nmaskbits);
94530
94531 }
94532@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94533 {
94534 unsigned a, b;
94535 int c, old_c, totaldigits;
94536- const char __user __force *ubuf = (const char __user __force *)buf;
94537+ const char __user *ubuf = (const char __force_user *)buf;
94538 int exp_digit, in_range;
94539
94540 totaldigits = c = 0;
94541@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94542 {
94543 if (!access_ok(VERIFY_READ, ubuf, ulen))
94544 return -EFAULT;
94545- return __bitmap_parselist((const char __force *)ubuf,
94546+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94547 ulen, 1, maskp, nmaskbits);
94548 }
94549 EXPORT_SYMBOL(bitmap_parselist_user);
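
These bitmap hunks replace bare __force casts with __force_user/__force_kernel, grsecurity's directional variants, so sparse can still tell kernel-to-user casts apart from user-to-kernel ones instead of having the address space erased entirely. A sketch of the annotation scheme; the definitions below are illustrative, not the kernel's exact ones:

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force_user   __attribute__((force)) __user
# define __force_kernel __attribute__((force))
#else
# define __user
# define __force_user
# define __force_kernel
#endif

static int parse(const char *buf) { (void)buf; return 0; }  /* kernel-side worker stub */

static int parse_user(const char __user *ubuf)
{
    /* the cast names its direction, so sparse can still audit it */
    return parse((const char __force_kernel *)ubuf);
}
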
94550diff --git a/lib/bug.c b/lib/bug.c
94551index 0c3bd95..5a615a1 100644
94552--- a/lib/bug.c
94553+++ b/lib/bug.c
94554@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94555 return BUG_TRAP_TYPE_NONE;
94556
94557 bug = find_bug(bugaddr);
94558+ if (!bug)
94559+ return BUG_TRAP_TYPE_NONE;
94560
94561 file = NULL;
94562 line = 0;
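
The added NULL check makes report_bug() bail out when the trapping address has no entry in the bug table, instead of dereferencing the failed lookup a few lines later. The shape of the fix as a self-contained sketch (the types and stub lookup are illustrative):

struct bug_entry { const char *file; unsigned int line; };

static const struct bug_entry *find_bug(unsigned long addr)
{
    (void)addr;
    return 0;               /* a lookup can legitimately fail */
}

static int report_bug(unsigned long addr)
{
    const struct bug_entry *bug = find_bug(addr);

    if (!bug)
        return 0;           /* BUG_TRAP_TYPE_NONE in the kernel */
    /* ... only now is bug->file / bug->line safe to use ... */
    return 1;
}
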
94563diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94564index 547f7f9..a6d4ba0 100644
94565--- a/lib/debugobjects.c
94566+++ b/lib/debugobjects.c
94567@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94568 if (limit > 4)
94569 return;
94570
94571- is_on_stack = object_is_on_stack(addr);
94572+ is_on_stack = object_starts_on_stack(addr);
94573 if (is_on_stack == onstack)
94574 return;
94575
94576diff --git a/lib/div64.c b/lib/div64.c
94577index 4382ad7..08aa558 100644
94578--- a/lib/div64.c
94579+++ b/lib/div64.c
94580@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94581 EXPORT_SYMBOL(__div64_32);
94582
94583 #ifndef div_s64_rem
94584-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94585+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94586 {
94587 u64 quotient;
94588
94589@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94590 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94591 */
94592 #ifndef div64_u64
94593-u64 div64_u64(u64 dividend, u64 divisor)
94594+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94595 {
94596 u32 high = divisor >> 32;
94597 u64 quot;
94598diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94599index 9722bd2..0d826f4 100644
94600--- a/lib/dma-debug.c
94601+++ b/lib/dma-debug.c
94602@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94603
94604 void dma_debug_add_bus(struct bus_type *bus)
94605 {
94606- struct notifier_block *nb;
94607+ notifier_block_no_const *nb;
94608
94609 if (dma_debug_disabled())
94610 return;
94611@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94612
94613 static void check_for_stack(struct device *dev, void *addr)
94614 {
94615- if (object_is_on_stack(addr))
94616+ if (object_starts_on_stack(addr))
94617 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
94618 "stack [addr=%p]\n", addr);
94619 }
94620diff --git a/lib/inflate.c b/lib/inflate.c
94621index 013a761..c28f3fc 100644
94622--- a/lib/inflate.c
94623+++ b/lib/inflate.c
94624@@ -269,7 +269,7 @@ static void free(void *where)
94625 malloc_ptr = free_mem_ptr;
94626 }
94627 #else
94628-#define malloc(a) kmalloc(a, GFP_KERNEL)
94629+#define malloc(a) kmalloc((a), GFP_KERNEL)
94630 #define free(a) kfree(a)
94631 #endif
94632
94633diff --git a/lib/ioremap.c b/lib/ioremap.c
94634index 0c9216c..863bd89 100644
94635--- a/lib/ioremap.c
94636+++ b/lib/ioremap.c
94637@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94638 unsigned long next;
94639
94640 phys_addr -= addr;
94641- pmd = pmd_alloc(&init_mm, pud, addr);
94642+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94643 if (!pmd)
94644 return -ENOMEM;
94645 do {
94646@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94647 unsigned long next;
94648
94649 phys_addr -= addr;
94650- pud = pud_alloc(&init_mm, pgd, addr);
94651+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94652 if (!pud)
94653 return -ENOMEM;
94654 do {
94655diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94656index bd2bea9..6b3c95e 100644
94657--- a/lib/is_single_threaded.c
94658+++ b/lib/is_single_threaded.c
94659@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94660 struct task_struct *p, *t;
94661 bool ret;
94662
94663+ if (!mm)
94664+ return true;
94665+
94666 if (atomic_read(&task->signal->live) != 1)
94667 return false;
94668
94669diff --git a/lib/kobject.c b/lib/kobject.c
94670index 03d4ab3..46f6374 100644
94671--- a/lib/kobject.c
94672+++ b/lib/kobject.c
94673@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94674
94675
94676 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94677-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94678+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94679
94680-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94681+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94682 {
94683 enum kobj_ns_type type = ops->type;
94684 int error;
94685diff --git a/lib/list_debug.c b/lib/list_debug.c
94686index c24c2f7..f0296f4 100644
94687--- a/lib/list_debug.c
94688+++ b/lib/list_debug.c
94689@@ -11,7 +11,9 @@
94690 #include <linux/bug.h>
94691 #include <linux/kernel.h>
94692 #include <linux/rculist.h>
94693+#include <linux/mm.h>
94694
94695+#ifdef CONFIG_DEBUG_LIST
94696 /*
94697 * Insert a new entry between two known consecutive entries.
94698 *
94699@@ -19,21 +21,40 @@
94700 * the prev/next entries already!
94701 */
94702
94703+static bool __list_add_debug(struct list_head *new,
94704+ struct list_head *prev,
94705+ struct list_head *next)
94706+{
94707+ if (unlikely(next->prev != prev)) {
94708+ printk(KERN_ERR "list_add corruption. next->prev should be "
94709+ "prev (%p), but was %p. (next=%p).\n",
94710+ prev, next->prev, next);
94711+ BUG();
94712+ return false;
94713+ }
94714+ if (unlikely(prev->next != next)) {
94715+ printk(KERN_ERR "list_add corruption. prev->next should be "
94716+ "next (%p), but was %p. (prev=%p).\n",
94717+ next, prev->next, prev);
94718+ BUG();
94719+ return false;
94720+ }
94721+ if (unlikely(new == prev || new == next)) {
94722+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94723+ new, prev, next);
94724+ BUG();
94725+ return false;
94726+ }
94727+ return true;
94728+}
94729+
94730 void __list_add(struct list_head *new,
94731- struct list_head *prev,
94732- struct list_head *next)
94733+ struct list_head *prev,
94734+ struct list_head *next)
94735 {
94736- WARN(next->prev != prev,
94737- "list_add corruption. next->prev should be "
94738- "prev (%p), but was %p. (next=%p).\n",
94739- prev, next->prev, next);
94740- WARN(prev->next != next,
94741- "list_add corruption. prev->next should be "
94742- "next (%p), but was %p. (prev=%p).\n",
94743- next, prev->next, prev);
94744- WARN(new == prev || new == next,
94745- "list_add double add: new=%p, prev=%p, next=%p.\n",
94746- new, prev, next);
94747+ if (!__list_add_debug(new, prev, next))
94748+ return;
94749+
94750 next->prev = new;
94751 new->next = next;
94752 new->prev = prev;
94753@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
94754 }
94755 EXPORT_SYMBOL(__list_add);
94756
94757-void __list_del_entry(struct list_head *entry)
94758+static bool __list_del_entry_debug(struct list_head *entry)
94759 {
94760 struct list_head *prev, *next;
94761
94762 prev = entry->prev;
94763 next = entry->next;
94764
94765- if (WARN(next == LIST_POISON1,
94766- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94767- entry, LIST_POISON1) ||
94768- WARN(prev == LIST_POISON2,
94769- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94770- entry, LIST_POISON2) ||
94771- WARN(prev->next != entry,
94772- "list_del corruption. prev->next should be %p, "
94773- "but was %p\n", entry, prev->next) ||
94774- WARN(next->prev != entry,
94775- "list_del corruption. next->prev should be %p, "
94776- "but was %p\n", entry, next->prev))
94777+ if (unlikely(next == LIST_POISON1)) {
94778+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94779+ entry, LIST_POISON1);
94780+ BUG();
94781+ return false;
94782+ }
94783+ if (unlikely(prev == LIST_POISON2)) {
94784+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94785+ entry, LIST_POISON2);
94786+ BUG();
94787+ return false;
94788+ }
94789+ if (unlikely(entry->prev->next != entry)) {
94790+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
94791+ "but was %p\n", entry, prev->next);
94792+ BUG();
94793+ return false;
94794+ }
94795+ if (unlikely(entry->next->prev != entry)) {
94796+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
94797+ "but was %p\n", entry, next->prev);
94798+ BUG();
94799+ return false;
94800+ }
94801+ return true;
94802+}
94803+
94804+void __list_del_entry(struct list_head *entry)
94805+{
94806+ if (!__list_del_entry_debug(entry))
94807 return;
94808
94809- __list_del(prev, next);
94810+ __list_del(entry->prev, entry->next);
94811 }
94812 EXPORT_SYMBOL(__list_del_entry);
94813
94814@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
94815 void __list_add_rcu(struct list_head *new,
94816 struct list_head *prev, struct list_head *next)
94817 {
94818- WARN(next->prev != prev,
94819- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
94820- prev, next->prev, next);
94821- WARN(prev->next != next,
94822- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
94823- next, prev->next, prev);
94824+ if (!__list_add_debug(new, prev, next))
94825+ return;
94826+
94827 new->next = next;
94828 new->prev = prev;
94829 rcu_assign_pointer(list_next_rcu(prev), new);
94830 next->prev = new;
94831 }
94832 EXPORT_SYMBOL(__list_add_rcu);
94833+#endif
94834+
94835+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
94836+{
94837+#ifdef CONFIG_DEBUG_LIST
94838+ if (!__list_add_debug(new, prev, next))
94839+ return;
94840+#endif
94841+
94842+ pax_open_kernel();
94843+ next->prev = new;
94844+ new->next = next;
94845+ new->prev = prev;
94846+ prev->next = new;
94847+ pax_close_kernel();
94848+}
94849+EXPORT_SYMBOL(__pax_list_add);
94850+
94851+void pax_list_del(struct list_head *entry)
94852+{
94853+#ifdef CONFIG_DEBUG_LIST
94854+ if (!__list_del_entry_debug(entry))
94855+ return;
94856+#endif
94857+
94858+ pax_open_kernel();
94859+ __list_del(entry->prev, entry->next);
94860+ entry->next = LIST_POISON1;
94861+ entry->prev = LIST_POISON2;
94862+ pax_close_kernel();
94863+}
94864+EXPORT_SYMBOL(pax_list_del);
94865+
94866+void pax_list_del_init(struct list_head *entry)
94867+{
94868+ pax_open_kernel();
94869+ __list_del(entry->prev, entry->next);
94870+ INIT_LIST_HEAD(entry);
94871+ pax_close_kernel();
94872+}
94873+EXPORT_SYMBOL(pax_list_del_init);
94874+
94875+void __pax_list_add_rcu(struct list_head *new,
94876+ struct list_head *prev, struct list_head *next)
94877+{
94878+#ifdef CONFIG_DEBUG_LIST
94879+ if (!__list_add_debug(new, prev, next))
94880+ return;
94881+#endif
94882+
94883+ pax_open_kernel();
94884+ new->next = next;
94885+ new->prev = prev;
94886+ rcu_assign_pointer(list_next_rcu(prev), new);
94887+ next->prev = new;
94888+ pax_close_kernel();
94889+}
94890+EXPORT_SYMBOL(__pax_list_add_rcu);
94891+
94892+void pax_list_del_rcu(struct list_head *entry)
94893+{
94894+#ifdef CONFIG_DEBUG_LIST
94895+ if (!__list_del_entry_debug(entry))
94896+ return;
94897+#endif
94898+
94899+ pax_open_kernel();
94900+ __list_del(entry->prev, entry->next);
94901+ entry->next = LIST_POISON1;
94902+ entry->prev = LIST_POISON2;
94903+ pax_close_kernel();
94904+}
94905+EXPORT_SYMBOL(pax_list_del_rcu);
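
The rewrite above factors the sanity checks into boolean helpers that BUG() on corruption rather than WARN() and link anyway, then reuses them from the new pax_list_* variants, whose pointer writes happen inside pax_open_kernel()/pax_close_kernel() so that list heads embedded in constified structures can still be maintained. The check-then-link core, as a userspace sketch:

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static int list_add_ok(struct list_head *new,
                       struct list_head *prev, struct list_head *next)
{
    if (next->prev != prev || prev->next != next || new == prev || new == next) {
        fprintf(stderr, "list corruption: new=%p prev=%p next=%p\n",
                (void *)new, (void *)prev, (void *)next);
        abort();               /* the kernel version calls BUG() */
    }
    return 1;
}

static void list_add(struct list_head *new,
                     struct list_head *prev, struct list_head *next)
{
    if (!list_add_ok(new, prev, next))
        return;
    next->prev = new;
    new->next = next;
    new->prev = prev;
    prev->next = new;
}
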
94906diff --git a/lib/lockref.c b/lib/lockref.c
94907index d2233de..fa1a2f6 100644
94908--- a/lib/lockref.c
94909+++ b/lib/lockref.c
94910@@ -48,13 +48,13 @@
94911 void lockref_get(struct lockref *lockref)
94912 {
94913 CMPXCHG_LOOP(
94914- new.count++;
94915+ __lockref_inc(&new);
94916 ,
94917 return;
94918 );
94919
94920 spin_lock(&lockref->lock);
94921- lockref->count++;
94922+ __lockref_inc(lockref);
94923 spin_unlock(&lockref->lock);
94924 }
94925 EXPORT_SYMBOL(lockref_get);
94926@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
94927 int retval;
94928
94929 CMPXCHG_LOOP(
94930- new.count++;
94931+ __lockref_inc(&new);
94932 if (!old.count)
94933 return 0;
94934 ,
94935@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
94936 spin_lock(&lockref->lock);
94937 retval = 0;
94938 if (lockref->count) {
94939- lockref->count++;
94940+ __lockref_inc(lockref);
94941 retval = 1;
94942 }
94943 spin_unlock(&lockref->lock);
94944@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
94945 int lockref_get_or_lock(struct lockref *lockref)
94946 {
94947 CMPXCHG_LOOP(
94948- new.count++;
94949+ __lockref_inc(&new);
94950 if (!old.count)
94951 break;
94952 ,
94953@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
94954 spin_lock(&lockref->lock);
94955 if (!lockref->count)
94956 return 0;
94957- lockref->count++;
94958+ __lockref_inc(lockref);
94959 spin_unlock(&lockref->lock);
94960 return 1;
94961 }
94962@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
94963 int lockref_put_or_lock(struct lockref *lockref)
94964 {
94965 CMPXCHG_LOOP(
94966- new.count--;
94967+ __lockref_dec(&new);
94968 if (old.count <= 1)
94969 break;
94970 ,
94971@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
94972 spin_lock(&lockref->lock);
94973 if (lockref->count <= 1)
94974 return 0;
94975- lockref->count--;
94976+ __lockref_dec(lockref);
94977 spin_unlock(&lockref->lock);
94978 return 1;
94979 }
94980@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
94981 int retval;
94982
94983 CMPXCHG_LOOP(
94984- new.count++;
94985+ __lockref_inc(&new);
94986 if ((int)old.count < 0)
94987 return 0;
94988 ,
94989@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
94990 spin_lock(&lockref->lock);
94991 retval = 0;
94992 if ((int) lockref->count >= 0) {
94993- lockref->count++;
94994+ __lockref_inc(lockref);
94995 retval = 1;
94996 }
94997 spin_unlock(&lockref->lock);
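
Every count update in lockref now goes through __lockref_inc()/__lockref_dec(), giving the PAX_REFCOUNT overflow machinery a single choke point instead of raw ++/-- scattered through the file. A sketch of why the indirection exists; the saturation policy below is illustrative, since the PaX implementation detects the overflow in the atomic operation itself:

#include <limits.h>

struct ref { int count; };

static void ref_inc(struct ref *r)
{
    if (r->count == INT_MAX)   /* a wrapped refcount turns use-after-free
                                  bugs into silently recycled objects */
        return;
    r->count++;
}

static void ref_dec(struct ref *r)
{
    if (r->count == 0)
        return;                /* underflow is equally suspect */
    r->count--;
}
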
94998diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
94999index 6111bcb..02e816b 100644
95000--- a/lib/percpu-refcount.c
95001+++ b/lib/percpu-refcount.c
95002@@ -31,7 +31,7 @@
95003 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95004 */
95005
95006-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95007+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95008
95009 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95010
95011diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95012index 3291a8e..346a91e 100644
95013--- a/lib/radix-tree.c
95014+++ b/lib/radix-tree.c
95015@@ -67,7 +67,7 @@ struct radix_tree_preload {
95016 int nr;
95017 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95018 };
95019-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95020+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95021
95022 static inline void *ptr_to_indirect(void *ptr)
95023 {
95024diff --git a/lib/random32.c b/lib/random32.c
95025index 0bee183..526f12f 100644
95026--- a/lib/random32.c
95027+++ b/lib/random32.c
95028@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95029 }
95030 #endif
95031
95032-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95033+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95034
95035 /**
95036 * prandom_u32_state - seeded pseudo-random number generator.
95037diff --git a/lib/rbtree.c b/lib/rbtree.c
95038index c16c81a..4dcbda1 100644
95039--- a/lib/rbtree.c
95040+++ b/lib/rbtree.c
95041@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95042 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95043
95044 static const struct rb_augment_callbacks dummy_callbacks = {
95045- dummy_propagate, dummy_copy, dummy_rotate
95046+ .propagate = dummy_propagate,
95047+ .copy = dummy_copy,
95048+ .rotate = dummy_rotate
95049 };
95050
95051 void rb_insert_color(struct rb_node *node, struct rb_root *root)
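
Replacing the positional initializer with designated fields matters once structure layout is no longer fixed: grsecurity's structure-layout randomization may reorder members, and named initializers keep each callback bound to the right slot no matter the final layout. For example:

struct callbacks {
    void (*propagate)(void);
    void (*copy)(void);
    void (*rotate)(void);
};

static void nop(void) { }

static const struct callbacks dummy_callbacks = {
    .propagate = nop,   /* still correct if the members are reordered */
    .copy      = nop,
    .rotate    = nop,
};
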
95052diff --git a/lib/show_mem.c b/lib/show_mem.c
95053index 7de89f4..00d70b7 100644
95054--- a/lib/show_mem.c
95055+++ b/lib/show_mem.c
95056@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
95057 quicklist_total_size());
95058 #endif
95059 #ifdef CONFIG_MEMORY_FAILURE
95060- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95061+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95062 #endif
95063 }
95064diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95065index bb2b201..46abaf9 100644
95066--- a/lib/strncpy_from_user.c
95067+++ b/lib/strncpy_from_user.c
95068@@ -21,7 +21,7 @@
95069 */
95070 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95071 {
95072- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95073+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95074 long res = 0;
95075
95076 /*
95077diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95078index a28df52..3d55877 100644
95079--- a/lib/strnlen_user.c
95080+++ b/lib/strnlen_user.c
95081@@ -26,7 +26,7 @@
95082 */
95083 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95084 {
95085- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95086+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95087 long align, res = 0;
95088 unsigned long c;
95089
95090diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95091index 4abda07..b9d3765 100644
95092--- a/lib/swiotlb.c
95093+++ b/lib/swiotlb.c
95094@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95095
95096 void
95097 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95098- dma_addr_t dev_addr)
95099+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95100 {
95101 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95102
95103diff --git a/lib/usercopy.c b/lib/usercopy.c
95104index 4f5b1dd..7cab418 100644
95105--- a/lib/usercopy.c
95106+++ b/lib/usercopy.c
95107@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95108 WARN(1, "Buffer overflow detected!\n");
95109 }
95110 EXPORT_SYMBOL(copy_from_user_overflow);
95111+
95112+void copy_to_user_overflow(void)
95113+{
95114+ WARN(1, "Buffer overflow detected!\n");
95115+}
95116+EXPORT_SYMBOL(copy_to_user_overflow);
95117diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95118index ec337f6..8484eb2 100644
95119--- a/lib/vsprintf.c
95120+++ b/lib/vsprintf.c
95121@@ -16,6 +16,9 @@
95122 * - scnprintf and vscnprintf
95123 */
95124
95125+#ifdef CONFIG_GRKERNSEC_HIDESYM
95126+#define __INCLUDED_BY_HIDESYM 1
95127+#endif
95128 #include <stdarg.h>
95129 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95130 #include <linux/types.h>
95131@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95132 #ifdef CONFIG_KALLSYMS
95133 if (*fmt == 'B')
95134 sprint_backtrace(sym, value);
95135- else if (*fmt != 'f' && *fmt != 's')
95136+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95137 sprint_symbol(sym, value);
95138 else
95139 sprint_symbol_no_offset(sym, value);
95140@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
95141 return number(buf, end, num, spec);
95142 }
95143
95144+#ifdef CONFIG_GRKERNSEC_HIDESYM
95145+int kptr_restrict __read_mostly = 2;
95146+#else
95147 int kptr_restrict __read_mostly;
95148+#endif
95149
95150 /*
95151 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95152@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
95153 *
95154 * - 'F' For symbolic function descriptor pointers with offset
95155 * - 'f' For simple symbolic function names without offset
95156+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95157 * - 'S' For symbolic direct pointers with offset
95158 * - 's' For symbolic direct pointers without offset
95159+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95160 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95161 * - 'B' For backtraced symbolic direct pointers with offset
95162 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95163@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95164
95165 if (!ptr && *fmt != 'K') {
95166 /*
95167- * Print (null) with the same width as a pointer so it makes
95168+ * Print (nil) with the same width as a pointer so it makes
95169 * tabular output look nice.
95170 */
95171 if (spec.field_width == -1)
95172 spec.field_width = default_width;
95173- return string(buf, end, "(null)", spec);
95174+ return string(buf, end, "(nil)", spec);
95175 }
95176
95177 switch (*fmt) {
95178@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95179 /* Fallthrough */
95180 case 'S':
95181 case 's':
95182+#ifdef CONFIG_GRKERNSEC_HIDESYM
95183+ break;
95184+#else
95185+ return symbol_string(buf, end, ptr, spec, fmt);
95186+#endif
95187+ case 'X':
95188+ ptr = dereference_function_descriptor(ptr);
95189+ case 'A':
95190 case 'B':
95191 return symbol_string(buf, end, ptr, spec, fmt);
95192 case 'R':
95193@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95194 va_end(va);
95195 return buf;
95196 }
95197+ case 'P':
95198+ break;
95199 case 'K':
95200 /*
95201 * %pK cannot be used in IRQ context because its test
95202@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95203 ((const struct file *)ptr)->f_path.dentry,
95204 spec, fmt);
95205 }
95206+
95207+#ifdef CONFIG_GRKERNSEC_HIDESYM
95208+ /* 'P' = approved pointers to copy to userland,
95209+ as in the /proc/kallsyms case, as we make it display nothing
95210+ for non-root users, and the real contents for root users
95211+ 'X' = approved simple symbols
95212+ Also ignore 'K' pointers, since we force their NULLing for non-root users
95213+ above
95214+ */
95215+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95216+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95217+ dump_stack();
95218+ ptr = NULL;
95219+ }
95220+#endif
95221+
95222 spec.flags |= SMALL;
95223 if (spec.field_width == -1) {
95224 spec.field_width = default_width;
95225@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95226 typeof(type) value; \
95227 if (sizeof(type) == 8) { \
95228 args = PTR_ALIGN(args, sizeof(u32)); \
95229- *(u32 *)&value = *(u32 *)args; \
95230- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95231+ *(u32 *)&value = *(const u32 *)args; \
95232+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95233 } else { \
95234 args = PTR_ALIGN(args, sizeof(type)); \
95235- value = *(typeof(type) *)args; \
95236+ value = *(const typeof(type) *)args; \
95237 } \
95238 args += sizeof(type); \
95239 value; \
95240@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95241 case FORMAT_TYPE_STR: {
95242 const char *str_arg = args;
95243 args += strlen(str_arg) + 1;
95244- str = string(str, end, (char *)str_arg, spec);
95245+ str = string(str, end, str_arg, spec);
95246 break;
95247 }
95248
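
The vsprintf changes implement GRKERNSEC_HIDESYM at the printk layer: kptr_restrict defaults to 2, the new %pX and %pA specifiers whitelist symbol printing, %pP whitelists raw pointers, and a catch-all NULLs any other kernel pointer headed for a userland-visible buffer while logging the infoleak. A userspace analogue of that catch-all; the boundary constant and approval flag are stand-ins:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TASK_SIZE_DEMO 0x7fffffffffffUL   /* stand-in userland boundary */

static void print_ptr(const void *p, bool approved)
{
    if ((uintptr_t)p > TASK_SIZE_DEMO && !approved) {
        fprintf(stderr, "refusing to leak a kernel pointer\n");
        p = NULL;                          /* the patch NULLs and logs */
    }
    printf("%p\n", (void *)(uintptr_t)p);
}

int main(void)
{
    int on_stack = 0;
    print_ptr(&on_stack, false);                              /* userland: printed */
    print_ptr((const void *)(uintptr_t)0xffffffff81000000UL,
              false);                                         /* kernel-looking: censored */
    return 0;
}
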
95249diff --git a/localversion-grsec b/localversion-grsec
95250new file mode 100644
95251index 0000000..7cd6065
95252--- /dev/null
95253+++ b/localversion-grsec
95254@@ -0,0 +1 @@
95255+-grsec
95256diff --git a/mm/Kconfig b/mm/Kconfig
95257index 1d1ae6b..0f05885 100644
95258--- a/mm/Kconfig
95259+++ b/mm/Kconfig
95260@@ -341,10 +341,11 @@ config KSM
95261 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95262
95263 config DEFAULT_MMAP_MIN_ADDR
95264- int "Low address space to protect from user allocation"
95265+ int "Low address space to protect from user allocation"
95266 depends on MMU
95267- default 4096
95268- help
95269+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95270+ default 65536
95271+ help
95272 This is the portion of low virtual memory which should be protected
95273 from userspace allocation. Keeping a user from writing to low pages
95274 can help reduce the impact of kernel NULL pointer bugs.
95275@@ -375,7 +376,7 @@ config MEMORY_FAILURE
95276
95277 config HWPOISON_INJECT
95278 tristate "HWPoison pages injector"
95279- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95280+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95281 select PROC_PAGE_MONITOR
95282
95283 config NOMMU_INITIAL_TRIM_EXCESS
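
Raising DEFAULT_MMAP_MIN_ADDR to 64 KiB (32 KiB on architectures whose ABIs need low mappings) keeps userspace from mapping page zero, the staging ground for classic kernel NULL-pointer-dereference exploits. A quick probe of the setting:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    printf("mmap at 0: %s\n", p == MAP_FAILED ? "denied (good)" : "allowed");
    return 0;
}
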
95284diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
95285index 957d3da..1d34e20 100644
95286--- a/mm/Kconfig.debug
95287+++ b/mm/Kconfig.debug
95288@@ -10,6 +10,7 @@ config PAGE_EXTENSION
95289 config DEBUG_PAGEALLOC
95290 bool "Debug page memory allocations"
95291 depends on DEBUG_KERNEL
95292+ depends on !PAX_MEMORY_SANITIZE
95293 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
95294 depends on !KMEMCHECK
95295 select PAGE_EXTENSION
95296diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95297index 0ae0df5..82ac56b 100644
95298--- a/mm/backing-dev.c
95299+++ b/mm/backing-dev.c
95300@@ -12,7 +12,7 @@
95301 #include <linux/device.h>
95302 #include <trace/events/writeback.h>
95303
95304-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95305+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95306
95307 struct backing_dev_info default_backing_dev_info = {
95308 .name = "default",
95309@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95310 return err;
95311
95312 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95313- atomic_long_inc_return(&bdi_seq));
95314+ atomic_long_inc_return_unchecked(&bdi_seq));
95315 if (err) {
95316 bdi_destroy(bdi);
95317 return err;
95318diff --git a/mm/filemap.c b/mm/filemap.c
95319index 673e458..7192013 100644
95320--- a/mm/filemap.c
95321+++ b/mm/filemap.c
95322@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95323 struct address_space *mapping = file->f_mapping;
95324
95325 if (!mapping->a_ops->readpage)
95326- return -ENOEXEC;
95327+ return -ENODEV;
95328 file_accessed(file);
95329 vma->vm_ops = &generic_file_vm_ops;
95330 return 0;
95331@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95332 *pos = i_size_read(inode);
95333
95334 if (limit != RLIM_INFINITY) {
95335+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95336 if (*pos >= limit) {
95337 send_sig(SIGXFSZ, current, 0);
95338 return -EFBIG;
95339diff --git a/mm/fremap.c b/mm/fremap.c
95340index 2805d71..8b56e7d 100644
95341--- a/mm/fremap.c
95342+++ b/mm/fremap.c
95343@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95344 retry:
95345 vma = find_vma(mm, start);
95346
95347+#ifdef CONFIG_PAX_SEGMEXEC
95348+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95349+ goto out;
95350+#endif
95351+
95352 /*
95353 * Make sure the vma is shared, that it supports prefaulting,
95354 * and that the remapped range is valid and fully within
95355diff --git a/mm/gup.c b/mm/gup.c
95356index 9b2afbf..647297c 100644
95357--- a/mm/gup.c
95358+++ b/mm/gup.c
95359@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95360 unsigned int fault_flags = 0;
95361 int ret;
95362
95363- /* For mlock, just skip the stack guard page. */
95364- if ((*flags & FOLL_MLOCK) &&
95365- (stack_guard_page_start(vma, address) ||
95366- stack_guard_page_end(vma, address + PAGE_SIZE)))
95367- return -ENOENT;
95368 if (*flags & FOLL_WRITE)
95369 fault_flags |= FAULT_FLAG_WRITE;
95370 if (nonblocking)
95371@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95372 if (!(gup_flags & FOLL_FORCE))
95373 gup_flags |= FOLL_NUMA;
95374
95375- do {
95376+ while (nr_pages) {
95377 struct page *page;
95378 unsigned int foll_flags = gup_flags;
95379 unsigned int page_increm;
95380
95381 /* first iteration or cross vma bound */
95382 if (!vma || start >= vma->vm_end) {
95383- vma = find_extend_vma(mm, start);
95384+ vma = find_vma(mm, start);
95385 if (!vma && in_gate_area(mm, start)) {
95386 int ret;
95387 ret = get_gate_page(mm, start & PAGE_MASK,
95388@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95389 goto next_page;
95390 }
95391
95392- if (!vma || check_vma_flags(vma, gup_flags))
95393+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95394 return i ? : -EFAULT;
95395 if (is_vm_hugetlb_page(vma)) {
95396 i = follow_hugetlb_page(mm, vma, pages, vmas,
95397@@ -518,7 +513,7 @@ next_page:
95398 i += page_increm;
95399 start += page_increm * PAGE_SIZE;
95400 nr_pages -= page_increm;
95401- } while (nr_pages);
95402+ }
95403 return i;
95404 }
95405 EXPORT_SYMBOL(__get_user_pages);
95406diff --git a/mm/highmem.c b/mm/highmem.c
95407index 123bcd3..0de52ba 100644
95408--- a/mm/highmem.c
95409+++ b/mm/highmem.c
95410@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95411 * So no dangers, even with speculative execution.
95412 */
95413 page = pte_page(pkmap_page_table[i]);
95414+ pax_open_kernel();
95415 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95416-
95417+ pax_close_kernel();
95418 set_page_address(page, NULL);
95419 need_flush = 1;
95420 }
95421@@ -259,9 +260,11 @@ start:
95422 }
95423 }
95424 vaddr = PKMAP_ADDR(last_pkmap_nr);
95425+
95426+ pax_open_kernel();
95427 set_pte_at(&init_mm, vaddr,
95428 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95429-
95430+ pax_close_kernel();
95431 pkmap_count[last_pkmap_nr] = 1;
95432 set_page_address(page, (void *)vaddr);
95433
95434diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95435index 267e419..394bed9 100644
95436--- a/mm/hugetlb.c
95437+++ b/mm/hugetlb.c
95438@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95439 struct ctl_table *table, int write,
95440 void __user *buffer, size_t *length, loff_t *ppos)
95441 {
95442+ ctl_table_no_const t;
95443 struct hstate *h = &default_hstate;
95444 unsigned long tmp = h->max_huge_pages;
95445 int ret;
95446@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95447 if (!hugepages_supported())
95448 return -ENOTSUPP;
95449
95450- table->data = &tmp;
95451- table->maxlen = sizeof(unsigned long);
95452- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95453+ t = *table;
95454+ t.data = &tmp;
95455+ t.maxlen = sizeof(unsigned long);
95456+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95457 if (ret)
95458 goto out;
95459
95460@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95461 struct hstate *h = &default_hstate;
95462 unsigned long tmp;
95463 int ret;
95464+ ctl_table_no_const hugetlb_table;
95465
95466 if (!hugepages_supported())
95467 return -ENOTSUPP;
95468@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95469 if (write && hstate_is_gigantic(h))
95470 return -EINVAL;
95471
95472- table->data = &tmp;
95473- table->maxlen = sizeof(unsigned long);
95474- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95475+ hugetlb_table = *table;
95476+ hugetlb_table.data = &tmp;
95477+ hugetlb_table.maxlen = sizeof(unsigned long);
95478+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95479 if (ret)
95480 goto out;
95481
95482@@ -2798,6 +2802,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95483 i_mmap_unlock_write(mapping);
95484 }
95485
95486+#ifdef CONFIG_PAX_SEGMEXEC
95487+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95488+{
95489+ struct mm_struct *mm = vma->vm_mm;
95490+ struct vm_area_struct *vma_m;
95491+ unsigned long address_m;
95492+ pte_t *ptep_m;
95493+
95494+ vma_m = pax_find_mirror_vma(vma);
95495+ if (!vma_m)
95496+ return;
95497+
95498+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95499+ address_m = address + SEGMEXEC_TASK_SIZE;
95500+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95501+ get_page(page_m);
95502+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95503+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95504+}
95505+#endif
95506+
95507 /*
95508 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95509 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95510@@ -2910,6 +2935,11 @@ retry_avoidcopy:
95511 make_huge_pte(vma, new_page, 1));
95512 page_remove_rmap(old_page);
95513 hugepage_add_new_anon_rmap(new_page, vma, address);
95514+
95515+#ifdef CONFIG_PAX_SEGMEXEC
95516+ pax_mirror_huge_pte(vma, address, new_page);
95517+#endif
95518+
95519 /* Make the old page be freed below */
95520 new_page = old_page;
95521 }
95522@@ -3070,6 +3100,10 @@ retry:
95523 && (vma->vm_flags & VM_SHARED)));
95524 set_huge_pte_at(mm, address, ptep, new_pte);
95525
95526+#ifdef CONFIG_PAX_SEGMEXEC
95527+ pax_mirror_huge_pte(vma, address, page);
95528+#endif
95529+
95530 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95531 /* Optimization, do the COW without a second fault */
95532 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95533@@ -3137,6 +3171,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95534 struct address_space *mapping;
95535 int need_wait_lock = 0;
95536
95537+#ifdef CONFIG_PAX_SEGMEXEC
95538+ struct vm_area_struct *vma_m;
95539+#endif
95540+
95541 address &= huge_page_mask(h);
95542
95543 ptep = huge_pte_offset(mm, address);
95544@@ -3150,6 +3188,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95545 VM_FAULT_SET_HINDEX(hstate_index(h));
95546 }
95547
95548+#ifdef CONFIG_PAX_SEGMEXEC
95549+ vma_m = pax_find_mirror_vma(vma);
95550+ if (vma_m) {
95551+ unsigned long address_m;
95552+
95553+ if (vma->vm_start > vma_m->vm_start) {
95554+ address_m = address;
95555+ address -= SEGMEXEC_TASK_SIZE;
95556+ vma = vma_m;
95557+ h = hstate_vma(vma);
95558+ } else
95559+ address_m = address + SEGMEXEC_TASK_SIZE;
95560+
95561+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95562+ return VM_FAULT_OOM;
95563+ address_m &= HPAGE_MASK;
95564+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95565+ }
95566+#endif
95567+
95568 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95569 if (!ptep)
95570 return VM_FAULT_OOM;
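
pax_mirror_huge_pte() extends the SEGMEXEC mirroring scheme to huge pages: under PAX_SEGMEXEC the i386 address space is split in half, every data mapping in the lower half has a shadow VMA at a fixed offset in the upper half, and the fault paths keep the two sets of PTEs in sync. The address arithmetic in miniature, with the split value shown being the conventional i386 one and illustrative here:

#define SEGMEXEC_TASK_SIZE_DEMO 0x60000000UL  /* lower half of a 3 GB task size */

static unsigned long mirror_of(unsigned long address)
{
    /* callers BUG_ON(address >= SEGMEXEC_TASK_SIZE) before mirroring */
    return address + SEGMEXEC_TASK_SIZE_DEMO;
}
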
95571diff --git a/mm/internal.h b/mm/internal.h
95572index efad241..57ae4ca 100644
95573--- a/mm/internal.h
95574+++ b/mm/internal.h
95575@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95576
95577 extern int __isolate_free_page(struct page *page, unsigned int order);
95578 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95579+extern void free_compound_page(struct page *page);
95580 extern void prep_compound_page(struct page *page, unsigned long order);
95581 #ifdef CONFIG_MEMORY_FAILURE
95582 extern bool is_free_buddy_page(struct page *page);
95583@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
95584
95585 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95586 unsigned long, unsigned long,
95587- unsigned long, unsigned long);
95588+ unsigned long, unsigned long) __intentional_overflow(-1);
95589
95590 extern void set_pageblock_order(void);
95591 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95592diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95593index 3cda50c..032ba634 100644
95594--- a/mm/kmemleak.c
95595+++ b/mm/kmemleak.c
95596@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95597
95598 for (i = 0; i < object->trace_len; i++) {
95599 void *ptr = (void *)object->trace[i];
95600- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95601+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95602 }
95603 }
95604
95605@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95606 return -ENOMEM;
95607 }
95608
95609- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95610+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95611 &kmemleak_fops);
95612 if (!dentry)
95613 pr_warning("Failed to create the debugfs kmemleak file\n");
95614diff --git a/mm/maccess.c b/mm/maccess.c
95615index d53adf9..03a24bf 100644
95616--- a/mm/maccess.c
95617+++ b/mm/maccess.c
95618@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95619 set_fs(KERNEL_DS);
95620 pagefault_disable();
95621 ret = __copy_from_user_inatomic(dst,
95622- (__force const void __user *)src, size);
95623+ (const void __force_user *)src, size);
95624 pagefault_enable();
95625 set_fs(old_fs);
95626
95627@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95628
95629 set_fs(KERNEL_DS);
95630 pagefault_disable();
95631- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95632+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95633 pagefault_enable();
95634 set_fs(old_fs);
95635
95636diff --git a/mm/madvise.c b/mm/madvise.c
95637index a271adc..831d82f 100644
95638--- a/mm/madvise.c
95639+++ b/mm/madvise.c
95640@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95641 pgoff_t pgoff;
95642 unsigned long new_flags = vma->vm_flags;
95643
95644+#ifdef CONFIG_PAX_SEGMEXEC
95645+ struct vm_area_struct *vma_m;
95646+#endif
95647+
95648 switch (behavior) {
95649 case MADV_NORMAL:
95650 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95651@@ -126,6 +130,13 @@ success:
95652 /*
95653 * vm_flags is protected by the mmap_sem held in write mode.
95654 */
95655+
95656+#ifdef CONFIG_PAX_SEGMEXEC
95657+ vma_m = pax_find_mirror_vma(vma);
95658+ if (vma_m)
95659+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95660+#endif
95661+
95662 vma->vm_flags = new_flags;
95663
95664 out:
95665@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95666 struct vm_area_struct **prev,
95667 unsigned long start, unsigned long end)
95668 {
95669+
95670+#ifdef CONFIG_PAX_SEGMEXEC
95671+ struct vm_area_struct *vma_m;
95672+#endif
95673+
95674 *prev = vma;
95675 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95676 return -EINVAL;
95677@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95678 zap_page_range(vma, start, end - start, &details);
95679 } else
95680 zap_page_range(vma, start, end - start, NULL);
95681+
95682+#ifdef CONFIG_PAX_SEGMEXEC
95683+ vma_m = pax_find_mirror_vma(vma);
95684+ if (vma_m) {
95685+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95686+ struct zap_details details = {
95687+ .nonlinear_vma = vma_m,
95688+ .last_index = ULONG_MAX,
95689+ };
95690+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95691+ } else
95692+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95693+ }
95694+#endif
95695+
95696 return 0;
95697 }
95698
95699@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95700 if (end < start)
95701 return error;
95702
95703+#ifdef CONFIG_PAX_SEGMEXEC
95704+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95705+ if (end > SEGMEXEC_TASK_SIZE)
95706+ return error;
95707+ } else
95708+#endif
95709+
95710+ if (end > TASK_SIZE)
95711+ return error;
95712+
95713 error = 0;
95714 if (end == start)
95715 return error;
95716diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95717index 20c29dd..22bd8e2 100644
95718--- a/mm/memory-failure.c
95719+++ b/mm/memory-failure.c
95720@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95721
95722 int sysctl_memory_failure_recovery __read_mostly = 1;
95723
95724-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95725+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95726
95727 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
95728
95729@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
95730 pfn, t->comm, t->pid);
95731 si.si_signo = SIGBUS;
95732 si.si_errno = 0;
95733- si.si_addr = (void *)addr;
95734+ si.si_addr = (void __user *)addr;
95735 #ifdef __ARCH_SI_TRAPNO
95736 si.si_trapno = trapno;
95737 #endif
95738@@ -786,7 +786,7 @@ static struct page_state {
95739 unsigned long res;
95740 char *msg;
95741 int (*action)(struct page *p, unsigned long pfn);
95742-} error_states[] = {
95743+} __do_const error_states[] = {
95744 { reserved, reserved, "reserved kernel", me_kernel },
95745 /*
95746 * free pages are specially detected outside this table:
95747@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95748 nr_pages = 1 << compound_order(hpage);
95749 else /* normal page or thp */
95750 nr_pages = 1;
95751- atomic_long_add(nr_pages, &num_poisoned_pages);
95752+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
95753
95754 /*
95755 * We need/can do nothing about count=0 pages.
95756@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95757 if (PageHWPoison(hpage)) {
95758 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
95759 || (p != hpage && TestSetPageHWPoison(hpage))) {
95760- atomic_long_sub(nr_pages, &num_poisoned_pages);
95761+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95762 unlock_page(hpage);
95763 return 0;
95764 }
95765@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95766 */
95767 if (!PageHWPoison(p)) {
95768 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
95769- atomic_long_sub(nr_pages, &num_poisoned_pages);
95770+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95771 put_page(hpage);
95772 res = 0;
95773 goto out;
95774 }
95775 if (hwpoison_filter(p)) {
95776 if (TestClearPageHWPoison(p))
95777- atomic_long_sub(nr_pages, &num_poisoned_pages);
95778+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95779 unlock_page(hpage);
95780 put_page(hpage);
95781 return 0;
95782@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
95783 return 0;
95784 }
95785 if (TestClearPageHWPoison(p))
95786- atomic_long_dec(&num_poisoned_pages);
95787+ atomic_long_dec_unchecked(&num_poisoned_pages);
95788 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
95789 return 0;
95790 }
95791@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
95792 */
95793 if (TestClearPageHWPoison(page)) {
95794 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
95795- atomic_long_sub(nr_pages, &num_poisoned_pages);
95796+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95797 freeit = 1;
95798 if (PageHuge(page))
95799 clear_page_hwpoison_huge_page(page);
95800@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
95801 if (PageHuge(page)) {
95802 set_page_hwpoison_huge_page(hpage);
95803 dequeue_hwpoisoned_huge_page(hpage);
95804- atomic_long_add(1 << compound_order(hpage),
95805+ atomic_long_add_unchecked(1 << compound_order(hpage),
95806 &num_poisoned_pages);
95807 } else {
95808 SetPageHWPoison(page);
95809- atomic_long_inc(&num_poisoned_pages);
95810+ atomic_long_inc_unchecked(&num_poisoned_pages);
95811 }
95812 }
95813 return ret;
95814@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
95815 put_page(page);
95816 pr_info("soft_offline: %#lx: invalidated\n", pfn);
95817 SetPageHWPoison(page);
95818- atomic_long_inc(&num_poisoned_pages);
95819+ atomic_long_inc_unchecked(&num_poisoned_pages);
95820 return 0;
95821 }
95822
95823@@ -1659,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags)
95824 if (!is_free_buddy_page(page))
95825 pr_info("soft offline: %#lx: page leaked\n",
95826 pfn);
95827- atomic_long_inc(&num_poisoned_pages);
95828+ atomic_long_inc_unchecked(&num_poisoned_pages);
95829 }
95830 } else {
95831 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
95832@@ -1729,11 +1729,11 @@ int soft_offline_page(struct page *page, int flags)
95833 if (PageHuge(page)) {
95834 set_page_hwpoison_huge_page(hpage);
95835 dequeue_hwpoisoned_huge_page(hpage);
95836- atomic_long_add(1 << compound_order(hpage),
95837+ atomic_long_add_unchecked(1 << compound_order(hpage),
95838 &num_poisoned_pages);
95839 } else {
95840 SetPageHWPoison(page);
95841- atomic_long_inc(&num_poisoned_pages);
95842+ atomic_long_inc_unchecked(&num_poisoned_pages);
95843 }
95844 }
95845 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
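
num_poisoned_pages becomes atomic_long_unchecked_t because, under PAX_REFCOUNT, ordinary atomics trap on overflow to stop refcount exploits; counters that are pure statistics opt out via the *_unchecked types, as bdi_seq did earlier in this patch. The distinction, sketched with illustrative types:

typedef struct { volatile long v; } atomic_long_checked_t;   /* guards lifetimes, traps on wrap */
typedef struct { volatile long v; } atomic_long_unchecked_t; /* pure statistic, may wrap */

static atomic_long_unchecked_t num_poisoned_pages;

static void poison_one(void)
{
    /* a wrap here loses a statistic, not an object lifetime */
    __atomic_fetch_add(&num_poisoned_pages.v, 1, __ATOMIC_RELAXED);
}
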
95846diff --git a/mm/memory.c b/mm/memory.c
95847index 6aa7822..3c76005 100644
95848--- a/mm/memory.c
95849+++ b/mm/memory.c
95850@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95851 free_pte_range(tlb, pmd, addr);
95852 } while (pmd++, addr = next, addr != end);
95853
95854+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
95855 start &= PUD_MASK;
95856 if (start < floor)
95857 return;
95858@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95859 pmd = pmd_offset(pud, start);
95860 pud_clear(pud);
95861 pmd_free_tlb(tlb, pmd, start);
95862+#endif
95863+
95864 }
95865
95866 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95867@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95868 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
95869 } while (pud++, addr = next, addr != end);
95870
95871+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
95872 start &= PGDIR_MASK;
95873 if (start < floor)
95874 return;
95875@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95876 pud = pud_offset(pgd, start);
95877 pgd_clear(pgd);
95878 pud_free_tlb(tlb, pud, start);
95879+#endif
95880+
95881 }
95882
95883 /*
95884@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
95885 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
95886 */
95887 if (vma->vm_ops)
95888- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
95889+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
95890 vma->vm_ops->fault);
95891 if (vma->vm_file)
95892- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
95893+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
95894 vma->vm_file->f_op->mmap);
95895 dump_stack();
95896 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
95897@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
95898 page_add_file_rmap(page);
95899 set_pte_at(mm, addr, pte, mk_pte(page, prot));
95900
95901+#ifdef CONFIG_PAX_SEGMEXEC
95902+ pax_mirror_file_pte(vma, addr, page, ptl);
95903+#endif
95904+
95905 retval = 0;
95906 pte_unmap_unlock(pte, ptl);
95907 return retval;
95908@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
95909 if (!page_count(page))
95910 return -EINVAL;
95911 if (!(vma->vm_flags & VM_MIXEDMAP)) {
95912+
95913+#ifdef CONFIG_PAX_SEGMEXEC
95914+ struct vm_area_struct *vma_m;
95915+#endif
95916+
95917 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
95918 BUG_ON(vma->vm_flags & VM_PFNMAP);
95919 vma->vm_flags |= VM_MIXEDMAP;
95920+
95921+#ifdef CONFIG_PAX_SEGMEXEC
95922+ vma_m = pax_find_mirror_vma(vma);
95923+ if (vma_m)
95924+ vma_m->vm_flags |= VM_MIXEDMAP;
95925+#endif
95926+
95927 }
95928 return insert_page(vma, addr, page, vma->vm_page_prot);
95929 }
95930@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
95931 unsigned long pfn)
95932 {
95933 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
95934+ BUG_ON(vma->vm_mirror);
95935
95936 if (addr < vma->vm_start || addr >= vma->vm_end)
95937 return -EFAULT;
95938@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
95939
95940 BUG_ON(pud_huge(*pud));
95941
95942- pmd = pmd_alloc(mm, pud, addr);
95943+ pmd = (mm == &init_mm) ?
95944+ pmd_alloc_kernel(mm, pud, addr) :
95945+ pmd_alloc(mm, pud, addr);
95946 if (!pmd)
95947 return -ENOMEM;
95948 do {
95949@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
95950 unsigned long next;
95951 int err;
95952
95953- pud = pud_alloc(mm, pgd, addr);
95954+ pud = (mm == &init_mm) ?
95955+ pud_alloc_kernel(mm, pgd, addr) :
95956+ pud_alloc(mm, pgd, addr);
95957 if (!pud)
95958 return -ENOMEM;
95959 do {
95960@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
95961 return ret;
95962 }
95963
95964+#ifdef CONFIG_PAX_SEGMEXEC
95965+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
95966+{
95967+ struct mm_struct *mm = vma->vm_mm;
95968+ spinlock_t *ptl;
95969+ pte_t *pte, entry;
95970+
95971+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
95972+ entry = *pte;
95973+ if (!pte_present(entry)) {
95974+ if (!pte_none(entry)) {
95975+ BUG_ON(pte_file(entry));
95976+ free_swap_and_cache(pte_to_swp_entry(entry));
95977+ pte_clear_not_present_full(mm, address, pte, 0);
95978+ }
95979+ } else {
95980+ struct page *page;
95981+
95982+ flush_cache_page(vma, address, pte_pfn(entry));
95983+ entry = ptep_clear_flush(vma, address, pte);
95984+ BUG_ON(pte_dirty(entry));
95985+ page = vm_normal_page(vma, address, entry);
95986+ if (page) {
95987+ update_hiwater_rss(mm);
95988+ if (PageAnon(page))
95989+ dec_mm_counter_fast(mm, MM_ANONPAGES);
95990+ else
95991+ dec_mm_counter_fast(mm, MM_FILEPAGES);
95992+ page_remove_rmap(page);
95993+ page_cache_release(page);
95994+ }
95995+ }
95996+ pte_unmap_unlock(pte, ptl);
95997+}
95998+
95999+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96000+ *
96001+ * the ptl of the lower mapped page is held on entry and is not released on exit
96002+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96003+ */
96004+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96005+{
96006+ struct mm_struct *mm = vma->vm_mm;
96007+ unsigned long address_m;
96008+ spinlock_t *ptl_m;
96009+ struct vm_area_struct *vma_m;
96010+ pmd_t *pmd_m;
96011+ pte_t *pte_m, entry_m;
96012+
96013+ BUG_ON(!page_m || !PageAnon(page_m));
96014+
96015+ vma_m = pax_find_mirror_vma(vma);
96016+ if (!vma_m)
96017+ return;
96018+
96019+ BUG_ON(!PageLocked(page_m));
96020+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96021+ address_m = address + SEGMEXEC_TASK_SIZE;
96022+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96023+ pte_m = pte_offset_map(pmd_m, address_m);
96024+ ptl_m = pte_lockptr(mm, pmd_m);
96025+ if (ptl != ptl_m) {
96026+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96027+ if (!pte_none(*pte_m))
96028+ goto out;
96029+ }
96030+
96031+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96032+ page_cache_get(page_m);
96033+ page_add_anon_rmap(page_m, vma_m, address_m);
96034+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96035+ set_pte_at(mm, address_m, pte_m, entry_m);
96036+ update_mmu_cache(vma_m, address_m, pte_m);
96037+out:
96038+ if (ptl != ptl_m)
96039+ spin_unlock(ptl_m);
96040+ pte_unmap(pte_m);
96041+ unlock_page(page_m);
96042+}
96043+
96044+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96045+{
96046+ struct mm_struct *mm = vma->vm_mm;
96047+ unsigned long address_m;
96048+ spinlock_t *ptl_m;
96049+ struct vm_area_struct *vma_m;
96050+ pmd_t *pmd_m;
96051+ pte_t *pte_m, entry_m;
96052+
96053+ BUG_ON(!page_m || PageAnon(page_m));
96054+
96055+ vma_m = pax_find_mirror_vma(vma);
96056+ if (!vma_m)
96057+ return;
96058+
96059+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96060+ address_m = address + SEGMEXEC_TASK_SIZE;
96061+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96062+ pte_m = pte_offset_map(pmd_m, address_m);
96063+ ptl_m = pte_lockptr(mm, pmd_m);
96064+ if (ptl != ptl_m) {
96065+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96066+ if (!pte_none(*pte_m))
96067+ goto out;
96068+ }
96069+
96070+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96071+ page_cache_get(page_m);
96072+ page_add_file_rmap(page_m);
96073+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96074+ set_pte_at(mm, address_m, pte_m, entry_m);
96075+ update_mmu_cache(vma_m, address_m, pte_m);
96076+out:
96077+ if (ptl != ptl_m)
96078+ spin_unlock(ptl_m);
96079+ pte_unmap(pte_m);
96080+}
96081+
96082+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96083+{
96084+ struct mm_struct *mm = vma->vm_mm;
96085+ unsigned long address_m;
96086+ spinlock_t *ptl_m;
96087+ struct vm_area_struct *vma_m;
96088+ pmd_t *pmd_m;
96089+ pte_t *pte_m, entry_m;
96090+
96091+ vma_m = pax_find_mirror_vma(vma);
96092+ if (!vma_m)
96093+ return;
96094+
96095+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96096+ address_m = address + SEGMEXEC_TASK_SIZE;
96097+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96098+ pte_m = pte_offset_map(pmd_m, address_m);
96099+ ptl_m = pte_lockptr(mm, pmd_m);
96100+ if (ptl != ptl_m) {
96101+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96102+ if (!pte_none(*pte_m))
96103+ goto out;
96104+ }
96105+
96106+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96107+ set_pte_at(mm, address_m, pte_m, entry_m);
96108+out:
96109+ if (ptl != ptl_m)
96110+ spin_unlock(ptl_m);
96111+ pte_unmap(pte_m);
96112+}
96113+
96114+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96115+{
96116+ struct page *page_m;
96117+ pte_t entry;
96118+
96119+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96120+ goto out;
96121+
96122+ entry = *pte;
96123+ page_m = vm_normal_page(vma, address, entry);
96124+ if (!page_m)
96125+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96126+ else if (PageAnon(page_m)) {
96127+ if (pax_find_mirror_vma(vma)) {
96128+ pte_unmap_unlock(pte, ptl);
96129+ lock_page(page_m);
96130+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96131+ if (pte_same(entry, *pte))
96132+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96133+ else
96134+ unlock_page(page_m);
96135+ }
96136+ } else
96137+ pax_mirror_file_pte(vma, address, page_m, ptl);
96138+
96139+out:
96140+ pte_unmap_unlock(pte, ptl);
96141+}
96142+#endif
96143+
96144 /*
96145 * This routine handles present pages, when users try to write
96146 * to a shared page. It is done by copying the page to a new address
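All three pax_mirror_*_pte() helpers above derive the shadow location the same way: under SEGMEXEC the userland address space is split in half, and every executable mapping at addr has a mirror at addr + SEGMEXEC_TASK_SIZE, so PTE updates are simply replayed at that offset under the mirror's page-table lock. A minimal userspace sketch of the address arithmetic, assuming SEGMEXEC_TASK_SIZE is half of the i386 3GB TASK_SIZE (0x60000000; the real constant comes from the arch headers elsewhere in this patch):

    #include <assert.h>
    #include <stdio.h>

    /* assumed value: half of the i386 3GB TASK_SIZE; the real constant
     * is defined in the arch headers of the PaX patch */
    #define SEGMEXEC_TASK_SIZE 0x60000000UL

    /* mirror of the address computation used by pax_mirror_anon_pte() etc. */
    static unsigned long mirror_address(unsigned long addr)
    {
        assert(addr < SEGMEXEC_TASK_SIZE);  /* only the lower half is mirrored */
        return addr + SEGMEXEC_TASK_SIZE;   /* shadow lives in the upper half */
    }

    int main(void)
    {
        unsigned long addr = 0x08048000UL;  /* classic i386 ELF text base */
        printf("data view: %#lx, exec view: %#lx\n", addr, mirror_address(addr));
        return 0;
    }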
96147@@ -2212,6 +2419,12 @@ gotten:
96148 */
96149 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96150 if (likely(pte_same(*page_table, orig_pte))) {
96151+
96152+#ifdef CONFIG_PAX_SEGMEXEC
96153+ if (pax_find_mirror_vma(vma))
96154+ BUG_ON(!trylock_page(new_page));
96155+#endif
96156+
96157 if (old_page) {
96158 if (!PageAnon(old_page)) {
96159 dec_mm_counter_fast(mm, MM_FILEPAGES);
96160@@ -2265,6 +2478,10 @@ gotten:
96161 page_remove_rmap(old_page);
96162 }
96163
96164+#ifdef CONFIG_PAX_SEGMEXEC
96165+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96166+#endif
96167+
96168 /* Free the old page.. */
96169 new_page = old_page;
96170 ret |= VM_FAULT_WRITE;
96171@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96172 swap_free(entry);
96173 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96174 try_to_free_swap(page);
96175+
96176+#ifdef CONFIG_PAX_SEGMEXEC
96177+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96178+#endif
96179+
96180 unlock_page(page);
96181 if (page != swapcache) {
96182 /*
96183@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96184
96185 /* No need to invalidate - it was non-present before */
96186 update_mmu_cache(vma, address, page_table);
96187+
96188+#ifdef CONFIG_PAX_SEGMEXEC
96189+ pax_mirror_anon_pte(vma, address, page, ptl);
96190+#endif
96191+
96192 unlock:
96193 pte_unmap_unlock(page_table, ptl);
96194 out:
96195@@ -2581,40 +2808,6 @@ out_release:
96196 }
96197
96198 /*
96199- * This is like a special single-page "expand_{down|up}wards()",
96200- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96201- * doesn't hit another vma.
96202- */
96203-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96204-{
96205- address &= PAGE_MASK;
96206- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96207- struct vm_area_struct *prev = vma->vm_prev;
96208-
96209- /*
96210- * Is there a mapping abutting this one below?
96211- *
96212- * That's only ok if it's the same stack mapping
96213- * that has gotten split..
96214- */
96215- if (prev && prev->vm_end == address)
96216- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96217-
96218- return expand_downwards(vma, address - PAGE_SIZE);
96219- }
96220- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96221- struct vm_area_struct *next = vma->vm_next;
96222-
96223- /* As VM_GROWSDOWN but s/below/above/ */
96224- if (next && next->vm_start == address + PAGE_SIZE)
96225- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96226-
96227- return expand_upwards(vma, address + PAGE_SIZE);
96228- }
96229- return 0;
96230-}
96231-
96232-/*
96233 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96234 * but allow concurrent faults), and pte mapped but not yet locked.
96235 * We return with mmap_sem still held, but pte unmapped and unlocked.
96236@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96237 unsigned int flags)
96238 {
96239 struct mem_cgroup *memcg;
96240- struct page *page;
96241+ struct page *page = NULL;
96242 spinlock_t *ptl;
96243 pte_t entry;
96244
96245- pte_unmap(page_table);
96246-
96247- /* Check if we need to add a guard page to the stack */
96248- if (check_stack_guard_page(vma, address) < 0)
96249- return VM_FAULT_SIGSEGV;
96250-
96251- /* Use the zero-page for reads */
96252 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
96253 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96254 vma->vm_page_prot));
96255- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96256+ ptl = pte_lockptr(mm, pmd);
96257+ spin_lock(ptl);
96258 if (!pte_none(*page_table))
96259 goto unlock;
96260 goto setpte;
96261 }
96262
96263 /* Allocate our own private page. */
96264+ pte_unmap(page_table);
96265+
96266 if (unlikely(anon_vma_prepare(vma)))
96267 goto oom;
96268 page = alloc_zeroed_user_highpage_movable(vma, address);
96269@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96270 if (!pte_none(*page_table))
96271 goto release;
96272
96273+#ifdef CONFIG_PAX_SEGMEXEC
96274+ if (pax_find_mirror_vma(vma))
96275+ BUG_ON(!trylock_page(page));
96276+#endif
96277+
96278 inc_mm_counter_fast(mm, MM_ANONPAGES);
96279 page_add_new_anon_rmap(page, vma, address);
96280 mem_cgroup_commit_charge(page, memcg, false);
96281@@ -2677,6 +2871,12 @@ setpte:
96282
96283 /* No need to invalidate - it was non-present before */
96284 update_mmu_cache(vma, address, page_table);
96285+
96286+#ifdef CONFIG_PAX_SEGMEXEC
96287+ if (page)
96288+ pax_mirror_anon_pte(vma, address, page, ptl);
96289+#endif
96290+
96291 unlock:
96292 pte_unmap_unlock(page_table, ptl);
96293 return 0;
96294@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96295 return ret;
96296 }
96297 do_set_pte(vma, address, fault_page, pte, false, false);
96298+
96299+#ifdef CONFIG_PAX_SEGMEXEC
96300+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96301+#endif
96302+
96303 unlock_page(fault_page);
96304 unlock_out:
96305 pte_unmap_unlock(pte, ptl);
96306@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96307 page_cache_release(fault_page);
96308 goto uncharge_out;
96309 }
96310+
96311+#ifdef CONFIG_PAX_SEGMEXEC
96312+ if (pax_find_mirror_vma(vma))
96313+ BUG_ON(!trylock_page(new_page));
96314+#endif
96315+
96316 do_set_pte(vma, address, new_page, pte, true, true);
96317+
96318+#ifdef CONFIG_PAX_SEGMEXEC
96319+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96320+#endif
96321+
96322 mem_cgroup_commit_charge(new_page, memcg, false);
96323 lru_cache_add_active_or_unevictable(new_page, vma);
96324 pte_unmap_unlock(pte, ptl);
96325@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96326 return ret;
96327 }
96328 do_set_pte(vma, address, fault_page, pte, true, false);
96329+
96330+#ifdef CONFIG_PAX_SEGMEXEC
96331+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96332+#endif
96333+
96334 pte_unmap_unlock(pte, ptl);
96335
96336 if (set_page_dirty(fault_page))
96337@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96338 if (flags & FAULT_FLAG_WRITE)
96339 flush_tlb_fix_spurious_fault(vma, address);
96340 }
96341+
96342+#ifdef CONFIG_PAX_SEGMEXEC
96343+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96344+ return 0;
96345+#endif
96346+
96347 unlock:
96348 pte_unmap_unlock(pte, ptl);
96349 return 0;
96350@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96351 pmd_t *pmd;
96352 pte_t *pte;
96353
96354+#ifdef CONFIG_PAX_SEGMEXEC
96355+ struct vm_area_struct *vma_m;
96356+#endif
96357+
96358 if (unlikely(is_vm_hugetlb_page(vma)))
96359 return hugetlb_fault(mm, vma, address, flags);
96360
96361+#ifdef CONFIG_PAX_SEGMEXEC
96362+ vma_m = pax_find_mirror_vma(vma);
96363+ if (vma_m) {
96364+ unsigned long address_m;
96365+ pgd_t *pgd_m;
96366+ pud_t *pud_m;
96367+ pmd_t *pmd_m;
96368+
96369+ if (vma->vm_start > vma_m->vm_start) {
96370+ address_m = address;
96371+ address -= SEGMEXEC_TASK_SIZE;
96372+ vma = vma_m;
96373+ } else
96374+ address_m = address + SEGMEXEC_TASK_SIZE;
96375+
96376+ pgd_m = pgd_offset(mm, address_m);
96377+ pud_m = pud_alloc(mm, pgd_m, address_m);
96378+ if (!pud_m)
96379+ return VM_FAULT_OOM;
96380+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96381+ if (!pmd_m)
96382+ return VM_FAULT_OOM;
96383+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96384+ return VM_FAULT_OOM;
96385+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96386+ }
96387+#endif
96388+
96389 pgd = pgd_offset(mm, address);
96390 pud = pud_alloc(mm, pgd, address);
96391 if (!pud)
96392@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96393 spin_unlock(&mm->page_table_lock);
96394 return 0;
96395 }
96396+
96397+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96398+{
96399+ pud_t *new = pud_alloc_one(mm, address);
96400+ if (!new)
96401+ return -ENOMEM;
96402+
96403+ smp_wmb(); /* See comment in __pte_alloc */
96404+
96405+ spin_lock(&mm->page_table_lock);
96406+ if (pgd_present(*pgd)) /* Another has populated it */
96407+ pud_free(mm, new);
96408+ else
96409+ pgd_populate_kernel(mm, pgd, new);
96410+ spin_unlock(&mm->page_table_lock);
96411+ return 0;
96412+}
96413 #endif /* __PAGETABLE_PUD_FOLDED */
96414
96415 #ifndef __PAGETABLE_PMD_FOLDED
96416@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96417 spin_unlock(&mm->page_table_lock);
96418 return 0;
96419 }
96420+
96421+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96422+{
96423+ pmd_t *new = pmd_alloc_one(mm, address);
96424+ if (!new)
96425+ return -ENOMEM;
96426+
96427+ smp_wmb(); /* See comment in __pte_alloc */
96428+
96429+ spin_lock(&mm->page_table_lock);
96430+#ifndef __ARCH_HAS_4LEVEL_HACK
96431+ if (pud_present(*pud)) /* Another has populated it */
96432+ pmd_free(mm, new);
96433+ else
96434+ pud_populate_kernel(mm, pud, new);
96435+#else
96436+ if (pgd_present(*pud)) /* Another has populated it */
96437+ pmd_free(mm, new);
96438+ else
96439+ pgd_populate_kernel(mm, pud, new);
96440+#endif /* __ARCH_HAS_4LEVEL_HACK */
96441+ spin_unlock(&mm->page_table_lock);
96442+ return 0;
96443+}
96444 #endif /* __PAGETABLE_PMD_FOLDED */
96445
96446 static int __follow_pte(struct mm_struct *mm, unsigned long address,
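The __pud_alloc_kernel()/__pmd_alloc_kernel() variants introduced above reuse the upstream allocate-then-check idiom: allocate the new table outside the lock, then under mm->page_table_lock either free it because another thread won the race, or publish it with the *_populate_kernel() helper so init_mm's page tables are filled in directly rather than through the per-CPU PGD path. A pthreads sketch of the same race-tolerant install pattern (slot and install_table are illustrative names, not kernel APIs; the kernel version additionally issues smp_wmb() before publishing):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *slot;  /* stands in for the pud/pmd entry being populated */

    /* allocate outside the lock, publish under it, free on a lost race */
    static void *install_table(void)
    {
        void *new = malloc(64);
        if (!new)
            return NULL;
        pthread_mutex_lock(&table_lock);
        if (slot)            /* another thread has populated it */
            free(new);
        else
            slot = new;      /* cf. pgd_populate_kernel()/pud_populate_kernel() */
        pthread_mutex_unlock(&table_lock);
        return slot;
    }

    int main(void)
    {
        printf("installed table at %p\n", install_table());
        printf("second call reuses %p\n", install_table());
        return 0;
    }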
96447@@ -3550,8 +3850,8 @@ out:
96448 return ret;
96449 }
96450
96451-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96452- void *buf, int len, int write)
96453+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96454+ void *buf, size_t len, int write)
96455 {
96456 resource_size_t phys_addr;
96457 unsigned long prot = 0;
96458@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96459 * Access another process' address space as given in mm. If non-NULL, use the
96460 * given task for page fault accounting.
96461 */
96462-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96463- unsigned long addr, void *buf, int len, int write)
96464+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96465+ unsigned long addr, void *buf, size_t len, int write)
96466 {
96467 struct vm_area_struct *vma;
96468 void *old_buf = buf;
96469@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96470 down_read(&mm->mmap_sem);
96471 /* ignore errors, just check how much was successfully transferred */
96472 while (len) {
96473- int bytes, ret, offset;
96474+ ssize_t bytes, ret, offset;
96475 void *maddr;
96476 struct page *page = NULL;
96477
96478@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96479 *
96480 * The caller must hold a reference on @mm.
96481 */
96482-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96483- void *buf, int len, int write)
96484+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96485+ void *buf, size_t len, int write)
96486 {
96487 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96488 }
96489@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96490 * Source/target buffer must be kernel space,
96491 * Do not walk the page table directly, use get_user_pages
96492 */
96493-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96494- void *buf, int len, int write)
96495+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96496+ void *buf, size_t len, int write)
96497 {
96498 struct mm_struct *mm;
96499- int ret;
96500+ ssize_t ret;
96501
96502 mm = get_task_mm(tsk);
96503 if (!mm)
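The closing hunks of the mm/memory.c diff retype the length parameters of generic_access_phys(), __access_remote_vm(), access_remote_vm() and access_process_vm() from int to size_t/ssize_t. The motivation is that a signed 32-bit length can truncate or go negative when a 64-bit caller passes a large value, silently changing how the while (len) copy loop behaves. A small demo of the truncation hazard the wider types remove (userspace sketch, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long request = 0x100000000ULL; /* a 4GB length */
        int as_int = (int)request;                   /* truncates to 0 */

        printf("full length: %llu\n", request);
        printf("as int:      %d (truncated)\n", as_int);
        /* with int, the 4GB request silently becomes a 0-byte copy loop;
         * size_t/ssize_t keep the full length through the call chain */
        return 0;
    }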
96504diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96505index 0e0961b..c9143b9 100644
96506--- a/mm/mempolicy.c
96507+++ b/mm/mempolicy.c
96508@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96509 unsigned long vmstart;
96510 unsigned long vmend;
96511
96512+#ifdef CONFIG_PAX_SEGMEXEC
96513+ struct vm_area_struct *vma_m;
96514+#endif
96515+
96516 vma = find_vma(mm, start);
96517 if (!vma || vma->vm_start > start)
96518 return -EFAULT;
96519@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96520 err = vma_replace_policy(vma, new_pol);
96521 if (err)
96522 goto out;
96523+
96524+#ifdef CONFIG_PAX_SEGMEXEC
96525+ vma_m = pax_find_mirror_vma(vma);
96526+ if (vma_m) {
96527+ err = vma_replace_policy(vma_m, new_pol);
96528+ if (err)
96529+ goto out;
96530+ }
96531+#endif
96532+
96533 }
96534
96535 out:
96536@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96537
96538 if (end < start)
96539 return -EINVAL;
96540+
96541+#ifdef CONFIG_PAX_SEGMEXEC
96542+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96543+ if (end > SEGMEXEC_TASK_SIZE)
96544+ return -EINVAL;
96545+ } else
96546+#endif
96547+
96548+ if (end > TASK_SIZE)
96549+ return -EINVAL;
96550+
96551 if (end == start)
96552 return 0;
96553
96554@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96555 */
96556 tcred = __task_cred(task);
96557 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96558- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96559- !capable(CAP_SYS_NICE)) {
96560+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96561 rcu_read_unlock();
96562 err = -EPERM;
96563 goto out_put;
96564@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96565 goto out;
96566 }
96567
96568+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96569+ if (mm != current->mm &&
96570+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96571+ mmput(mm);
96572+ err = -EPERM;
96573+ goto out;
96574+ }
96575+#endif
96576+
96577 err = do_migrate_pages(mm, old, new,
96578 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96579
96580diff --git a/mm/migrate.c b/mm/migrate.c
96581index 344cdf6..07399500 100644
96582--- a/mm/migrate.c
96583+++ b/mm/migrate.c
96584@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96585 */
96586 tcred = __task_cred(task);
96587 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96588- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96589- !capable(CAP_SYS_NICE)) {
96590+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96591 rcu_read_unlock();
96592 err = -EPERM;
96593 goto out;
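Both the migrate_pages() hunk in mm/mempolicy.c and the move_pages() hunk here drop the uid_eq(cred->uid, tcred->uid) term from the permission check, so merely sharing a real UID with the target process no longer authorizes cross-process page migration; the caller's euid must match the target's suid or uid, or the caller must hold CAP_SYS_NICE. A boolean sketch of the before/after predicate (plain ints stand in for kuid_t):

    #include <stdbool.h>
    #include <stdio.h>

    struct cred { int uid, euid, suid; };

    /* upstream check: sharing a real uid was enough */
    static bool may_migrate_old(struct cred c, struct cred t, bool cap_sys_nice)
    {
        return c.euid == t.suid || c.euid == t.uid ||
               c.uid == t.suid || c.uid == t.uid || cap_sys_nice;
    }

    /* hardened check from the patch: the real-uid == real-uid path is gone */
    static bool may_migrate_new(struct cred c, struct cred t, bool cap_sys_nice)
    {
        return c.euid == t.suid || c.euid == t.uid ||
               c.uid == t.suid || cap_sys_nice;
    }

    int main(void)
    {
        struct cred attacker = { .uid = 1000, .euid = 2000, .suid = 2000 };
        struct cred victim   = { .uid = 1000, .euid = 1000, .suid = 1000 };
        printf("old: %d, new: %d\n",
               may_migrate_old(attacker, victim, false),
               may_migrate_new(attacker, victim, false));
        return 0;
    }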
96594diff --git a/mm/mlock.c b/mm/mlock.c
96595index 73cf098..ab547c7 100644
96596--- a/mm/mlock.c
96597+++ b/mm/mlock.c
96598@@ -14,6 +14,7 @@
96599 #include <linux/pagevec.h>
96600 #include <linux/mempolicy.h>
96601 #include <linux/syscalls.h>
96602+#include <linux/security.h>
96603 #include <linux/sched.h>
96604 #include <linux/export.h>
96605 #include <linux/rmap.h>
96606@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96607 {
96608 unsigned long nstart, end, tmp;
96609 struct vm_area_struct * vma, * prev;
96610- int error;
96611+ int error = 0;
96612
96613 VM_BUG_ON(start & ~PAGE_MASK);
96614 VM_BUG_ON(len != PAGE_ALIGN(len));
96615@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96616 return -EINVAL;
96617 if (end == start)
96618 return 0;
96619+ if (end > TASK_SIZE)
96620+ return -EINVAL;
96621+
96622 vma = find_vma(current->mm, start);
96623 if (!vma || vma->vm_start > start)
96624 return -ENOMEM;
96625@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96626 for (nstart = start ; ; ) {
96627 vm_flags_t newflags;
96628
96629+#ifdef CONFIG_PAX_SEGMEXEC
96630+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96631+ break;
96632+#endif
96633+
96634 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96635
96636 newflags = vma->vm_flags & ~VM_LOCKED;
96637@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96638 locked += current->mm->locked_vm;
96639
96640 /* check against resource limits */
96641+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96642 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96643 error = do_mlock(start, len, 1);
96644
96645@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96646 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96647 vm_flags_t newflags;
96648
96649+#ifdef CONFIG_PAX_SEGMEXEC
96650+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96651+ break;
96652+#endif
96653+
96654 newflags = vma->vm_flags & ~VM_LOCKED;
96655 if (flags & MCL_CURRENT)
96656 newflags |= VM_LOCKED;
96657@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96658 lock_limit >>= PAGE_SHIFT;
96659
96660 ret = -ENOMEM;
96661+
96662+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96663+
96664 down_write(&current->mm->mmap_sem);
96665-
96666 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96667 capable(CAP_IPC_LOCK))
96668 ret = do_mlockall(flags);
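The mm/mlock.c changes above mostly wire in gr_learn_resource() so grsecurity's RBAC learning mode can record the RLIMIT_MEMLOCK a workload actually needs. Note the unit difference at the two call sites: the learning hook is fed byte counts (pages shifted by PAGE_SHIFT, plus the raw length), while the enforcement test that follows compares page counts. A sketch of that unit juggling, assuming 4KB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumed 4KB pages */

    int main(void)
    {
        unsigned long locked_pages = 3, len_bytes = 8192;
        unsigned long lock_limit_bytes = 64 * 1024;          /* typical default */
        unsigned long lock_limit_pages = lock_limit_bytes >> PAGE_SHIFT;

        /* the learning hook records bytes ... */
        unsigned long learned = (locked_pages << PAGE_SHIFT) + len_bytes;
        /* ... while the enforcement compares pages */
        unsigned long want_pages = locked_pages + (len_bytes >> PAGE_SHIFT);

        printf("learned %lu bytes, need %lu of %lu pages\n",
               learned, want_pages, lock_limit_pages);
        return 0;
    }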
96669diff --git a/mm/mmap.c b/mm/mmap.c
96670index e5cc3ca..bb9333f 100644
96671--- a/mm/mmap.c
96672+++ b/mm/mmap.c
96673@@ -41,6 +41,7 @@
96674 #include <linux/notifier.h>
96675 #include <linux/memory.h>
96676 #include <linux/printk.h>
96677+#include <linux/random.h>
96678
96679 #include <asm/uaccess.h>
96680 #include <asm/cacheflush.h>
96681@@ -57,6 +58,16 @@
96682 #define arch_rebalance_pgtables(addr, len) (addr)
96683 #endif
96684
96685+static inline void verify_mm_writelocked(struct mm_struct *mm)
96686+{
96687+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96688+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96689+ up_read(&mm->mmap_sem);
96690+ BUG();
96691+ }
96692+#endif
96693+}
96694+
96695 static void unmap_region(struct mm_struct *mm,
96696 struct vm_area_struct *vma, struct vm_area_struct *prev,
96697 unsigned long start, unsigned long end);
96698@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96699 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96700 *
96701 */
96702-pgprot_t protection_map[16] = {
96703+pgprot_t protection_map[16] __read_only = {
96704 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96705 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96706 };
96707
96708-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96709+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96710 {
96711- return __pgprot(pgprot_val(protection_map[vm_flags &
96712+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96713 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96714 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96715+
96716+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96717+ if (!(__supported_pte_mask & _PAGE_NX) &&
96718+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96719+ (vm_flags & (VM_READ | VM_WRITE)))
96720+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96721+#endif
96722+
96723+ return prot;
96724 }
96725 EXPORT_SYMBOL(vm_get_page_prot);
96726
96727@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
96728 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96729 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
96730 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
96731+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96732 /*
96733 * Make sure vm_committed_as in one cacheline and not cacheline shared with
96734 * other variables. It can be updated by several CPUs frequently.
96735@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96736 struct vm_area_struct *next = vma->vm_next;
96737
96738 might_sleep();
96739+ BUG_ON(vma->vm_mirror);
96740 if (vma->vm_ops && vma->vm_ops->close)
96741 vma->vm_ops->close(vma);
96742 if (vma->vm_file)
96743@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
96744
96745 SYSCALL_DEFINE1(brk, unsigned long, brk)
96746 {
96747+ unsigned long rlim;
96748 unsigned long retval;
96749 unsigned long newbrk, oldbrk;
96750 struct mm_struct *mm = current->mm;
96751@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96752 * segment grow beyond its set limit the in case where the limit is
96753 * not page aligned -Ram Gupta
96754 */
96755- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
96756+ rlim = rlimit(RLIMIT_DATA);
96757+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96758+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
96759+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
96760+ rlim = 4096 * PAGE_SIZE;
96761+#endif
96762+ if (check_data_rlimit(rlim, brk, mm->start_brk,
96763 mm->end_data, mm->start_data))
96764 goto out;
96765
96766@@ -978,6 +1007,12 @@ static int
96767 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96768 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96769 {
96770+
96771+#ifdef CONFIG_PAX_SEGMEXEC
96772+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96773+ return 0;
96774+#endif
96775+
96776 if (is_mergeable_vma(vma, file, vm_flags) &&
96777 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96778 if (vma->vm_pgoff == vm_pgoff)
96779@@ -997,6 +1032,12 @@ static int
96780 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96781 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96782 {
96783+
96784+#ifdef CONFIG_PAX_SEGMEXEC
96785+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96786+ return 0;
96787+#endif
96788+
96789 if (is_mergeable_vma(vma, file, vm_flags) &&
96790 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96791 pgoff_t vm_pglen;
96792@@ -1046,6 +1087,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96793 struct vm_area_struct *area, *next;
96794 int err;
96795
96796+#ifdef CONFIG_PAX_SEGMEXEC
96797+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96798+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96799+
96800+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96801+#endif
96802+
96803 /*
96804 * We later require that vma->vm_flags == vm_flags,
96805 * so this tests vma->vm_flags & VM_SPECIAL, too.
96806@@ -1061,6 +1109,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96807 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96808 next = next->vm_next;
96809
96810+#ifdef CONFIG_PAX_SEGMEXEC
96811+ if (prev)
96812+ prev_m = pax_find_mirror_vma(prev);
96813+ if (area)
96814+ area_m = pax_find_mirror_vma(area);
96815+ if (next)
96816+ next_m = pax_find_mirror_vma(next);
96817+#endif
96818+
96819 /*
96820 * Can it merge with the predecessor?
96821 */
96822@@ -1080,9 +1137,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96823 /* cases 1, 6 */
96824 err = vma_adjust(prev, prev->vm_start,
96825 next->vm_end, prev->vm_pgoff, NULL);
96826- } else /* cases 2, 5, 7 */
96827+
96828+#ifdef CONFIG_PAX_SEGMEXEC
96829+ if (!err && prev_m)
96830+ err = vma_adjust(prev_m, prev_m->vm_start,
96831+ next_m->vm_end, prev_m->vm_pgoff, NULL);
96832+#endif
96833+
96834+ } else { /* cases 2, 5, 7 */
96835 err = vma_adjust(prev, prev->vm_start,
96836 end, prev->vm_pgoff, NULL);
96837+
96838+#ifdef CONFIG_PAX_SEGMEXEC
96839+ if (!err && prev_m)
96840+ err = vma_adjust(prev_m, prev_m->vm_start,
96841+ end_m, prev_m->vm_pgoff, NULL);
96842+#endif
96843+
96844+ }
96845 if (err)
96846 return NULL;
96847 khugepaged_enter_vma_merge(prev, vm_flags);
96848@@ -1096,12 +1168,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96849 mpol_equal(policy, vma_policy(next)) &&
96850 can_vma_merge_before(next, vm_flags,
96851 anon_vma, file, pgoff+pglen)) {
96852- if (prev && addr < prev->vm_end) /* case 4 */
96853+ if (prev && addr < prev->vm_end) { /* case 4 */
96854 err = vma_adjust(prev, prev->vm_start,
96855 addr, prev->vm_pgoff, NULL);
96856- else /* cases 3, 8 */
96857+
96858+#ifdef CONFIG_PAX_SEGMEXEC
96859+ if (!err && prev_m)
96860+ err = vma_adjust(prev_m, prev_m->vm_start,
96861+ addr_m, prev_m->vm_pgoff, NULL);
96862+#endif
96863+
96864+ } else { /* cases 3, 8 */
96865 err = vma_adjust(area, addr, next->vm_end,
96866 next->vm_pgoff - pglen, NULL);
96867+
96868+#ifdef CONFIG_PAX_SEGMEXEC
96869+ if (!err && area_m)
96870+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
96871+ next_m->vm_pgoff - pglen, NULL);
96872+#endif
96873+
96874+ }
96875 if (err)
96876 return NULL;
96877 khugepaged_enter_vma_merge(area, vm_flags);
96878@@ -1210,8 +1297,10 @@ none:
96879 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96880 struct file *file, long pages)
96881 {
96882- const unsigned long stack_flags
96883- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
96884+
96885+#ifdef CONFIG_PAX_RANDMMAP
96886+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
96887+#endif
96888
96889 mm->total_vm += pages;
96890
96891@@ -1219,7 +1308,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96892 mm->shared_vm += pages;
96893 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
96894 mm->exec_vm += pages;
96895- } else if (flags & stack_flags)
96896+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
96897 mm->stack_vm += pages;
96898 }
96899 #endif /* CONFIG_PROC_FS */
96900@@ -1249,6 +1338,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
96901 locked += mm->locked_vm;
96902 lock_limit = rlimit(RLIMIT_MEMLOCK);
96903 lock_limit >>= PAGE_SHIFT;
96904+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
96905 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
96906 return -EAGAIN;
96907 }
96908@@ -1275,7 +1365,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96909 * (the exception is when the underlying filesystem is noexec
96910 * mounted, in which case we dont add PROT_EXEC.)
96911 */
96912- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
96913+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
96914 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
96915 prot |= PROT_EXEC;
96916
96917@@ -1301,7 +1391,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96918 /* Obtain the address to map to. we verify (or select) it and ensure
96919 * that it represents a valid section of the address space.
96920 */
96921- addr = get_unmapped_area(file, addr, len, pgoff, flags);
96922+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
96923 if (addr & ~PAGE_MASK)
96924 return addr;
96925
96926@@ -1312,6 +1402,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96927 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
96928 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
96929
96930+#ifdef CONFIG_PAX_MPROTECT
96931+ if (mm->pax_flags & MF_PAX_MPROTECT) {
96932+
96933+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
96934+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
96935+ mm->binfmt->handle_mmap)
96936+ mm->binfmt->handle_mmap(file);
96937+#endif
96938+
96939+#ifndef CONFIG_PAX_MPROTECT_COMPAT
96940+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
96941+ gr_log_rwxmmap(file);
96942+
96943+#ifdef CONFIG_PAX_EMUPLT
96944+ vm_flags &= ~VM_EXEC;
96945+#else
96946+ return -EPERM;
96947+#endif
96948+
96949+ }
96950+
96951+ if (!(vm_flags & VM_EXEC))
96952+ vm_flags &= ~VM_MAYEXEC;
96953+#else
96954+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
96955+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
96956+#endif
96957+ else
96958+ vm_flags &= ~VM_MAYWRITE;
96959+ }
96960+#endif
96961+
96962+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96963+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
96964+ vm_flags &= ~VM_PAGEEXEC;
96965+#endif
96966+
96967 if (flags & MAP_LOCKED)
96968 if (!can_do_mlock())
96969 return -EPERM;
96970@@ -1399,6 +1526,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96971 vm_flags |= VM_NORESERVE;
96972 }
96973
96974+ if (!gr_acl_handle_mmap(file, prot))
96975+ return -EACCES;
96976+
96977 addr = mmap_region(file, addr, len, vm_flags, pgoff);
96978 if (!IS_ERR_VALUE(addr) &&
96979 ((vm_flags & VM_LOCKED) ||
96980@@ -1492,7 +1622,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
96981 vm_flags_t vm_flags = vma->vm_flags;
96982
96983 /* If it was private or non-writable, the write bit is already clear */
96984- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
96985+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
96986 return 0;
96987
96988 /* The backer wishes to know when pages are first written to? */
96989@@ -1543,7 +1673,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
96990 struct rb_node **rb_link, *rb_parent;
96991 unsigned long charged = 0;
96992
96993+#ifdef CONFIG_PAX_SEGMEXEC
96994+ struct vm_area_struct *vma_m = NULL;
96995+#endif
96996+
96997+ /*
96998+ * mm->mmap_sem is required to protect against another thread
96999+ * changing the mappings in case we sleep.
97000+ */
97001+ verify_mm_writelocked(mm);
97002+
97003 /* Check against address space limit. */
97004+
97005+#ifdef CONFIG_PAX_RANDMMAP
97006+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97007+#endif
97008+
97009 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97010 unsigned long nr_pages;
97011
97012@@ -1562,11 +1707,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97013
97014 /* Clear old maps */
97015 error = -ENOMEM;
97016-munmap_back:
97017 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97018 if (do_munmap(mm, addr, len))
97019 return -ENOMEM;
97020- goto munmap_back;
97021+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97022 }
97023
97024 /*
97025@@ -1597,6 +1741,16 @@ munmap_back:
97026 goto unacct_error;
97027 }
97028
97029+#ifdef CONFIG_PAX_SEGMEXEC
97030+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97031+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97032+ if (!vma_m) {
97033+ error = -ENOMEM;
97034+ goto free_vma;
97035+ }
97036+ }
97037+#endif
97038+
97039 vma->vm_mm = mm;
97040 vma->vm_start = addr;
97041 vma->vm_end = addr + len;
97042@@ -1627,6 +1781,13 @@ munmap_back:
97043 if (error)
97044 goto unmap_and_free_vma;
97045
97046+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97047+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97048+ vma->vm_flags |= VM_PAGEEXEC;
97049+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97050+ }
97051+#endif
97052+
97053 /* Can addr have changed??
97054 *
97055 * Answer: Yes, several device drivers can do it in their
97056@@ -1645,6 +1806,12 @@ munmap_back:
97057 }
97058
97059 vma_link(mm, vma, prev, rb_link, rb_parent);
97060+
97061+#ifdef CONFIG_PAX_SEGMEXEC
97062+ if (vma_m)
97063+ BUG_ON(pax_mirror_vma(vma_m, vma));
97064+#endif
97065+
97066 /* Once vma denies write, undo our temporary denial count */
97067 if (file) {
97068 if (vm_flags & VM_SHARED)
97069@@ -1657,6 +1824,7 @@ out:
97070 perf_event_mmap(vma);
97071
97072 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97073+ track_exec_limit(mm, addr, addr + len, vm_flags);
97074 if (vm_flags & VM_LOCKED) {
97075 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97076 vma == get_gate_vma(current->mm)))
97077@@ -1694,6 +1862,12 @@ allow_write_and_free_vma:
97078 if (vm_flags & VM_DENYWRITE)
97079 allow_write_access(file);
97080 free_vma:
97081+
97082+#ifdef CONFIG_PAX_SEGMEXEC
97083+ if (vma_m)
97084+ kmem_cache_free(vm_area_cachep, vma_m);
97085+#endif
97086+
97087 kmem_cache_free(vm_area_cachep, vma);
97088 unacct_error:
97089 if (charged)
97090@@ -1701,7 +1875,63 @@ unacct_error:
97091 return error;
97092 }
97093
97094-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97095+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97096+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97097+{
97098+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97099+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97100+
97101+ return 0;
97102+}
97103+#endif
97104+
97105+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97106+{
97107+ if (!vma) {
97108+#ifdef CONFIG_STACK_GROWSUP
97109+ if (addr > sysctl_heap_stack_gap)
97110+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97111+ else
97112+ vma = find_vma(current->mm, 0);
97113+ if (vma && (vma->vm_flags & VM_GROWSUP))
97114+ return false;
97115+#endif
97116+ return true;
97117+ }
97118+
97119+ if (addr + len > vma->vm_start)
97120+ return false;
97121+
97122+ if (vma->vm_flags & VM_GROWSDOWN)
97123+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97124+#ifdef CONFIG_STACK_GROWSUP
97125+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97126+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97127+#endif
97128+ else if (offset)
97129+ return offset <= vma->vm_start - addr - len;
97130+
97131+ return true;
97132+}
97133+
97134+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97135+{
97136+ if (vma->vm_start < len)
97137+ return -ENOMEM;
97138+
97139+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97140+ if (offset <= vma->vm_start - len)
97141+ return vma->vm_start - len - offset;
97142+ else
97143+ return -ENOMEM;
97144+ }
97145+
97146+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97147+ return vma->vm_start - len - sysctl_heap_stack_gap;
97148+ return -ENOMEM;
97149+}
97150+
97151+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97152 {
97153 /*
97154 * We implement the search by looking for an rbtree node that
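check_heap_stack_gap() above is the core of the configurable heap/stack gap: a candidate range [addr, addr+len) is rejected if it overlaps the following vma, comes within sysctl_heap_stack_gap of a downward-growing stack, or violates the caller-supplied threadstack offset. A self-contained model of the overlap and VM_GROWSDOWN cases (toy vma struct; the 64KB default matches the sysctl initializer earlier in this diff):

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_GROWSDOWN 0x1

    struct vma { unsigned long vm_start, vm_flags; };

    static unsigned long heap_stack_gap = 64 * 1024;  /* sysctl default above */

    /* trimmed-down model of check_heap_stack_gap() for the common cases */
    static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
    {
        if (!next)
            return true;                       /* nothing above us */
        if (addr + len > next->vm_start)
            return false;                      /* plain overlap */
        if (next->vm_flags & VM_GROWSDOWN)     /* keep a gap below stacks */
            return heap_stack_gap <= next->vm_start - addr - len;
        return true;
    }

    int main(void)
    {
        struct vma stack = { .vm_start = 0xbf000000UL, .vm_flags = VM_GROWSDOWN };
        printf("flush against stack: %d\n",
               gap_ok(&stack, stack.vm_start - 4096, 4096));
        printf("64KB below stack:    %d\n",
               gap_ok(&stack, stack.vm_start - 65536 - 4096, 4096));
        return 0;
    }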
97155@@ -1749,11 +1979,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97156 }
97157 }
97158
97159- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97160+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97161 check_current:
97162 /* Check if current node has a suitable gap */
97163 if (gap_start > high_limit)
97164 return -ENOMEM;
97165+
97166+ if (gap_end - gap_start > info->threadstack_offset)
97167+ gap_start += info->threadstack_offset;
97168+ else
97169+ gap_start = gap_end;
97170+
97171+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97172+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97173+ gap_start += sysctl_heap_stack_gap;
97174+ else
97175+ gap_start = gap_end;
97176+ }
97177+ if (vma->vm_flags & VM_GROWSDOWN) {
97178+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97179+ gap_end -= sysctl_heap_stack_gap;
97180+ else
97181+ gap_end = gap_start;
97182+ }
97183 if (gap_end >= low_limit && gap_end - gap_start >= length)
97184 goto found;
97185
97186@@ -1803,7 +2051,7 @@ found:
97187 return gap_start;
97188 }
97189
97190-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97191+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97192 {
97193 struct mm_struct *mm = current->mm;
97194 struct vm_area_struct *vma;
97195@@ -1857,6 +2105,24 @@ check_current:
97196 gap_end = vma->vm_start;
97197 if (gap_end < low_limit)
97198 return -ENOMEM;
97199+
97200+ if (gap_end - gap_start > info->threadstack_offset)
97201+ gap_end -= info->threadstack_offset;
97202+ else
97203+ gap_end = gap_start;
97204+
97205+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97206+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97207+ gap_start += sysctl_heap_stack_gap;
97208+ else
97209+ gap_start = gap_end;
97210+ }
97211+ if (vma->vm_flags & VM_GROWSDOWN) {
97212+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97213+ gap_end -= sysctl_heap_stack_gap;
97214+ else
97215+ gap_end = gap_start;
97216+ }
97217 if (gap_start <= high_limit && gap_end - gap_start >= length)
97218 goto found;
97219
97220@@ -1920,6 +2186,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97221 struct mm_struct *mm = current->mm;
97222 struct vm_area_struct *vma;
97223 struct vm_unmapped_area_info info;
97224+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97225
97226 if (len > TASK_SIZE - mmap_min_addr)
97227 return -ENOMEM;
97228@@ -1927,11 +2194,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97229 if (flags & MAP_FIXED)
97230 return addr;
97231
97232+#ifdef CONFIG_PAX_RANDMMAP
97233+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97234+#endif
97235+
97236 if (addr) {
97237 addr = PAGE_ALIGN(addr);
97238 vma = find_vma(mm, addr);
97239 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97240- (!vma || addr + len <= vma->vm_start))
97241+ check_heap_stack_gap(vma, addr, len, offset))
97242 return addr;
97243 }
97244
97245@@ -1940,6 +2211,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97246 info.low_limit = mm->mmap_base;
97247 info.high_limit = TASK_SIZE;
97248 info.align_mask = 0;
97249+ info.threadstack_offset = offset;
97250 return vm_unmapped_area(&info);
97251 }
97252 #endif
97253@@ -1958,6 +2230,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97254 struct mm_struct *mm = current->mm;
97255 unsigned long addr = addr0;
97256 struct vm_unmapped_area_info info;
97257+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97258
97259 /* requested length too big for entire address space */
97260 if (len > TASK_SIZE - mmap_min_addr)
97261@@ -1966,12 +2239,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97262 if (flags & MAP_FIXED)
97263 return addr;
97264
97265+#ifdef CONFIG_PAX_RANDMMAP
97266+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97267+#endif
97268+
97269 /* requesting a specific address */
97270 if (addr) {
97271 addr = PAGE_ALIGN(addr);
97272 vma = find_vma(mm, addr);
97273 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97274- (!vma || addr + len <= vma->vm_start))
97275+ check_heap_stack_gap(vma, addr, len, offset))
97276 return addr;
97277 }
97278
97279@@ -1980,6 +2257,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97280 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97281 info.high_limit = mm->mmap_base;
97282 info.align_mask = 0;
97283+ info.threadstack_offset = offset;
97284 addr = vm_unmapped_area(&info);
97285
97286 /*
97287@@ -1992,6 +2270,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97288 VM_BUG_ON(addr != -ENOMEM);
97289 info.flags = 0;
97290 info.low_limit = TASK_UNMAPPED_BASE;
97291+
97292+#ifdef CONFIG_PAX_RANDMMAP
97293+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97294+ info.low_limit += mm->delta_mmap;
97295+#endif
97296+
97297 info.high_limit = TASK_SIZE;
97298 addr = vm_unmapped_area(&info);
97299 }
97300@@ -2092,6 +2376,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97301 return vma;
97302 }
97303
97304+#ifdef CONFIG_PAX_SEGMEXEC
97305+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97306+{
97307+ struct vm_area_struct *vma_m;
97308+
97309+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97310+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97311+ BUG_ON(vma->vm_mirror);
97312+ return NULL;
97313+ }
97314+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97315+ vma_m = vma->vm_mirror;
97316+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97317+ BUG_ON(vma->vm_file != vma_m->vm_file);
97318+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97319+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97320+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97321+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97322+ return vma_m;
97323+}
97324+#endif
97325+
97326 /*
97327 * Verify that the stack growth is acceptable and
97328 * update accounting. This is shared with both the
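pax_find_mirror_vma() above encodes the mirror invariants as BUG_ON()s: only executable vmas of SEGMEXEC tasks may have a mirror, the two vmas must reference each other, and they must agree on file, size and pgoff, differing at most in the write/account/locked flags. A toy assertion of the mutual back-reference check:

    #include <assert.h>
    #include <stdio.h>

    struct vma { struct vma *vm_mirror; };

    /* model of the cross-check done by pax_find_mirror_vma() */
    static struct vma *find_mirror(struct vma *vma)
    {
        struct vma *m = vma->vm_mirror;
        if (!m)
            return NULL;
        assert(m->vm_mirror == vma);  /* mirrors must reference each other */
        return m;
    }

    int main(void)
    {
        struct vma a = {0}, b = {0};
        a.vm_mirror = &b;
        b.vm_mirror = &a;
        printf("mirror found: %d\n", find_mirror(&a) == &b);
        return 0;
    }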
97329@@ -2109,8 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97330
97331 /* Stack limit test */
97332 actual_size = size;
97333- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
97334- actual_size -= PAGE_SIZE;
97335+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
97336 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97337 return -ENOMEM;
97338
97339@@ -2121,6 +2426,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97340 locked = mm->locked_vm + grow;
97341 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97342 limit >>= PAGE_SHIFT;
97343+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97344 if (locked > limit && !capable(CAP_IPC_LOCK))
97345 return -ENOMEM;
97346 }
97347@@ -2150,37 +2456,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97348 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97349 * vma is the last one with address > vma->vm_end. Have to extend vma.
97350 */
97351+#ifndef CONFIG_IA64
97352+static
97353+#endif
97354 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97355 {
97356 int error;
97357+ bool locknext;
97358
97359 if (!(vma->vm_flags & VM_GROWSUP))
97360 return -EFAULT;
97361
97362+ /* Also guard against wrapping around to address 0. */
97363+ if (address < PAGE_ALIGN(address+1))
97364+ address = PAGE_ALIGN(address+1);
97365+ else
97366+ return -ENOMEM;
97367+
97368 /*
97369 * We must make sure the anon_vma is allocated
97370 * so that the anon_vma locking is not a noop.
97371 */
97372 if (unlikely(anon_vma_prepare(vma)))
97373 return -ENOMEM;
97374+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97375+ if (locknext && anon_vma_prepare(vma->vm_next))
97376+ return -ENOMEM;
97377 vma_lock_anon_vma(vma);
97378+ if (locknext)
97379+ vma_lock_anon_vma(vma->vm_next);
97380
97381 /*
97382 * vma->vm_start/vm_end cannot change under us because the caller
97383 * is required to hold the mmap_sem in read mode. We need the
97384- * anon_vma lock to serialize against concurrent expand_stacks.
97385- * Also guard against wrapping around to address 0.
97386+ * anon_vma locks to serialize against concurrent expand_stacks
97387+ * and expand_upwards.
97388 */
97389- if (address < PAGE_ALIGN(address+4))
97390- address = PAGE_ALIGN(address+4);
97391- else {
97392- vma_unlock_anon_vma(vma);
97393- return -ENOMEM;
97394- }
97395 error = 0;
97396
97397 /* Somebody else might have raced and expanded it already */
97398- if (address > vma->vm_end) {
97399+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97400+ error = -ENOMEM;
97401+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97402 unsigned long size, grow;
97403
97404 size = address - vma->vm_start;
97405@@ -2215,6 +2532,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97406 }
97407 }
97408 }
97409+ if (locknext)
97410+ vma_unlock_anon_vma(vma->vm_next);
97411 vma_unlock_anon_vma(vma);
97412 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97413 validate_mm(vma->vm_mm);
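The reworked expand_upwards() above also swaps the old address+4 wrap test for PAGE_ALIGN(address+1): if rounding address+1 up to a page boundary wraps past the top of the address space, the aligned result compares smaller than the input and the growth is refused with -ENOMEM. A small demo of the overflow idiom:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long near_top = ~0UL - 100;  /* within a page of wrapping */
        unsigned long aligned = PAGE_ALIGN(near_top + 1);

        /* on wrap-around the aligned value is smaller than the input */
        printf("aligned %#lx from %#lx -> %s\n", aligned, near_top,
               near_top < aligned ? "grow" : "refuse (-ENOMEM)");
        return 0;
    }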
97414@@ -2229,6 +2548,8 @@ int expand_downwards(struct vm_area_struct *vma,
97415 unsigned long address)
97416 {
97417 int error;
97418+ bool lockprev = false;
97419+ struct vm_area_struct *prev;
97420
97421 /*
97422 * We must make sure the anon_vma is allocated
97423@@ -2242,6 +2563,15 @@ int expand_downwards(struct vm_area_struct *vma,
97424 if (error)
97425 return error;
97426
97427+ prev = vma->vm_prev;
97428+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97429+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97430+#endif
97431+ if (lockprev && anon_vma_prepare(prev))
97432+ return -ENOMEM;
97433+ if (lockprev)
97434+ vma_lock_anon_vma(prev);
97435+
97436 vma_lock_anon_vma(vma);
97437
97438 /*
97439@@ -2251,9 +2581,17 @@ int expand_downwards(struct vm_area_struct *vma,
97440 */
97441
97442 /* Somebody else might have raced and expanded it already */
97443- if (address < vma->vm_start) {
97444+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97445+ error = -ENOMEM;
97446+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97447 unsigned long size, grow;
97448
97449+#ifdef CONFIG_PAX_SEGMEXEC
97450+ struct vm_area_struct *vma_m;
97451+
97452+ vma_m = pax_find_mirror_vma(vma);
97453+#endif
97454+
97455 size = vma->vm_end - address;
97456 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97457
97458@@ -2278,13 +2616,27 @@ int expand_downwards(struct vm_area_struct *vma,
97459 vma->vm_pgoff -= grow;
97460 anon_vma_interval_tree_post_update_vma(vma);
97461 vma_gap_update(vma);
97462+
97463+#ifdef CONFIG_PAX_SEGMEXEC
97464+ if (vma_m) {
97465+ anon_vma_interval_tree_pre_update_vma(vma_m);
97466+ vma_m->vm_start -= grow << PAGE_SHIFT;
97467+ vma_m->vm_pgoff -= grow;
97468+ anon_vma_interval_tree_post_update_vma(vma_m);
97469+ vma_gap_update(vma_m);
97470+ }
97471+#endif
97472+
97473 spin_unlock(&vma->vm_mm->page_table_lock);
97474
97475+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97476 perf_event_mmap(vma);
97477 }
97478 }
97479 }
97480 vma_unlock_anon_vma(vma);
97481+ if (lockprev)
97482+ vma_unlock_anon_vma(prev);
97483 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97484 validate_mm(vma->vm_mm);
97485 return error;
97486@@ -2384,6 +2736,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97487 do {
97488 long nrpages = vma_pages(vma);
97489
97490+#ifdef CONFIG_PAX_SEGMEXEC
97491+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97492+ vma = remove_vma(vma);
97493+ continue;
97494+ }
97495+#endif
97496+
97497 if (vma->vm_flags & VM_ACCOUNT)
97498 nr_accounted += nrpages;
97499 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97500@@ -2428,6 +2787,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97501 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97502 vma->vm_prev = NULL;
97503 do {
97504+
97505+#ifdef CONFIG_PAX_SEGMEXEC
97506+ if (vma->vm_mirror) {
97507+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97508+ vma->vm_mirror->vm_mirror = NULL;
97509+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97510+ vma->vm_mirror = NULL;
97511+ }
97512+#endif
97513+
97514 vma_rb_erase(vma, &mm->mm_rb);
97515 mm->map_count--;
97516 tail_vma = vma;
97517@@ -2455,14 +2824,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97518 struct vm_area_struct *new;
97519 int err = -ENOMEM;
97520
97521+#ifdef CONFIG_PAX_SEGMEXEC
97522+ struct vm_area_struct *vma_m, *new_m = NULL;
97523+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97524+#endif
97525+
97526 if (is_vm_hugetlb_page(vma) && (addr &
97527 ~(huge_page_mask(hstate_vma(vma)))))
97528 return -EINVAL;
97529
97530+#ifdef CONFIG_PAX_SEGMEXEC
97531+ vma_m = pax_find_mirror_vma(vma);
97532+#endif
97533+
97534 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97535 if (!new)
97536 goto out_err;
97537
97538+#ifdef CONFIG_PAX_SEGMEXEC
97539+ if (vma_m) {
97540+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97541+ if (!new_m) {
97542+ kmem_cache_free(vm_area_cachep, new);
97543+ goto out_err;
97544+ }
97545+ }
97546+#endif
97547+
97548 /* most fields are the same, copy all, and then fixup */
97549 *new = *vma;
97550
97551@@ -2475,6 +2863,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97552 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97553 }
97554
97555+#ifdef CONFIG_PAX_SEGMEXEC
97556+ if (vma_m) {
97557+ *new_m = *vma_m;
97558+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97559+ new_m->vm_mirror = new;
97560+ new->vm_mirror = new_m;
97561+
97562+ if (new_below)
97563+ new_m->vm_end = addr_m;
97564+ else {
97565+ new_m->vm_start = addr_m;
97566+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97567+ }
97568+ }
97569+#endif
97570+
97571 err = vma_dup_policy(vma, new);
97572 if (err)
97573 goto out_free_vma;
97574@@ -2495,6 +2899,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97575 else
97576 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97577
97578+#ifdef CONFIG_PAX_SEGMEXEC
97579+ if (!err && vma_m) {
97580+ struct mempolicy *pol = vma_policy(new);
97581+
97582+ if (anon_vma_clone(new_m, vma_m))
97583+ goto out_free_mpol;
97584+
97585+ mpol_get(pol);
97586+ set_vma_policy(new_m, pol);
97587+
97588+ if (new_m->vm_file)
97589+ get_file(new_m->vm_file);
97590+
97591+ if (new_m->vm_ops && new_m->vm_ops->open)
97592+ new_m->vm_ops->open(new_m);
97593+
97594+ if (new_below)
97595+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97596+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97597+ else
97598+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97599+
97600+ if (err) {
97601+ if (new_m->vm_ops && new_m->vm_ops->close)
97602+ new_m->vm_ops->close(new_m);
97603+ if (new_m->vm_file)
97604+ fput(new_m->vm_file);
97605+ mpol_put(pol);
97606+ }
97607+ }
97608+#endif
97609+
97610 /* Success. */
97611 if (!err)
97612 return 0;
97613@@ -2504,10 +2940,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97614 new->vm_ops->close(new);
97615 if (new->vm_file)
97616 fput(new->vm_file);
97617- unlink_anon_vmas(new);
97618 out_free_mpol:
97619 mpol_put(vma_policy(new));
97620 out_free_vma:
97621+
97622+#ifdef CONFIG_PAX_SEGMEXEC
97623+ if (new_m) {
97624+ unlink_anon_vmas(new_m);
97625+ kmem_cache_free(vm_area_cachep, new_m);
97626+ }
97627+#endif
97628+
97629+ unlink_anon_vmas(new);
97630 kmem_cache_free(vm_area_cachep, new);
97631 out_err:
97632 return err;
97633@@ -2520,6 +2964,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97634 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97635 unsigned long addr, int new_below)
97636 {
97637+
97638+#ifdef CONFIG_PAX_SEGMEXEC
97639+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97640+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97641+ if (mm->map_count >= sysctl_max_map_count-1)
97642+ return -ENOMEM;
97643+ } else
97644+#endif
97645+
97646 if (mm->map_count >= sysctl_max_map_count)
97647 return -ENOMEM;
97648
97649@@ -2531,11 +2984,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97650 * work. This now handles partial unmappings.
97651 * Jeremy Fitzhardinge <jeremy@goop.org>
97652 */
97653+#ifdef CONFIG_PAX_SEGMEXEC
97654 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97655 {
97656+ int ret = __do_munmap(mm, start, len);
97657+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97658+ return ret;
97659+
97660+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97661+}
97662+
97663+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97664+#else
97665+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97666+#endif
97667+{
97668 unsigned long end;
97669 struct vm_area_struct *vma, *prev, *last;
97670
97671+ /*
97672+ * mm->mmap_sem is required to protect against another thread
97673+ * changing the mappings in case we sleep.
97674+ */
97675+ verify_mm_writelocked(mm);
97676+
97677 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97678 return -EINVAL;
97679
97680@@ -2613,6 +3085,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97681 /* Fix up all other VM information */
97682 remove_vma_list(mm, vma);
97683
97684+ track_exec_limit(mm, start, end, 0UL);
97685+
97686 return 0;
97687 }
97688
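
Note: with SEGMEXEC active, do_munmap above becomes a wrapper that runs __do_munmap twice, once for the requested range and once for its mirror shifted up by SEGMEXEC_TASK_SIZE; the second pass is skipped if the first fails or the task is not SEGMEXEC-marked. A worked example with an assumed i386 split of 0x60000000 (1.5 GiB):

	/* munmap(0x08048000, 0x1000) under SEGMEXEC also tears down the
	 * mirror page at 0x08048000 + 0x60000000 = 0x68048000. */
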
97689@@ -2621,6 +3095,13 @@ int vm_munmap(unsigned long start, size_t len)
97690 int ret;
97691 struct mm_struct *mm = current->mm;
97692
97693+
97694+#ifdef CONFIG_PAX_SEGMEXEC
97695+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97696+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97697+ return -EINVAL;
97698+#endif
97699+
97700 down_write(&mm->mmap_sem);
97701 ret = do_munmap(mm, start, len);
97702 up_write(&mm->mmap_sem);
97703@@ -2634,16 +3115,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97704 return vm_munmap(addr, len);
97705 }
97706
97707-static inline void verify_mm_writelocked(struct mm_struct *mm)
97708-{
97709-#ifdef CONFIG_DEBUG_VM
97710- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97711- WARN_ON(1);
97712- up_read(&mm->mmap_sem);
97713- }
97714-#endif
97715-}
97716-
97717 /*
97718 * this is really a simplified "do_mmap". it only handles
97719 * anonymous maps. eventually we may be able to do some
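
Note: the hunk above does not drop verify_mm_writelocked, it relocates it; __do_munmap now calls the helper (see the added comment in the do_munmap hunk), so the definition presumably moves earlier in the file elsewhere in this patch. The removed body, annotated (same code, comments added for clarity):

	static inline void verify_mm_writelocked(struct mm_struct *mm)
	{
	#ifdef CONFIG_DEBUG_VM
		/* If a read-trylock succeeds, nobody holds mmap_sem for
		 * writing - i.e. the caller broke the locking contract. */
		if (unlikely(down_read_trylock(&mm->mmap_sem))) {
			WARN_ON(1);
			up_read(&mm->mmap_sem);	/* undo the probe */
		}
	#endif
	}
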
97720@@ -2657,6 +3128,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97721 struct rb_node **rb_link, *rb_parent;
97722 pgoff_t pgoff = addr >> PAGE_SHIFT;
97723 int error;
97724+ unsigned long charged;
97725
97726 len = PAGE_ALIGN(len);
97727 if (!len)
97728@@ -2664,10 +3136,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97729
97730 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97731
97732+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97733+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97734+ flags &= ~VM_EXEC;
97735+
97736+#ifdef CONFIG_PAX_MPROTECT
97737+ if (mm->pax_flags & MF_PAX_MPROTECT)
97738+ flags &= ~VM_MAYEXEC;
97739+#endif
97740+
97741+ }
97742+#endif
97743+
97744 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97745 if (error & ~PAGE_MASK)
97746 return error;
97747
97748+ charged = len >> PAGE_SHIFT;
97749+
97750 error = mlock_future_check(mm, mm->def_flags, len);
97751 if (error)
97752 return error;
97753@@ -2681,21 +3167,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97754 /*
97755 * Clear old maps. this also does some error checking for us
97756 */
97757- munmap_back:
97758 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97759 if (do_munmap(mm, addr, len))
97760 return -ENOMEM;
97761- goto munmap_back;
97762+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97763 }
97764
97765 /* Check against address space limits *after* clearing old maps... */
97766- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97767+ if (!may_expand_vm(mm, charged))
97768 return -ENOMEM;
97769
97770 if (mm->map_count > sysctl_max_map_count)
97771 return -ENOMEM;
97772
97773- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
97774+ if (security_vm_enough_memory_mm(mm, charged))
97775 return -ENOMEM;
97776
97777 /* Can we just expand an old private anonymous mapping? */
97778@@ -2709,7 +3194,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97779 */
97780 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97781 if (!vma) {
97782- vm_unacct_memory(len >> PAGE_SHIFT);
97783+ vm_unacct_memory(charged);
97784 return -ENOMEM;
97785 }
97786
97787@@ -2723,10 +3208,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97788 vma_link(mm, vma, prev, rb_link, rb_parent);
97789 out:
97790 perf_event_mmap(vma);
97791- mm->total_vm += len >> PAGE_SHIFT;
97792+ mm->total_vm += charged;
97793 if (flags & VM_LOCKED)
97794- mm->locked_vm += (len >> PAGE_SHIFT);
97795+ mm->locked_vm += charged;
97796 vma->vm_flags |= VM_SOFTDIRTY;
97797+ track_exec_limit(mm, addr, addr + len, flags);
97798 return addr;
97799 }
97800
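
Note: the do_brk changes above force the heap non-executable whenever PAGEEXEC or SEGMEXEC is active, and under MPROTECT also drop VM_MAYEXEC so the region can never be flipped back to executable later. The transform, isolated as a pure function (sketch; the names are the ones this patch uses):

	static unsigned long pax_brk_flags(unsigned long flags,
					   unsigned long pax_flags)
	{
		if (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
			flags &= ~VM_EXEC;		/* heap is data, not code */
			if (pax_flags & MF_PAX_MPROTECT)
				flags &= ~VM_MAYEXEC;	/* and may never become code */
		}
		return flags;
	}
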
97801@@ -2788,6 +3274,7 @@ void exit_mmap(struct mm_struct *mm)
97802 while (vma) {
97803 if (vma->vm_flags & VM_ACCOUNT)
97804 nr_accounted += vma_pages(vma);
97805+ vma->vm_mirror = NULL;
97806 vma = remove_vma(vma);
97807 }
97808 vm_unacct_memory(nr_accounted);
97809@@ -2805,6 +3292,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97810 struct vm_area_struct *prev;
97811 struct rb_node **rb_link, *rb_parent;
97812
97813+#ifdef CONFIG_PAX_SEGMEXEC
97814+ struct vm_area_struct *vma_m = NULL;
97815+#endif
97816+
97817+ if (security_mmap_addr(vma->vm_start))
97818+ return -EPERM;
97819+
97820 /*
97821 * The vm_pgoff of a purely anonymous vma should be irrelevant
97822 * until its first write fault, when page's anon_vma and index
97823@@ -2828,7 +3322,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97824 security_vm_enough_memory_mm(mm, vma_pages(vma)))
97825 return -ENOMEM;
97826
97827+#ifdef CONFIG_PAX_SEGMEXEC
97828+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
97829+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97830+ if (!vma_m)
97831+ return -ENOMEM;
97832+ }
97833+#endif
97834+
97835 vma_link(mm, vma, prev, rb_link, rb_parent);
97836+
97837+#ifdef CONFIG_PAX_SEGMEXEC
97838+ if (vma_m)
97839+ BUG_ON(pax_mirror_vma(vma_m, vma));
97840+#endif
97841+
97842 return 0;
97843 }
97844
97845@@ -2847,6 +3355,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97846 struct rb_node **rb_link, *rb_parent;
97847 bool faulted_in_anon_vma = true;
97848
97849+ BUG_ON(vma->vm_mirror);
97850+
97851 /*
97852 * If anonymous vma has not yet been faulted, update new pgoff
97853 * to match new location, to increase its chance of merging.
97854@@ -2911,6 +3421,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97855 return NULL;
97856 }
97857
97858+#ifdef CONFIG_PAX_SEGMEXEC
97859+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
97860+{
97861+ struct vm_area_struct *prev_m;
97862+ struct rb_node **rb_link_m, *rb_parent_m;
97863+ struct mempolicy *pol_m;
97864+
97865+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
97866+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
97867+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
97868+ *vma_m = *vma;
97869+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
97870+ if (anon_vma_clone(vma_m, vma))
97871+ return -ENOMEM;
97872+ pol_m = vma_policy(vma_m);
97873+ mpol_get(pol_m);
97874+ set_vma_policy(vma_m, pol_m);
97875+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
97876+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
97877+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
97878+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
97879+ if (vma_m->vm_file)
97880+ get_file(vma_m->vm_file);
97881+ if (vma_m->vm_ops && vma_m->vm_ops->open)
97882+ vma_m->vm_ops->open(vma_m);
97883+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
97884+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
97885+ vma_m->vm_mirror = vma;
97886+ vma->vm_mirror = vma_m;
97887+ return 0;
97888+}
97889+#endif
97890+
97891 /*
97892 * Return true if the calling process may expand its vm space by the passed
97893 * number of pages
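
Note: pax_mirror_vma above is the constructor for a SEGMEXEC pair. It clones an executable VMA into the upper half of the split address space, read/execute only (VM_WRITE/VM_MAYWRITE cleared), sharing the file reference, mempolicy and anon_vma chains, then cross-links the two via vm_mirror. Conceptually, instruction fetches go through a code segment whose base maps the upper half, so only mirrored ranges are fetchable; a sketch with an assumed i386 split:

	#define SEGMEXEC_TASK_SIZE_DEMO 0x60000000UL	/* assumed value */

	/* Where an instruction fetch at va really lands: only VMAs
	 * mirrored at va + SEGMEXEC_TASK_SIZE can execute. */
	static unsigned long exec_view(unsigned long va)
	{
		return va + SEGMEXEC_TASK_SIZE_DEMO;
	}
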
97894@@ -2922,6 +3465,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
97895
97896 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
97897
97898+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
97899 if (cur + npages > lim)
97900 return 0;
97901 return 1;
97902@@ -3004,6 +3548,22 @@ static struct vm_area_struct *__install_special_mapping(
97903 vma->vm_start = addr;
97904 vma->vm_end = addr + len;
97905
97906+#ifdef CONFIG_PAX_MPROTECT
97907+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97908+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97909+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
97910+ return ERR_PTR(-EPERM);
97911+ if (!(vm_flags & VM_EXEC))
97912+ vm_flags &= ~VM_MAYEXEC;
97913+#else
97914+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97915+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97916+#endif
97917+ else
97918+ vm_flags &= ~VM_MAYWRITE;
97919+ }
97920+#endif
97921+
97922 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
97923 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97924
97925diff --git a/mm/mprotect.c b/mm/mprotect.c
97926index ace9345..63320dc 100644
97927--- a/mm/mprotect.c
97928+++ b/mm/mprotect.c
97929@@ -24,10 +24,18 @@
97930 #include <linux/migrate.h>
97931 #include <linux/perf_event.h>
97932 #include <linux/ksm.h>
97933+#include <linux/sched/sysctl.h>
97934+
97935+#ifdef CONFIG_PAX_MPROTECT
97936+#include <linux/elf.h>
97937+#include <linux/binfmts.h>
97938+#endif
97939+
97940 #include <asm/uaccess.h>
97941 #include <asm/pgtable.h>
97942 #include <asm/cacheflush.h>
97943 #include <asm/tlbflush.h>
97944+#include <asm/mmu_context.h>
97945
97946 /*
97947 * For a prot_numa update we only hold mmap_sem for read so there is a
97948@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
97949 return pages;
97950 }
97951
97952+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
97953+/* called while holding the mmap semaphore for writing except stack expansion */
97954+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
97955+{
97956+ unsigned long oldlimit, newlimit = 0UL;
97957+
97958+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
97959+ return;
97960+
97961+ spin_lock(&mm->page_table_lock);
97962+ oldlimit = mm->context.user_cs_limit;
97963+ if ((prot & VM_EXEC) && oldlimit < end)
97964+ /* USER_CS limit moved up */
97965+ newlimit = end;
97966+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
97967+ /* USER_CS limit moved down */
97968+ newlimit = start;
97969+
97970+ if (newlimit) {
97971+ mm->context.user_cs_limit = newlimit;
97972+
97973+#ifdef CONFIG_SMP
97974+ wmb();
97975+ cpus_clear(mm->context.cpu_user_cs_mask);
97976+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
97977+#endif
97978+
97979+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
97980+ }
97981+ spin_unlock(&mm->page_table_lock);
97982+ if (newlimit == end) {
97983+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
97984+
97985+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
97986+ if (is_vm_hugetlb_page(vma))
97987+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
97988+ else
97989+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
97990+ }
97991+}
97992+#endif
97993+
97994 int
97995 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
97996 unsigned long start, unsigned long end, unsigned long newflags)
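
Note: track_exec_limit above is the heart of the segment-based NX emulation. On CPUs without hardware NX (_PAGE_NX unsupported), the user code segment limit is kept just above the highest executable mapping; when the limit grows, the tail loop re-applies per-page protections over the newly covered span, since those pages are now inside the executable segment window. The update rule in isolation (sketch; prot carries the VM_* flags of the changed [start, end) range):

	/* Returns the new user_cs_limit, or 0 if it is unchanged. */
	static unsigned long new_cs_limit(unsigned long oldlimit,
					  unsigned long start,
					  unsigned long end,
					  unsigned long prot)
	{
		if ((prot & VM_EXEC) && oldlimit < end)
			return end;	/* exec range above the limit: grow */
		if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
			return start;	/* range holding the limit lost exec: shrink */
		return 0;
	}
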
97997@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
97998 int error;
97999 int dirty_accountable = 0;
98000
98001+#ifdef CONFIG_PAX_SEGMEXEC
98002+ struct vm_area_struct *vma_m = NULL;
98003+ unsigned long start_m, end_m;
98004+
98005+ start_m = start + SEGMEXEC_TASK_SIZE;
98006+ end_m = end + SEGMEXEC_TASK_SIZE;
98007+#endif
98008+
98009 if (newflags == oldflags) {
98010 *pprev = vma;
98011 return 0;
98012 }
98013
98014+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98015+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98016+
98017+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98018+ return -ENOMEM;
98019+
98020+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98021+ return -ENOMEM;
98022+ }
98023+
98024 /*
98025 * If we make a private mapping writable we increase our commit;
98026 * but (without finer accounting) cannot reduce our commit if we
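
Note: the added gap check rejects an mprotect that would make pages accessible within sysctl_heap_stack_gap bytes of an adjacent growable stack. Worked example (addresses assumed for illustration): with a 64 KiB (0x10000) gap setting, enabling access on a range ending at 0xbf000000 while a VM_GROWSDOWN stack starts at 0xbf008000 fails with -ENOMEM, because the remaining 0x8000 bytes (32 KiB) fall short of the configured gap.
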
98027@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98028 }
98029 }
98030
98031+#ifdef CONFIG_PAX_SEGMEXEC
98032+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98033+ if (start != vma->vm_start) {
98034+ error = split_vma(mm, vma, start, 1);
98035+ if (error)
98036+ goto fail;
98037+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98038+ *pprev = (*pprev)->vm_next;
98039+ }
98040+
98041+ if (end != vma->vm_end) {
98042+ error = split_vma(mm, vma, end, 0);
98043+ if (error)
98044+ goto fail;
98045+ }
98046+
98047+ if (pax_find_mirror_vma(vma)) {
98048+ error = __do_munmap(mm, start_m, end_m - start_m);
98049+ if (error)
98050+ goto fail;
98051+ } else {
98052+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98053+ if (!vma_m) {
98054+ error = -ENOMEM;
98055+ goto fail;
98056+ }
98057+ vma->vm_flags = newflags;
98058+ error = pax_mirror_vma(vma_m, vma);
98059+ if (error) {
98060+ vma->vm_flags = oldflags;
98061+ goto fail;
98062+ }
98063+ }
98064+ }
98065+#endif
98066+
98067 /*
98068 * First try to merge with previous and/or next vma.
98069 */
98070@@ -314,7 +418,19 @@ success:
98071 * vm_flags and vm_page_prot are protected by the mmap_sem
98072 * held in write mode.
98073 */
98074+
98075+#ifdef CONFIG_PAX_SEGMEXEC
98076+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98077+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98078+#endif
98079+
98080 vma->vm_flags = newflags;
98081+
98082+#ifdef CONFIG_PAX_MPROTECT
98083+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98084+ mm->binfmt->handle_mprotect(vma, newflags);
98085+#endif
98086+
98087 dirty_accountable = vma_wants_writenotify(vma);
98088 vma_set_page_prot(vma);
98089
98090@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98091 end = start + len;
98092 if (end <= start)
98093 return -ENOMEM;
98094+
98095+#ifdef CONFIG_PAX_SEGMEXEC
98096+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98097+ if (end > SEGMEXEC_TASK_SIZE)
98098+ return -EINVAL;
98099+ } else
98100+#endif
98101+
98102+ if (end > TASK_SIZE)
98103+ return -EINVAL;
98104+
98105 if (!arch_validate_prot(prot))
98106 return -EINVAL;
98107
98108@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98109 /*
98110 * Does the application expect PROT_READ to imply PROT_EXEC:
98111 */
98112- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98113+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98114 prot |= PROT_EXEC;
98115
98116 vm_flags = calc_vm_prot_bits(prot);
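
Note: upstream only lets PROT_READ imply PROT_EXEC for READ_IMPLIES_EXEC personalities; the change above extends the implication to PROT_WRITE as well, presumably because on x86 a writable mapping is readable in practice, so a legacy binary requesting PROT_WRITE alone expects the same executable behaviour. Example: under such a personality, mprotect(addr, len, PROT_WRITE) now requests PROT_WRITE|PROT_EXEC, which the MPROTECT checks further down can then catch and log as an RWX attempt.
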
98117@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98118 if (start > vma->vm_start)
98119 prev = vma;
98120
98121+#ifdef CONFIG_PAX_MPROTECT
98122+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98123+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98124+#endif
98125+
98126 for (nstart = start ; ; ) {
98127 unsigned long newflags;
98128
98129@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98130
98131 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98132 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98133+ if (prot & (PROT_WRITE | PROT_EXEC))
98134+ gr_log_rwxmprotect(vma);
98135+
98136+ error = -EACCES;
98137+ goto out;
98138+ }
98139+
98140+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98141 error = -EACCES;
98142 goto out;
98143 }
98144@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98145 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98146 if (error)
98147 goto out;
98148+
98149+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98150+
98151 nstart = tmp;
98152
98153 if (nstart < prev->vm_end)
98154diff --git a/mm/mremap.c b/mm/mremap.c
98155index 17fa018..6f7892b 100644
98156--- a/mm/mremap.c
98157+++ b/mm/mremap.c
98158@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98159 continue;
98160 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98161 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98162+
98163+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98164+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98165+ pte = pte_exprotect(pte);
98166+#endif
98167+
98168 pte = move_soft_dirty_pte(pte);
98169 set_pte_at(mm, new_addr, new_pte, pte);
98170 }
98171@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98172 if (is_vm_hugetlb_page(vma))
98173 goto Einval;
98174
98175+#ifdef CONFIG_PAX_SEGMEXEC
98176+ if (pax_find_mirror_vma(vma))
98177+ goto Einval;
98178+#endif
98179+
98180 /* We can't remap across vm area boundaries */
98181 if (old_len > vma->vm_end - addr)
98182 goto Efault;
98183@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98184 unsigned long ret = -EINVAL;
98185 unsigned long charged = 0;
98186 unsigned long map_flags;
98187+ unsigned long pax_task_size = TASK_SIZE;
98188
98189 if (new_addr & ~PAGE_MASK)
98190 goto out;
98191
98192- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98193+#ifdef CONFIG_PAX_SEGMEXEC
98194+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98195+ pax_task_size = SEGMEXEC_TASK_SIZE;
98196+#endif
98197+
98198+ pax_task_size -= PAGE_SIZE;
98199+
98200+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98201 goto out;
98202
98203 /* Check if the location we're moving into overlaps the
98204 * old location at all, and fail if it does.
98205 */
98206- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98207- goto out;
98208-
98209- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98210+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98211 goto out;
98212
98213 ret = do_munmap(mm, new_addr, new_len);
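
Note: the rewritten overlap test above is the canonical half-open interval intersection, [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect exactly when each range starts before the other ends, which is what the two original one-sided tests expressed in two branches. As a standalone predicate (sketch):

	/* [a0,a1) and [b0,b1) overlap iff a0 < b1 && b0 < a1, e.g.
	 * (0x1000,0x3000) vs (0x2000,0x4000) -> 1 (overlap)
	 * (0x1000,0x2000) vs (0x2000,0x3000) -> 0 (merely adjacent) */
	static int ranges_overlap(unsigned long a0, unsigned long a1,
				  unsigned long b0, unsigned long b1)
	{
		return a0 < b1 && b0 < a1;
	}
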
98214@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98215 unsigned long ret = -EINVAL;
98216 unsigned long charged = 0;
98217 bool locked = false;
98218+ unsigned long pax_task_size = TASK_SIZE;
98219
98220 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98221 return ret;
98222@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98223 if (!new_len)
98224 return ret;
98225
98226+#ifdef CONFIG_PAX_SEGMEXEC
98227+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98228+ pax_task_size = SEGMEXEC_TASK_SIZE;
98229+#endif
98230+
98231+ pax_task_size -= PAGE_SIZE;
98232+
98233+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98234+ old_len > pax_task_size || addr > pax_task_size-old_len)
98235+ return ret;
98236+
98237 down_write(&current->mm->mmap_sem);
98238
98239 if (flags & MREMAP_FIXED) {
98240@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98241 new_addr = addr;
98242 }
98243 ret = addr;
98244+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98245 goto out;
98246 }
98247 }
98248@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98249 goto out;
98250 }
98251
98252+ map_flags = vma->vm_flags;
98253 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98254+ if (!(ret & ~PAGE_MASK)) {
98255+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98256+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98257+ }
98258 }
98259 out:
98260 if (ret & ~PAGE_MASK)
98261diff --git a/mm/nommu.c b/mm/nommu.c
98262index ae5baae..cbb2ed5 100644
98263--- a/mm/nommu.c
98264+++ b/mm/nommu.c
98265@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98266 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98267 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98268 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98269-int heap_stack_gap = 0;
98270
98271 atomic_long_t mmap_pages_allocated;
98272
98273@@ -858,15 +857,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98274 EXPORT_SYMBOL(find_vma);
98275
98276 /*
98277- * find a VMA
98278- * - we don't extend stack VMAs under NOMMU conditions
98279- */
98280-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98281-{
98282- return find_vma(mm, addr);
98283-}
98284-
98285-/*
98286 * expand a stack to a given address
98287 * - not supported under NOMMU conditions
98288 */
98289@@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98290
98291 /* most fields are the same, copy all, and then fixup */
98292 *new = *vma;
98293+ INIT_LIST_HEAD(&new->anon_vma_chain);
98294 *region = *vma->vm_region;
98295 new->vm_region = region;
98296
98297@@ -1990,8 +1981,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98298 }
98299 EXPORT_SYMBOL(generic_file_remap_pages);
98300
98301-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98302- unsigned long addr, void *buf, int len, int write)
98303+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98304+ unsigned long addr, void *buf, size_t len, int write)
98305 {
98306 struct vm_area_struct *vma;
98307
98308@@ -2032,8 +2023,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98309 *
98310 * The caller must hold a reference on @mm.
98311 */
98312-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98313- void *buf, int len, int write)
98314+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98315+ void *buf, size_t len, int write)
98316 {
98317 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98318 }
98319@@ -2042,7 +2033,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98320 * Access another process' address space.
98321 * - source/target buffer must be kernel space
98322 */
98323-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98324+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98325 {
98326 struct mm_struct *mm;
98327
98328diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98329index 6f43352..e44bf41 100644
98330--- a/mm/page-writeback.c
98331+++ b/mm/page-writeback.c
98332@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98333 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98334 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98335 */
98336-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98337+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98338 unsigned long thresh,
98339 unsigned long bg_thresh,
98340 unsigned long dirty,
98341diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98342index 8bbef06..a8d1989 100644
98343--- a/mm/page_alloc.c
98344+++ b/mm/page_alloc.c
98345@@ -60,6 +60,7 @@
98346 #include <linux/hugetlb.h>
98347 #include <linux/sched/rt.h>
98348 #include <linux/page_owner.h>
98349+#include <linux/random.h>
98350
98351 #include <asm/sections.h>
98352 #include <asm/tlbflush.h>
98353@@ -358,7 +359,7 @@ out:
98354 * This usage means that zero-order pages may not be compound.
98355 */
98356
98357-static void free_compound_page(struct page *page)
98358+void free_compound_page(struct page *page)
98359 {
98360 __free_pages_ok(page, compound_order(page));
98361 }
98362@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
98363 __mod_zone_freepage_state(zone, (1 << order), migratetype);
98364 }
98365 #else
98366-struct page_ext_operations debug_guardpage_ops = { NULL, };
98367+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
98368 static inline void set_page_guard(struct zone *zone, struct page *page,
98369 unsigned int order, int migratetype) {}
98370 static inline void clear_page_guard(struct zone *zone, struct page *page,
98371@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98372 int i;
98373 int bad = 0;
98374
98375+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98376+ unsigned long index = 1UL << order;
98377+#endif
98378+
98379 VM_BUG_ON_PAGE(PageTail(page), page);
98380 VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
98381
98382@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98383 debug_check_no_obj_freed(page_address(page),
98384 PAGE_SIZE << order);
98385 }
98386+
98387+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98388+ for (; index; --index)
98389+ sanitize_highpage(page + index - 1);
98390+#endif
98391+
98392 arch_free_page(page, order);
98393 kernel_map_pages(page, 1 << order, 0);
98394
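
Note: with PAX_MEMORY_SANITIZE every page of a freed order-n block is scrubbed before it returns to the buddy allocator; sanitize_highpage is the PaX helper that clears one page (kmap-safe for highmem). This is also why the __GFP_ZERO pre-zeroing in prep_new_page is compiled out further down: pages already leave the allocator clean. The loop shape, annotated (same code as above):

	unsigned long index = 1UL << order;	/* pages in the block */

	/* Walk indices 2^order .. 1, touching pages [0, 2^order);
	 * counting down to zero keeps the unsigned loop test simple. */
	for (; index; --index)
		sanitize_highpage(page + index - 1);
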
98395@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98396 local_irq_restore(flags);
98397 }
98398
98399+#ifdef CONFIG_PAX_LATENT_ENTROPY
98400+bool __meminitdata extra_latent_entropy;
98401+
98402+static int __init setup_pax_extra_latent_entropy(char *str)
98403+{
98404+ extra_latent_entropy = true;
98405+ return 0;
98406+}
98407+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98408+
98409+volatile u64 latent_entropy __latent_entropy;
98410+EXPORT_SYMBOL(latent_entropy);
98411+#endif
98412+
98413 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98414 {
98415 unsigned int nr_pages = 1 << order;
98416@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98417 __ClearPageReserved(p);
98418 set_page_count(p, 0);
98419
98420+#ifdef CONFIG_PAX_LATENT_ENTROPY
98421+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98422+ u64 hash = 0;
98423+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98424+ const u64 *data = lowmem_page_address(page);
98425+
98426+ for (index = 0; index < end; index++)
98427+ hash ^= hash + data[index];
98428+ latent_entropy ^= hash;
98429+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98430+ }
98431+#endif
98432+
98433 page_zone(page)->managed_pages += nr_pages;
98434 set_page_refcounted(page);
98435 __free_pages(page, order);
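
Note: with pax_extra_latent_entropy on the command line, the path above folds the contents of every non-highmem page freed by the boot allocator below 4 GiB (pfn < 0x100000) into the latent_entropy pool and feeds it to the RNG via add_device_randomness. The mix is deliberately cheap, not cryptographic; isolated (sketch, same logic as above):

	u64 hash = 0;
	size_t i, end = PAGE_SIZE * nr_pages / sizeof(hash);
	const u64 *data = lowmem_page_address(page);

	for (i = 0; i < end; i++)
		hash ^= hash + data[i];	/* i.e. hash = hash ^ (hash + data[i]) */
	latent_entropy ^= hash;
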
98436@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98437 arch_alloc_page(page, order);
98438 kernel_map_pages(page, 1 << order, 1);
98439
98440+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98441 if (gfp_flags & __GFP_ZERO)
98442 prep_zero_page(page, order, gfp_flags);
98443+#endif
98444
98445 if (order && (gfp_flags & __GFP_COMP))
98446 prep_compound_page(page, order);
98447@@ -1700,7 +1740,7 @@ again:
98448 }
98449
98450 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98451- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98452+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98453 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
98454 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98455
98456@@ -2021,7 +2061,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98457 do {
98458 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98459 high_wmark_pages(zone) - low_wmark_pages(zone) -
98460- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98461+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98462 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98463 } while (zone++ != preferred_zone);
98464 }
98465@@ -5781,7 +5821,7 @@ static void __setup_per_zone_wmarks(void)
98466
98467 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98468 high_wmark_pages(zone) - low_wmark_pages(zone) -
98469- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98470+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98471
98472 setup_zone_migrate_reserve(zone);
98473 spin_unlock_irqrestore(&zone->lock, flags);
98474diff --git a/mm/percpu.c b/mm/percpu.c
98475index d39e2f4..de5f4b4 100644
98476--- a/mm/percpu.c
98477+++ b/mm/percpu.c
98478@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98479 static unsigned int pcpu_high_unit_cpu __read_mostly;
98480
98481 /* the address of the first chunk which starts with the kernel static area */
98482-void *pcpu_base_addr __read_mostly;
98483+void *pcpu_base_addr __read_only;
98484 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98485
98486 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98487diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98488index 5077afc..846c9ef 100644
98489--- a/mm/process_vm_access.c
98490+++ b/mm/process_vm_access.c
98491@@ -13,6 +13,7 @@
98492 #include <linux/uio.h>
98493 #include <linux/sched.h>
98494 #include <linux/highmem.h>
98495+#include <linux/security.h>
98496 #include <linux/ptrace.h>
98497 #include <linux/slab.h>
98498 #include <linux/syscalls.h>
98499@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98500 ssize_t iov_len;
98501 size_t total_len = iov_iter_count(iter);
98502
98503+ return -ENOSYS; // PaX: until properly audited
98504+
98505 /*
98506 * Work out how many pages of struct pages we're going to need
98507 * when eventually calling get_user_pages
98508 */
98509 for (i = 0; i < riovcnt; i++) {
98510 iov_len = rvec[i].iov_len;
98511- if (iov_len > 0) {
98512- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98513- + iov_len)
98514- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98515- / PAGE_SIZE + 1;
98516- nr_pages = max(nr_pages, nr_pages_iov);
98517- }
98518+ if (iov_len <= 0)
98519+ continue;
98520+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98521+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98522+ nr_pages = max(nr_pages, nr_pages_iov);
98523 }
98524
98525 if (nr_pages == 0)
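
Note: the unconditional return -ENOSYS at the top of process_vm_rw_core disables process_vm_readv/process_vm_writev entirely ("until properly audited"), so the restyled page-count loop below it is currently dead code. The arithmetic it preserves: an iovec at base b of length l spans (b+l)/PAGE_SIZE - b/PAGE_SIZE + 1 pages, a conservative upper bound. Example with 4 KiB pages: b = 0x1ff0, l = 0x20 gives 2 - 1 + 1 = 2 pages, matching the straddle of the 0x2000 boundary.
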
98526@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98527 goto free_proc_pages;
98528 }
98529
98530+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98531+ rc = -EPERM;
98532+ goto put_task_struct;
98533+ }
98534+
98535 mm = mm_access(task, PTRACE_MODE_ATTACH);
98536 if (!mm || IS_ERR(mm)) {
98537 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
98538diff --git a/mm/rmap.c b/mm/rmap.c
98539index 71cd5bd..e259089 100644
98540--- a/mm/rmap.c
98541+++ b/mm/rmap.c
98542@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98543 struct anon_vma *anon_vma = vma->anon_vma;
98544 struct anon_vma_chain *avc;
98545
98546+#ifdef CONFIG_PAX_SEGMEXEC
98547+ struct anon_vma_chain *avc_m = NULL;
98548+#endif
98549+
98550 might_sleep();
98551 if (unlikely(!anon_vma)) {
98552 struct mm_struct *mm = vma->vm_mm;
98553@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98554 if (!avc)
98555 goto out_enomem;
98556
98557+#ifdef CONFIG_PAX_SEGMEXEC
98558+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98559+ if (!avc_m)
98560+ goto out_enomem_free_avc;
98561+#endif
98562+
98563 anon_vma = find_mergeable_anon_vma(vma);
98564 allocated = NULL;
98565 if (!anon_vma) {
98566@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98567 /* page_table_lock to protect against threads */
98568 spin_lock(&mm->page_table_lock);
98569 if (likely(!vma->anon_vma)) {
98570+
98571+#ifdef CONFIG_PAX_SEGMEXEC
98572+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98573+
98574+ if (vma_m) {
98575+ BUG_ON(vma_m->anon_vma);
98576+ vma_m->anon_vma = anon_vma;
98577+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98578+ anon_vma->degree++;
98579+ avc_m = NULL;
98580+ }
98581+#endif
98582+
98583 vma->anon_vma = anon_vma;
98584 anon_vma_chain_link(vma, avc, anon_vma);
98585 /* vma reference or self-parent link for new root */
98586@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98587
98588 if (unlikely(allocated))
98589 put_anon_vma(allocated);
98590+
98591+#ifdef CONFIG_PAX_SEGMEXEC
98592+ if (unlikely(avc_m))
98593+ anon_vma_chain_free(avc_m);
98594+#endif
98595+
98596 if (unlikely(avc))
98597 anon_vma_chain_free(avc);
98598 }
98599 return 0;
98600
98601 out_enomem_free_avc:
98602+
98603+#ifdef CONFIG_PAX_SEGMEXEC
98604+ if (avc_m)
98605+ anon_vma_chain_free(avc_m);
98606+#endif
98607+
98608 anon_vma_chain_free(avc);
98609 out_enomem:
98610 return -ENOMEM;
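
Note: the anon_vma_prepare changes follow the usual allocate-outside-the-lock pattern: the mirror's avc_m is allocated up front with GFP_KERNEL (which may sleep), consumed under page_table_lock only if this thread wins the race to install the anon_vma, and freed afterwards if it went unused, the same lifecycle the existing avc already has.
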
98611@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98612 * good chance of avoiding scanning the whole hierarchy when it searches where
98613 * page is mapped.
98614 */
98615-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98616+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98617 {
98618 struct anon_vma_chain *avc, *pavc;
98619 struct anon_vma *root = NULL;
98620@@ -296,7 +331,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98621 * the corresponding VMA in the parent process is attached to.
98622 * Returns 0 on success, non-zero on failure.
98623 */
98624-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98625+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98626 {
98627 struct anon_vma_chain *avc;
98628 struct anon_vma *anon_vma;
98629@@ -416,8 +451,10 @@ static void anon_vma_ctor(void *data)
98630 void __init anon_vma_init(void)
98631 {
98632 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98633- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98634- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98635+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98636+ anon_vma_ctor);
98637+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98638+ SLAB_PANIC|SLAB_NO_SANITIZE);
98639 }
98640
98641 /*
98642diff --git a/mm/shmem.c b/mm/shmem.c
98643index 993e6ba..a962ba3 100644
98644--- a/mm/shmem.c
98645+++ b/mm/shmem.c
98646@@ -33,7 +33,7 @@
98647 #include <linux/swap.h>
98648 #include <linux/aio.h>
98649
98650-static struct vfsmount *shm_mnt;
98651+struct vfsmount *shm_mnt;
98652
98653 #ifdef CONFIG_SHMEM
98654 /*
98655@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98656 #define BOGO_DIRENT_SIZE 20
98657
98658 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98659-#define SHORT_SYMLINK_LEN 128
98660+#define SHORT_SYMLINK_LEN 64
98661
98662 /*
98663 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98664@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98665 static int shmem_xattr_validate(const char *name)
98666 {
98667 struct { const char *prefix; size_t len; } arr[] = {
98668+
98669+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98670+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98671+#endif
98672+
98673 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98674 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98675 };
98676@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98677 if (err)
98678 return err;
98679
98680+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98681+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98682+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98683+ return -EOPNOTSUPP;
98684+ if (size > 8)
98685+ return -EINVAL;
98686+ }
98687+#endif
98688+
98689 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98690 }
98691
98692@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98693 int err = -ENOMEM;
98694
98695 /* Round up to L1_CACHE_BYTES to resist false sharing */
98696- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98697- L1_CACHE_BYTES), GFP_KERNEL);
98698+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98699 if (!sbinfo)
98700 return -ENOMEM;
98701
98702diff --git a/mm/slab.c b/mm/slab.c
98703index 65b5dcb..d53d866 100644
98704--- a/mm/slab.c
98705+++ b/mm/slab.c
98706@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98707 if ((x)->max_freeable < i) \
98708 (x)->max_freeable = i; \
98709 } while (0)
98710-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98711-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98712-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98713-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98714+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98715+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98716+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98717+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98718+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
98719+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
98720 #else
98721 #define STATS_INC_ACTIVE(x) do { } while (0)
98722 #define STATS_DEC_ACTIVE(x) do { } while (0)
98723@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98724 #define STATS_INC_ALLOCMISS(x) do { } while (0)
98725 #define STATS_INC_FREEHIT(x) do { } while (0)
98726 #define STATS_INC_FREEMISS(x) do { } while (0)
98727+#define STATS_INC_SANITIZED(x) do { } while (0)
98728+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
98729 #endif
98730
98731 #if DEBUG
98732@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
98733 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98734 */
98735 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98736- const struct page *page, void *obj)
98737+ const struct page *page, const void *obj)
98738 {
98739 u32 offset = (obj - page->s_mem);
98740 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98741@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
98742 * structures first. Without this, further allocations will bug.
98743 */
98744 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
98745- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
98746+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98747 slab_state = PARTIAL_NODE;
98748
98749 slab_early_init = 0;
98750@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
98751
98752 cachep = find_mergeable(size, align, flags, name, ctor);
98753 if (cachep) {
98754- cachep->refcount++;
98755+ atomic_inc(&cachep->refcount);
98756
98757 /*
98758 * Adjust the object sizes so that we clear
98759@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
98760 struct array_cache *ac = cpu_cache_get(cachep);
98761
98762 check_irq_off();
98763+
98764+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98765+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
98766+ STATS_INC_NOT_SANITIZED(cachep);
98767+ else {
98768+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
98769+
98770+ if (cachep->ctor)
98771+ cachep->ctor(objp);
98772+
98773+ STATS_INC_SANITIZED(cachep);
98774+ }
98775+#endif
98776+
98777 kmemleak_free_recursive(objp, cachep->flags);
98778 objp = cache_free_debugcheck(cachep, objp, caller);
98779
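
Note: the free-path sanitization above poisons the object with PAX_MEMORY_SANITIZE_VALUE (0xfe per byte on x86_64, 0xff elsewhere, per the definitions added to mm/slab.h below) and then re-runs the constructor, because SLAB keeps constructed objects in a reusable state between allocations; caches that are poisoned anyway (SLAB_POISON) or opted out (SLAB_NO_SANITIZE) only bump the not_sanitized counter.
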
98780@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
98781 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
98782 }
98783
98784-void *__kmalloc_node(size_t size, gfp_t flags, int node)
98785+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
98786 {
98787 return __do_kmalloc_node(size, flags, node, _RET_IP_);
98788 }
98789@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
98790 * @flags: the type of memory to allocate (see kmalloc).
98791 * @caller: function caller for debug tracking of the caller
98792 */
98793-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
98794+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
98795 unsigned long caller)
98796 {
98797 struct kmem_cache *cachep;
98798@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
98799
98800 if (unlikely(ZERO_OR_NULL_PTR(objp)))
98801 return;
98802+ VM_BUG_ON(!virt_addr_valid(objp));
98803 local_irq_save(flags);
98804 kfree_debugcheck(objp);
98805 c = virt_to_cache(objp);
98806@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
98807 }
98808 /* cpu stats */
98809 {
98810- unsigned long allochit = atomic_read(&cachep->allochit);
98811- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98812- unsigned long freehit = atomic_read(&cachep->freehit);
98813- unsigned long freemiss = atomic_read(&cachep->freemiss);
98814+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98815+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98816+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98817+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98818
98819 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98820 allochit, allocmiss, freehit, freemiss);
98821 }
98822+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98823+ {
98824+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
98825+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
98826+
98827+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
98828+ }
98829+#endif
98830 #endif
98831 }
98832
98833@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
98834 static int __init slab_proc_init(void)
98835 {
98836 #ifdef CONFIG_DEBUG_SLAB_LEAK
98837- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98838+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
98839 #endif
98840 return 0;
98841 }
98842 module_init(slab_proc_init);
98843 #endif
98844
98845+bool is_usercopy_object(const void *ptr)
98846+{
98847+ struct page *page;
98848+ struct kmem_cache *cachep;
98849+
98850+ if (ZERO_OR_NULL_PTR(ptr))
98851+ return false;
98852+
98853+ if (!slab_is_available())
98854+ return false;
98855+
98856+ if (!virt_addr_valid(ptr))
98857+ return false;
98858+
98859+ page = virt_to_head_page(ptr);
98860+
98861+ if (!PageSlab(page))
98862+ return false;
98863+
98864+ cachep = page->slab_cache;
98865+ return cachep->flags & SLAB_USERCOPY;
98866+}
98867+
98868+#ifdef CONFIG_PAX_USERCOPY
98869+const char *check_heap_object(const void *ptr, unsigned long n)
98870+{
98871+ struct page *page;
98872+ struct kmem_cache *cachep;
98873+ unsigned int objnr;
98874+ unsigned long offset;
98875+
98876+ if (ZERO_OR_NULL_PTR(ptr))
98877+ return "<null>";
98878+
98879+ if (!virt_addr_valid(ptr))
98880+ return NULL;
98881+
98882+ page = virt_to_head_page(ptr);
98883+
98884+ if (!PageSlab(page))
98885+ return NULL;
98886+
98887+ cachep = page->slab_cache;
98888+ if (!(cachep->flags & SLAB_USERCOPY))
98889+ return cachep->name;
98890+
98891+ objnr = obj_to_index(cachep, page, ptr);
98892+ BUG_ON(objnr >= cachep->num);
98893+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
98894+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
98895+ return NULL;
98896+
98897+ return cachep->name;
98898+}
98899+#endif
98900+
98901 /**
98902 * ksize - get the actual amount of memory allocated for a given object
98903 * @objp: Pointer to the object
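
Note: check_heap_object above is the slab half of the USERCOPY defence: NULL means the n-byte copy at ptr is fully contained in one object of a SLAB_USERCOPY-approved cache, anything else names the offending cache for the violation report. The containment test avoids integer wraparound by never adding to the untrusted values; in isolation (sketch):

	/* Copying n bytes starting offset bytes into an object of
	 * object_size is safe iff the tail room covers n. Both
	 * comparisons are subtraction-safe: offset <= object_size
	 * is checked before object_size - offset is formed. */
	static int fits_in_object(unsigned long offset, unsigned long n,
				  unsigned long object_size)
	{
		return offset <= object_size && n <= object_size - offset;
	}
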
98904diff --git a/mm/slab.h b/mm/slab.h
98905index 1cf40054..10ad563 100644
98906--- a/mm/slab.h
98907+++ b/mm/slab.h
98908@@ -22,7 +22,7 @@ struct kmem_cache {
98909 unsigned int align; /* Alignment as calculated */
98910 unsigned long flags; /* Active flags on the slab */
98911 const char *name; /* Slab name for sysfs */
98912- int refcount; /* Use counter */
98913+ atomic_t refcount; /* Use counter */
98914 void (*ctor)(void *); /* Called on object slot creation */
98915 struct list_head list; /* List of all slab caches on the system */
98916 };
98917@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
98918 /* The slab cache that manages slab cache information */
98919 extern struct kmem_cache *kmem_cache;
98920
98921+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98922+#ifdef CONFIG_X86_64
98923+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
98924+#else
98925+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
98926+#endif
98927+enum pax_sanitize_mode {
98928+ PAX_SANITIZE_SLAB_OFF = 0,
98929+ PAX_SANITIZE_SLAB_FAST,
98930+ PAX_SANITIZE_SLAB_FULL,
98931+};
98932+extern enum pax_sanitize_mode pax_sanitize_slab;
98933+#endif
98934+
98935 unsigned long calculate_alignment(unsigned long flags,
98936 unsigned long align, unsigned long size);
98937
98938@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
98939
98940 /* Legal flag mask for kmem_cache_create(), for various configurations */
98941 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
98942- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
98943+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
98944+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
98945
98946 #if defined(CONFIG_DEBUG_SLAB)
98947 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
98948@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
98949 return s;
98950
98951 page = virt_to_head_page(x);
98952+
98953+ BUG_ON(!PageSlab(page));
98954+
98955 cachep = page->slab_cache;
98956 if (slab_equal_or_root(cachep, s))
98957 return cachep;
98958diff --git a/mm/slab_common.c b/mm/slab_common.c
98959index e03dd6f..c475838 100644
98960--- a/mm/slab_common.c
98961+++ b/mm/slab_common.c
98962@@ -25,11 +25,35 @@
98963
98964 #include "slab.h"
98965
98966-enum slab_state slab_state;
98967+enum slab_state slab_state __read_only;
98968 LIST_HEAD(slab_caches);
98969 DEFINE_MUTEX(slab_mutex);
98970 struct kmem_cache *kmem_cache;
98971
98972+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98973+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
98974+static int __init pax_sanitize_slab_setup(char *str)
98975+{
98976+ if (!str)
98977+ return 0;
98978+
98979+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
98980+ pr_info("PaX slab sanitization: %s\n", "disabled");
98981+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
98982+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
98983+ pr_info("PaX slab sanitization: %s\n", "fast");
98984+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
98985+ } else if (!strcmp(str, "full")) {
98986+ pr_info("PaX slab sanitization: %s\n", "full");
98987+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
98988+ } else
98989+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
98990+
98991+ return 0;
98992+}
98993+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
98994+#endif
98995+
98996 /*
98997 * Set of flags that will prevent slab merging
98998 */
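
Note: pax_sanitize_slab is wired up as an early_param, so the mode is chosen on the kernel command line; accepted values per the parser above:

	pax_sanitize_slab=off	(or 0) no scrubbing of freed slab objects
	pax_sanitize_slab=fast	(or 1) the default: scrub, but honour SLAB_NO_SANITIZE
	pax_sanitize_slab=full	scrub even caches that requested SLAB_NO_SANITIZE
				(SLAB_DESTROY_BY_RCU caches stay exempt, see the
				kmem_cache_create hunk below)
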
98999@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99000 * Merge control. If this is set then no merging of slab caches will occur.
99001 * (Could be removed. This was introduced to pacify the merge skeptics.)
99002 */
99003-static int slab_nomerge;
99004+static int slab_nomerge = 1;
99005
99006 static int __init setup_slab_nomerge(char *str)
99007 {
99008@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
99009 /*
99010 * We may have set a slab to be unmergeable during bootstrap.
99011 */
99012- if (s->refcount < 0)
99013+ if (atomic_read(&s->refcount) < 0)
99014 return 1;
99015
99016 return 0;
99017@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99018 if (err)
99019 goto out_free_cache;
99020
99021- s->refcount = 1;
99022+ atomic_set(&s->refcount, 1);
99023 list_add(&s->list, &slab_caches);
99024 out:
99025 if (err)
99026@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99027 */
99028 flags &= CACHE_CREATE_MASK;
99029
99030+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99031+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99032+ flags |= SLAB_NO_SANITIZE;
99033+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99034+ flags &= ~SLAB_NO_SANITIZE;
99035+#endif
99036+
99037 s = __kmem_cache_alias(name, size, align, flags, ctor);
99038 if (s)
99039 goto out_unlock;
99040@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99041
99042 mutex_lock(&slab_mutex);
99043
99044- s->refcount--;
99045- if (s->refcount)
99046+ if (!atomic_dec_and_test(&s->refcount))
99047 goto out_unlock;
99048
99049 if (memcg_cleanup_cache_params(s) != 0)
99050@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99051 rcu_barrier();
99052
99053 memcg_free_cache_params(s);
99054-#ifdef SLAB_SUPPORTS_SYSFS
99055+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99056 sysfs_slab_remove(s);
99057 #else
99058 slab_kmem_cache_release(s);
99059@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99060 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99061 name, size, err);
99062
99063- s->refcount = -1; /* Exempt from merging for now */
99064+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99065 }
99066
99067 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99068@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99069
99070 create_boot_cache(s, name, size, flags);
99071 list_add(&s->list, &slab_caches);
99072- s->refcount = 1;
99073+ atomic_set(&s->refcount, 1);
99074 return s;
99075 }
99076
99077@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99078 EXPORT_SYMBOL(kmalloc_dma_caches);
99079 #endif
99080
99081+#ifdef CONFIG_PAX_USERCOPY_SLABS
99082+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99083+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99084+#endif
99085+
99086 /*
99087 * Conversion table for small slabs sizes / 8 to the index in the
99088 * kmalloc array. This is necessary for slabs < 192 since we have non power
99089@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99090 return kmalloc_dma_caches[index];
99091
99092 #endif
99093+
99094+#ifdef CONFIG_PAX_USERCOPY_SLABS
99095+ if (unlikely((flags & GFP_USERCOPY)))
99096+ return kmalloc_usercopy_caches[index];
99097+
99098+#endif
99099+
99100 return kmalloc_caches[index];
99101 }
99102
99103@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99104 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99105 if (!kmalloc_caches[i]) {
99106 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99107- 1 << i, flags);
99108+ 1 << i, SLAB_USERCOPY | flags);
99109 }
99110
99111 /*
99112@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99113 * earlier power of two caches
99114 */
99115 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99116- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99117+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99118
99119 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99120- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99121+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99122 }
99123
99124 /* Kmalloc array is now usable */
99125@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99126 }
99127 }
99128 #endif
99129+
99130+#ifdef CONFIG_PAX_USERCOPY_SLABS
99131+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99132+ struct kmem_cache *s = kmalloc_caches[i];
99133+
99134+ if (s) {
99135+ int size = kmalloc_size(i);
99136+ char *n = kasprintf(GFP_NOWAIT,
99137+ "usercopy-kmalloc-%d", size);
99138+
99139+ BUG_ON(!n);
99140+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99141+ size, SLAB_USERCOPY | flags);
99142+ }
99143+ }
99144+#endif
99145+
99146 }
99147 #endif /* !CONFIG_SLOB */
99148
99149@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
99150 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99151 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99152 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99153+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99154+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99155+#endif
99156 #endif
99157 seq_putc(m, '\n');
99158 }
99159@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
99160 module_init(slab_proc_init);
99161 #endif /* CONFIG_SLABINFO */
99162
99163-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
99164+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
99165 gfp_t flags)
99166 {
99167 void *ret;
99168diff --git a/mm/slob.c b/mm/slob.c
99169index 96a8620..46b3f12 100644
99170--- a/mm/slob.c
99171+++ b/mm/slob.c
99172@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99173 /*
99174 * Return the size of a slob block.
99175 */
99176-static slobidx_t slob_units(slob_t *s)
99177+static slobidx_t slob_units(const slob_t *s)
99178 {
99179 if (s->units > 0)
99180 return s->units;
99181@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99182 /*
99183 * Return the next free slob block pointer after this one.
99184 */
99185-static slob_t *slob_next(slob_t *s)
99186+static slob_t *slob_next(const slob_t *s)
99187 {
99188 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99189 slobidx_t next;
99190@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99191 /*
99192 * Returns true if s is the last free block in its page.
99193 */
99194-static int slob_last(slob_t *s)
99195+static int slob_last(const slob_t *s)
99196 {
99197 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99198 }
99199
99200-static void *slob_new_pages(gfp_t gfp, int order, int node)
99201+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99202 {
99203- void *page;
99204+ struct page *page;
99205
99206 #ifdef CONFIG_NUMA
99207 if (node != NUMA_NO_NODE)
99208@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99209 if (!page)
99210 return NULL;
99211
99212- return page_address(page);
99213+ __SetPageSlab(page);
99214+ return page;
99215 }
99216
99217-static void slob_free_pages(void *b, int order)
99218+static void slob_free_pages(struct page *sp, int order)
99219 {
99220 if (current->reclaim_state)
99221 current->reclaim_state->reclaimed_slab += 1 << order;
99222- free_pages((unsigned long)b, order);
99223+ __ClearPageSlab(sp);
99224+ page_mapcount_reset(sp);
99225+ sp->private = 0;
99226+ __free_pages(sp, order);
99227 }
99228
99229 /*
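
Note: the slob_new_pages/slob_free_pages refactor above makes the allocate and free sides symmetric: a page is marked with __SetPageSlab at allocation, and the free side clears the flag, resets _mapcount and zeroes page->private before handing the page back to the buddy allocator. Keeping every slob page reliably PageSlab-marked is presumably what lets the usercopy checks added elsewhere by this patch classify pointers via PageSlab.
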
99230@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99231
99232 /* Not enough space: must allocate a new page */
99233 if (!b) {
99234- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99235- if (!b)
99236+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99237+ if (!sp)
99238 return NULL;
99239- sp = virt_to_page(b);
99240- __SetPageSlab(sp);
99241+ b = page_address(sp);
99242
99243 spin_lock_irqsave(&slob_lock, flags);
99244 sp->units = SLOB_UNITS(PAGE_SIZE);
99245 sp->freelist = b;
99246+ sp->private = 0;
99247 INIT_LIST_HEAD(&sp->lru);
99248 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99249 set_slob_page_free(sp, slob_list);
99250@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99251 /*
99252 * slob_free: entry point into the slob allocator.
99253 */
99254-static void slob_free(void *block, int size)
99255+static void slob_free(struct kmem_cache *c, void *block, int size)
99256 {
99257 struct page *sp;
99258 slob_t *prev, *next, *b = (slob_t *)block;
99259@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99260 if (slob_page_free(sp))
99261 clear_slob_page_free(sp);
99262 spin_unlock_irqrestore(&slob_lock, flags);
99263- __ClearPageSlab(sp);
99264- page_mapcount_reset(sp);
99265- slob_free_pages(b, 0);
99266+ slob_free_pages(sp, 0);
99267 return;
99268 }
99269
99270+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99271+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99272+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99273+#endif
99274+
99275 if (!slob_page_free(sp)) {
99276 /* This slob page is about to become partially free. Easy! */
99277 sp->units = units;
99278@@ -424,11 +431,10 @@ out:
99279 */
99280
99281 static __always_inline void *
99282-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99283+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99284 {
99285- unsigned int *m;
99286- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99287- void *ret;
99288+ slob_t *m;
99289+ void *ret = NULL;
99290
99291 gfp &= gfp_allowed_mask;
99292
99293@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99294
99295 if (!m)
99296 return NULL;
99297- *m = size;
99298+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99299+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99300+ m[0].units = size;
99301+ m[1].units = align;
99302 ret = (void *)m + align;
99303
99304 trace_kmalloc_node(caller, ret,
99305 size, size + align, gfp, node);
99306 } else {
99307 unsigned int order = get_order(size);
99308+ struct page *page;
99309
99310 if (likely(order))
99311 gfp |= __GFP_COMP;
99312- ret = slob_new_pages(gfp, order, node);
99313+ page = slob_new_pages(gfp, order, node);
99314+ if (page) {
99315+ ret = page_address(page);
99316+ page->private = size;
99317+ }
99318
99319 trace_kmalloc_node(caller, ret,
99320 size, PAGE_SIZE << order, gfp, node);
99321 }
99322
99323- kmemleak_alloc(ret, size, 1, gfp);
99324 return ret;
99325 }
99326
99327-void *__kmalloc(size_t size, gfp_t gfp)
99328+static __always_inline void *
99329+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99330+{
99331+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99332+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99333+
99334+ if (!ZERO_OR_NULL_PTR(ret))
99335+ kmemleak_alloc(ret, size, 1, gfp);
99336+ return ret;
99337+}
99338+
99339+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
99340 {
99341 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
99342 }
99343@@ -491,34 +515,112 @@ void kfree(const void *block)
99344 return;
99345 kmemleak_free(block);
99346
99347+ VM_BUG_ON(!virt_addr_valid(block));
99348 sp = virt_to_page(block);
99349- if (PageSlab(sp)) {
99350+ VM_BUG_ON(!PageSlab(sp));
99351+ if (!sp->private) {
99352 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99353- unsigned int *m = (unsigned int *)(block - align);
99354- slob_free(m, *m + align);
99355- } else
99356+ slob_t *m = (slob_t *)(block - align);
99357+ slob_free(NULL, m, m[0].units + align);
99358+ } else {
99359+ __ClearPageSlab(sp);
99360+ page_mapcount_reset(sp);
99361+ sp->private = 0;
99362 __free_pages(sp, compound_order(sp));
99363+ }
99364 }
99365 EXPORT_SYMBOL(kfree);
99366
99367+bool is_usercopy_object(const void *ptr)
99368+{
99369+ if (!slab_is_available())
99370+ return false;
99371+
99372+ // PAX: TODO
99373+
99374+ return false;
99375+}
99376+
99377+#ifdef CONFIG_PAX_USERCOPY
99378+const char *check_heap_object(const void *ptr, unsigned long n)
99379+{
99380+ struct page *page;
99381+ const slob_t *free;
99382+ const void *base;
99383+ unsigned long flags;
99384+
99385+ if (ZERO_OR_NULL_PTR(ptr))
99386+ return "<null>";
99387+
99388+ if (!virt_addr_valid(ptr))
99389+ return NULL;
99390+
99391+ page = virt_to_head_page(ptr);
99392+ if (!PageSlab(page))
99393+ return NULL;
99394+
99395+ if (page->private) {
99396+ base = page;
99397+ if (base <= ptr && n <= page->private - (ptr - base))
99398+ return NULL;
99399+ return "<slob>";
99400+ }
99401+
99402+ /* some tricky double walking to find the chunk */
99403+ spin_lock_irqsave(&slob_lock, flags);
99404+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99405+ free = page->freelist;
99406+
99407+ while (!slob_last(free) && (void *)free <= ptr) {
99408+ base = free + slob_units(free);
99409+ free = slob_next(free);
99410+ }
99411+
99412+ while (base < (void *)free) {
99413+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99414+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99415+ int offset;
99416+
99417+ if (ptr < base + align)
99418+ break;
99419+
99420+ offset = ptr - base - align;
99421+ if (offset >= m) {
99422+ base += size;
99423+ continue;
99424+ }
99425+
99426+ if (n > m - offset)
99427+ break;
99428+
99429+ spin_unlock_irqrestore(&slob_lock, flags);
99430+ return NULL;
99431+ }
99432+
99433+ spin_unlock_irqrestore(&slob_lock, flags);
99434+ return "<slob>";
99435+}
99436+#endif
99437+
99438 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99439 size_t ksize(const void *block)
99440 {
99441 struct page *sp;
99442 int align;
99443- unsigned int *m;
99444+ slob_t *m;
99445
99446 BUG_ON(!block);
99447 if (unlikely(block == ZERO_SIZE_PTR))
99448 return 0;
99449
99450 sp = virt_to_page(block);
99451- if (unlikely(!PageSlab(sp)))
99452- return PAGE_SIZE << compound_order(sp);
99453+ VM_BUG_ON(!PageSlab(sp));
99454+ if (sp->private)
99455+ return sp->private;
99456
99457 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99458- m = (unsigned int *)(block - align);
99459- return SLOB_UNITS(*m) * SLOB_UNIT;
99460+ m = (slob_t *)(block - align);
99461+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99462 }
99463 EXPORT_SYMBOL(ksize);
99464
99465@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99466
99467 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99468 {
99469- void *b;
99470+ void *b = NULL;
99471
99472 flags &= gfp_allowed_mask;
99473
99474 lockdep_trace_alloc(flags);
99475
99476+#ifdef CONFIG_PAX_USERCOPY_SLABS
99477+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99478+#else
99479 if (c->size < PAGE_SIZE) {
99480 b = slob_alloc(c->size, flags, c->align, node);
99481 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99482 SLOB_UNITS(c->size) * SLOB_UNIT,
99483 flags, node);
99484 } else {
99485- b = slob_new_pages(flags, get_order(c->size), node);
99486+ struct page *sp;
99487+
99488+ sp = slob_new_pages(flags, get_order(c->size), node);
99489+ if (sp) {
99490+ b = page_address(sp);
99491+ sp->private = c->size;
99492+ }
99493 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99494 PAGE_SIZE << get_order(c->size),
99495 flags, node);
99496 }
99497+#endif
99498
99499 if (b && c->ctor)
99500 c->ctor(b);
99501@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
99502 EXPORT_SYMBOL(kmem_cache_alloc);
99503
99504 #ifdef CONFIG_NUMA
99505-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99506+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
99507 {
99508 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
99509 }
99510@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99511 EXPORT_SYMBOL(kmem_cache_alloc_node);
99512 #endif
99513
99514-static void __kmem_cache_free(void *b, int size)
99515+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99516 {
99517- if (size < PAGE_SIZE)
99518- slob_free(b, size);
99519+ struct page *sp;
99520+
99521+ sp = virt_to_page(b);
99522+ BUG_ON(!PageSlab(sp));
99523+ if (!sp->private)
99524+ slob_free(c, b, size);
99525 else
99526- slob_free_pages(b, get_order(size));
99527+ slob_free_pages(sp, get_order(size));
99528 }
99529
99530 static void kmem_rcu_free(struct rcu_head *head)
99531@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99532 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99533 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99534
99535- __kmem_cache_free(b, slob_rcu->size);
99536+ __kmem_cache_free(NULL, b, slob_rcu->size);
99537 }
99538
99539 void kmem_cache_free(struct kmem_cache *c, void *b)
99540 {
99541+ int size = c->size;
99542+
99543+#ifdef CONFIG_PAX_USERCOPY_SLABS
99544+ if (size + c->align < PAGE_SIZE) {
99545+ size += c->align;
99546+ b -= c->align;
99547+ }
99548+#endif
99549+
99550 kmemleak_free_recursive(b, c->flags);
99551 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99552 struct slob_rcu *slob_rcu;
99553- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99554- slob_rcu->size = c->size;
99555+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99556+ slob_rcu->size = size;
99557 call_rcu(&slob_rcu->head, kmem_rcu_free);
99558 } else {
99559- __kmem_cache_free(b, c->size);
99560+ __kmem_cache_free(c, b, size);
99561 }
99562
99563+#ifdef CONFIG_PAX_USERCOPY_SLABS
99564+ trace_kfree(_RET_IP_, b);
99565+#else
99566 trace_kmem_cache_free(_RET_IP_, b);
99567+#endif
99568+
99569 }
99570 EXPORT_SYMBOL(kmem_cache_free);
99571
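For SLOB, the patch gives check_heap_object() real metadata to work with: small allocations now store both their size (m[0].units) and alignment (m[1].units) in front of the object, large allocations record their size in page->private, and the checker walks the page's free list under slob_lock to locate the allocated chunk containing the pointer. The containment test it ultimately performs reduces to this standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of the core bounds check: a copy of n bytes starting at ptr is
 * allowed only if it lies entirely within the object [base, base + size). */
static bool copy_within_object(const void *base, size_t size,
			       const void *ptr, size_t n)
{
	uintptr_t b = (uintptr_t)base, p = (uintptr_t)ptr;

	if (p < b)
		return false;
	return (p - b) < size && n <= size - (p - b);
}
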
99572diff --git a/mm/slub.c b/mm/slub.c
99573index fe376fe..2f5757c 100644
99574--- a/mm/slub.c
99575+++ b/mm/slub.c
99576@@ -197,7 +197,7 @@ struct track {
99577
99578 enum track_item { TRACK_ALLOC, TRACK_FREE };
99579
99580-#ifdef CONFIG_SYSFS
99581+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99582 static int sysfs_slab_add(struct kmem_cache *);
99583 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99584 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99585@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
99586 if (!t->addr)
99587 return;
99588
99589- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99590+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99591 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99592 #ifdef CONFIG_STACKTRACE
99593 {
99594@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99595
99596 slab_free_hook(s, x);
99597
99598+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99599+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99600+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99601+ if (s->ctor)
99602+ s->ctor(x);
99603+ }
99604+#endif
99605+
99606 redo:
99607 /*
99608 * Determine the currently cpus per cpu slab.
99609@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99610 s->inuse = size;
99611
99612 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99613+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99614+ (!(flags & SLAB_NO_SANITIZE)) ||
99615+#endif
99616 s->ctor)) {
99617 /*
99618 * Relocate free pointer after the object if it is not
99619@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
99620
99621 __setup("slub_min_objects=", setup_slub_min_objects);
99622
99623-void *__kmalloc(size_t size, gfp_t flags)
99624+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
99625 {
99626 struct kmem_cache *s;
99627 void *ret;
99628@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
99629 return ptr;
99630 }
99631
99632-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99633+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99634 {
99635 struct kmem_cache *s;
99636 void *ret;
99637@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99638 EXPORT_SYMBOL(__kmalloc_node);
99639 #endif
99640
99641+bool is_usercopy_object(const void *ptr)
99642+{
99643+ struct page *page;
99644+ struct kmem_cache *s;
99645+
99646+ if (ZERO_OR_NULL_PTR(ptr))
99647+ return false;
99648+
99649+ if (!slab_is_available())
99650+ return false;
99651+
99652+ if (!virt_addr_valid(ptr))
99653+ return false;
99654+
99655+ page = virt_to_head_page(ptr);
99656+
99657+ if (!PageSlab(page))
99658+ return false;
99659+
99660+ s = page->slab_cache;
99661+ return s->flags & SLAB_USERCOPY;
99662+}
99663+
99664+#ifdef CONFIG_PAX_USERCOPY
99665+const char *check_heap_object(const void *ptr, unsigned long n)
99666+{
99667+ struct page *page;
99668+ struct kmem_cache *s;
99669+ unsigned long offset;
99670+
99671+ if (ZERO_OR_NULL_PTR(ptr))
99672+ return "<null>";
99673+
99674+ if (!virt_addr_valid(ptr))
99675+ return NULL;
99676+
99677+ page = virt_to_head_page(ptr);
99678+
99679+ if (!PageSlab(page))
99680+ return NULL;
99681+
99682+ s = page->slab_cache;
99683+ if (!(s->flags & SLAB_USERCOPY))
99684+ return s->name;
99685+
99686+ offset = (ptr - page_address(page)) % s->size;
99687+ if (offset <= s->object_size && n <= s->object_size - offset)
99688+ return NULL;
99689+
99690+ return s->name;
99691+}
99692+#endif
99693+
99694 size_t ksize(const void *object)
99695 {
99696 struct page *page;
99697@@ -3336,6 +3400,7 @@ void kfree(const void *x)
99698 if (unlikely(ZERO_OR_NULL_PTR(x)))
99699 return;
99700
99701+ VM_BUG_ON(!virt_addr_valid(x));
99702 page = virt_to_head_page(x);
99703 if (unlikely(!PageSlab(page))) {
99704 BUG_ON(!PageCompound(page));
99705@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99706 int i;
99707 struct kmem_cache *c;
99708
99709- s->refcount++;
99710+ atomic_inc(&s->refcount);
99711
99712 /*
99713 * Adjust the object sizes so that we clear
99714@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99715 }
99716
99717 if (sysfs_slab_alias(s, name)) {
99718- s->refcount--;
99719+ atomic_dec(&s->refcount);
99720 s = NULL;
99721 }
99722 }
99723@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
99724 }
99725 #endif
99726
99727-#ifdef CONFIG_SYSFS
99728+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99729 static int count_inuse(struct page *page)
99730 {
99731 return page->inuse;
99732@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
99733 len += sprintf(buf + len, "%7ld ", l->count);
99734
99735 if (l->addr)
99736+#ifdef CONFIG_GRKERNSEC_HIDESYM
99737+ len += sprintf(buf + len, "%pS", NULL);
99738+#else
99739 len += sprintf(buf + len, "%pS", (void *)l->addr);
99740+#endif
99741 else
99742 len += sprintf(buf + len, "<not-available>");
99743
99744@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
99745 validate_slab_cache(kmalloc_caches[9]);
99746 }
99747 #else
99748-#ifdef CONFIG_SYSFS
99749+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99750 static void resiliency_test(void) {};
99751 #endif
99752 #endif
99753
99754-#ifdef CONFIG_SYSFS
99755+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99756 enum slab_stat_type {
99757 SL_ALL, /* All slabs */
99758 SL_PARTIAL, /* Only partially allocated slabs */
99759@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
99760 {
99761 if (!s->ctor)
99762 return 0;
99763+#ifdef CONFIG_GRKERNSEC_HIDESYM
99764+ return sprintf(buf, "%pS\n", NULL);
99765+#else
99766 return sprintf(buf, "%pS\n", s->ctor);
99767+#endif
99768 }
99769 SLAB_ATTR_RO(ctor);
99770
99771 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99772 {
99773- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
99774+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
99775 }
99776 SLAB_ATTR_RO(aliases);
99777
99778@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
99779 SLAB_ATTR_RO(cache_dma);
99780 #endif
99781
99782+#ifdef CONFIG_PAX_USERCOPY_SLABS
99783+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
99784+{
99785+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
99786+}
99787+SLAB_ATTR_RO(usercopy);
99788+#endif
99789+
99790+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99791+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
99792+{
99793+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
99794+}
99795+SLAB_ATTR_RO(sanitize);
99796+#endif
99797+
99798 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
99799 {
99800 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
99801@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
99802 * as well as cause other issues like converting a mergeable
99803 * cache into an umergeable one.
99804 */
99805- if (s->refcount > 1)
99806+ if (atomic_read(&s->refcount) > 1)
99807 return -EINVAL;
99808
99809 s->flags &= ~SLAB_TRACE;
99810@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
99811 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
99812 size_t length)
99813 {
99814- if (s->refcount > 1)
99815+ if (atomic_read(&s->refcount) > 1)
99816 return -EINVAL;
99817
99818 s->flags &= ~SLAB_FAILSLAB;
99819@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
99820 #ifdef CONFIG_ZONE_DMA
99821 &cache_dma_attr.attr,
99822 #endif
99823+#ifdef CONFIG_PAX_USERCOPY_SLABS
99824+ &usercopy_attr.attr,
99825+#endif
99826+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99827+ &sanitize_attr.attr,
99828+#endif
99829 #ifdef CONFIG_NUMA
99830 &remote_node_defrag_ratio_attr.attr,
99831 #endif
99832@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
99833 return name;
99834 }
99835
99836+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99837 static int sysfs_slab_add(struct kmem_cache *s)
99838 {
99839 int err;
99840@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
99841 kobject_del(&s->kobj);
99842 kobject_put(&s->kobj);
99843 }
99844+#endif
99845
99846 /*
99847 * Need to buffer aliases during bootup until sysfs becomes
99848@@ -5161,6 +5258,7 @@ struct saved_alias {
99849
99850 static struct saved_alias *alias_list;
99851
99852+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99853 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99854 {
99855 struct saved_alias *al;
99856@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99857 alias_list = al;
99858 return 0;
99859 }
99860+#endif
99861
99862 static int __init slab_sysfs_init(void)
99863 {
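
Two independent hardenings run through the mm/slub.c hunks: CONFIG_PAX_USERCOPY gains a fast per-object bounds check (the pointer's offset within its slab object, computed modulo s->size, must leave room for the n bytes being copied, and the cache itself must carry SLAB_USERCOPY), and CONFIG_PAX_MEMORY_SANITIZE wipes objects on the free path so stale data cannot leak through a later use-after-free or uninitialized read. A sketch of the sanitize step as the slab_free() hunk adds it, using the names from the patch:

#include <linux/slab.h>
#include <linux/string.h>

/* Mirrors the PAX_MEMORY_SANITIZE logic in the slab_free() hunk: caches
 * may opt out with SLAB_NO_SANITIZE; ctor-backed caches are re-constructed
 * after the wipe so the ctor's invariant still holds for the next user. */
static void sanitize_on_free(struct kmem_cache *s, void *object)
{
	if (s->flags & SLAB_NO_SANITIZE)
		return;
	memset(object, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
	if (s->ctor)
		s->ctor(object);
}
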
99864diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
99865index 4cba9c2..b4f9fcc 100644
99866--- a/mm/sparse-vmemmap.c
99867+++ b/mm/sparse-vmemmap.c
99868@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
99869 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
99870 if (!p)
99871 return NULL;
99872- pud_populate(&init_mm, pud, p);
99873+ pud_populate_kernel(&init_mm, pud, p);
99874 }
99875 return pud;
99876 }
99877@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
99878 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
99879 if (!p)
99880 return NULL;
99881- pgd_populate(&init_mm, pgd, p);
99882+ pgd_populate_kernel(&init_mm, pgd, p);
99883 }
99884 return pgd;
99885 }
99886diff --git a/mm/sparse.c b/mm/sparse.c
99887index d1b48b6..6e8590e 100644
99888--- a/mm/sparse.c
99889+++ b/mm/sparse.c
99890@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
99891
99892 for (i = 0; i < PAGES_PER_SECTION; i++) {
99893 if (PageHWPoison(&memmap[i])) {
99894- atomic_long_sub(1, &num_poisoned_pages);
99895+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
99896 ClearPageHWPoison(&memmap[i]);
99897 }
99898 }
99899diff --git a/mm/swap.c b/mm/swap.c
99900index 8a12b33..7068e78 100644
99901--- a/mm/swap.c
99902+++ b/mm/swap.c
99903@@ -31,6 +31,7 @@
99904 #include <linux/memcontrol.h>
99905 #include <linux/gfp.h>
99906 #include <linux/uio.h>
99907+#include <linux/hugetlb.h>
99908
99909 #include "internal.h"
99910
99911@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
99912
99913 __page_cache_release(page);
99914 dtor = get_compound_page_dtor(page);
99915+ if (!PageHuge(page))
99916+ BUG_ON(dtor != free_compound_page);
99917 (*dtor)(page);
99918 }
99919
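The mm/swap.c hunk is a small control-flow integrity check: for non-hugetlb compound pages the stored destructor can only legitimately be free_compound_page, so corruption of that function pointer now triggers BUG_ON() instead of a hijacked indirect call (hence the new linux/hugetlb.h include for PageHuge()). The general pattern, sketched after the hunk:

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/bug.h>

/* Sketch of the pointer-validation pattern: before calling a function
 * pointer fetched from a heap object, compare it with the only value it
 * may hold on this path, so corruption oopses instead of executing. */
static void put_compound_checked(struct page *page)
{
	compound_page_dtor *dtor = get_compound_page_dtor(page);

	if (!PageHuge(page))		/* hugetlb installs its own dtor */
		BUG_ON(dtor != free_compound_page);
	(*dtor)(page);
}
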
99920diff --git a/mm/swapfile.c b/mm/swapfile.c
99921index 63f55cc..31874e6 100644
99922--- a/mm/swapfile.c
99923+++ b/mm/swapfile.c
99924@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
99925
99926 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
99927 /* Activity counter to indicate that a swapon or swapoff has occurred */
99928-static atomic_t proc_poll_event = ATOMIC_INIT(0);
99929+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
99930
99931 static inline unsigned char swap_count(unsigned char ent)
99932 {
99933@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
99934 spin_unlock(&swap_lock);
99935
99936 err = 0;
99937- atomic_inc(&proc_poll_event);
99938+ atomic_inc_unchecked(&proc_poll_event);
99939 wake_up_interruptible(&proc_poll_wait);
99940
99941 out_dput:
99942@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
99943
99944 poll_wait(file, &proc_poll_wait, wait);
99945
99946- if (seq->poll_event != atomic_read(&proc_poll_event)) {
99947- seq->poll_event = atomic_read(&proc_poll_event);
99948+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
99949+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
99950 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
99951 }
99952
99953@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
99954 return ret;
99955
99956 seq = file->private_data;
99957- seq->poll_event = atomic_read(&proc_poll_event);
99958+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
99959 return 0;
99960 }
99961
99962@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
99963 (frontswap_map) ? "FS" : "");
99964
99965 mutex_unlock(&swapon_mutex);
99966- atomic_inc(&proc_poll_event);
99967+ atomic_inc_unchecked(&proc_poll_event);
99968 wake_up_interruptible(&proc_poll_wait);
99969
99970 if (S_ISREG(inode->i_mode))
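
The atomic_t -> atomic_unchecked_t conversions here (and in many hunks below, e.g. the ATM device statistics and the batman-adv sequence numbers) are the flip side of PaX's REFCOUNT hardening: ordinary atomic_t operations are instrumented to trap on signed overflow, so counters that may legitimately grow without bound or wrap, like this swapon/swapoff poll event counter, must opt out via the _unchecked API. Conceptually the checked increment behaves like the sketch below; the real implementation is per-architecture assembly that also undoes the overflowing operation, so this only illustrates the semantics:

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/kernel.h>

/* Illustration only -- not the PaX implementation.  atomic_inc_return()
 * yields INT_MIN exactly when this increment wrapped past INT_MAX. */
static inline void atomic_inc_checked(atomic_t *v)
{
	if (unlikely(atomic_inc_return(v) == INT_MIN))
		BUG();	/* refcount overflow -> trap, don't wrap */
}
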
99971diff --git a/mm/util.c b/mm/util.c
99972index fec39d4..3e60325 100644
99973--- a/mm/util.c
99974+++ b/mm/util.c
99975@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
99976 void arch_pick_mmap_layout(struct mm_struct *mm)
99977 {
99978 mm->mmap_base = TASK_UNMAPPED_BASE;
99979+
99980+#ifdef CONFIG_PAX_RANDMMAP
99981+ if (mm->pax_flags & MF_PAX_RANDMMAP)
99982+ mm->mmap_base += mm->delta_mmap;
99983+#endif
99984+
99985 mm->get_unmapped_area = arch_get_unmapped_area;
99986 }
99987 #endif
99988@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
99989 if (!mm->arg_end)
99990 goto out_mm; /* Shh! No looking before we're done */
99991
99992+ if (gr_acl_handle_procpidmem(task))
99993+ goto out_mm;
99994+
99995 len = mm->arg_end - mm->arg_start;
99996
99997 if (len > buflen)
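
mm/util.c wires the generic case of arch_pick_mmap_layout() into PAX_RANDMMAP: when the flag is set on the mm, a per-exec random delta (mm->delta_mmap, computed elsewhere in the patch) shifts the mmap base. It also gates get_cmdline() behind gr_acl_handle_procpidmem(), closing a /proc/<pid>/cmdline disclosure path under RBAC. A rough sketch of the randomized base, with an assumed 16-bit entropy mask purely for illustration (the real mask width is per-architecture):

#include <linux/random.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Assumption for illustration: 16 bits of page-granular randomization.
 * The patch derives mm->delta_mmap at exec time with arch-specific bits. */
static unsigned long example_randomized_mmap_base(void)
{
	unsigned long delta = ((unsigned long)get_random_int() & 0xffffUL)
				<< PAGE_SHIFT;

	return TASK_UNMAPPED_BASE + delta;
}
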
99998diff --git a/mm/vmalloc.c b/mm/vmalloc.c
99999index 39c3388..7d976d4 100644
100000--- a/mm/vmalloc.c
100001+++ b/mm/vmalloc.c
100002@@ -39,20 +39,65 @@ struct vfree_deferred {
100003 struct work_struct wq;
100004 };
100005 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100006+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100007+
100008+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100009+struct stack_deferred_llist {
100010+ struct llist_head list;
100011+ void *stack;
100012+ void *lowmem_stack;
100013+};
100014+
100015+struct stack_deferred {
100016+ struct stack_deferred_llist list;
100017+ struct work_struct wq;
100018+};
100019+
100020+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100021+#endif
100022
100023 static void __vunmap(const void *, int);
100024
100025-static void free_work(struct work_struct *w)
100026+static void vfree_work(struct work_struct *w)
100027+{
100028+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100029+ struct llist_node *llnode = llist_del_all(&p->list);
100030+ while (llnode) {
100031+ void *x = llnode;
100032+ llnode = llist_next(llnode);
100033+ __vunmap(x, 1);
100034+ }
100035+}
100036+
100037+static void vunmap_work(struct work_struct *w)
100038 {
100039 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100040 struct llist_node *llnode = llist_del_all(&p->list);
100041 while (llnode) {
100042 void *p = llnode;
100043 llnode = llist_next(llnode);
100044- __vunmap(p, 1);
100045+ __vunmap(p, 0);
100046 }
100047 }
100048
100049+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100050+static void unmap_work(struct work_struct *w)
100051+{
100052+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100053+ struct llist_node *llnode = llist_del_all(&p->list.list);
100054+ while (llnode) {
100055+ struct stack_deferred_llist *x =
100056+ llist_entry((struct llist_head *)llnode,
100057+ struct stack_deferred_llist, list);
100058+ void *stack = ACCESS_ONCE(x->stack);
100059+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100060+ llnode = llist_next(llnode);
100061+ __vunmap(stack, 0);
100062+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100063+ }
100064+}
100065+#endif
100066+
100067 /*** Page table manipulation functions ***/
100068
100069 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100070@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100071
100072 pte = pte_offset_kernel(pmd, addr);
100073 do {
100074- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100075- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100076+
100077+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100078+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100079+ BUG_ON(!pte_exec(*pte));
100080+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100081+ continue;
100082+ }
100083+#endif
100084+
100085+ {
100086+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100087+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100088+ }
100089 } while (pte++, addr += PAGE_SIZE, addr != end);
100090 }
100091
100092@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100093 pte = pte_alloc_kernel(pmd, addr);
100094 if (!pte)
100095 return -ENOMEM;
100096+
100097+ pax_open_kernel();
100098 do {
100099 struct page *page = pages[*nr];
100100
100101- if (WARN_ON(!pte_none(*pte)))
100102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100103+ if (pgprot_val(prot) & _PAGE_NX)
100104+#endif
100105+
100106+ if (!pte_none(*pte)) {
100107+ pax_close_kernel();
100108+ WARN_ON(1);
100109 return -EBUSY;
100110- if (WARN_ON(!page))
100111+ }
100112+ if (!page) {
100113+ pax_close_kernel();
100114+ WARN_ON(1);
100115 return -ENOMEM;
100116+ }
100117 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100118 (*nr)++;
100119 } while (pte++, addr += PAGE_SIZE, addr != end);
100120+ pax_close_kernel();
100121 return 0;
100122 }
100123
100124@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100125 pmd_t *pmd;
100126 unsigned long next;
100127
100128- pmd = pmd_alloc(&init_mm, pud, addr);
100129+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100130 if (!pmd)
100131 return -ENOMEM;
100132 do {
100133@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100134 pud_t *pud;
100135 unsigned long next;
100136
100137- pud = pud_alloc(&init_mm, pgd, addr);
100138+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100139 if (!pud)
100140 return -ENOMEM;
100141 do {
100142@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
100143 if (addr >= MODULES_VADDR && addr < MODULES_END)
100144 return 1;
100145 #endif
100146+
100147+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100148+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100149+ return 1;
100150+#endif
100151+
100152 return is_vmalloc_addr(x);
100153 }
100154
100155@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100156
100157 if (!pgd_none(*pgd)) {
100158 pud_t *pud = pud_offset(pgd, addr);
100159+#ifdef CONFIG_X86
100160+ if (!pud_large(*pud))
100161+#endif
100162 if (!pud_none(*pud)) {
100163 pmd_t *pmd = pmd_offset(pud, addr);
100164+#ifdef CONFIG_X86
100165+ if (!pmd_large(*pmd))
100166+#endif
100167 if (!pmd_none(*pmd)) {
100168 pte_t *ptep, pte;
100169
100170@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
100171 * Allocate a region of KVA of the specified size and alignment, within the
100172 * vstart and vend.
100173 */
100174-static struct vmap_area *alloc_vmap_area(unsigned long size,
100175+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
100176 unsigned long align,
100177 unsigned long vstart, unsigned long vend,
100178 int node, gfp_t gfp_mask)
100179@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
100180 for_each_possible_cpu(i) {
100181 struct vmap_block_queue *vbq;
100182 struct vfree_deferred *p;
100183+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100184+ struct stack_deferred *p2;
100185+#endif
100186
100187 vbq = &per_cpu(vmap_block_queue, i);
100188 spin_lock_init(&vbq->lock);
100189 INIT_LIST_HEAD(&vbq->free);
100190+
100191 p = &per_cpu(vfree_deferred, i);
100192 init_llist_head(&p->list);
100193- INIT_WORK(&p->wq, free_work);
100194+ INIT_WORK(&p->wq, vfree_work);
100195+
100196+ p = &per_cpu(vunmap_deferred, i);
100197+ init_llist_head(&p->list);
100198+ INIT_WORK(&p->wq, vunmap_work);
100199+
100200+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100201+ p2 = &per_cpu(stack_deferred, i);
100202+ init_llist_head(&p2->list.list);
100203+ INIT_WORK(&p2->wq, unmap_work);
100204+#endif
100205 }
100206
100207 /* Import existing vmlist entries. */
100208@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100209 struct vm_struct *area;
100210
100211 BUG_ON(in_interrupt());
100212+
100213+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100214+ if (flags & VM_KERNEXEC) {
100215+ if (start != VMALLOC_START || end != VMALLOC_END)
100216+ return NULL;
100217+ start = (unsigned long)MODULES_EXEC_VADDR;
100218+ end = (unsigned long)MODULES_EXEC_END;
100219+ }
100220+#endif
100221+
100222 if (flags & VM_IOREMAP)
100223 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100224
100225@@ -1511,13 +1616,37 @@ EXPORT_SYMBOL(vfree);
100226 */
100227 void vunmap(const void *addr)
100228 {
100229- BUG_ON(in_interrupt());
100230- might_sleep();
100231- if (addr)
100232+ if (!addr)
100233+ return;
100234+
100235+ if (unlikely(in_interrupt())) {
100236+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
100237+ if (llist_add((struct llist_node *)addr, &p->list))
100238+ schedule_work(&p->wq);
100239+ } else {
100240+ might_sleep();
100241 __vunmap(addr, 0);
100242+ }
100243 }
100244 EXPORT_SYMBOL(vunmap);
100245
100246+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100247+void unmap_process_stacks(struct task_struct *task)
100248+{
100249+ if (unlikely(in_interrupt())) {
100250+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
100251+ struct stack_deferred_llist *list = task->stack;
100252+ list->stack = task->stack;
100253+ list->lowmem_stack = task->lowmem_stack;
100254+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100255+ schedule_work(&p->wq);
100256+ } else {
100257+ __vunmap(task->stack, 0);
100258+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100259+ }
100260+}
100261+#endif
100262+
100263 /**
100264 * vmap - map an array of pages into virtually contiguous space
100265 * @pages: array of page pointers
100266@@ -1538,6 +1667,11 @@ void *vmap(struct page **pages, unsigned int count,
100267 if (count > totalram_pages)
100268 return NULL;
100269
100270+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100271+ if (!(pgprot_val(prot) & _PAGE_NX))
100272+ flags |= VM_KERNEXEC;
100273+#endif
100274+
100275 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100276 __builtin_return_address(0));
100277 if (!area)
100278@@ -1640,6 +1774,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100279 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100280 goto fail;
100281
100282+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100283+ if (!(pgprot_val(prot) & _PAGE_NX))
100284+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100285+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100286+ else
100287+#endif
100288+
100289 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100290 start, end, node, gfp_mask, caller);
100291 if (!area)
100292@@ -1816,10 +1957,9 @@ EXPORT_SYMBOL(vzalloc_node);
100293 * For tight control over page level allocator and protection flags
100294 * use __vmalloc() instead.
100295 */
100296-
100297 void *vmalloc_exec(unsigned long size)
100298 {
100299- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100300+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100301 NUMA_NO_NODE, __builtin_return_address(0));
100302 }
100303
100304@@ -2126,6 +2266,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100305 {
100306 struct vm_struct *area;
100307
100308+ BUG_ON(vma->vm_mirror);
100309+
100310 size = PAGE_ALIGN(size);
100311
100312 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100313@@ -2608,7 +2750,11 @@ static int s_show(struct seq_file *m, void *p)
100314 v->addr, v->addr + v->size, v->size);
100315
100316 if (v->caller)
100317+#ifdef CONFIG_GRKERNSEC_HIDESYM
100318+ seq_printf(m, " %pK", v->caller);
100319+#else
100320 seq_printf(m, " %pS", v->caller);
100321+#endif
100322
100323 if (v->nr_pages)
100324 seq_printf(m, " pages=%d", v->nr_pages);
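
The largest mm/vmalloc.c change makes vunmap() (and, under GRKERNSEC_KSTACKOVERFLOW, kernel stack teardown) legal from interrupt context: instead of the old BUG_ON(in_interrupt()), the address is pushed onto a per-CPU lock-free llist and a work item finishes the unmap from process context, reusing the vfree_deferred machinery. The pattern, as the new vunmap() body applies it:

/* Mirrors the vunmap() hunk above: defer the teardown when sleeping is
 * not allowed, do it synchronously otherwise. */
static void deferred_unmap(const void *addr)
{
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);

		/* llist_add() returns true only when the list was empty,
		 * so the work item is scheduled at most once per batch. */
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else {
		might_sleep();
		__vunmap(addr, 0);
	}
}
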
100325diff --git a/mm/vmstat.c b/mm/vmstat.c
100326index cdac773..7dd324e 100644
100327--- a/mm/vmstat.c
100328+++ b/mm/vmstat.c
100329@@ -24,6 +24,7 @@
100330 #include <linux/mm_inline.h>
100331 #include <linux/page_ext.h>
100332 #include <linux/page_owner.h>
100333+#include <linux/grsecurity.h>
100334
100335 #include "internal.h"
100336
100337@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
100338 *
100339 * vm_stat contains the global counters
100340 */
100341-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100342+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100343 EXPORT_SYMBOL(vm_stat);
100344
100345 #ifdef CONFIG_SMP
100346@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
100347
100348 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100349 if (diff[i]) {
100350- atomic_long_add(diff[i], &vm_stat[i]);
100351+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100352 changes++;
100353 }
100354 return changes;
100355@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
100356 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100357 if (v) {
100358
100359- atomic_long_add(v, &zone->vm_stat[i]);
100360+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100361 global_diff[i] += v;
100362 #ifdef CONFIG_NUMA
100363 /* 3 seconds idle till flush */
100364@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
100365
100366 v = p->vm_stat_diff[i];
100367 p->vm_stat_diff[i] = 0;
100368- atomic_long_add(v, &zone->vm_stat[i]);
100369+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100370 global_diff[i] += v;
100371 }
100372 }
100373@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100374 if (pset->vm_stat_diff[i]) {
100375 int v = pset->vm_stat_diff[i];
100376 pset->vm_stat_diff[i] = 0;
100377- atomic_long_add(v, &zone->vm_stat[i]);
100378- atomic_long_add(v, &vm_stat[i]);
100379+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100380+ atomic_long_add_unchecked(v, &vm_stat[i]);
100381 }
100382 }
100383 #endif
100384@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100385 stat_items_size += sizeof(struct vm_event_state);
100386 #endif
100387
100388- v = kmalloc(stat_items_size, GFP_KERNEL);
100389+ v = kzalloc(stat_items_size, GFP_KERNEL);
100390 m->private = v;
100391 if (!v)
100392 return ERR_PTR(-ENOMEM);
100393+
100394+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100395+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100396+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100397+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100398+ && !in_group_p(grsec_proc_gid)
100399+#endif
100400+ )
100401+ return (unsigned long *)m->private + *pos;
100402+#endif
100403+#endif
100404+
100405 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100406 v[i] = global_page_state(i);
100407 v += NR_VM_ZONE_STAT_ITEMS;
100408@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
100409 cpu_notifier_register_done();
100410 #endif
100411 #ifdef CONFIG_PROC_FS
100412- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100413- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100414- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100415- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100416+ {
100417+ mode_t gr_mode = S_IRUGO;
100418+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100419+ gr_mode = S_IRUSR;
100420+#endif
100421+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100422+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100423+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100424+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100425+ }
100426 #endif
100427 return 0;
100428 }
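mm/vmstat.c shows both styles of GRKERNSEC_PROC_ADD gating: buddyinfo, pagetypeinfo and zoneinfo are simply created root-only (S_IRUSR), while vmstat stays world-readable but is allocated with kzalloc() and returned before being populated for readers outside root/grsec_proc_gid, so unprivileged readers see zeros rather than zone statistics. The mode policy reduces to:

#include <linux/stat.h>

/* Sketch of the mode selection in the setup_vmstat() hunk. */
static umode_t grsec_proc_mode(umode_t normal)
{
#ifdef CONFIG_GRKERNSEC_PROC_ADD
	return S_IRUSR;		/* root-readable only */
#else
	return normal;		/* typically S_IRUGO */
#endif
}
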
100429diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100430index 64c6bed..b79a5de 100644
100431--- a/net/8021q/vlan.c
100432+++ b/net/8021q/vlan.c
100433@@ -481,7 +481,7 @@ out:
100434 return NOTIFY_DONE;
100435 }
100436
100437-static struct notifier_block vlan_notifier_block __read_mostly = {
100438+static struct notifier_block vlan_notifier_block = {
100439 .notifier_call = vlan_device_event,
100440 };
100441
100442@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100443 err = -EPERM;
100444 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100445 break;
100446- if ((args.u.name_type >= 0) &&
100447- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100448+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100449 struct vlan_net *vn;
100450
100451 vn = net_generic(net, vlan_net_id);
100452diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
100453index 8ac8a5c..991defc 100644
100454--- a/net/8021q/vlan_netlink.c
100455+++ b/net/8021q/vlan_netlink.c
100456@@ -238,7 +238,7 @@ nla_put_failure:
100457 return -EMSGSIZE;
100458 }
100459
100460-struct rtnl_link_ops vlan_link_ops __read_mostly = {
100461+struct rtnl_link_ops vlan_link_ops = {
100462 .kind = "vlan",
100463 .maxtype = IFLA_VLAN_MAX,
100464 .policy = vlan_policy,
100465diff --git a/net/9p/client.c b/net/9p/client.c
100466index e86a9bea..e91f70e 100644
100467--- a/net/9p/client.c
100468+++ b/net/9p/client.c
100469@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100470 len - inline_len);
100471 } else {
100472 err = copy_from_user(ename + inline_len,
100473- uidata, len - inline_len);
100474+ (char __force_user *)uidata, len - inline_len);
100475 if (err) {
100476 err = -EFAULT;
100477 goto out_err;
100478@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100479 kernel_buf = 1;
100480 indata = data;
100481 } else
100482- indata = (__force char *)udata;
100483+ indata = (__force_kernel char *)udata;
100484 /*
100485 * response header len is 11
100486 * PDU Header(7) + IO Size (4)
100487@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100488 kernel_buf = 1;
100489 odata = data;
100490 } else
100491- odata = (char *)udata;
100492+ odata = (char __force_kernel *)udata;
100493 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100494 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100495 fid->fid, offset, rsize);
100496diff --git a/net/9p/mod.c b/net/9p/mod.c
100497index 6ab36ae..6f1841b 100644
100498--- a/net/9p/mod.c
100499+++ b/net/9p/mod.c
100500@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100501 void v9fs_register_trans(struct p9_trans_module *m)
100502 {
100503 spin_lock(&v9fs_trans_lock);
100504- list_add_tail(&m->list, &v9fs_trans_list);
100505+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100506 spin_unlock(&v9fs_trans_lock);
100507 }
100508 EXPORT_SYMBOL(v9fs_register_trans);
100509@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100510 void v9fs_unregister_trans(struct p9_trans_module *m)
100511 {
100512 spin_lock(&v9fs_trans_lock);
100513- list_del_init(&m->list);
100514+ pax_list_del_init((struct list_head *)&m->list);
100515 spin_unlock(&v9fs_trans_lock);
100516 }
100517 EXPORT_SYMBOL(v9fs_unregister_trans);
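v9fs_register_trans()/v9fs_unregister_trans() switch to the pax_list_* helpers because constification can place the transport module's list linkage in read-only memory; the helpers perform the same list operation at a point where writing is explicitly re-enabled. A sketch, assuming (as elsewhere in the patch) that pax_open_kernel()/pax_close_kernel() bracket the write:

#include <linux/list.h>

/* Assumption for illustration: pax_list_add_tail() is an ordinary
 * list_add_tail() bracketed by pax_open_kernel()/pax_close_kernel(),
 * which temporarily lift kernel write protection. */
static void pax_list_add_tail_sketch(struct list_head *new,
				     struct list_head *head)
{
	pax_open_kernel();
	list_add_tail(new, head);
	pax_close_kernel();
}
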
100518diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100519index 80d08f6..de63fd1 100644
100520--- a/net/9p/trans_fd.c
100521+++ b/net/9p/trans_fd.c
100522@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100523 oldfs = get_fs();
100524 set_fs(get_ds());
100525 /* The cast to a user pointer is valid due to the set_fs() */
100526- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100527+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100528 set_fs(oldfs);
100529
100530 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100531diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100532index af46bc4..f9adfcd 100644
100533--- a/net/appletalk/atalk_proc.c
100534+++ b/net/appletalk/atalk_proc.c
100535@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100536 struct proc_dir_entry *p;
100537 int rc = -ENOMEM;
100538
100539- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100540+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100541 if (!atalk_proc_dir)
100542 goto out;
100543
100544diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100545index 876fbe8..8bbea9f 100644
100546--- a/net/atm/atm_misc.c
100547+++ b/net/atm/atm_misc.c
100548@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100549 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100550 return 1;
100551 atm_return(vcc, truesize);
100552- atomic_inc(&vcc->stats->rx_drop);
100553+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100554 return 0;
100555 }
100556 EXPORT_SYMBOL(atm_charge);
100557@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100558 }
100559 }
100560 atm_return(vcc, guess);
100561- atomic_inc(&vcc->stats->rx_drop);
100562+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100563 return NULL;
100564 }
100565 EXPORT_SYMBOL(atm_alloc_charge);
100566@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100567
100568 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100569 {
100570-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100571+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100572 __SONET_ITEMS
100573 #undef __HANDLE_ITEM
100574 }
100575@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100576
100577 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100578 {
100579-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100580+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
100581 __SONET_ITEMS
100582 #undef __HANDLE_ITEM
100583 }
100584diff --git a/net/atm/lec.c b/net/atm/lec.c
100585index 4b98f89..5a2f6cb 100644
100586--- a/net/atm/lec.c
100587+++ b/net/atm/lec.c
100588@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100589 }
100590
100591 static struct lane2_ops lane2_ops = {
100592- lane2_resolve, /* resolve, spec 3.1.3 */
100593- lane2_associate_req, /* associate_req, spec 3.1.4 */
100594- NULL /* associate indicator, spec 3.1.5 */
100595+ .resolve = lane2_resolve,
100596+ .associate_req = lane2_associate_req,
100597+ .associate_indicator = NULL
100598 };
100599
100600 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100601diff --git a/net/atm/lec.h b/net/atm/lec.h
100602index 4149db1..f2ab682 100644
100603--- a/net/atm/lec.h
100604+++ b/net/atm/lec.h
100605@@ -48,7 +48,7 @@ struct lane2_ops {
100606 const u8 *tlvs, u32 sizeoftlvs);
100607 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100608 const u8 *tlvs, u32 sizeoftlvs);
100609-};
100610+} __no_const;
100611
100612 /*
100613 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
100614diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100615index d1b2d9a..d549f7f 100644
100616--- a/net/atm/mpoa_caches.c
100617+++ b/net/atm/mpoa_caches.c
100618@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100619
100620
100621 static struct in_cache_ops ingress_ops = {
100622- in_cache_add_entry, /* add_entry */
100623- in_cache_get, /* get */
100624- in_cache_get_with_mask, /* get_with_mask */
100625- in_cache_get_by_vcc, /* get_by_vcc */
100626- in_cache_put, /* put */
100627- in_cache_remove_entry, /* remove_entry */
100628- cache_hit, /* cache_hit */
100629- clear_count_and_expired, /* clear_count */
100630- check_resolving_entries, /* check_resolving */
100631- refresh_entries, /* refresh */
100632- in_destroy_cache /* destroy_cache */
100633+ .add_entry = in_cache_add_entry,
100634+ .get = in_cache_get,
100635+ .get_with_mask = in_cache_get_with_mask,
100636+ .get_by_vcc = in_cache_get_by_vcc,
100637+ .put = in_cache_put,
100638+ .remove_entry = in_cache_remove_entry,
100639+ .cache_hit = cache_hit,
100640+ .clear_count = clear_count_and_expired,
100641+ .check_resolving = check_resolving_entries,
100642+ .refresh = refresh_entries,
100643+ .destroy_cache = in_destroy_cache
100644 };
100645
100646 static struct eg_cache_ops egress_ops = {
100647- eg_cache_add_entry, /* add_entry */
100648- eg_cache_get_by_cache_id, /* get_by_cache_id */
100649- eg_cache_get_by_tag, /* get_by_tag */
100650- eg_cache_get_by_vcc, /* get_by_vcc */
100651- eg_cache_get_by_src_ip, /* get_by_src_ip */
100652- eg_cache_put, /* put */
100653- eg_cache_remove_entry, /* remove_entry */
100654- update_eg_cache_entry, /* update */
100655- clear_expired, /* clear_expired */
100656- eg_destroy_cache /* destroy_cache */
100657+ .add_entry = eg_cache_add_entry,
100658+ .get_by_cache_id = eg_cache_get_by_cache_id,
100659+ .get_by_tag = eg_cache_get_by_tag,
100660+ .get_by_vcc = eg_cache_get_by_vcc,
100661+ .get_by_src_ip = eg_cache_get_by_src_ip,
100662+ .put = eg_cache_put,
100663+ .remove_entry = eg_cache_remove_entry,
100664+ .update = update_eg_cache_entry,
100665+ .clear_expired = clear_expired,
100666+ .destroy_cache = eg_destroy_cache
100667 };
100668
100669
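The net/atm hunks above (lane2_ops, in_cache_ops, eg_cache_ops) convert positional struct initializers to C99 designated initializers. That is groundwork for grsecurity's constification of ops tables: binding each handler to its slot by name survives field reordering and makes the table safe to treat as read-only data. A standalone illustration of the two styles:

struct ops {
	int  (*open)(void);
	void (*close)(void);
};

static int  my_open(void)  { return 0; }
static void my_close(void) { }

/* positional: meaning silently changes if a field is added or reordered */
static struct ops legacy_ops = { my_open, my_close };

/* designated: order-independent, self-documenting, const-friendly */
static const struct ops safe_ops = {
	.open  = my_open,
	.close = my_close,
};
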
100670diff --git a/net/atm/proc.c b/net/atm/proc.c
100671index bbb6461..cf04016 100644
100672--- a/net/atm/proc.c
100673+++ b/net/atm/proc.c
100674@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100675 const struct k_atm_aal_stats *stats)
100676 {
100677 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100678- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100679- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100680- atomic_read(&stats->rx_drop));
100681+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
100682+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
100683+ atomic_read_unchecked(&stats->rx_drop));
100684 }
100685
100686 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100687diff --git a/net/atm/resources.c b/net/atm/resources.c
100688index 0447d5d..3cf4728 100644
100689--- a/net/atm/resources.c
100690+++ b/net/atm/resources.c
100691@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100692 static void copy_aal_stats(struct k_atm_aal_stats *from,
100693 struct atm_aal_stats *to)
100694 {
100695-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100696+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100697 __AAL_STAT_ITEMS
100698 #undef __HANDLE_ITEM
100699 }
100700@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100701 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100702 struct atm_aal_stats *to)
100703 {
100704-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100705+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100706 __AAL_STAT_ITEMS
100707 #undef __HANDLE_ITEM
100708 }
100709diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100710index 919a5ce..cc6b444 100644
100711--- a/net/ax25/sysctl_net_ax25.c
100712+++ b/net/ax25/sysctl_net_ax25.c
100713@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100714 {
100715 char path[sizeof("net/ax25/") + IFNAMSIZ];
100716 int k;
100717- struct ctl_table *table;
100718+ ctl_table_no_const *table;
100719
100720 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
100721 if (!table)
100722diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
100723index 1e80539..676c37a 100644
100724--- a/net/batman-adv/bat_iv_ogm.c
100725+++ b/net/batman-adv/bat_iv_ogm.c
100726@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
100727
100728 /* randomize initial seqno to avoid collision */
100729 get_random_bytes(&random_seqno, sizeof(random_seqno));
100730- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100731+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100732
100733 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
100734 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
100735@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
100736 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
100737
100738 /* change sequence number to network order */
100739- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
100740+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
100741 batadv_ogm_packet->seqno = htonl(seqno);
100742- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
100743+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
100744
100745 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
100746
100747@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
100748 return;
100749
100750 /* could be changed by schedule_own_packet() */
100751- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
100752+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
100753
100754 if (ogm_packet->flags & BATADV_DIRECTLINK)
100755 has_directlink_flag = true;
100756diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
100757index 00f9e14..e1c7203 100644
100758--- a/net/batman-adv/fragmentation.c
100759+++ b/net/batman-adv/fragmentation.c
100760@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
100761 frag_header.packet_type = BATADV_UNICAST_FRAG;
100762 frag_header.version = BATADV_COMPAT_VERSION;
100763 frag_header.ttl = BATADV_TTL;
100764- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
100765+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
100766 frag_header.reserved = 0;
100767 frag_header.no = 0;
100768 frag_header.total_size = htons(skb->len);
100769diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
100770index 5467955..75ad4e3 100644
100771--- a/net/batman-adv/soft-interface.c
100772+++ b/net/batman-adv/soft-interface.c
100773@@ -296,7 +296,7 @@ send:
100774 primary_if->net_dev->dev_addr);
100775
100776 /* set broadcast sequence number */
100777- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
100778+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
100779 bcast_packet->seqno = htonl(seqno);
100780
100781 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
100782@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100783 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
100784
100785 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
100786- atomic_set(&bat_priv->bcast_seqno, 1);
100787+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
100788 atomic_set(&bat_priv->tt.vn, 0);
100789 atomic_set(&bat_priv->tt.local_changes, 0);
100790 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
100791@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100792
100793 /* randomize initial seqno to avoid collision */
100794 get_random_bytes(&random_seqno, sizeof(random_seqno));
100795- atomic_set(&bat_priv->frag_seqno, random_seqno);
100796+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
100797
100798 bat_priv->primary_if = NULL;
100799 bat_priv->num_ifaces = 0;
100800@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
100801 return 0;
100802 }
100803
100804-struct rtnl_link_ops batadv_link_ops __read_mostly = {
100805+struct rtnl_link_ops batadv_link_ops = {
100806 .kind = "batadv",
100807 .priv_size = sizeof(struct batadv_priv),
100808 .setup = batadv_softif_init_early,
100809diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
100810index 8854c05..ee5d5497 100644
100811--- a/net/batman-adv/types.h
100812+++ b/net/batman-adv/types.h
100813@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
100814 struct batadv_hard_iface_bat_iv {
100815 unsigned char *ogm_buff;
100816 int ogm_buff_len;
100817- atomic_t ogm_seqno;
100818+ atomic_unchecked_t ogm_seqno;
100819 };
100820
100821 /**
100822@@ -768,7 +768,7 @@ struct batadv_priv {
100823 atomic_t bonding;
100824 atomic_t fragmentation;
100825 atomic_t packet_size_max;
100826- atomic_t frag_seqno;
100827+ atomic_unchecked_t frag_seqno;
100828 #ifdef CONFIG_BATMAN_ADV_BLA
100829 atomic_t bridge_loop_avoidance;
100830 #endif
100831@@ -787,7 +787,7 @@ struct batadv_priv {
100832 #endif
100833 uint32_t isolation_mark;
100834 uint32_t isolation_mark_mask;
100835- atomic_t bcast_seqno;
100836+ atomic_unchecked_t bcast_seqno;
100837 atomic_t bcast_queue_left;
100838 atomic_t batman_queue_left;
100839 char num_ifaces;
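
batman-adv's OGM, fragment and broadcast sequence numbers are protocol counters that wrap by design, so the patch moves them to atomic_unchecked_t rather than letting REFCOUNT-style overflow trapping fire during normal operation. Wrap-tolerant ordering of such counters is done with serial-number arithmetic, e.g.:

#include <stdint.h>
#include <stdbool.h>

/* Standalone illustration: "a is after b" remains correct across the
 * 2^32 wrap because the difference is interpreted as signed. */
static bool seqno_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}
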
100840diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
100841index 2c245fd..dccf543 100644
100842--- a/net/bluetooth/hci_sock.c
100843+++ b/net/bluetooth/hci_sock.c
100844@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
100845 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
100846 }
100847
100848- len = min_t(unsigned int, len, sizeof(uf));
100849+ len = min((size_t)len, sizeof(uf));
100850 if (copy_from_user(&uf, optval, len)) {
100851 err = -EFAULT;
100852 break;
100853diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
100854index d04dc00..d25d576 100644
100855--- a/net/bluetooth/l2cap_core.c
100856+++ b/net/bluetooth/l2cap_core.c
100857@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
100858 break;
100859
100860 case L2CAP_CONF_RFC:
100861- if (olen == sizeof(rfc))
100862- memcpy(&rfc, (void *)val, olen);
100863+ if (olen != sizeof(rfc))
100864+ break;
100865+
100866+ memcpy(&rfc, (void *)val, olen);
100867
100868 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
100869 rfc.mode != chan->mode)
100870diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
100871index f65caf4..c07110c 100644
100872--- a/net/bluetooth/l2cap_sock.c
100873+++ b/net/bluetooth/l2cap_sock.c
100874@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100875 struct sock *sk = sock->sk;
100876 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
100877 struct l2cap_options opts;
100878- int len, err = 0;
100879+ int err = 0;
100880+ size_t len = optlen;
100881 u32 opt;
100882
100883 BT_DBG("sk %p", sk);
100884@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100885 opts.max_tx = chan->max_tx;
100886 opts.txwin_size = chan->tx_win;
100887
100888- len = min_t(unsigned int, sizeof(opts), optlen);
100889+ len = min(sizeof(opts), len);
100890 if (copy_from_user((char *) &opts, optval, len)) {
100891 err = -EFAULT;
100892 break;
100893@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100894 struct bt_security sec;
100895 struct bt_power pwr;
100896 struct l2cap_conn *conn;
100897- int len, err = 0;
100898+ int err = 0;
100899+ size_t len = optlen;
100900 u32 opt;
100901
100902 BT_DBG("sk %p", sk);
100903@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100904
100905 sec.level = BT_SECURITY_LOW;
100906
100907- len = min_t(unsigned int, sizeof(sec), optlen);
100908+ len = min(sizeof(sec), len);
100909 if (copy_from_user((char *) &sec, optval, len)) {
100910 err = -EFAULT;
100911 break;
100912@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100913
100914 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
100915
100916- len = min_t(unsigned int, sizeof(pwr), optlen);
100917+ len = min(sizeof(pwr), len);
100918 if (copy_from_user((char *) &pwr, optval, len)) {
100919 err = -EFAULT;
100920 break;
100921diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
100922index 2348176..b9b6cf2 100644
100923--- a/net/bluetooth/rfcomm/sock.c
100924+++ b/net/bluetooth/rfcomm/sock.c
100925@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
100926 struct sock *sk = sock->sk;
100927 struct bt_security sec;
100928 int err = 0;
100929- size_t len;
100930+ size_t len = optlen;
100931 u32 opt;
100932
100933 BT_DBG("sk %p", sk);
100934@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
100935
100936 sec.level = BT_SECURITY_LOW;
100937
100938- len = min_t(unsigned int, sizeof(sec), optlen);
100939+ len = min(sizeof(sec), len);
100940 if (copy_from_user((char *) &sec, optval, len)) {
100941 err = -EFAULT;
100942 break;
100943diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
100944index 8e385a0..a5bdd8e 100644
100945--- a/net/bluetooth/rfcomm/tty.c
100946+++ b/net/bluetooth/rfcomm/tty.c
100947@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
100948 BT_DBG("tty %p id %d", tty, tty->index);
100949
100950 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
100951- dev->channel, dev->port.count);
100952+ dev->channel, atomic_read(&dev->port.count));
100953
100954 err = tty_port_open(&dev->port, tty, filp);
100955 if (err)
100956@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
100957 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
100958
100959 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
100960- dev->port.count);
100961+ atomic_read(&dev->port.count));
100962
100963 tty_port_close(&dev->port, tty, filp);
100964 }
100965diff --git a/net/bridge/br.c b/net/bridge/br.c
100966index 44425af..4ee730e 100644
100967--- a/net/bridge/br.c
100968+++ b/net/bridge/br.c
100969@@ -147,6 +147,8 @@ static int __init br_init(void)
100970 {
100971 int err;
100972
100973+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
100974+
100975 err = stp_proto_register(&br_stp_proto);
100976 if (err < 0) {
100977 pr_err("bridge: can't register sap for STP\n");
100978diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
100979index 9f5eb55..45ab9c5 100644
100980--- a/net/bridge/br_netlink.c
100981+++ b/net/bridge/br_netlink.c
100982@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
100983 .get_link_af_size = br_get_link_af_size,
100984 };
100985
100986-struct rtnl_link_ops br_link_ops __read_mostly = {
100987+struct rtnl_link_ops br_link_ops = {
100988 .kind = "bridge",
100989 .priv_size = sizeof(struct net_bridge),
100990 .setup = br_dev_setup,
100991diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
100992index d9a8c05..8dadc6c6 100644
100993--- a/net/bridge/netfilter/ebtables.c
100994+++ b/net/bridge/netfilter/ebtables.c
100995@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
100996 tmp.valid_hooks = t->table->valid_hooks;
100997 }
100998 mutex_unlock(&ebt_mutex);
100999- if (copy_to_user(user, &tmp, *len) != 0) {
101000+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101001 BUGPRINT("c2u Didn't work\n");
101002 ret = -EFAULT;
101003 break;
101004@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101005 goto out;
101006 tmp.valid_hooks = t->valid_hooks;
101007
101008- if (copy_to_user(user, &tmp, *len) != 0) {
101009+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101010 ret = -EFAULT;
101011 break;
101012 }
101013@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101014 tmp.entries_size = t->table->entries_size;
101015 tmp.valid_hooks = t->table->valid_hooks;
101016
101017- if (copy_to_user(user, &tmp, *len) != 0) {
101018+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101019 ret = -EFAULT;
101020 break;
101021 }
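
The ebtables hunks bound *len, which is caller-controlled, before each copy_to_user() from the fixed-size stack struct tmp; without the check the copy reads past tmp and leaks adjacent kernel stack. A userspace sketch of the same copy-out guard (struct layout and names invented; memcpy stands in for copy_to_user):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	struct repl_info { char name[32]; unsigned int entries_size; };

	/* Sketch only: the guard added to do_ebt_get_ctl().  Copying *len
	 * bytes out of a fixed-size stack struct reads past it whenever
	 * *len > sizeof(tmp), so the bound must be checked first. */
	static int get_info(void *user, const size_t *len)
	{
		struct repl_info tmp = { "filter", 128 };

		if (*len > sizeof(tmp))
			return -EFAULT;
		memcpy(user, &tmp, *len);	/* copy_to_user() stand-in */
		return 0;
	}

	int main(void)
	{
		char buf[256];
		size_t len = sizeof(buf);	/* hostile: bigger than tmp */

		printf("oversized: %d\n", get_info(buf, &len));
		len = sizeof(struct repl_info);
		printf("exact:     %d\n", get_info(buf, &len));
		return 0;
	}
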
101022diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101023index f5afda1..dcf770a 100644
101024--- a/net/caif/cfctrl.c
101025+++ b/net/caif/cfctrl.c
101026@@ -10,6 +10,7 @@
101027 #include <linux/spinlock.h>
101028 #include <linux/slab.h>
101029 #include <linux/pkt_sched.h>
101030+#include <linux/sched.h>
101031 #include <net/caif/caif_layer.h>
101032 #include <net/caif/cfpkt.h>
101033 #include <net/caif/cfctrl.h>
101034@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101035 memset(&dev_info, 0, sizeof(dev_info));
101036 dev_info.id = 0xff;
101037 cfsrvl_init(&this->serv, 0, &dev_info, false);
101038- atomic_set(&this->req_seq_no, 1);
101039- atomic_set(&this->rsp_seq_no, 1);
101040+ atomic_set_unchecked(&this->req_seq_no, 1);
101041+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101042 this->serv.layer.receive = cfctrl_recv;
101043 sprintf(this->serv.layer.name, "ctrl");
101044 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101045@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101046 struct cfctrl_request_info *req)
101047 {
101048 spin_lock_bh(&ctrl->info_list_lock);
101049- atomic_inc(&ctrl->req_seq_no);
101050- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101051+ atomic_inc_unchecked(&ctrl->req_seq_no);
101052+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101053 list_add_tail(&req->list, &ctrl->list);
101054 spin_unlock_bh(&ctrl->info_list_lock);
101055 }
101056@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101057 if (p != first)
101058 pr_warn("Requests are not received in order\n");
101059
101060- atomic_set(&ctrl->rsp_seq_no,
101061+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101062 p->sequence_no);
101063 list_del(&p->list);
101064 goto out;
101065diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101066index 67a4a36..8d28068 100644
101067--- a/net/caif/chnl_net.c
101068+++ b/net/caif/chnl_net.c
101069@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101070 };
101071
101072
101073-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101074+static struct rtnl_link_ops ipcaif_link_ops = {
101075 .kind = "caif",
101076 .priv_size = sizeof(struct chnl_net),
101077 .setup = ipcaif_net_setup,
101078diff --git a/net/can/af_can.c b/net/can/af_can.c
101079index 32d710e..93bcf05 100644
101080--- a/net/can/af_can.c
101081+++ b/net/can/af_can.c
101082@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101083 };
101084
101085 /* notifier block for netdevice event */
101086-static struct notifier_block can_netdev_notifier __read_mostly = {
101087+static struct notifier_block can_netdev_notifier = {
101088 .notifier_call = can_notifier,
101089 };
101090
101091diff --git a/net/can/bcm.c b/net/can/bcm.c
101092index ee9ffd9..dfdf3d4 100644
101093--- a/net/can/bcm.c
101094+++ b/net/can/bcm.c
101095@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101096 }
101097
101098 /* create /proc/net/can-bcm directory */
101099- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101100+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101101 return 0;
101102 }
101103
101104diff --git a/net/can/gw.c b/net/can/gw.c
101105index 295f62e..0c3b09e 100644
101106--- a/net/can/gw.c
101107+++ b/net/can/gw.c
101108@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101109 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101110
101111 static HLIST_HEAD(cgw_list);
101112-static struct notifier_block notifier;
101113
101114 static struct kmem_cache *cgw_cache __read_mostly;
101115
101116@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101117 return err;
101118 }
101119
101120+static struct notifier_block notifier = {
101121+ .notifier_call = cgw_notifier
101122+};
101123+
101124 static __init int cgw_module_init(void)
101125 {
101126 /* sanitize given module parameter */
101127@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101128 return -ENOMEM;
101129
101130 /* set notifier */
101131- notifier.notifier_call = cgw_notifier;
101132 register_netdevice_notifier(&notifier);
101133
101134 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
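
The can/gw.c change moves the notifier_call assignment from cgw_module_init() into a designated initializer at the definition. With the constification hardening applied elsewhere in this patch, data that is fully initialized at build time can be placed in a write-protected section, whereas a runtime store forces the object to stay writable for its whole lifetime. A sketch of the resulting style (the struct below is a stand-in, not the kernel's notifier_block):

	#include <stdio.h>

	struct notifier_block_stub { int (*notifier_call)(void *); };

	static int cgw_notifier_stub(void *data) { (void)data; return 0; }

	/* Sketch only: a designated initializer fixes the callback at build
	 * time, so the object can live in read-only data from boot instead
	 * of leaving a writable function pointer around. */
	static struct notifier_block_stub notifier = {
		.notifier_call = cgw_notifier_stub,
	};

	int main(void)
	{
		printf("callback wired at build time: %d\n",
		       notifier.notifier_call != NULL);
		return 0;
	}
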
101135diff --git a/net/can/proc.c b/net/can/proc.c
101136index 1a19b98..df2b4ec 100644
101137--- a/net/can/proc.c
101138+++ b/net/can/proc.c
101139@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101140 void can_init_proc(void)
101141 {
101142 /* create /proc/net/can directory */
101143- can_dir = proc_mkdir("can", init_net.proc_net);
101144+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101145
101146 if (!can_dir) {
101147 printk(KERN_INFO "can: failed to create /proc/net/can . "
101148diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101149index 33a2f20..371bd09 100644
101150--- a/net/ceph/messenger.c
101151+++ b/net/ceph/messenger.c
101152@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101153 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101154
101155 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101156-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101157+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101158
101159 static struct page *zero_page; /* used in certain error cases */
101160
101161@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101162 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101163 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101164
101165- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101166+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101167 s = addr_str[i];
101168
101169 switch (ss->ss_family) {
101170diff --git a/net/compat.c b/net/compat.c
101171index f7bd286..76ea56a 100644
101172--- a/net/compat.c
101173+++ b/net/compat.c
101174@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
101175
101176 #define CMSG_COMPAT_FIRSTHDR(msg) \
101177 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101178- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101179+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101180 (struct compat_cmsghdr __user *)NULL)
101181
101182 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101183 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101184 (ucmlen) <= (unsigned long) \
101185 ((mhdr)->msg_controllen - \
101186- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101187+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101188
101189 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101190 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101191 {
101192 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101193- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101194+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101195 msg->msg_controllen)
101196 return NULL;
101197 return (struct compat_cmsghdr __user *)ptr;
101198@@ -203,7 +203,7 @@ Efault:
101199
101200 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101201 {
101202- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101203+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101204 struct compat_cmsghdr cmhdr;
101205 struct compat_timeval ctv;
101206 struct compat_timespec cts[3];
101207@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101208
101209 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101210 {
101211- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101212+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101213 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101214 int fdnum = scm->fp->count;
101215 struct file **fp = scm->fp->fp;
101216@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101217 return -EFAULT;
101218 old_fs = get_fs();
101219 set_fs(KERNEL_DS);
101220- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101221+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101222 set_fs(old_fs);
101223
101224 return err;
101225@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101226 len = sizeof(ktime);
101227 old_fs = get_fs();
101228 set_fs(KERNEL_DS);
101229- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101230+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101231 set_fs(old_fs);
101232
101233 if (!err) {
101234@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101235 case MCAST_JOIN_GROUP:
101236 case MCAST_LEAVE_GROUP:
101237 {
101238- struct compat_group_req __user *gr32 = (void *)optval;
101239+ struct compat_group_req __user *gr32 = (void __user *)optval;
101240 struct group_req __user *kgr =
101241 compat_alloc_user_space(sizeof(struct group_req));
101242 u32 interface;
101243@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101244 case MCAST_BLOCK_SOURCE:
101245 case MCAST_UNBLOCK_SOURCE:
101246 {
101247- struct compat_group_source_req __user *gsr32 = (void *)optval;
101248+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101249 struct group_source_req __user *kgsr = compat_alloc_user_space(
101250 sizeof(struct group_source_req));
101251 u32 interface;
101252@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101253 }
101254 case MCAST_MSFILTER:
101255 {
101256- struct compat_group_filter __user *gf32 = (void *)optval;
101257+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101258 struct group_filter __user *kgf;
101259 u32 interface, fmode, numsrc;
101260
101261@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101262 char __user *optval, int __user *optlen,
101263 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101264 {
101265- struct compat_group_filter __user *gf32 = (void *)optval;
101266+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101267 struct group_filter __user *kgf;
101268 int __user *koptlen;
101269 u32 interface, fmode, numsrc;
101270@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101271
101272 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101273 return -EINVAL;
101274- if (copy_from_user(a, args, nas[call]))
101275+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101276 return -EFAULT;
101277 a0 = a[0];
101278 a1 = a[1];
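
The net/compat.c hunks mostly adjust sparse address-space annotations: __force_user and __force_kernel mark the places where a pointer deliberately crosses between kernel and user address spaces, typically around the set_fs(KERNEL_DS) pattern visible in do_set_sock_timeout(). Outside a kernel build the annotations compile away; under sparse they turn an unannotated crossing into a type error. A sketch of the mechanism (the macro definitions mirror include/linux/compiler.h in spirit only):

	#include <stdio.h>
	#include <string.h>

	/* Sketch only: __user/__force expand to nothing in a normal build;
	 * under sparse (__CHECKER__) a kernel pointer handed to a __user
	 * parameter is a type error unless the crossing is spelled out with
	 * __force, which is all the __force_user hunks do. */
	#ifdef __CHECKER__
	# define __user		__attribute__((noderef, address_space(1)))
	# define __force	__attribute__((force))
	#else
	# define __user
	# define __force
	#endif

	static long fake_setsockopt(const char __user *optval, size_t len)
	{
		char buf[64];

		if (len > sizeof(buf))
			return -1;
		memcpy(buf, (const char __force *)optval, len);
		return 0;
	}

	int main(void)
	{
		char ktime[8] = "1234567";	/* kernel buffer, as in set_fs() code */

		printf("%ld\n", fake_setsockopt(
		       (const char __force __user *)ktime, sizeof(ktime)));
		return 0;
	}
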
101279diff --git a/net/core/datagram.c b/net/core/datagram.c
101280index df493d6..1145766 100644
101281--- a/net/core/datagram.c
101282+++ b/net/core/datagram.c
101283@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101284 }
101285
101286 kfree_skb(skb);
101287- atomic_inc(&sk->sk_drops);
101288+ atomic_inc_unchecked(&sk->sk_drops);
101289 sk_mem_reclaim_partial(sk);
101290
101291 return err;
101292diff --git a/net/core/dev.c b/net/core/dev.c
101293index 4ff46f8..e877e78 100644
101294--- a/net/core/dev.c
101295+++ b/net/core/dev.c
101296@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101297 {
101298 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101299 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101300- atomic_long_inc(&dev->rx_dropped);
101301+ atomic_long_inc_unchecked(&dev->rx_dropped);
101302 kfree_skb(skb);
101303 return NET_RX_DROP;
101304 }
101305 }
101306
101307 if (unlikely(!is_skb_forwardable(dev, skb))) {
101308- atomic_long_inc(&dev->rx_dropped);
101309+ atomic_long_inc_unchecked(&dev->rx_dropped);
101310 kfree_skb(skb);
101311 return NET_RX_DROP;
101312 }
101313@@ -2958,7 +2958,7 @@ recursion_alert:
101314 drop:
101315 rcu_read_unlock_bh();
101316
101317- atomic_long_inc(&dev->tx_dropped);
101318+ atomic_long_inc_unchecked(&dev->tx_dropped);
101319 kfree_skb_list(skb);
101320 return rc;
101321 out:
101322@@ -3301,7 +3301,7 @@ enqueue:
101323
101324 local_irq_restore(flags);
101325
101326- atomic_long_inc(&skb->dev->rx_dropped);
101327+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101328 kfree_skb(skb);
101329 return NET_RX_DROP;
101330 }
101331@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
101332 }
101333 EXPORT_SYMBOL(netif_rx_ni);
101334
101335-static void net_tx_action(struct softirq_action *h)
101336+static __latent_entropy void net_tx_action(void)
101337 {
101338 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101339
101340@@ -3711,7 +3711,7 @@ ncls:
101341 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101342 } else {
101343 drop:
101344- atomic_long_inc(&skb->dev->rx_dropped);
101345+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101346 kfree_skb(skb);
101347 /* Jamal, now you will not able to escape explaining
101348 * me how you were going to use this. :-)
101349@@ -4599,7 +4599,7 @@ out_unlock:
101350 return work;
101351 }
101352
101353-static void net_rx_action(struct softirq_action *h)
101354+static __latent_entropy void net_rx_action(void)
101355 {
101356 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101357 unsigned long time_limit = jiffies + 2;
101358@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101359 } else {
101360 netdev_stats_to_stats64(storage, &dev->stats);
101361 }
101362- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101363- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101364+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101365+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101366 return storage;
101367 }
101368 EXPORT_SYMBOL(dev_get_stats);
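
Besides moving the rx_dropped/tx_dropped counters to the unchecked atomics (the same wrap-friendly rationale as the batman-adv sequence numbers), net_tx_action() and net_rx_action() gain __latent_entropy and drop their unused softirq_action argument, presumably matching a softirq prototype change made elsewhere in the patch. The latent_entropy gcc plugin makes frequently executed functions fold compile-time-randomized values into a global pool that is mixed into the kernel's entropy input; a rough stand-in:

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch only: the idea behind __latent_entropy on the softirq
	 * handlers.  The real plugin accumulates randomized constants into a
	 * global pool; the stand-in below just rotate-xors a per-function
	 * constant on every call. */
	static uint64_t latent_entropy_pool;

	static void mix_latent_entropy(uint64_t v)
	{
		latent_entropy_pool =
			(latent_entropy_pool << 7 | latent_entropy_pool >> 57) ^ v;
	}

	static void net_rx_action_stub(void)
	{
		mix_latent_entropy(0x9e3779b97f4a7c15ULL);
		/* ...the normal softirq work would follow... */
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			net_rx_action_stub();
		printf("pool: %016llx\n", (unsigned long long)latent_entropy_pool);
		return 0;
	}
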
101369diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101370index b94b1d2..da3ed7c 100644
101371--- a/net/core/dev_ioctl.c
101372+++ b/net/core/dev_ioctl.c
101373@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
101374 no_module = !dev;
101375 if (no_module && capable(CAP_NET_ADMIN))
101376 no_module = request_module("netdev-%s", name);
101377- if (no_module && capable(CAP_SYS_MODULE))
101378+ if (no_module && capable(CAP_SYS_MODULE)) {
101379+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101380+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101381+#else
101382 request_module("%s", name);
101383+#endif
101384+ }
101385 }
101386 EXPORT_SYMBOL(dev_load);
101387
101388diff --git a/net/core/filter.c b/net/core/filter.c
101389index ec9baea..dd6195d 100644
101390--- a/net/core/filter.c
101391+++ b/net/core/filter.c
101392@@ -533,7 +533,11 @@ do_pass:
101393
101394 /* Unknown instruction. */
101395 default:
101396- goto err;
101397+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u jf:%u k:%u\n",
101398+ fp->code, fp->jt, fp->jf, fp->k);
101399+ kfree(addrs);
101400+ BUG();
101401+ return -EINVAL;
101402 }
101403
101404 insn++;
101405@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101406 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101407 int pc, ret = 0;
101408
101409- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101410+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101411
101412 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101413 if (!masks)
101414@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101415 if (!fp)
101416 return -ENOMEM;
101417
101418- memcpy(fp->insns, fprog->filter, fsize);
101419+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101420
101421 fp->len = fprog->len;
101422 /* Since unattached filters are not copied back to user
101423diff --git a/net/core/flow.c b/net/core/flow.c
101424index 1033725..340f65d 100644
101425--- a/net/core/flow.c
101426+++ b/net/core/flow.c
101427@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101428 static int flow_entry_valid(struct flow_cache_entry *fle,
101429 struct netns_xfrm *xfrm)
101430 {
101431- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101432+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101433 return 0;
101434 if (fle->object && !fle->object->ops->check(fle->object))
101435 return 0;
101436@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101437 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101438 fcp->hash_count++;
101439 }
101440- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101441+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101442 flo = fle->object;
101443 if (!flo)
101444 goto ret_object;
101445@@ -263,7 +263,7 @@ nocache:
101446 }
101447 flo = resolver(net, key, family, dir, flo, ctx);
101448 if (fle) {
101449- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101450+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101451 if (!IS_ERR(flo))
101452 fle->object = flo;
101453 else
101454diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101455index 8d614c9..55752ea 100644
101456--- a/net/core/neighbour.c
101457+++ b/net/core/neighbour.c
101458@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101459 void __user *buffer, size_t *lenp, loff_t *ppos)
101460 {
101461 int size, ret;
101462- struct ctl_table tmp = *ctl;
101463+ ctl_table_no_const tmp = *ctl;
101464
101465 tmp.extra1 = &zero;
101466 tmp.extra2 = &unres_qlen_max;
101467@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101468 void __user *buffer,
101469 size_t *lenp, loff_t *ppos)
101470 {
101471- struct ctl_table tmp = *ctl;
101472+ ctl_table_no_const tmp = *ctl;
101473 int ret;
101474
101475 tmp.extra1 = &zero;
101476diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101477index 2bf8329..2eb1423 100644
101478--- a/net/core/net-procfs.c
101479+++ b/net/core/net-procfs.c
101480@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101481 struct rtnl_link_stats64 temp;
101482 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101483
101484- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101485+ if (gr_proc_is_restricted())
101486+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101487+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101488+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101489+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101490+ else
101491+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101492 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101493 dev->name, stats->rx_bytes, stats->rx_packets,
101494 stats->rx_errors,
101495@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101496 return 0;
101497 }
101498
101499-static const struct seq_operations dev_seq_ops = {
101500+const struct seq_operations dev_seq_ops = {
101501 .start = dev_seq_start,
101502 .next = dev_seq_next,
101503 .stop = dev_seq_stop,
101504@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101505
101506 static int softnet_seq_open(struct inode *inode, struct file *file)
101507 {
101508- return seq_open(file, &softnet_seq_ops);
101509+ return seq_open_restrict(file, &softnet_seq_ops);
101510 }
101511
101512 static const struct file_operations softnet_seq_fops = {
101513@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101514 else
101515 seq_printf(seq, "%04x", ntohs(pt->type));
101516
101517+#ifdef CONFIG_GRKERNSEC_HIDESYM
101518+ seq_printf(seq, " %-8s %pf\n",
101519+ pt->dev ? pt->dev->name : "", NULL);
101520+#else
101521 seq_printf(seq, " %-8s %pf\n",
101522 pt->dev ? pt->dev->name : "", pt->func);
101523+#endif
101524 }
101525
101526 return 0;
101527diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101528index 9993412..2a4672b 100644
101529--- a/net/core/net-sysfs.c
101530+++ b/net/core/net-sysfs.c
101531@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101532 {
101533 struct net_device *netdev = to_net_dev(dev);
101534 return sprintf(buf, fmt_dec,
101535- atomic_read(&netdev->carrier_changes));
101536+ atomic_read_unchecked(&netdev->carrier_changes));
101537 }
101538 static DEVICE_ATTR_RO(carrier_changes);
101539
101540diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101541index ce780c7..6d296b3 100644
101542--- a/net/core/net_namespace.c
101543+++ b/net/core/net_namespace.c
101544@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
101545 int error;
101546 LIST_HEAD(net_exit_list);
101547
101548- list_add_tail(&ops->list, list);
101549+ pax_list_add_tail((struct list_head *)&ops->list, list);
101550 if (ops->init || (ops->id && ops->size)) {
101551 for_each_net(net) {
101552 error = ops_init(ops, net);
101553@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
101554
101555 out_undo:
101556 /* If I have an error cleanup all namespaces I initialized */
101557- list_del(&ops->list);
101558+ pax_list_del((struct list_head *)&ops->list);
101559 ops_exit_list(ops, &net_exit_list);
101560 ops_free_list(ops, &net_exit_list);
101561 return error;
101562@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101563 struct net *net;
101564 LIST_HEAD(net_exit_list);
101565
101566- list_del(&ops->list);
101567+ pax_list_del((struct list_head *)&ops->list);
101568 for_each_net(net)
101569 list_add_tail(&net->exit_list, &net_exit_list);
101570 ops_exit_list(ops, &net_exit_list);
101571@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
101572 mutex_lock(&net_mutex);
101573 error = register_pernet_operations(&pernet_list, ops);
101574 if (!error && (first_device == &pernet_list))
101575- first_device = &ops->list;
101576+ first_device = (struct list_head *)&ops->list;
101577 mutex_unlock(&net_mutex);
101578 return error;
101579 }
101580diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101581index e0ad5d1..04fa7f7 100644
101582--- a/net/core/netpoll.c
101583+++ b/net/core/netpoll.c
101584@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101585 struct udphdr *udph;
101586 struct iphdr *iph;
101587 struct ethhdr *eth;
101588- static atomic_t ip_ident;
101589+ static atomic_unchecked_t ip_ident;
101590 struct ipv6hdr *ip6h;
101591
101592 udp_len = len + sizeof(*udph);
101593@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101594 put_unaligned(0x45, (unsigned char *)iph);
101595 iph->tos = 0;
101596 put_unaligned(htons(ip_len), &(iph->tot_len));
101597- iph->id = htons(atomic_inc_return(&ip_ident));
101598+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101599 iph->frag_off = 0;
101600 iph->ttl = 64;
101601 iph->protocol = IPPROTO_UDP;
101602diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101603index 352d183..1bddfaf 100644
101604--- a/net/core/pktgen.c
101605+++ b/net/core/pktgen.c
101606@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
101607 pn->net = net;
101608 INIT_LIST_HEAD(&pn->pktgen_threads);
101609 pn->pktgen_exiting = false;
101610- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101611+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101612 if (!pn->proc_dir) {
101613 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101614 return -ENODEV;
101615diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101616index 76ec6c5..9cfb81c 100644
101617--- a/net/core/rtnetlink.c
101618+++ b/net/core/rtnetlink.c
101619@@ -60,7 +60,7 @@ struct rtnl_link {
101620 rtnl_doit_func doit;
101621 rtnl_dumpit_func dumpit;
101622 rtnl_calcit_func calcit;
101623-};
101624+} __no_const;
101625
101626 static DEFINE_MUTEX(rtnl_mutex);
101627
101628@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101629 * to use the ops for creating device. So do not
101630 * fill up dellink as well. That disables rtnl_dellink.
101631 */
101632- if (ops->setup && !ops->dellink)
101633- ops->dellink = unregister_netdevice_queue;
101634+ if (ops->setup && !ops->dellink) {
101635+ pax_open_kernel();
101636+ *(void **)&ops->dellink = unregister_netdevice_queue;
101637+ pax_close_kernel();
101638+ }
101639
101640- list_add_tail(&ops->list, &link_ops);
101641+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101642 return 0;
101643 }
101644 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101645@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101646 for_each_net(net) {
101647 __rtnl_kill_links(net, ops);
101648 }
101649- list_del(&ops->list);
101650+ pax_list_del((struct list_head *)&ops->list);
101651 }
101652 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101653
101654@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101655 (dev->ifalias &&
101656 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101657 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101658- atomic_read(&dev->carrier_changes)))
101659+ atomic_read_unchecked(&dev->carrier_changes)))
101660 goto nla_put_failure;
101661
101662 if (1) {
101663@@ -2094,6 +2097,10 @@ replay:
101664 if (IS_ERR(dest_net))
101665 return PTR_ERR(dest_net);
101666
101667+ err = -EPERM;
101668+ if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
101669+ goto out;
101670+
101671 dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
101672 if (IS_ERR(dev)) {
101673 err = PTR_ERR(dev);
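
Two things happen in the rtnetlink hunks: newlink requests must now hold CAP_NET_ADMIN over the destination namespace before a device is created, and the late dellink default is written through a pax_open_kernel()/pax_close_kernel() bracket, because constified ops structures are otherwise read-only. The bracket pattern, with the write-protect toggling reduced to counting stubs (the real helpers briefly lift hardware write protection, e.g. CR0.WP on x86):

	#include <stdio.h>

	/* Sketch only: one sanctioned store into an otherwise read-only ops
	 * structure, framed by the open/close window. */
	static int wp_lifted;
	static void pax_open_kernel_stub(void)  { wp_lifted++; }
	static void pax_close_kernel_stub(void) { wp_lifted--; }

	struct link_ops_stub { void (*dellink)(void); };	/* stand-in */
	static void default_dellink(void) { }

	int main(void)
	{
		static struct link_ops_stub ops;	/* in the kernel: read-only */

		pax_open_kernel_stub();
		*(void **)&ops.dellink = (void *)default_dellink;	/* as in the hunk */
		pax_close_kernel_stub();

		printf("dellink set: %d, window balanced: %d\n",
		       ops.dellink != NULL, wp_lifted == 0);
		return 0;
	}
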
101674diff --git a/net/core/scm.c b/net/core/scm.c
101675index 3b6899b..cf36238 100644
101676--- a/net/core/scm.c
101677+++ b/net/core/scm.c
101678@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
101679 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101680 {
101681 struct cmsghdr __user *cm
101682- = (__force struct cmsghdr __user *)msg->msg_control;
101683+ = (struct cmsghdr __force_user *)msg->msg_control;
101684 struct cmsghdr cmhdr;
101685 int cmlen = CMSG_LEN(len);
101686 int err;
101687@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101688 err = -EFAULT;
101689 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101690 goto out;
101691- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101692+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101693 goto out;
101694 cmlen = CMSG_SPACE(len);
101695 if (msg->msg_controllen < cmlen)
101696@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
101697 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101698 {
101699 struct cmsghdr __user *cm
101700- = (__force struct cmsghdr __user*)msg->msg_control;
101701+ = (struct cmsghdr __force_user *)msg->msg_control;
101702
101703 int fdmax = 0;
101704 int fdnum = scm->fp->count;
101705@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101706 if (fdnum < fdmax)
101707 fdmax = fdnum;
101708
101709- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101710+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101711 i++, cmfptr++)
101712 {
101713 struct socket *sock;
101714diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101715index 62c67be..01893a0a 100644
101716--- a/net/core/skbuff.c
101717+++ b/net/core/skbuff.c
101718@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
101719 __wsum skb_checksum(const struct sk_buff *skb, int offset,
101720 int len, __wsum csum)
101721 {
101722- const struct skb_checksum_ops ops = {
101723+ static const struct skb_checksum_ops ops = {
101724 .update = csum_partial_ext,
101725 .combine = csum_block_add_ext,
101726 };
101727@@ -3363,12 +3363,14 @@ void __init skb_init(void)
101728 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
101729 sizeof(struct sk_buff),
101730 0,
101731- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101732+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101733+ SLAB_NO_SANITIZE,
101734 NULL);
101735 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
101736 sizeof(struct sk_buff_fclones),
101737 0,
101738- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101739+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101740+ SLAB_NO_SANITIZE,
101741 NULL);
101742 }
101743
101744diff --git a/net/core/sock.c b/net/core/sock.c
101745index 1c7a33d..a3817e2 100644
101746--- a/net/core/sock.c
101747+++ b/net/core/sock.c
101748@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101749 struct sk_buff_head *list = &sk->sk_receive_queue;
101750
101751 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
101752- atomic_inc(&sk->sk_drops);
101753+ atomic_inc_unchecked(&sk->sk_drops);
101754 trace_sock_rcvqueue_full(sk, skb);
101755 return -ENOMEM;
101756 }
101757@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101758 return err;
101759
101760 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
101761- atomic_inc(&sk->sk_drops);
101762+ atomic_inc_unchecked(&sk->sk_drops);
101763 return -ENOBUFS;
101764 }
101765
101766@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101767 skb_dst_force(skb);
101768
101769 spin_lock_irqsave(&list->lock, flags);
101770- skb->dropcount = atomic_read(&sk->sk_drops);
101771+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
101772 __skb_queue_tail(list, skb);
101773 spin_unlock_irqrestore(&list->lock, flags);
101774
101775@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101776 skb->dev = NULL;
101777
101778 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
101779- atomic_inc(&sk->sk_drops);
101780+ atomic_inc_unchecked(&sk->sk_drops);
101781 goto discard_and_relse;
101782 }
101783 if (nested)
101784@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101785 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
101786 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
101787 bh_unlock_sock(sk);
101788- atomic_inc(&sk->sk_drops);
101789+ atomic_inc_unchecked(&sk->sk_drops);
101790 goto discard_and_relse;
101791 }
101792
101793@@ -888,6 +888,7 @@ set_rcvbuf:
101794 }
101795 break;
101796
101797+#ifndef GRKERNSEC_BPF_HARDEN
101798 case SO_ATTACH_BPF:
101799 ret = -EINVAL;
101800 if (optlen == sizeof(u32)) {
101801@@ -900,7 +901,7 @@ set_rcvbuf:
101802 ret = sk_attach_bpf(ufd, sk);
101803 }
101804 break;
101805-
101806+#endif
101807 case SO_DETACH_FILTER:
101808 ret = sk_detach_filter(sk);
101809 break;
101810@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101811 struct timeval tm;
101812 } v;
101813
101814- int lv = sizeof(int);
101815- int len;
101816+ unsigned int lv = sizeof(int);
101817+ unsigned int len;
101818
101819 if (get_user(len, optlen))
101820 return -EFAULT;
101821- if (len < 0)
101822+ if (len > INT_MAX)
101823 return -EINVAL;
101824
101825 memset(&v, 0, sizeof(v));
101826@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101827
101828 case SO_PEERNAME:
101829 {
101830- char address[128];
101831+ char address[_K_SS_MAXSIZE];
101832
101833 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
101834 return -ENOTCONN;
101835- if (lv < len)
101836+ if (lv < len || sizeof address < len)
101837 return -EINVAL;
101838 if (copy_to_user(optval, address, len))
101839 return -EFAULT;
101840@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101841
101842 if (len > lv)
101843 len = lv;
101844- if (copy_to_user(optval, &v, len))
101845+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
101846 return -EFAULT;
101847 lenout:
101848 if (put_user(len, optlen))
101849@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
101850 */
101851 smp_wmb();
101852 atomic_set(&sk->sk_refcnt, 1);
101853- atomic_set(&sk->sk_drops, 0);
101854+ atomic_set_unchecked(&sk->sk_drops, 0);
101855 }
101856 EXPORT_SYMBOL(sock_init_data);
101857
101858@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
101859 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
101860 int level, int type)
101861 {
101862+ struct sock_extended_err ee;
101863 struct sock_exterr_skb *serr;
101864 struct sk_buff *skb;
101865 int copied, err;
101866@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
101867 sock_recv_timestamp(msg, sk, skb);
101868
101869 serr = SKB_EXT_ERR(skb);
101870- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
101871+ ee = serr->ee;
101872+ put_cmsg(msg, level, type, sizeof ee, &ee);
101873
101874 msg->msg_flags |= MSG_ERRQUEUE;
101875 err = copied;
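
In sock_recv_errqueue(), serr->ee lives inside the sk_buff control block; the hunk bounces it through a stack temporary before put_cmsg(). Plausibly this keeps the copy to userspace sourced from the current stack frame rather than from inside a slab object, which the stricter usercopy checking elsewhere in this patch would otherwise have to whitelist. A sketch of the bounce (structs and sizes invented):

	#include <stdio.h>
	#include <string.h>

	struct ext_err { int err; int origin; };
	struct skb_cb  { char pad[12]; struct ext_err ee; char rest[16]; };

	static void put_cmsg_stub(const void *data, size_t len)
	{
		char user[32];
		memcpy(user, data, len);	/* the copy userspace would see */
	}

	int main(void)
	{
		struct skb_cb cb = { .ee = { 111, 2 } };

		/* the bounce the hunk adds: lift the struct out of the
		 * containing object before the user-visible copy */
		struct ext_err ee = cb.ee;
		put_cmsg_stub(&ee, sizeof(ee));
		printf("origin %d copied via stack temporary\n", ee.origin);
		return 0;
	}
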
101876diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
101877index ad704c7..ca48aff 100644
101878--- a/net/core/sock_diag.c
101879+++ b/net/core/sock_diag.c
101880@@ -9,26 +9,33 @@
101881 #include <linux/inet_diag.h>
101882 #include <linux/sock_diag.h>
101883
101884-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
101885+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
101886 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
101887 static DEFINE_MUTEX(sock_diag_table_mutex);
101888
101889 int sock_diag_check_cookie(void *sk, __u32 *cookie)
101890 {
101891+#ifndef CONFIG_GRKERNSEC_HIDESYM
101892 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
101893 cookie[1] != INET_DIAG_NOCOOKIE) &&
101894 ((u32)(unsigned long)sk != cookie[0] ||
101895 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
101896 return -ESTALE;
101897 else
101898+#endif
101899 return 0;
101900 }
101901 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
101902
101903 void sock_diag_save_cookie(void *sk, __u32 *cookie)
101904 {
101905+#ifdef CONFIG_GRKERNSEC_HIDESYM
101906+ cookie[0] = 0;
101907+ cookie[1] = 0;
101908+#else
101909 cookie[0] = (u32)(unsigned long)sk;
101910 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
101911+#endif
101912 }
101913 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
101914
101915@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
101916 mutex_lock(&sock_diag_table_mutex);
101917 if (sock_diag_handlers[hndl->family])
101918 err = -EBUSY;
101919- else
101920+ else {
101921+ pax_open_kernel();
101922 sock_diag_handlers[hndl->family] = hndl;
101923+ pax_close_kernel();
101924+ }
101925 mutex_unlock(&sock_diag_table_mutex);
101926
101927 return err;
101928@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
101929
101930 mutex_lock(&sock_diag_table_mutex);
101931 BUG_ON(sock_diag_handlers[family] != hnld);
101932+ pax_open_kernel();
101933 sock_diag_handlers[family] = NULL;
101934+ pax_close_kernel();
101935 mutex_unlock(&sock_diag_table_mutex);
101936 }
101937 EXPORT_SYMBOL_GPL(sock_diag_unregister);
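
The sock_diag cookie is, by construction, the socket's kernel address split across two u32s, which is why CONFIG_GRKERNSEC_HIDESYM zeroes it on save and skips the staleness check on load: exporting the pair to userspace is a straight pointer leak. The encoding, reproduced in userspace:

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch only, mirroring sock_diag_save_cookie(): the two u32 halves
	 * are literally the object's kernel address. */
	static void save_cookie(const void *sk, uint32_t cookie[2])
	{
		cookie[0] = (uint32_t)(uintptr_t)sk;
		cookie[1] = (uint32_t)((((uintptr_t)sk) >> 31) >> 1);
	}

	int main(void)
	{
		int obj;			/* stand-in for a struct sock */
		uint32_t cookie[2];

		save_cookie(&obj, cookie);
		printf("address visible to userspace: %08x%08x\n",
		       cookie[1], cookie[0]);
		return 0;
	}
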
101938diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
101939index bbb1d5a..754e2e5 100644
101940--- a/net/core/sysctl_net_core.c
101941+++ b/net/core/sysctl_net_core.c
101942@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
101943 {
101944 unsigned int orig_size, size;
101945 int ret, i;
101946- struct ctl_table tmp = {
101947+ ctl_table_no_const tmp = {
101948 .data = &size,
101949 .maxlen = sizeof(size),
101950 .mode = table->mode
101951@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
101952 void __user *buffer, size_t *lenp, loff_t *ppos)
101953 {
101954 char id[IFNAMSIZ];
101955- struct ctl_table tbl = {
101956+ ctl_table_no_const tbl = {
101957 .data = id,
101958 .maxlen = IFNAMSIZ,
101959 };
101960@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
101961 static int proc_do_rss_key(struct ctl_table *table, int write,
101962 void __user *buffer, size_t *lenp, loff_t *ppos)
101963 {
101964- struct ctl_table fake_table;
101965+ ctl_table_no_const fake_table;
101966 char buf[NETDEV_RSS_KEY_LEN * 3];
101967
101968 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
101969@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
101970 .mode = 0444,
101971 .proc_handler = proc_do_rss_key,
101972 },
101973-#ifdef CONFIG_BPF_JIT
101974+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
101975 {
101976 .procname = "bpf_jit_enable",
101977 .data = &bpf_jit_enable,
101978@@ -402,13 +402,12 @@ static struct ctl_table netns_core_table[] = {
101979
101980 static __net_init int sysctl_core_net_init(struct net *net)
101981 {
101982- struct ctl_table *tbl;
101983+ ctl_table_no_const *tbl = NULL;
101984
101985 net->core.sysctl_somaxconn = SOMAXCONN;
101986
101987- tbl = netns_core_table;
101988 if (!net_eq(net, &init_net)) {
101989- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
101990+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
101991 if (tbl == NULL)
101992 goto err_dup;
101993
101994@@ -418,17 +417,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
101995 if (net->user_ns != &init_user_ns) {
101996 tbl[0].procname = NULL;
101997 }
101998- }
101999-
102000- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102001+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102002+ } else
102003+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102004 if (net->core.sysctl_hdr == NULL)
102005 goto err_reg;
102006
102007 return 0;
102008
102009 err_reg:
102010- if (tbl != netns_core_table)
102011- kfree(tbl);
102012+ kfree(tbl);
102013 err_dup:
102014 return -ENOMEM;
102015 }
102016@@ -443,7 +441,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102017 kfree(tbl);
102018 }
102019
102020-static __net_initdata struct pernet_operations sysctl_core_ops = {
102021+static __net_initconst struct pernet_operations sysctl_core_ops = {
102022 .init = sysctl_core_net_init,
102023 .exit = sysctl_core_net_exit,
102024 };
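
sysctl_core_net_init() is refactored into a pattern this patch applies to every per-netns sysctl registration (the ieee802154, ip_fragment and devinet hunks below follow the same shape): the template table stays const and is registered directly for init_net, only other namespaces get a mutable kmemdup() copy, and the error path may kfree() unconditionally because the copy pointer starts out NULL. A stand-alone sketch of the shape (stub types; malloc/memcpy stand in for kmemdup):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct ctl_stub { const char *procname; int data; };	/* stand-in */

	static const struct ctl_stub template[2] = {
		{ "somaxconn", 128 },
		{ NULL, 0 },
	};

	/* Sketch only: const template for init_net, mutable copy for every
	 * other namespace, NULL-safe cleanup. */
	static struct ctl_stub *netns_register(int is_init_net)
	{
		struct ctl_stub *tbl = NULL;

		if (!is_init_net) {
			tbl = malloc(sizeof(template));		/* kmemdup() */
			if (!tbl)
				return NULL;
			memcpy(tbl, template, sizeof(template));
			tbl[0].data = 4096;	/* per-netns override */
		}
		/* register_net_sysctl(net, "net/core", tbl ? tbl : template); */
		return tbl;
	}

	int main(void)
	{
		struct ctl_stub *t = netns_register(0);

		if (t)
			printf("child netns: %s = %d\n", t[0].procname, t[0].data);
		free(t);		/* like kfree(), free(NULL) is a no-op */
		return 0;
	}
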
102025diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102026index 8102286..a0c2755 100644
102027--- a/net/decnet/af_decnet.c
102028+++ b/net/decnet/af_decnet.c
102029@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102030 .sysctl_rmem = sysctl_decnet_rmem,
102031 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102032 .obj_size = sizeof(struct dn_sock),
102033+ .slab_flags = SLAB_USERCOPY,
102034 };
102035
102036 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102037diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102038index 4400da7..3429972 100644
102039--- a/net/decnet/dn_dev.c
102040+++ b/net/decnet/dn_dev.c
102041@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102042 .extra1 = &min_t3,
102043 .extra2 = &max_t3
102044 },
102045- {0}
102046+ { }
102047 },
102048 };
102049
102050diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102051index 5325b54..a0d4d69 100644
102052--- a/net/decnet/sysctl_net_decnet.c
102053+++ b/net/decnet/sysctl_net_decnet.c
102054@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102055
102056 if (len > *lenp) len = *lenp;
102057
102058- if (copy_to_user(buffer, addr, len))
102059+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102060 return -EFAULT;
102061
102062 *lenp = len;
102063@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102064
102065 if (len > *lenp) len = *lenp;
102066
102067- if (copy_to_user(buffer, devname, len))
102068+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102069 return -EFAULT;
102070
102071 *lenp = len;
102072diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102073index a2c7e4c..3dc9f67 100644
102074--- a/net/hsr/hsr_netlink.c
102075+++ b/net/hsr/hsr_netlink.c
102076@@ -102,7 +102,7 @@ nla_put_failure:
102077 return -EMSGSIZE;
102078 }
102079
102080-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102081+static struct rtnl_link_ops hsr_link_ops = {
102082 .kind = "hsr",
102083 .maxtype = IFLA_HSR_MAX,
102084 .policy = hsr_policy,
102085diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
102086index 27eaa65..7083217 100644
102087--- a/net/ieee802154/6lowpan_rtnl.c
102088+++ b/net/ieee802154/6lowpan_rtnl.c
102089@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102090 dev_put(real_dev);
102091 }
102092
102093-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102094+static struct rtnl_link_ops lowpan_link_ops = {
102095 .kind = "lowpan",
102096 .priv_size = sizeof(struct lowpan_dev_info),
102097 .setup = lowpan_setup,
102098diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102099index 9d980ed..7d01e12 100644
102100--- a/net/ieee802154/reassembly.c
102101+++ b/net/ieee802154/reassembly.c
102102@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102103
102104 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102105 {
102106- struct ctl_table *table;
102107+ ctl_table_no_const *table = NULL;
102108 struct ctl_table_header *hdr;
102109 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102110 net_ieee802154_lowpan(net);
102111
102112- table = lowpan_frags_ns_ctl_table;
102113 if (!net_eq(net, &init_net)) {
102114- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102115+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102116 GFP_KERNEL);
102117 if (table == NULL)
102118 goto err_alloc;
102119@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102120 /* Don't export sysctls to unprivileged users */
102121 if (net->user_ns != &init_user_ns)
102122 table[0].procname = NULL;
102123- }
102124-
102125- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102126+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102127+ } else
102128+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102129 if (hdr == NULL)
102130 goto err_reg;
102131
102132@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102133 return 0;
102134
102135 err_reg:
102136- if (!net_eq(net, &init_net))
102137- kfree(table);
102138+ kfree(table);
102139 err_alloc:
102140 return -ENOMEM;
102141 }
102142diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102143index a44773c..a6ae415 100644
102144--- a/net/ipv4/af_inet.c
102145+++ b/net/ipv4/af_inet.c
102146@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102147 return ip_recv_error(sk, msg, len, addr_len);
102148 #if IS_ENABLED(CONFIG_IPV6)
102149 if (sk->sk_family == AF_INET6)
102150- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102151+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102152 #endif
102153 return -EINVAL;
102154 }
102155diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102156index 214882e..ec032f6 100644
102157--- a/net/ipv4/devinet.c
102158+++ b/net/ipv4/devinet.c
102159@@ -69,7 +69,8 @@
102160
102161 static struct ipv4_devconf ipv4_devconf = {
102162 .data = {
102163- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102164+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102165+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102166 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102167 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102168 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102169@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102170
102171 static struct ipv4_devconf ipv4_devconf_dflt = {
102172 .data = {
102173- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102174+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102175+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102176 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102177 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102178 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102179@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102180 idx = 0;
102181 head = &net->dev_index_head[h];
102182 rcu_read_lock();
102183- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102184+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102185 net->dev_base_seq;
102186 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102187 if (idx < s_idx)
102188@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102189 idx = 0;
102190 head = &net->dev_index_head[h];
102191 rcu_read_lock();
102192- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102193+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102194 net->dev_base_seq;
102195 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102196 if (idx < s_idx)
102197@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102198 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102199 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102200
102201-static struct devinet_sysctl_table {
102202+static const struct devinet_sysctl_table {
102203 struct ctl_table_header *sysctl_header;
102204 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102205 } devinet_sysctl = {
102206@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102207 int err;
102208 struct ipv4_devconf *all, *dflt;
102209 #ifdef CONFIG_SYSCTL
102210- struct ctl_table *tbl = ctl_forward_entry;
102211+ ctl_table_no_const *tbl = NULL;
102212 struct ctl_table_header *forw_hdr;
102213 #endif
102214
102215@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102216 goto err_alloc_dflt;
102217
102218 #ifdef CONFIG_SYSCTL
102219- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102220+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102221 if (tbl == NULL)
102222 goto err_alloc_ctl;
102223
102224@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102225 goto err_reg_dflt;
102226
102227 err = -ENOMEM;
102228- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102229+ if (!net_eq(net, &init_net))
102230+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102231+ else
102232+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102233 if (forw_hdr == NULL)
102234 goto err_reg_ctl;
102235 net->ipv4.forw_hdr = forw_hdr;
102236@@ -2287,8 +2292,7 @@ err_reg_ctl:
102237 err_reg_dflt:
102238 __devinet_sysctl_unregister(all);
102239 err_reg_all:
102240- if (tbl != ctl_forward_entry)
102241- kfree(tbl);
102242+ kfree(tbl);
102243 err_alloc_ctl:
102244 #endif
102245 if (dflt != &ipv4_devconf_dflt)
102246diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102247index 23104a3..9f5570b 100644
102248--- a/net/ipv4/fib_frontend.c
102249+++ b/net/ipv4/fib_frontend.c
102250@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102251 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102252 fib_sync_up(dev);
102253 #endif
102254- atomic_inc(&net->ipv4.dev_addr_genid);
102255+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102256 rt_cache_flush(dev_net(dev));
102257 break;
102258 case NETDEV_DOWN:
102259 fib_del_ifaddr(ifa, NULL);
102260- atomic_inc(&net->ipv4.dev_addr_genid);
102261+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102262 if (ifa->ifa_dev->ifa_list == NULL) {
102263 /* Last address was deleted from this interface.
102264 * Disable IP.
102265@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102266 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102267 fib_sync_up(dev);
102268 #endif
102269- atomic_inc(&net->ipv4.dev_addr_genid);
102270+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102271 rt_cache_flush(net);
102272 break;
102273 case NETDEV_DOWN:
102274diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102275index f99f41b..1879da9 100644
102276--- a/net/ipv4/fib_semantics.c
102277+++ b/net/ipv4/fib_semantics.c
102278@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102279 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102280 nh->nh_gw,
102281 nh->nh_parent->fib_scope);
102282- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102283+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102284
102285 return nh->nh_saddr;
102286 }
102287diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102288index 9111a4e..3576905 100644
102289--- a/net/ipv4/inet_hashtables.c
102290+++ b/net/ipv4/inet_hashtables.c
102291@@ -18,6 +18,7 @@
102292 #include <linux/sched.h>
102293 #include <linux/slab.h>
102294 #include <linux/wait.h>
102295+#include <linux/security.h>
102296
102297 #include <net/inet_connection_sock.h>
102298 #include <net/inet_hashtables.h>
102299@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102300 return inet_ehashfn(net, laddr, lport, faddr, fport);
102301 }
102302
102303+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102304+
102305 /*
102306 * Allocate and initialize a new local port bind bucket.
102307 * The bindhash mutex for snum's hash chain must be held here.
102308@@ -554,6 +557,8 @@ ok:
102309 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102310 spin_unlock(&head->lock);
102311
102312+ gr_update_task_in_ip_table(inet_sk(sk));
102313+
102314 if (tw) {
102315 inet_twsk_deschedule(tw, death_row);
102316 while (twrefcnt) {
102317diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102318index 241afd7..31b95d5 100644
102319--- a/net/ipv4/inetpeer.c
102320+++ b/net/ipv4/inetpeer.c
102321@@ -461,7 +461,7 @@ relookup:
102322 if (p) {
102323 p->daddr = *daddr;
102324 atomic_set(&p->refcnt, 1);
102325- atomic_set(&p->rid, 0);
102326+ atomic_set_unchecked(&p->rid, 0);
102327 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102328 p->rate_tokens = 0;
102329 /* 60*HZ is arbitrary, but chosen enough high so that the first
102330diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102331index 145a50c..5dd8cc5 100644
102332--- a/net/ipv4/ip_fragment.c
102333+++ b/net/ipv4/ip_fragment.c
102334@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
102335 return 0;
102336
102337 start = qp->rid;
102338- end = atomic_inc_return(&peer->rid);
102339+ end = atomic_inc_return_unchecked(&peer->rid);
102340 qp->rid = end;
102341
102342 rc = qp->q.fragments && (end - start) > max;
102343@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102344
102345 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102346 {
102347- struct ctl_table *table;
102348+ ctl_table_no_const *table = NULL;
102349 struct ctl_table_header *hdr;
102350
102351- table = ip4_frags_ns_ctl_table;
102352 if (!net_eq(net, &init_net)) {
102353- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102354+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102355 if (table == NULL)
102356 goto err_alloc;
102357
102358@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102359 /* Don't export sysctls to unprivileged users */
102360 if (net->user_ns != &init_user_ns)
102361 table[0].procname = NULL;
102362- }
102363+ hdr = register_net_sysctl(net, "net/ipv4", table);
102364+ } else
102365+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102366
102367- hdr = register_net_sysctl(net, "net/ipv4", table);
102368 if (hdr == NULL)
102369 goto err_reg;
102370
102371@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102372 return 0;
102373
102374 err_reg:
102375- if (!net_eq(net, &init_net))
102376- kfree(table);
102377+ kfree(table);
102378 err_alloc:
102379 return -ENOMEM;
102380 }
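
The restructuring above is the patch's recurring per-netns sysctl pattern: the template table stays const (ctl_table_no_const marks the only writable copy), init_net registers the template directly, and only non-init namespaces get a kmemdup'd writable duplicate. A condensed sketch of the resulting flow, with hypothetical names (example_template, example_sysctl_register); error handling is abbreviated:

static int __net_init example_sysctl_register(struct net *net)
{
        ctl_table_no_const *table = NULL;
        struct ctl_table_header *hdr;

        if (!net_eq(net, &init_net)) {
                table = kmemdup(example_template, sizeof(example_template),
                                GFP_KERNEL);
                if (table == NULL)
                        return -ENOMEM;
                hdr = register_net_sysctl(net, "net/example", table);
        } else
                hdr = register_net_sysctl(net, "net/example", example_template);

        if (hdr == NULL) {
                kfree(table);   /* NULL in the init_net branch: kfree(NULL) is a no-op */
                return -ENOMEM;
        }
        return 0;
}

This is also why the error path above could drop its !net_eq() guard: table is NULL exactly when no duplicate was made.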
102381diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102382index 4f4bf5b..2c936fe 100644
102383--- a/net/ipv4/ip_gre.c
102384+++ b/net/ipv4/ip_gre.c
102385@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102386 module_param(log_ecn_error, bool, 0644);
102387 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102388
102389-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102390+static struct rtnl_link_ops ipgre_link_ops;
102391 static int ipgre_tunnel_init(struct net_device *dev);
102392
102393 static int ipgre_net_id __read_mostly;
102394@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102395 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
102396 };
102397
102398-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102399+static struct rtnl_link_ops ipgre_link_ops = {
102400 .kind = "gre",
102401 .maxtype = IFLA_GRE_MAX,
102402 .policy = ipgre_policy,
102403@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102404 .fill_info = ipgre_fill_info,
102405 };
102406
102407-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102408+static struct rtnl_link_ops ipgre_tap_ops = {
102409 .kind = "gretap",
102410 .maxtype = IFLA_GRE_MAX,
102411 .policy = ipgre_policy,
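
Dropping __read_mostly from these rtnl_link_ops (and the same change to vti, ipip, sit, ip6gre, ip6tnl and vti6 below) appears to serve grsecurity's structure constification: ops tables made up of function pointers are meant to end up read-only after init, and the __read_mostly annotation would instead place them in a writable data section. A small runnable model of what read-only placement buys, with hypothetical names:

#include <stdio.h>

struct link_ops_model {
        const char *kind;
        int (*setup)(void);
};

static int setup_stub(void) { return 0; }

static const struct link_ops_model example_link_ops = {
        .kind  = "gre-example",
        .setup = setup_stub,
};

int main(void)
{
        printf("%s\n", example_link_ops.kind);
        /* ((struct link_ops_model *)&example_link_ops)->setup = 0;
         * a write like this would fault: the object sits in .rodata,
         * so an attacker cannot redirect the function pointers */
        return 0;
}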
102412diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102413index 3d4da2c..40f9c29 100644
102414--- a/net/ipv4/ip_input.c
102415+++ b/net/ipv4/ip_input.c
102416@@ -147,6 +147,10 @@
102417 #include <linux/mroute.h>
102418 #include <linux/netlink.h>
102419
102420+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102421+extern int grsec_enable_blackhole;
102422+#endif
102423+
102424 /*
102425 * Process Router Attention IP option (RFC 2113)
102426 */
102427@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102428 if (!raw) {
102429 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102430 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102431+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102432+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102433+#endif
102434 icmp_send(skb, ICMP_DEST_UNREACH,
102435 ICMP_PROT_UNREACH, 0);
102436 }
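
This is the GRKERNSEC_BLACKHOLE hook in its simplest form: when enabled, the stack stays silent toward unsolicited traffic instead of answering with ICMP protocol-unreachable, except on loopback so local tooling keeps working. The guard reduced to its shape, with a hypothetical wrapper name; the condition is the patch's own, inverted into an early return:

static void maybe_send_unreach(struct sk_buff *skb)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
        if (grsec_enable_blackhole && !(skb->dev->flags & IFF_LOOPBACK))
                return;         /* drop silently: no ICMP back to a prober */
#endif
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
}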
102437diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102438index 6b85adb..cd7e5d3 100644
102439--- a/net/ipv4/ip_sockglue.c
102440+++ b/net/ipv4/ip_sockglue.c
102441@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102442 len = min_t(unsigned int, len, opt->optlen);
102443 if (put_user(len, optlen))
102444 return -EFAULT;
102445- if (copy_to_user(optval, opt->__data, len))
102446+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102447+ copy_to_user(optval, opt->__data, len))
102448 return -EFAULT;
102449 return 0;
102450 }
102451@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102452 if (sk->sk_type != SOCK_STREAM)
102453 return -ENOPROTOOPT;
102454
102455- msg.msg_control = (__force void *) optval;
102456+ msg.msg_control = (__force_kernel void *) optval;
102457 msg.msg_controllen = len;
102458 msg.msg_flags = flags;
102459
102460diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102461index 1a7e979..fd05aa4 100644
102462--- a/net/ipv4/ip_vti.c
102463+++ b/net/ipv4/ip_vti.c
102464@@ -45,7 +45,7 @@
102465 #include <net/net_namespace.h>
102466 #include <net/netns/generic.h>
102467
102468-static struct rtnl_link_ops vti_link_ops __read_mostly;
102469+static struct rtnl_link_ops vti_link_ops;
102470
102471 static int vti_net_id __read_mostly;
102472 static int vti_tunnel_init(struct net_device *dev);
102473@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102474 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102475 };
102476
102477-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102478+static struct rtnl_link_ops vti_link_ops = {
102479 .kind = "vti",
102480 .maxtype = IFLA_VTI_MAX,
102481 .policy = vti_policy,
102482diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102483index 7fa18bc..bea16af 100644
102484--- a/net/ipv4/ipconfig.c
102485+++ b/net/ipv4/ipconfig.c
102486@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102487
102488 mm_segment_t oldfs = get_fs();
102489 set_fs(get_ds());
102490- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102491+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102492 set_fs(oldfs);
102493 return res;
102494 }
102495@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102496
102497 mm_segment_t oldfs = get_fs();
102498 set_fs(get_ds());
102499- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102500+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102501 set_fs(oldfs);
102502 return res;
102503 }
102504@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102505
102506 mm_segment_t oldfs = get_fs();
102507 set_fs(get_ds());
102508- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102509+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102510 set_fs(oldfs);
102511 return res;
102512 }
102513diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102514index 40403114..c35c647 100644
102515--- a/net/ipv4/ipip.c
102516+++ b/net/ipv4/ipip.c
102517@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102518 static int ipip_net_id __read_mostly;
102519
102520 static int ipip_tunnel_init(struct net_device *dev);
102521-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102522+static struct rtnl_link_ops ipip_link_ops;
102523
102524 static int ipip_err(struct sk_buff *skb, u32 info)
102525 {
102526@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102527 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
102528 };
102529
102530-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102531+static struct rtnl_link_ops ipip_link_ops = {
102532 .kind = "ipip",
102533 .maxtype = IFLA_IPTUN_MAX,
102534 .policy = ipip_policy,
102535diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102536index f95b6f9..2ee2097 100644
102537--- a/net/ipv4/netfilter/arp_tables.c
102538+++ b/net/ipv4/netfilter/arp_tables.c
102539@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102540 #endif
102541
102542 static int get_info(struct net *net, void __user *user,
102543- const int *len, int compat)
102544+ int len, int compat)
102545 {
102546 char name[XT_TABLE_MAXNAMELEN];
102547 struct xt_table *t;
102548 int ret;
102549
102550- if (*len != sizeof(struct arpt_getinfo)) {
102551- duprintf("length %u != %Zu\n", *len,
102552+ if (len != sizeof(struct arpt_getinfo)) {
102553+ duprintf("length %u != %Zu\n", len,
102554 sizeof(struct arpt_getinfo));
102555 return -EINVAL;
102556 }
102557@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102558 info.size = private->size;
102559 strcpy(info.name, name);
102560
102561- if (copy_to_user(user, &info, *len) != 0)
102562+ if (copy_to_user(user, &info, len) != 0)
102563 ret = -EFAULT;
102564 else
102565 ret = 0;
102566@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102567
102568 switch (cmd) {
102569 case ARPT_SO_GET_INFO:
102570- ret = get_info(sock_net(sk), user, len, 1);
102571+ ret = get_info(sock_net(sk), user, *len, 1);
102572 break;
102573 case ARPT_SO_GET_ENTRIES:
102574 ret = compat_get_entries(sock_net(sk), user, len);
102575@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102576
102577 switch (cmd) {
102578 case ARPT_SO_GET_INFO:
102579- ret = get_info(sock_net(sk), user, len, 0);
102580+ ret = get_info(sock_net(sk), user, *len, 0);
102581 break;
102582
102583 case ARPT_SO_GET_ENTRIES:
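
The get_info() signature change (here and repeated for ip_tables and ip6_tables below) passes the length by value instead of by pointer, with the dereference moved to the callers. The apparent effect is that the value validated against sizeof(struct arpt_getinfo) is necessarily the same value later handed to copy_to_user, leaving no window for the pointed-to length to differ between check and use. A hypothetical reduction of the by-value shape; names, the callback parameter and the struct are illustrative only:

#include <string.h>

struct getinfo_model { char name[32]; unsigned int size; };

static int get_info_model(void *user_buf, int len,
                          int (*copy_out)(void *dst, const void *src, int n))
{
        struct getinfo_model info;

        if (len != (int)sizeof(info))
                return -22;                     /* -EINVAL */
        memset(&info, 0, sizeof(info));
        info.size = sizeof(info);
        if (copy_out(user_buf, &info, len))     /* len == sizeof(info) here */
                return -14;                     /* -EFAULT */
        return 0;
}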
102584diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102585index 99e810f..3711b81 100644
102586--- a/net/ipv4/netfilter/ip_tables.c
102587+++ b/net/ipv4/netfilter/ip_tables.c
102588@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102589 #endif
102590
102591 static int get_info(struct net *net, void __user *user,
102592- const int *len, int compat)
102593+ int len, int compat)
102594 {
102595 char name[XT_TABLE_MAXNAMELEN];
102596 struct xt_table *t;
102597 int ret;
102598
102599- if (*len != sizeof(struct ipt_getinfo)) {
102600- duprintf("length %u != %zu\n", *len,
102601+ if (len != sizeof(struct ipt_getinfo)) {
102602+ duprintf("length %u != %zu\n", len,
102603 sizeof(struct ipt_getinfo));
102604 return -EINVAL;
102605 }
102606@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102607 info.size = private->size;
102608 strcpy(info.name, name);
102609
102610- if (copy_to_user(user, &info, *len) != 0)
102611+ if (copy_to_user(user, &info, len) != 0)
102612 ret = -EFAULT;
102613 else
102614 ret = 0;
102615@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102616
102617 switch (cmd) {
102618 case IPT_SO_GET_INFO:
102619- ret = get_info(sock_net(sk), user, len, 1);
102620+ ret = get_info(sock_net(sk), user, *len, 1);
102621 break;
102622 case IPT_SO_GET_ENTRIES:
102623 ret = compat_get_entries(sock_net(sk), user, len);
102624@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102625
102626 switch (cmd) {
102627 case IPT_SO_GET_INFO:
102628- ret = get_info(sock_net(sk), user, len, 0);
102629+ ret = get_info(sock_net(sk), user, *len, 0);
102630 break;
102631
102632 case IPT_SO_GET_ENTRIES:
102633diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102634index e90f83a..3e6acca 100644
102635--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102636+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102637@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102638 spin_lock_init(&cn->lock);
102639
102640 #ifdef CONFIG_PROC_FS
102641- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102642+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102643 if (!cn->procdir) {
102644 pr_err("Unable to proc dir entry\n");
102645 return -ENOMEM;
102646diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102647index 0ae28f5..d32b565 100644
102648--- a/net/ipv4/ping.c
102649+++ b/net/ipv4/ping.c
102650@@ -59,7 +59,7 @@ struct ping_table {
102651 };
102652
102653 static struct ping_table ping_table;
102654-struct pingv6_ops pingv6_ops;
102655+struct pingv6_ops *pingv6_ops;
102656 EXPORT_SYMBOL_GPL(pingv6_ops);
102657
102658 static u16 ping_port_rover;
102659@@ -358,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102660 return -ENODEV;
102661 }
102662 }
102663- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102664+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102665 scoped);
102666 rcu_read_unlock();
102667
102668@@ -566,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102669 }
102670 #if IS_ENABLED(CONFIG_IPV6)
102671 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102672- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102673+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102674 #endif
102675 }
102676
102677@@ -584,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102678 info, (u8 *)icmph);
102679 #if IS_ENABLED(CONFIG_IPV6)
102680 } else if (family == AF_INET6) {
102681- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102682+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102683 info, (u8 *)icmph);
102684 #endif
102685 }
102686@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102687 }
102688
102689 if (inet6_sk(sk)->rxopt.all)
102690- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102691+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102692 if (skb->protocol == htons(ETH_P_IPV6) &&
102693 inet6_sk(sk)->rxopt.all)
102694- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102695+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102696 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102697 ip_cmsg_recv(msg, skb);
102698 #endif
102699@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
102700 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
102701 0, sock_i_ino(sp),
102702 atomic_read(&sp->sk_refcnt), sp,
102703- atomic_read(&sp->sk_drops));
102704+ atomic_read_unchecked(&sp->sk_drops));
102705 }
102706
102707 static int ping_v4_seq_show(struct seq_file *seq, void *v)
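
Converting pingv6_ops from a shared struct instance to a pointer lets the IPv6 side (see the net/ipv6/ping.c hunk later in this patch) switch between a real and a dummy implementation with one pointer store, instead of rewriting six function-pointer fields of a writable global one by one. A reduced model of the swap, with hypothetical names; both ops tables can then live in read-only memory:

struct pingv6_ops_model {
        int (*chk_addr)(const void *addr);
};

static int real_chk_addr(const void *addr)  { (void)addr; return 1; }
static int dummy_chk_addr(const void *addr) { (void)addr; return 0; }

static const struct pingv6_ops_model real_ops  = { .chk_addr = real_chk_addr };
static const struct pingv6_ops_model dummy_ops = { .chk_addr = dummy_chk_addr };

/* one pointer store swaps every callback at once */
static const struct pingv6_ops_model *pingv6_ops_model = &dummy_ops;

static void pingv6_init_model(void) { pingv6_ops_model = &real_ops;  }
static void pingv6_exit_model(void) { pingv6_ops_model = &dummy_ops; }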
102708diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
102709index 0bb68df..59405fc 100644
102710--- a/net/ipv4/raw.c
102711+++ b/net/ipv4/raw.c
102712@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
102713 int raw_rcv(struct sock *sk, struct sk_buff *skb)
102714 {
102715 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
102716- atomic_inc(&sk->sk_drops);
102717+ atomic_inc_unchecked(&sk->sk_drops);
102718 kfree_skb(skb);
102719 return NET_RX_DROP;
102720 }
102721@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
102722
102723 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
102724 {
102725+ struct icmp_filter filter;
102726+
102727 if (optlen > sizeof(struct icmp_filter))
102728 optlen = sizeof(struct icmp_filter);
102729- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
102730+ if (copy_from_user(&filter, optval, optlen))
102731 return -EFAULT;
102732+ raw_sk(sk)->filter = filter;
102733 return 0;
102734 }
102735
102736 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
102737 {
102738 int len, ret = -EFAULT;
102739+ struct icmp_filter filter;
102740
102741 if (get_user(len, optlen))
102742 goto out;
102743@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
102744 if (len > sizeof(struct icmp_filter))
102745 len = sizeof(struct icmp_filter);
102746 ret = -EFAULT;
102747- if (put_user(len, optlen) ||
102748- copy_to_user(optval, &raw_sk(sk)->filter, len))
102749+ filter = raw_sk(sk)->filter;
102750+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
102751 goto out;
102752 ret = 0;
102753 out: return ret;
102754@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
102755 0, 0L, 0,
102756 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
102757 0, sock_i_ino(sp),
102758- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
102759+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
102760 }
102761
102762 static int raw_seq_show(struct seq_file *seq, void *v)
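
The seticmpfilter/geticmpfilter rework above stages the filter through a stack copy and bounds the length explicitly before touching userspace, so a concurrent setsockopt cannot produce a torn read of the socket's filter and the copy can never exceed the local buffer. The same staging pattern recurs in the ip_sockglue, tcp_probe and IPv6 raw hunks of this patch. A hypothetical reduction of the copy-out side:

struct icmp_filter_model { unsigned int data; };
struct raw_sock_model { struct icmp_filter_model filter; };

static int get_filter_model(struct raw_sock_model *sk, void *optval, int len,
                            int (*copy_out)(void *dst, const void *src, int n))
{
        struct icmp_filter_model filter;

        if (len > (int)sizeof(filter))
                len = sizeof(filter);   /* clamp before any copy */
        filter = sk->filter;            /* one coherent snapshot */
        if (copy_out(optval, &filter, len))
                return -14;             /* -EFAULT */
        return 0;
}

The patch keeps a belt-and-braces len > sizeof filter test at the copy site as well; the sketch above folds that into the single clamp.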
102763diff --git a/net/ipv4/route.c b/net/ipv4/route.c
102764index 52e1f2b..e736cb4 100644
102765--- a/net/ipv4/route.c
102766+++ b/net/ipv4/route.c
102767@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
102768
102769 static int rt_cache_seq_open(struct inode *inode, struct file *file)
102770 {
102771- return seq_open(file, &rt_cache_seq_ops);
102772+ return seq_open_restrict(file, &rt_cache_seq_ops);
102773 }
102774
102775 static const struct file_operations rt_cache_seq_fops = {
102776@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
102777
102778 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
102779 {
102780- return seq_open(file, &rt_cpu_seq_ops);
102781+ return seq_open_restrict(file, &rt_cpu_seq_ops);
102782 }
102783
102784 static const struct file_operations rt_cpu_seq_fops = {
102785@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
102786
102787 static int rt_acct_proc_open(struct inode *inode, struct file *file)
102788 {
102789- return single_open(file, rt_acct_proc_show, NULL);
102790+ return single_open_restrict(file, rt_acct_proc_show, NULL);
102791 }
102792
102793 static const struct file_operations rt_acct_proc_fops = {
102794@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
102795
102796 #define IP_IDENTS_SZ 2048u
102797 struct ip_ident_bucket {
102798- atomic_t id;
102799+ atomic_unchecked_t id;
102800 u32 stamp32;
102801 };
102802
102803-static struct ip_ident_bucket *ip_idents __read_mostly;
102804+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
102805
102806 /* In order to protect privacy, we add a perturbation to identifiers
102807 * if one generator is seldom used. This makes hard for an attacker
102808@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
102809 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
102810 delta = prandom_u32_max(now - old);
102811
102812- return atomic_add_return(segs + delta, &bucket->id) - segs;
102813+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
102814 }
102815 EXPORT_SYMBOL(ip_idents_reserve);
102816
102817@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
102818 .maxlen = sizeof(int),
102819 .mode = 0200,
102820 .proc_handler = ipv4_sysctl_rtcache_flush,
102821+ .extra1 = &init_net,
102822 },
102823 { },
102824 };
102825
102826 static __net_init int sysctl_route_net_init(struct net *net)
102827 {
102828- struct ctl_table *tbl;
102829+ ctl_table_no_const *tbl = NULL;
102830
102831- tbl = ipv4_route_flush_table;
102832 if (!net_eq(net, &init_net)) {
102833- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
102834+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
102835 if (tbl == NULL)
102836 goto err_dup;
102837
102838 /* Don't export sysctls to unprivileged users */
102839 if (net->user_ns != &init_user_ns)
102840 tbl[0].procname = NULL;
102841- }
102842- tbl[0].extra1 = net;
102843+ tbl[0].extra1 = net;
102844+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
102845+ } else
102846+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
102847
102848- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
102849 if (net->ipv4.route_hdr == NULL)
102850 goto err_reg;
102851 return 0;
102852
102853 err_reg:
102854- if (tbl != ipv4_route_flush_table)
102855- kfree(tbl);
102856+ kfree(tbl);
102857 err_dup:
102858 return -ENOMEM;
102859 }
102860@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
102861
102862 static __net_init int rt_genid_init(struct net *net)
102863 {
102864- atomic_set(&net->ipv4.rt_genid, 0);
102865- atomic_set(&net->fnhe_genid, 0);
102866+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
102867+ atomic_set_unchecked(&net->fnhe_genid, 0);
102868 get_random_bytes(&net->ipv4.dev_addr_genid,
102869 sizeof(net->ipv4.dev_addr_genid));
102870 return 0;
102871@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
102872 {
102873 int rc = 0;
102874
102875- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
102876- if (!ip_idents)
102877- panic("IP: failed to allocate ip_idents\n");
102878-
102879- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
102880+ prandom_bytes(ip_idents, sizeof(ip_idents));
102881
102882 #ifdef CONFIG_IP_ROUTE_CLASSID
102883 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
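
Turning ip_idents from a kmalloc'd pointer into a fixed static array removes the boot-time allocation and its panic path entirely, and it lets sizeof() name the whole object when seeding, where sizeof on a pointer would only give the pointer's size. A minimal model of the resulting shape, with hypothetical names:

#include <stddef.h>

#define IP_IDENTS_SZ 2048u

struct ident_bucket_model {
        unsigned int id;
        unsigned int stamp32;
};

static struct ident_bucket_model ip_idents_model[IP_IDENTS_SZ];

static void seed_idents(void (*fill_random)(void *buf, size_t len))
{
        /* sizeof(array) covers all IP_IDENTS_SZ buckets; no count needed */
        fill_random(ip_idents_model, sizeof(ip_idents_model));
}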
102884diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
102885index e0ee384..e2688d9 100644
102886--- a/net/ipv4/sysctl_net_ipv4.c
102887+++ b/net/ipv4/sysctl_net_ipv4.c
102888@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
102889 container_of(table->data, struct net, ipv4.ip_local_ports.range);
102890 int ret;
102891 int range[2];
102892- struct ctl_table tmp = {
102893+ ctl_table_no_const tmp = {
102894 .data = &range,
102895 .maxlen = sizeof(range),
102896 .mode = table->mode,
102897@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
102898 int ret;
102899 gid_t urange[2];
102900 kgid_t low, high;
102901- struct ctl_table tmp = {
102902+ ctl_table_no_const tmp = {
102903 .data = &urange,
102904 .maxlen = sizeof(urange),
102905 .mode = table->mode,
102906@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
102907 void __user *buffer, size_t *lenp, loff_t *ppos)
102908 {
102909 char val[TCP_CA_NAME_MAX];
102910- struct ctl_table tbl = {
102911+ ctl_table_no_const tbl = {
102912 .data = val,
102913 .maxlen = TCP_CA_NAME_MAX,
102914 };
102915@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
102916 void __user *buffer, size_t *lenp,
102917 loff_t *ppos)
102918 {
102919- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
102920+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
102921 int ret;
102922
102923 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
102924@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
102925 void __user *buffer, size_t *lenp,
102926 loff_t *ppos)
102927 {
102928- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
102929+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
102930 int ret;
102931
102932 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
102933@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
102934 void __user *buffer, size_t *lenp,
102935 loff_t *ppos)
102936 {
102937- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
102938+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
102939 struct tcp_fastopen_context *ctxt;
102940 int ret;
102941 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
102942@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
102943
102944 static __net_init int ipv4_sysctl_init_net(struct net *net)
102945 {
102946- struct ctl_table *table;
102947+ ctl_table_no_const *table = NULL;
102948
102949- table = ipv4_net_table;
102950 if (!net_eq(net, &init_net)) {
102951 int i;
102952
102953- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
102954+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
102955 if (table == NULL)
102956 goto err_alloc;
102957
102958@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
102959 table[i].data += (void *)net - (void *)&init_net;
102960 }
102961
102962- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
102963+ if (!net_eq(net, &init_net))
102964+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
102965+ else
102966+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
102967 if (net->ipv4.ipv4_hdr == NULL)
102968 goto err_reg;
102969
102970diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
102971index 075ab4d..623bb9d 100644
102972--- a/net/ipv4/tcp_input.c
102973+++ b/net/ipv4/tcp_input.c
102974@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
102975 * without any lock. We want to make sure compiler wont store
102976 * intermediate values in this location.
102977 */
102978- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
102979+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
102980 sk->sk_max_pacing_rate);
102981 }
102982
102983@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
102984 * simplifies code)
102985 */
102986 static void
102987-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
102988+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
102989 struct sk_buff *head, struct sk_buff *tail,
102990 u32 start, u32 end)
102991 {
102992@@ -5506,6 +5506,7 @@ discard:
102993 tcp_paws_reject(&tp->rx_opt, 0))
102994 goto discard_and_undo;
102995
102996+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
102997 if (th->syn) {
102998 /* We see SYN without ACK. It is attempt of
102999 * simultaneous connect with crossed SYNs.
103000@@ -5556,6 +5557,7 @@ discard:
103001 goto discard;
103002 #endif
103003 }
103004+#endif
103005 /* "fifth, if neither of the SYN or RST bits is set then
103006 * drop the segment and return."
103007 */
103008@@ -5602,7 +5604,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103009 goto discard;
103010
103011 if (th->syn) {
103012- if (th->fin)
103013+ if (th->fin || th->urg || th->psh)
103014 goto discard;
103015 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103016 return 1;
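
Two hardening tweaks sit in this tcp_input.c hunk: GRKERNSEC_NO_SIMULT_CONNECT compiles out the simultaneous-open (crossed SYNs) handling entirely, and tcp_rcv_state_process now refuses a SYN that also carries URG or PSH, not just FIN. The flag test in isolation, as a compilable sketch with a hypothetical struct:

struct tcp_flags_model {
        unsigned int syn:1, fin:1, urg:1, psh:1;
};

static int syn_segment_is_bogus(const struct tcp_flags_model *th)
{
        /* a SYN combined with FIN, URG or PSH is dropped outright */
        return th->syn && (th->fin || th->urg || th->psh);
}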
103017diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103018index d22f544..62f6787 100644
103019--- a/net/ipv4/tcp_ipv4.c
103020+++ b/net/ipv4/tcp_ipv4.c
103021@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103022 int sysctl_tcp_low_latency __read_mostly;
103023 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103024
103025+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103026+extern int grsec_enable_blackhole;
103027+#endif
103028+
103029 #ifdef CONFIG_TCP_MD5SIG
103030 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103031 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103032@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103033 return 0;
103034
103035 reset:
103036+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103037+ if (!grsec_enable_blackhole)
103038+#endif
103039 tcp_v4_send_reset(rsk, skb);
103040 discard:
103041 kfree_skb(skb);
103042@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103043 TCP_SKB_CB(skb)->sacked = 0;
103044
103045 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103046- if (!sk)
103047+ if (!sk) {
103048+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103049+ ret = 1;
103050+#endif
103051 goto no_tcp_socket;
103052-
103053+ }
103054 process:
103055- if (sk->sk_state == TCP_TIME_WAIT)
103056+ if (sk->sk_state == TCP_TIME_WAIT) {
103057+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103058+ ret = 2;
103059+#endif
103060 goto do_time_wait;
103061+ }
103062
103063 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103064 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103065@@ -1698,6 +1712,10 @@ csum_error:
103066 bad_packet:
103067 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103068 } else {
103069+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103070+ if (!grsec_enable_blackhole || (ret == 1 &&
103071+ (skb->dev->flags & IFF_LOOPBACK)))
103072+#endif
103073 tcp_v4_send_reset(NULL, skb);
103074 }
103075
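
In the TCP receive path the blackhole logic needs to know why a packet reached the error label, so ret is pressed into service as a tag: 1 means no listening socket was found, 2 means the socket was in TIME_WAIT. A reset then goes out only when blackholing is off, or for the no-socket case on loopback. The decision reduced to a pure function, with hypothetical parameter names:

static int should_send_reset(int blackhole_on, int lookup_result, int on_loopback)
{
        if (!blackhole_on)
                return 1;                          /* normal RFC behaviour    */
        return lookup_result == 1 && on_loopback;  /* no-socket + loopback only */
}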
103076diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103077index 63d2680..2db9d6b 100644
103078--- a/net/ipv4/tcp_minisocks.c
103079+++ b/net/ipv4/tcp_minisocks.c
103080@@ -27,6 +27,10 @@
103081 #include <net/inet_common.h>
103082 #include <net/xfrm.h>
103083
103084+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103085+extern int grsec_enable_blackhole;
103086+#endif
103087+
103088 int sysctl_tcp_syncookies __read_mostly = 1;
103089 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103090
103091@@ -739,7 +743,10 @@ embryonic_reset:
103092 * avoid becoming vulnerable to outside attack aiming at
103093 * resetting legit local connections.
103094 */
103095- req->rsk_ops->send_reset(sk, skb);
103096+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103097+ if (!grsec_enable_blackhole)
103098+#endif
103099+ req->rsk_ops->send_reset(sk, skb);
103100 } else if (fastopen) { /* received a valid RST pkt */
103101 reqsk_fastopen_remove(sk, req, true);
103102 tcp_reset(sk);
103103diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103104index ebf5ff5..4d1ff32 100644
103105--- a/net/ipv4/tcp_probe.c
103106+++ b/net/ipv4/tcp_probe.c
103107@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103108 if (cnt + width >= len)
103109 break;
103110
103111- if (copy_to_user(buf + cnt, tbuf, width))
103112+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103113 return -EFAULT;
103114 cnt += width;
103115 }
103116diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103117index 1829c7f..c0b3d52 100644
103118--- a/net/ipv4/tcp_timer.c
103119+++ b/net/ipv4/tcp_timer.c
103120@@ -22,6 +22,10 @@
103121 #include <linux/gfp.h>
103122 #include <net/tcp.h>
103123
103124+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103125+extern int grsec_lastack_retries;
103126+#endif
103127+
103128 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103129 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103130 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103131@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
103132 }
103133 }
103134
103135+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103136+ if ((sk->sk_state == TCP_LAST_ACK) &&
103137+ (grsec_lastack_retries > 0) &&
103138+ (grsec_lastack_retries < retry_until))
103139+ retry_until = grsec_lastack_retries;
103140+#endif
103141+
103142 if (retransmits_timed_out(sk, retry_until,
103143 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103144 /* Has it gone just too far? */
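
grsec_lastack_retries caps the retransmission budget for sockets stuck in LAST_ACK, so a peer that has gone silent (for example, one blackholing our FIN/ACKs) cannot pin local sockets for the full default retry schedule. The clamp only ever lowers retry_until, never raises it; reduced to a sketch with hypothetical parameter names:

static int clamp_last_ack_retries(int in_last_ack, int grsec_limit, int retry_until)
{
        if (in_last_ack && grsec_limit > 0 && grsec_limit < retry_until)
                retry_until = grsec_limit;      /* lower, never raise */
        return retry_until;
}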
103145diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103146index 13b4dcf..b866a2a 100644
103147--- a/net/ipv4/udp.c
103148+++ b/net/ipv4/udp.c
103149@@ -87,6 +87,7 @@
103150 #include <linux/types.h>
103151 #include <linux/fcntl.h>
103152 #include <linux/module.h>
103153+#include <linux/security.h>
103154 #include <linux/socket.h>
103155 #include <linux/sockios.h>
103156 #include <linux/igmp.h>
103157@@ -114,6 +115,10 @@
103158 #include <net/busy_poll.h>
103159 #include "udp_impl.h"
103160
103161+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103162+extern int grsec_enable_blackhole;
103163+#endif
103164+
103165 struct udp_table udp_table __read_mostly;
103166 EXPORT_SYMBOL(udp_table);
103167
103168@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103169 return true;
103170 }
103171
103172+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103173+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103174+
103175 /*
103176 * This routine is called by the ICMP module when it gets some
103177 * sort of error condition. If err < 0 then the socket should
103178@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103179 dport = usin->sin_port;
103180 if (dport == 0)
103181 return -EINVAL;
103182+
103183+ err = gr_search_udp_sendmsg(sk, usin);
103184+ if (err)
103185+ return err;
103186 } else {
103187 if (sk->sk_state != TCP_ESTABLISHED)
103188 return -EDESTADDRREQ;
103189+
103190+ err = gr_search_udp_sendmsg(sk, NULL);
103191+ if (err)
103192+ return err;
103193+
103194 daddr = inet->inet_daddr;
103195 dport = inet->inet_dport;
103196 /* Open fast path for connected socket.
103197@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
103198 IS_UDPLITE(sk));
103199 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103200 IS_UDPLITE(sk));
103201- atomic_inc(&sk->sk_drops);
103202+ atomic_inc_unchecked(&sk->sk_drops);
103203 __skb_unlink(skb, rcvq);
103204 __skb_queue_tail(&list_kill, skb);
103205 }
103206@@ -1275,6 +1292,10 @@ try_again:
103207 if (!skb)
103208 goto out;
103209
103210+ err = gr_search_udp_recvmsg(sk, skb);
103211+ if (err)
103212+ goto out_free;
103213+
103214 ulen = skb->len - sizeof(struct udphdr);
103215 copied = len;
103216 if (copied > ulen)
103217@@ -1307,7 +1328,7 @@ try_again:
103218 if (unlikely(err)) {
103219 trace_kfree_skb(skb, udp_recvmsg);
103220 if (!peeked) {
103221- atomic_inc(&sk->sk_drops);
103222+ atomic_inc_unchecked(&sk->sk_drops);
103223 UDP_INC_STATS_USER(sock_net(sk),
103224 UDP_MIB_INERRORS, is_udplite);
103225 }
103226@@ -1605,7 +1626,7 @@ csum_error:
103227 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103228 drop:
103229 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103230- atomic_inc(&sk->sk_drops);
103231+ atomic_inc_unchecked(&sk->sk_drops);
103232 kfree_skb(skb);
103233 return -1;
103234 }
103235@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103236 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103237
103238 if (!skb1) {
103239- atomic_inc(&sk->sk_drops);
103240+ atomic_inc_unchecked(&sk->sk_drops);
103241 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103242 IS_UDPLITE(sk));
103243 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103244@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103245 goto csum_error;
103246
103247 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103248+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103249+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103250+#endif
103251 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103252
103253 /*
103254@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103255 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103256 0, sock_i_ino(sp),
103257 atomic_read(&sp->sk_refcnt), sp,
103258- atomic_read(&sp->sk_drops));
103259+ atomic_read_unchecked(&sp->sk_drops));
103260 }
103261
103262 int udp4_seq_show(struct seq_file *seq, void *v)
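
The gr_search_udp_sendmsg/gr_search_udp_recvmsg calls added above are grsecurity RBAC socket hooks: they can veto a UDP send or receive per subject policy, and they are placed before any work is done on the send side and before the payload is copied out on the receive side. A reduced sketch of the hook placement; the types and the hook's exact semantics here are assumptions, stand-ins for the real struct sock/sockaddr_in plumbing:

struct policy_sock;     /* stand-in for struct sock        */
struct policy_addr;     /* stand-in for struct sockaddr_in */

/* hypothetical veto hook: 0 = allowed, negative errno = denied */
extern int policy_check_udp_send(struct policy_sock *sk, struct policy_addr *usin);

static int sendmsg_model(struct policy_sock *sk, struct policy_addr *usin)
{
        int err = policy_check_udp_send(sk, usin);  /* runs before any work */
        if (err)
                return err;
        /* ...normal send path continues only when the policy allows it... */
        return 0;
}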
103263diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103264index 6156f68..d6ab46d 100644
103265--- a/net/ipv4/xfrm4_policy.c
103266+++ b/net/ipv4/xfrm4_policy.c
103267@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103268 fl4->flowi4_tos = iph->tos;
103269 }
103270
103271-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103272+static int xfrm4_garbage_collect(struct dst_ops *ops)
103273 {
103274 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103275
103276- xfrm4_policy_afinfo.garbage_collect(net);
103277+ xfrm_garbage_collect_deferred(net);
103278 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103279 }
103280
103281@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103282
103283 static int __net_init xfrm4_net_init(struct net *net)
103284 {
103285- struct ctl_table *table;
103286+ ctl_table_no_const *table = NULL;
103287 struct ctl_table_header *hdr;
103288
103289- table = xfrm4_policy_table;
103290 if (!net_eq(net, &init_net)) {
103291- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103292+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103293 if (!table)
103294 goto err_alloc;
103295
103296 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103297- }
103298-
103299- hdr = register_net_sysctl(net, "net/ipv4", table);
103300+ hdr = register_net_sysctl(net, "net/ipv4", table);
103301+ } else
103302+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103303 if (!hdr)
103304 goto err_reg;
103305
103306@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103307 return 0;
103308
103309 err_reg:
103310- if (!net_eq(net, &init_net))
103311- kfree(table);
103312+ kfree(table);
103313 err_alloc:
103314 return -ENOMEM;
103315 }
103316diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103317index dac9419..534fa31 100644
103318--- a/net/ipv6/addrconf.c
103319+++ b/net/ipv6/addrconf.c
103320@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103321 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103322 .mtu6 = IPV6_MIN_MTU,
103323 .accept_ra = 1,
103324- .accept_redirects = 1,
103325+ .accept_redirects = 0,
103326 .autoconf = 1,
103327 .force_mld_version = 0,
103328 .mldv1_unsolicited_report_interval = 10 * HZ,
103329@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103330 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103331 .mtu6 = IPV6_MIN_MTU,
103332 .accept_ra = 1,
103333- .accept_redirects = 1,
103334+ .accept_redirects = 0,
103335 .autoconf = 1,
103336 .force_mld_version = 0,
103337 .mldv1_unsolicited_report_interval = 10 * HZ,
103338@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103339 idx = 0;
103340 head = &net->dev_index_head[h];
103341 rcu_read_lock();
103342- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103343+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103344 net->dev_base_seq;
103345 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103346 if (idx < s_idx)
103347@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103348 p.iph.ihl = 5;
103349 p.iph.protocol = IPPROTO_IPV6;
103350 p.iph.ttl = 64;
103351- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103352+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103353
103354 if (ops->ndo_do_ioctl) {
103355 mm_segment_t oldfs = get_fs();
103356@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
103357 .release = seq_release_net,
103358 };
103359
103360+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103361+extern void unregister_ipv6_seq_ops_addr(void);
103362+
103363 static int __net_init if6_proc_net_init(struct net *net)
103364 {
103365- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103366+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103367+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103368+ unregister_ipv6_seq_ops_addr();
103369 return -ENOMEM;
103370+ }
103371 return 0;
103372 }
103373
103374 static void __net_exit if6_proc_net_exit(struct net *net)
103375 {
103376 remove_proc_entry("if_inet6", net->proc_net);
103377+ unregister_ipv6_seq_ops_addr();
103378 }
103379
103380 static struct pernet_operations if6_proc_net_ops = {
103381@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103382 s_ip_idx = ip_idx = cb->args[2];
103383
103384 rcu_read_lock();
103385- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103386+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103387 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103388 idx = 0;
103389 head = &net->dev_index_head[h];
103390@@ -4840,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103391 rt_genid_bump_ipv6(net);
103392 break;
103393 }
103394- atomic_inc(&net->ipv6.dev_addr_genid);
103395+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103396 }
103397
103398 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103399@@ -4860,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103400 int *valp = ctl->data;
103401 int val = *valp;
103402 loff_t pos = *ppos;
103403- struct ctl_table lctl;
103404+ ctl_table_no_const lctl;
103405 int ret;
103406
103407 /*
103408@@ -4945,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103409 int *valp = ctl->data;
103410 int val = *valp;
103411 loff_t pos = *ppos;
103412- struct ctl_table lctl;
103413+ ctl_table_no_const lctl;
103414 int ret;
103415
103416 /*
103417diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103418index e8c4400..a4cd5da 100644
103419--- a/net/ipv6/af_inet6.c
103420+++ b/net/ipv6/af_inet6.c
103421@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103422 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103423 net->ipv6.sysctl.flowlabel_consistency = 1;
103424 net->ipv6.sysctl.auto_flowlabels = 0;
103425- atomic_set(&net->ipv6.fib6_sernum, 1);
103426+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
103427
103428 err = ipv6_init_mibs(net);
103429 if (err)
103430diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103431index 49f5e73..ae02d54 100644
103432--- a/net/ipv6/datagram.c
103433+++ b/net/ipv6/datagram.c
103434@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103435 0,
103436 sock_i_ino(sp),
103437 atomic_read(&sp->sk_refcnt), sp,
103438- atomic_read(&sp->sk_drops));
103439+ atomic_read_unchecked(&sp->sk_drops));
103440 }
103441diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103442index d674152..fb5a01d 100644
103443--- a/net/ipv6/icmp.c
103444+++ b/net/ipv6/icmp.c
103445@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103446
103447 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103448 {
103449- struct ctl_table *table;
103450+ ctl_table_no_const *table;
103451
103452 table = kmemdup(ipv6_icmp_table_template,
103453 sizeof(ipv6_icmp_table_template),
103454diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
103455index f1c6d5e..faabef6 100644
103456--- a/net/ipv6/ip6_fib.c
103457+++ b/net/ipv6/ip6_fib.c
103458@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
103459 int new, old;
103460
103461 do {
103462- old = atomic_read(&net->ipv6.fib6_sernum);
103463+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
103464 new = old < INT_MAX ? old + 1 : 1;
103465- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
103466+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
103467 old, new) != old);
103468 return new;
103469 }
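
fib6_new_sernum is a lock-free increment with an explicit wrap: the serial number goes back to 1 (never 0) at INT_MAX, retried via compare-and-swap until no other CPU raced in between. Since deliberate wraparound is the whole point, the counter uses the unchecked atomics. The same loop in runnable userspace terms:

#include <limits.h>
#include <stdatomic.h>

static atomic_int sernum = 1;

static int new_sernum(void)
{
        int old, new;

        do {
                old = atomic_load(&sernum);
                new = old < INT_MAX ? old + 1 : 1;   /* wrap to 1, never 0 */
        } while (!atomic_compare_exchange_weak(&sernum, &old, new));
        return new;
}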
103470diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103471index 01ccc28..66861c7 100644
103472--- a/net/ipv6/ip6_gre.c
103473+++ b/net/ipv6/ip6_gre.c
103474@@ -71,8 +71,8 @@ struct ip6gre_net {
103475 struct net_device *fb_tunnel_dev;
103476 };
103477
103478-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103479-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103480+static struct rtnl_link_ops ip6gre_link_ops;
103481+static struct rtnl_link_ops ip6gre_tap_ops;
103482 static int ip6gre_tunnel_init(struct net_device *dev);
103483 static void ip6gre_tunnel_setup(struct net_device *dev);
103484 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103485@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103486 }
103487
103488
103489-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103490+static struct inet6_protocol ip6gre_protocol = {
103491 .handler = ip6gre_rcv,
103492 .err_handler = ip6gre_err,
103493 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103494@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103495 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103496 };
103497
103498-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103499+static struct rtnl_link_ops ip6gre_link_ops = {
103500 .kind = "ip6gre",
103501 .maxtype = IFLA_GRE_MAX,
103502 .policy = ip6gre_policy,
103503@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103504 .fill_info = ip6gre_fill_info,
103505 };
103506
103507-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103508+static struct rtnl_link_ops ip6gre_tap_ops = {
103509 .kind = "ip6gretap",
103510 .maxtype = IFLA_GRE_MAX,
103511 .policy = ip6gre_policy,
103512diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103513index 92b3da5..77837b8 100644
103514--- a/net/ipv6/ip6_tunnel.c
103515+++ b/net/ipv6/ip6_tunnel.c
103516@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103517
103518 static int ip6_tnl_dev_init(struct net_device *dev);
103519 static void ip6_tnl_dev_setup(struct net_device *dev);
103520-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103521+static struct rtnl_link_ops ip6_link_ops;
103522
103523 static int ip6_tnl_net_id __read_mostly;
103524 struct ip6_tnl_net {
103525@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103526 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103527 };
103528
103529-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103530+static struct rtnl_link_ops ip6_link_ops = {
103531 .kind = "ip6tnl",
103532 .maxtype = IFLA_IPTUN_MAX,
103533 .policy = ip6_tnl_policy,
103534diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103535index ace10d0..97a8b49 100644
103536--- a/net/ipv6/ip6_vti.c
103537+++ b/net/ipv6/ip6_vti.c
103538@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103539
103540 static int vti6_dev_init(struct net_device *dev);
103541 static void vti6_dev_setup(struct net_device *dev);
103542-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103543+static struct rtnl_link_ops vti6_link_ops;
103544
103545 static int vti6_net_id __read_mostly;
103546 struct vti6_net {
103547@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103548 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103549 };
103550
103551-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103552+static struct rtnl_link_ops vti6_link_ops = {
103553 .kind = "vti6",
103554 .maxtype = IFLA_VTI_MAX,
103555 .policy = vti6_policy,
103556diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103557index 66980d8d..8aef0d1 100644
103558--- a/net/ipv6/ipv6_sockglue.c
103559+++ b/net/ipv6/ipv6_sockglue.c
103560@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103561 if (sk->sk_type != SOCK_STREAM)
103562 return -ENOPROTOOPT;
103563
103564- msg.msg_control = optval;
103565+ msg.msg_control = (void __force_kernel *)optval;
103566 msg.msg_controllen = len;
103567 msg.msg_flags = flags;
103568
103569diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103570index e080fbb..412b3cf 100644
103571--- a/net/ipv6/netfilter/ip6_tables.c
103572+++ b/net/ipv6/netfilter/ip6_tables.c
103573@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103574 #endif
103575
103576 static int get_info(struct net *net, void __user *user,
103577- const int *len, int compat)
103578+ int len, int compat)
103579 {
103580 char name[XT_TABLE_MAXNAMELEN];
103581 struct xt_table *t;
103582 int ret;
103583
103584- if (*len != sizeof(struct ip6t_getinfo)) {
103585- duprintf("length %u != %zu\n", *len,
103586+ if (len != sizeof(struct ip6t_getinfo)) {
103587+ duprintf("length %u != %zu\n", len,
103588 sizeof(struct ip6t_getinfo));
103589 return -EINVAL;
103590 }
103591@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103592 info.size = private->size;
103593 strcpy(info.name, name);
103594
103595- if (copy_to_user(user, &info, *len) != 0)
103596+ if (copy_to_user(user, &info, len) != 0)
103597 ret = -EFAULT;
103598 else
103599 ret = 0;
103600@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103601
103602 switch (cmd) {
103603 case IP6T_SO_GET_INFO:
103604- ret = get_info(sock_net(sk), user, len, 1);
103605+ ret = get_info(sock_net(sk), user, *len, 1);
103606 break;
103607 case IP6T_SO_GET_ENTRIES:
103608 ret = compat_get_entries(sock_net(sk), user, len);
103609@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103610
103611 switch (cmd) {
103612 case IP6T_SO_GET_INFO:
103613- ret = get_info(sock_net(sk), user, len, 0);
103614+ ret = get_info(sock_net(sk), user, *len, 0);
103615 break;
103616
103617 case IP6T_SO_GET_ENTRIES:
103618diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103619index 6f187c8..34b367f 100644
103620--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103621+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103622@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103623
103624 static int nf_ct_frag6_sysctl_register(struct net *net)
103625 {
103626- struct ctl_table *table;
103627+ ctl_table_no_const *table = NULL;
103628 struct ctl_table_header *hdr;
103629
103630- table = nf_ct_frag6_sysctl_table;
103631 if (!net_eq(net, &init_net)) {
103632- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103633+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103634 GFP_KERNEL);
103635 if (table == NULL)
103636 goto err_alloc;
103637@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103638 table[2].data = &net->nf_frag.frags.high_thresh;
103639 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103640 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103641- }
103642-
103643- hdr = register_net_sysctl(net, "net/netfilter", table);
103644+ hdr = register_net_sysctl(net, "net/netfilter", table);
103645+ } else
103646+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103647 if (hdr == NULL)
103648 goto err_reg;
103649
103650@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103651 return 0;
103652
103653 err_reg:
103654- if (!net_eq(net, &init_net))
103655- kfree(table);
103656+ kfree(table);
103657 err_alloc:
103658 return -ENOMEM;
103659 }
103660diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103661index fe7e3e4..47aba96 100644
103662--- a/net/ipv6/ping.c
103663+++ b/net/ipv6/ping.c
103664@@ -242,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
103665 };
103666 #endif
103667
103668+static struct pingv6_ops real_pingv6_ops = {
103669+ .ipv6_recv_error = ipv6_recv_error,
103670+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103671+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103672+ .icmpv6_err_convert = icmpv6_err_convert,
103673+ .ipv6_icmp_error = ipv6_icmp_error,
103674+ .ipv6_chk_addr = ipv6_chk_addr,
103675+};
103676+
103677+static struct pingv6_ops dummy_pingv6_ops = {
103678+ .ipv6_recv_error = dummy_ipv6_recv_error,
103679+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103680+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103681+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103682+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103683+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103684+};
103685+
103686 int __init pingv6_init(void)
103687 {
103688 #ifdef CONFIG_PROC_FS
103689@@ -249,13 +267,7 @@ int __init pingv6_init(void)
103690 if (ret)
103691 return ret;
103692 #endif
103693- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
103694- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
103695- pingv6_ops.ip6_datagram_recv_specific_ctl =
103696- ip6_datagram_recv_specific_ctl;
103697- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
103698- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
103699- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
103700+ pingv6_ops = &real_pingv6_ops;
103701 return inet6_register_protosw(&pingv6_protosw);
103702 }
103703
103704@@ -264,14 +276,9 @@ int __init pingv6_init(void)
103705 */
103706 void pingv6_exit(void)
103707 {
103708- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
103709- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
103710- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
103711- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
103712- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
103713- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
103714 #ifdef CONFIG_PROC_FS
103715 unregister_pernet_subsys(&ping_v6_net_ops);
103716 #endif
103717+ pingv6_ops = &dummy_pingv6_ops;
103718 inet6_unregister_protosw(&pingv6_protosw);
103719 }
103720diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
103721index 679253d0..70b653c 100644
103722--- a/net/ipv6/proc.c
103723+++ b/net/ipv6/proc.c
103724@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
103725 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
103726 goto proc_snmp6_fail;
103727
103728- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
103729+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
103730 if (!net->mib.proc_net_devsnmp6)
103731 goto proc_dev_snmp6_fail;
103732 return 0;
103733diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
103734index ee25631..3c3ac5d 100644
103735--- a/net/ipv6/raw.c
103736+++ b/net/ipv6/raw.c
103737@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
103738 {
103739 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
103740 skb_checksum_complete(skb)) {
103741- atomic_inc(&sk->sk_drops);
103742+ atomic_inc_unchecked(&sk->sk_drops);
103743 kfree_skb(skb);
103744 return NET_RX_DROP;
103745 }
103746@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103747 struct raw6_sock *rp = raw6_sk(sk);
103748
103749 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
103750- atomic_inc(&sk->sk_drops);
103751+ atomic_inc_unchecked(&sk->sk_drops);
103752 kfree_skb(skb);
103753 return NET_RX_DROP;
103754 }
103755@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103756
103757 if (inet->hdrincl) {
103758 if (skb_checksum_complete(skb)) {
103759- atomic_inc(&sk->sk_drops);
103760+ atomic_inc_unchecked(&sk->sk_drops);
103761 kfree_skb(skb);
103762 return NET_RX_DROP;
103763 }
103764@@ -609,7 +609,7 @@ out:
103765 return err;
103766 }
103767
103768-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
103769+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
103770 struct flowi6 *fl6, struct dst_entry **dstp,
103771 unsigned int flags)
103772 {
103773@@ -916,12 +916,15 @@ do_confirm:
103774 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
103775 char __user *optval, int optlen)
103776 {
103777+ struct icmp6_filter filter;
103778+
103779 switch (optname) {
103780 case ICMPV6_FILTER:
103781 if (optlen > sizeof(struct icmp6_filter))
103782 optlen = sizeof(struct icmp6_filter);
103783- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
103784+ if (copy_from_user(&filter, optval, optlen))
103785 return -EFAULT;
103786+ raw6_sk(sk)->filter = filter;
103787 return 0;
103788 default:
103789 return -ENOPROTOOPT;
103790@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103791 char __user *optval, int __user *optlen)
103792 {
103793 int len;
103794+ struct icmp6_filter filter;
103795
103796 switch (optname) {
103797 case ICMPV6_FILTER:
103798@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103799 len = sizeof(struct icmp6_filter);
103800 if (put_user(len, optlen))
103801 return -EFAULT;
103802- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
103803+ filter = raw6_sk(sk)->filter;
103804+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
103805 return -EFAULT;
103806 return 0;
103807 default:
103808diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
103809index d7d70e6..bd5e9fc 100644
103810--- a/net/ipv6/reassembly.c
103811+++ b/net/ipv6/reassembly.c
103812@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
103813
103814 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103815 {
103816- struct ctl_table *table;
103817+ ctl_table_no_const *table = NULL;
103818 struct ctl_table_header *hdr;
103819
103820- table = ip6_frags_ns_ctl_table;
103821 if (!net_eq(net, &init_net)) {
103822- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
103823+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
103824 if (table == NULL)
103825 goto err_alloc;
103826
103827@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103828 /* Don't export sysctls to unprivileged users */
103829 if (net->user_ns != &init_user_ns)
103830 table[0].procname = NULL;
103831- }
103832+ hdr = register_net_sysctl(net, "net/ipv6", table);
103833+ } else
103834+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
103835
103836- hdr = register_net_sysctl(net, "net/ipv6", table);
103837 if (hdr == NULL)
103838 goto err_reg;
103839
103840@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103841 return 0;
103842
103843 err_reg:
103844- if (!net_eq(net, &init_net))
103845- kfree(table);
103846+ kfree(table);
103847 err_alloc:
103848 return -ENOMEM;
103849 }
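
The reassembly hunk is the template for the per-netns sysctl constification applied throughout the patch: the static template table becomes read-only, so init_net registers it directly and only non-init namespaces get a writable kmemdup() copy typed ctl_table_no_const. The error path can then kfree() unconditionally, because table stays NULL for init_net and kfree(NULL) is a no-op. Condensed control flow (template_table is a placeholder name):

        static int __net_init sketch_sysctl_register(struct net *net)
        {
                ctl_table_no_const *table = NULL;       /* writable copy, if any */
                struct ctl_table_header *hdr;

                if (!net_eq(net, &init_net)) {
                        table = kmemdup(template_table, sizeof(template_table),
                                        GFP_KERNEL);
                        if (table == NULL)
                                return -ENOMEM;
                        /* adjust per-netns .data pointers in the copy here */
                        hdr = register_net_sysctl(net, "net/ipv6", table);
                } else
                        hdr = register_net_sysctl(net, "net/ipv6", template_table);

                if (hdr == NULL) {
                        kfree(table);                   /* NULL-safe */
                        return -ENOMEM;
                }
                return 0;
        }

The same transformation appears below in xfrm6_net_init() and in the netfilter conntrack sysctl registrations.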
103850diff --git a/net/ipv6/route.c b/net/ipv6/route.c
103851index 1528d84..f393960 100644
103852--- a/net/ipv6/route.c
103853+++ b/net/ipv6/route.c
103854@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
103855
103856 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
103857 {
103858- struct ctl_table *table;
103859+ ctl_table_no_const *table;
103860
103861 table = kmemdup(ipv6_route_table_template,
103862 sizeof(ipv6_route_table_template),
103863diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
103864index cdbfe5a..e13eb31 100644
103865--- a/net/ipv6/sit.c
103866+++ b/net/ipv6/sit.c
103867@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
103868 static void ipip6_dev_free(struct net_device *dev);
103869 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
103870 __be32 *v4dst);
103871-static struct rtnl_link_ops sit_link_ops __read_mostly;
103872+static struct rtnl_link_ops sit_link_ops;
103873
103874 static int sit_net_id __read_mostly;
103875 struct sit_net {
103876@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
103877 unregister_netdevice_queue(dev, head);
103878 }
103879
103880-static struct rtnl_link_ops sit_link_ops __read_mostly = {
103881+static struct rtnl_link_ops sit_link_ops = {
103882 .kind = "sit",
103883 .maxtype = IFLA_IPTUN_MAX,
103884 .policy = ipip6_policy,
103885diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
103886index c5c10fa..2577d51 100644
103887--- a/net/ipv6/sysctl_net_ipv6.c
103888+++ b/net/ipv6/sysctl_net_ipv6.c
103889@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
103890
103891 static int __net_init ipv6_sysctl_net_init(struct net *net)
103892 {
103893- struct ctl_table *ipv6_table;
103894+ ctl_table_no_const *ipv6_table;
103895 struct ctl_table *ipv6_route_table;
103896 struct ctl_table *ipv6_icmp_table;
103897 int err;
103898diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
103899index 9c0b54e..5e7bd8f 100644
103900--- a/net/ipv6/tcp_ipv6.c
103901+++ b/net/ipv6/tcp_ipv6.c
103902@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
103903 }
103904 }
103905
103906+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103907+extern int grsec_enable_blackhole;
103908+#endif
103909+
103910 static void tcp_v6_hash(struct sock *sk)
103911 {
103912 if (sk->sk_state != TCP_CLOSE) {
103913@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
103914 return 0;
103915
103916 reset:
103917+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103918+ if (!grsec_enable_blackhole)
103919+#endif
103920 tcp_v6_send_reset(sk, skb);
103921 discard:
103922 if (opt_skb)
103923@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
103924
103925 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
103926 inet6_iif(skb));
103927- if (!sk)
103928+ if (!sk) {
103929+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103930+ ret = 1;
103931+#endif
103932 goto no_tcp_socket;
103933+ }
103934
103935 process:
103936- if (sk->sk_state == TCP_TIME_WAIT)
103937+ if (sk->sk_state == TCP_TIME_WAIT) {
103938+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103939+ ret = 2;
103940+#endif
103941 goto do_time_wait;
103942+ }
103943
103944 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
103945 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103946@@ -1499,6 +1514,10 @@ csum_error:
103947 bad_packet:
103948 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103949 } else {
103950+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103951+ if (!grsec_enable_blackhole || (ret == 1 &&
103952+ (skb->dev->flags & IFF_LOOPBACK)))
103953+#endif
103954 tcp_v6_send_reset(NULL, skb);
103955 }
103956
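
The tcp_ipv6.c changes wire up CONFIG_GRKERNSEC_BLACKHOLE for IPv6: with the grsec_enable_blackhole sysctl set, the stack stays silent instead of answering unsolicited segments, so port scans get no RST feedback. ret records how the packet reached the error path (1: no listening socket, 2: TIME_WAIT), and the final guard still permits resets on loopback for the no-socket case:

        #ifdef CONFIG_GRKERNSEC_BLACKHOLE
                if (!grsec_enable_blackhole || (ret == 1 &&
                    (skb->dev->flags & IFF_LOOPBACK)))
        #endif
                        tcp_v6_send_reset(NULL, skb);

With the option compiled out, the preprocessor drops the condition and behavior is unchanged. The UDP counterpart below suppresses the ICMPv6 port-unreachable the same way.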
103957diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
103958index 189dc4a..458bec0 100644
103959--- a/net/ipv6/udp.c
103960+++ b/net/ipv6/udp.c
103961@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
103962 udp_ipv6_hash_secret + net_hash_mix(net));
103963 }
103964
103965+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103966+extern int grsec_enable_blackhole;
103967+#endif
103968+
103969 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
103970 {
103971 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
103972@@ -448,7 +452,7 @@ try_again:
103973 if (unlikely(err)) {
103974 trace_kfree_skb(skb, udpv6_recvmsg);
103975 if (!peeked) {
103976- atomic_inc(&sk->sk_drops);
103977+ atomic_inc_unchecked(&sk->sk_drops);
103978 if (is_udp4)
103979 UDP_INC_STATS_USER(sock_net(sk),
103980 UDP_MIB_INERRORS,
103981@@ -714,7 +718,7 @@ csum_error:
103982 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103983 drop:
103984 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103985- atomic_inc(&sk->sk_drops);
103986+ atomic_inc_unchecked(&sk->sk_drops);
103987 kfree_skb(skb);
103988 return -1;
103989 }
103990@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103991 if (likely(skb1 == NULL))
103992 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103993 if (!skb1) {
103994- atomic_inc(&sk->sk_drops);
103995+ atomic_inc_unchecked(&sk->sk_drops);
103996 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103997 IS_UDPLITE(sk));
103998 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103999@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104000 goto csum_error;
104001
104002 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104003+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104004+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104005+#endif
104006 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104007
104008 kfree_skb(skb);
104009diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104010index 48bf5a0..691985a 100644
104011--- a/net/ipv6/xfrm6_policy.c
104012+++ b/net/ipv6/xfrm6_policy.c
104013@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104014 }
104015 }
104016
104017-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104018+static int xfrm6_garbage_collect(struct dst_ops *ops)
104019 {
104020 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104021
104022- xfrm6_policy_afinfo.garbage_collect(net);
104023+ xfrm_garbage_collect_deferred(net);
104024 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104025 }
104026
104027@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104028
104029 static int __net_init xfrm6_net_init(struct net *net)
104030 {
104031- struct ctl_table *table;
104032+ ctl_table_no_const *table = NULL;
104033 struct ctl_table_header *hdr;
104034
104035- table = xfrm6_policy_table;
104036 if (!net_eq(net, &init_net)) {
104037- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104038+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104039 if (!table)
104040 goto err_alloc;
104041
104042 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104043- }
104044+ hdr = register_net_sysctl(net, "net/ipv6", table);
104045+ } else
104046+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104047
104048- hdr = register_net_sysctl(net, "net/ipv6", table);
104049 if (!hdr)
104050 goto err_reg;
104051
104052@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104053 return 0;
104054
104055 err_reg:
104056- if (!net_eq(net, &init_net))
104057- kfree(table);
104058+ kfree(table);
104059 err_alloc:
104060 return -ENOMEM;
104061 }
104062diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104063index c1d247e..9e5949d 100644
104064--- a/net/ipx/ipx_proc.c
104065+++ b/net/ipx/ipx_proc.c
104066@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104067 struct proc_dir_entry *p;
104068 int rc = -ENOMEM;
104069
104070- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104071+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104072
104073 if (!ipx_proc_dir)
104074 goto out;
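
proc_mkdir_restrict() is a grsecurity-added replacement for proc_mkdir(), used here and for the irda and llc proc directories below. Depending on CONFIG_GRKERNSEC_PROC_USER / _USERGROUP it creates the directory with owner-only (or owner-plus-group) permissions, so unprivileged users cannot browse these network proc entries. A rough sketch of the idea, not the exact grsecurity implementation:

        struct proc_dir_entry *proc_mkdir_restrict(const char *name,
                                                   struct proc_dir_entry *parent)
        {
        #if defined(CONFIG_GRKERNSEC_PROC_USER)
                return proc_mkdir_mode(name, S_IRUSR | S_IXUSR, parent);
        #elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
                return proc_mkdir_mode(name, S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP,
                                       parent);
        #else
                return proc_mkdir(name, parent);
        #endif
        }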
104075diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104076index 4efe486..dee966e 100644
104077--- a/net/irda/ircomm/ircomm_tty.c
104078+++ b/net/irda/ircomm/ircomm_tty.c
104079@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104080 add_wait_queue(&port->open_wait, &wait);
104081
104082 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104083- __FILE__, __LINE__, tty->driver->name, port->count);
104084+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104085
104086 spin_lock_irqsave(&port->lock, flags);
104087- port->count--;
104088+ atomic_dec(&port->count);
104089 port->blocked_open++;
104090 spin_unlock_irqrestore(&port->lock, flags);
104091
104092@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104093 }
104094
104095 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104096- __FILE__, __LINE__, tty->driver->name, port->count);
104097+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104098
104099 schedule();
104100 }
104101@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104102
104103 spin_lock_irqsave(&port->lock, flags);
104104 if (!tty_hung_up_p(filp))
104105- port->count++;
104106+ atomic_inc(&port->count);
104107 port->blocked_open--;
104108 spin_unlock_irqrestore(&port->lock, flags);
104109
104110 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104111- __FILE__, __LINE__, tty->driver->name, port->count);
104112+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104113
104114 if (!retval)
104115 port->flags |= ASYNC_NORMAL_ACTIVE;
104116@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104117
104118 /* ++ is not atomic, so this should be protected - Jean II */
104119 spin_lock_irqsave(&self->port.lock, flags);
104120- self->port.count++;
104121+ atomic_inc(&self->port.count);
104122 spin_unlock_irqrestore(&self->port.lock, flags);
104123 tty_port_tty_set(&self->port, tty);
104124
104125 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104126- self->line, self->port.count);
104127+ self->line, atomic_read(&self->port.count));
104128
104129 /* Not really used by us, but lets do it anyway */
104130 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104131@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104132 tty_kref_put(port->tty);
104133 }
104134 port->tty = NULL;
104135- port->count = 0;
104136+ atomic_set(&port->count, 0);
104137 spin_unlock_irqrestore(&port->lock, flags);
104138
104139 wake_up_interruptible(&port->open_wait);
104140@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104141 seq_putc(m, '\n');
104142
104143 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104144- seq_printf(m, "Open count: %d\n", self->port.count);
104145+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104146 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104147 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104148
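
The ircomm_tty hunks apply a tree-wide grsecurity change: the tty_port open count becomes an atomic_t, so the diagnostic reads that happen outside port->lock are well-defined and the increments/decrements cannot be torn. The access pattern after conversion:

        spin_lock_irqsave(&port->lock, flags);
        atomic_dec(&port->count);               /* was: port->count--; */
        port->blocked_open++;
        spin_unlock_irqrestore(&port->lock, flags);

        /* lock-free diagnostic read */
        pr_debug("open_count=%d\n", atomic_read(&port->count));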
104149diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104150index b9ac598..f88cc56 100644
104151--- a/net/irda/irproc.c
104152+++ b/net/irda/irproc.c
104153@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104154 {
104155 int i;
104156
104157- proc_irda = proc_mkdir("irda", init_net.proc_net);
104158+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104159 if (proc_irda == NULL)
104160 return;
104161
104162diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104163index 2e9953b..ed06350 100644
104164--- a/net/iucv/af_iucv.c
104165+++ b/net/iucv/af_iucv.c
104166@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104167 {
104168 char name[12];
104169
104170- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104171+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104172 while (__iucv_get_sock_by_name(name)) {
104173 sprintf(name, "%08x",
104174- atomic_inc_return(&iucv_sk_list.autobind_name));
104175+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104176 }
104177 memcpy(iucv->src_name, name, 8);
104178 }
104179diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104180index 2a6a1fd..6c112b0 100644
104181--- a/net/iucv/iucv.c
104182+++ b/net/iucv/iucv.c
104183@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104184 return NOTIFY_OK;
104185 }
104186
104187-static struct notifier_block __refdata iucv_cpu_notifier = {
104188+static struct notifier_block iucv_cpu_notifier = {
104189 .notifier_call = iucv_cpu_notify,
104190 };
104191
104192diff --git a/net/key/af_key.c b/net/key/af_key.c
104193index f8ac939..1e189bf 100644
104194--- a/net/key/af_key.c
104195+++ b/net/key/af_key.c
104196@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104197 static u32 get_acqseq(void)
104198 {
104199 u32 res;
104200- static atomic_t acqseq;
104201+ static atomic_unchecked_t acqseq;
104202
104203 do {
104204- res = atomic_inc_return(&acqseq);
104205+ res = atomic_inc_return_unchecked(&acqseq);
104206 } while (!res);
104207 return res;
104208 }
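
get_acqseq() is the canonical case for atomic_unchecked_t: under PaX REFCOUNT, ordinary atomic_t operations trap on overflow to catch reference-count bugs, so counters that are meant to wrap must opt out via the _unchecked variants, which behave like plain atomics. The acquire-sequence generator wraps by design and skips zero:

        static u32 get_acqseq(void)
        {
                u32 res;
                static atomic_unchecked_t acqseq;       /* wrap is benign */

                do {
                        res = atomic_inc_return_unchecked(&acqseq);
                } while (!res);                         /* 0 is reserved */
                return res;
        }

The l2tp_eth statistics and the ipvs in_pkts counters below are converted for the same reason.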
104209diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104210index 781b3a2..73a7434 100644
104211--- a/net/l2tp/l2tp_eth.c
104212+++ b/net/l2tp/l2tp_eth.c
104213@@ -42,12 +42,12 @@ struct l2tp_eth {
104214 struct sock *tunnel_sock;
104215 struct l2tp_session *session;
104216 struct list_head list;
104217- atomic_long_t tx_bytes;
104218- atomic_long_t tx_packets;
104219- atomic_long_t tx_dropped;
104220- atomic_long_t rx_bytes;
104221- atomic_long_t rx_packets;
104222- atomic_long_t rx_errors;
104223+ atomic_long_unchecked_t tx_bytes;
104224+ atomic_long_unchecked_t tx_packets;
104225+ atomic_long_unchecked_t tx_dropped;
104226+ atomic_long_unchecked_t rx_bytes;
104227+ atomic_long_unchecked_t rx_packets;
104228+ atomic_long_unchecked_t rx_errors;
104229 };
104230
104231 /* via l2tp_session_priv() */
104232@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104233 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104234
104235 if (likely(ret == NET_XMIT_SUCCESS)) {
104236- atomic_long_add(len, &priv->tx_bytes);
104237- atomic_long_inc(&priv->tx_packets);
104238+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104239+ atomic_long_inc_unchecked(&priv->tx_packets);
104240 } else {
104241- atomic_long_inc(&priv->tx_dropped);
104242+ atomic_long_inc_unchecked(&priv->tx_dropped);
104243 }
104244 return NETDEV_TX_OK;
104245 }
104246@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104247 {
104248 struct l2tp_eth *priv = netdev_priv(dev);
104249
104250- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104251- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104252- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104253- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104254- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104255- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104256+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104257+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104258+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104259+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104260+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104261+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104262 return stats;
104263 }
104264
104265@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104266 nf_reset(skb);
104267
104268 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104269- atomic_long_inc(&priv->rx_packets);
104270- atomic_long_add(data_len, &priv->rx_bytes);
104271+ atomic_long_inc_unchecked(&priv->rx_packets);
104272+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104273 } else {
104274- atomic_long_inc(&priv->rx_errors);
104275+ atomic_long_inc_unchecked(&priv->rx_errors);
104276 }
104277 return;
104278
104279 error:
104280- atomic_long_inc(&priv->rx_errors);
104281+ atomic_long_inc_unchecked(&priv->rx_errors);
104282 kfree_skb(skb);
104283 }
104284
104285diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104286index 1a3c7e0..80f8b0c 100644
104287--- a/net/llc/llc_proc.c
104288+++ b/net/llc/llc_proc.c
104289@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104290 int rc = -ENOMEM;
104291 struct proc_dir_entry *p;
104292
104293- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104294+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104295 if (!llc_proc_dir)
104296 goto out;
104297
104298diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104299index e75d5c5..429fc95 100644
104300--- a/net/mac80211/cfg.c
104301+++ b/net/mac80211/cfg.c
104302@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104303 ret = ieee80211_vif_use_channel(sdata, chandef,
104304 IEEE80211_CHANCTX_EXCLUSIVE);
104305 }
104306- } else if (local->open_count == local->monitors) {
104307+ } else if (local_read(&local->open_count) == local->monitors) {
104308 local->_oper_chandef = *chandef;
104309 ieee80211_hw_config(local, 0);
104310 }
104311@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104312 else
104313 local->probe_req_reg--;
104314
104315- if (!local->open_count)
104316+ if (!local_read(&local->open_count))
104317 break;
104318
104319 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104320@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104321 if (chanctx_conf) {
104322 *chandef = sdata->vif.bss_conf.chandef;
104323 ret = 0;
104324- } else if (local->open_count > 0 &&
104325- local->open_count == local->monitors &&
104326+ } else if (local_read(&local->open_count) > 0 &&
104327+ local_read(&local->open_count) == local->monitors &&
104328 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104329 if (local->use_chanctx)
104330 *chandef = local->monitor_chandef;
104331diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104332index cc6e964..029a3a3 100644
104333--- a/net/mac80211/ieee80211_i.h
104334+++ b/net/mac80211/ieee80211_i.h
104335@@ -29,6 +29,7 @@
104336 #include <net/ieee80211_radiotap.h>
104337 #include <net/cfg80211.h>
104338 #include <net/mac80211.h>
104339+#include <asm/local.h>
104340 #include "key.h"
104341 #include "sta_info.h"
104342 #include "debug.h"
104343@@ -1114,7 +1115,7 @@ struct ieee80211_local {
104344 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104345 spinlock_t queue_stop_reason_lock;
104346
104347- int open_count;
104348+ local_t open_count;
104349 int monitors, cooked_mntrs;
104350 /* number of interfaces with corresponding FIF_ flags */
104351 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
104352diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104353index 4173553..e3b5a3f 100644
104354--- a/net/mac80211/iface.c
104355+++ b/net/mac80211/iface.c
104356@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104357 break;
104358 }
104359
104360- if (local->open_count == 0) {
104361+ if (local_read(&local->open_count) == 0) {
104362 res = drv_start(local);
104363 if (res)
104364 goto err_del_bss;
104365@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104366 res = drv_add_interface(local, sdata);
104367 if (res)
104368 goto err_stop;
104369- } else if (local->monitors == 0 && local->open_count == 0) {
104370+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104371 res = ieee80211_add_virtual_monitor(local);
104372 if (res)
104373 goto err_stop;
104374@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104375 atomic_inc(&local->iff_promiscs);
104376
104377 if (coming_up)
104378- local->open_count++;
104379+ local_inc(&local->open_count);
104380
104381 if (hw_reconf_flags)
104382 ieee80211_hw_config(local, hw_reconf_flags);
104383@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104384 err_del_interface:
104385 drv_remove_interface(local, sdata);
104386 err_stop:
104387- if (!local->open_count)
104388+ if (!local_read(&local->open_count))
104389 drv_stop(local);
104390 err_del_bss:
104391 sdata->bss = NULL;
104392@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104393 }
104394
104395 if (going_down)
104396- local->open_count--;
104397+ local_dec(&local->open_count);
104398
104399 switch (sdata->vif.type) {
104400 case NL80211_IFTYPE_AP_VLAN:
104401@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104402 }
104403 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104404
104405- if (local->open_count == 0)
104406+ if (local_read(&local->open_count) == 0)
104407 ieee80211_clear_tx_pending(local);
104408
104409 /*
104410@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104411 if (cancel_scan)
104412 flush_delayed_work(&local->scan_work);
104413
104414- if (local->open_count == 0) {
104415+ if (local_read(&local->open_count) == 0) {
104416 ieee80211_stop_device(local);
104417
104418 /* no reconfiguring after stop! */
104419@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104420 ieee80211_configure_filter(local);
104421 ieee80211_hw_config(local, hw_reconf_flags);
104422
104423- if (local->monitors == local->open_count)
104424+ if (local->monitors == local_read(&local->open_count))
104425 ieee80211_add_virtual_monitor(local);
104426 }
104427
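
Across cfg.c, iface.c, main.c, pm.c, rate.c and util.c, local->open_count changes from a plain int to a local_t, so the open/close accounting goes through explicit atomic primitives instead of bare loads and read-modify-write on an int. Every accessor changes mechanically:

        #include <asm/local.h>

        local_t open_count;                      /* was: int open_count; */

        local_inc(&local->open_count);           /* was: local->open_count++; */
        local_dec(&local->open_count);           /* was: local->open_count--; */
        if (local_read(&local->open_count) == 0) /* was: !local->open_count */
                drv_stop(local);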
104428diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104429index 6ab99da..f9502d4 100644
104430--- a/net/mac80211/main.c
104431+++ b/net/mac80211/main.c
104432@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104433 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104434 IEEE80211_CONF_CHANGE_POWER);
104435
104436- if (changed && local->open_count) {
104437+ if (changed && local_read(&local->open_count)) {
104438 ret = drv_config(local, changed);
104439 /*
104440 * Goal:
104441diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104442index 4a95fe3..0bfd713 100644
104443--- a/net/mac80211/pm.c
104444+++ b/net/mac80211/pm.c
104445@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104446 struct ieee80211_sub_if_data *sdata;
104447 struct sta_info *sta;
104448
104449- if (!local->open_count)
104450+ if (!local_read(&local->open_count))
104451 goto suspend;
104452
104453 ieee80211_scan_cancel(local);
104454@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104455 cancel_work_sync(&local->dynamic_ps_enable_work);
104456 del_timer_sync(&local->dynamic_ps_timer);
104457
104458- local->wowlan = wowlan && local->open_count;
104459+ local->wowlan = wowlan && local_read(&local->open_count);
104460 if (local->wowlan) {
104461 int err = drv_suspend(local, wowlan);
104462 if (err < 0) {
104463@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104464 WARN_ON(!list_empty(&local->chanctx_list));
104465
104466 /* stop hardware - this must stop RX */
104467- if (local->open_count)
104468+ if (local_read(&local->open_count))
104469 ieee80211_stop_device(local);
104470
104471 suspend:
104472diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104473index d53355b..21f583a 100644
104474--- a/net/mac80211/rate.c
104475+++ b/net/mac80211/rate.c
104476@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104477
104478 ASSERT_RTNL();
104479
104480- if (local->open_count)
104481+ if (local_read(&local->open_count))
104482 return -EBUSY;
104483
104484 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104485diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104486index 974ebe7..57bcd3c 100644
104487--- a/net/mac80211/util.c
104488+++ b/net/mac80211/util.c
104489@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104490 }
104491 #endif
104492 /* everything else happens only if HW was up & running */
104493- if (!local->open_count)
104494+ if (!local_read(&local->open_count))
104495 goto wake_up;
104496
104497 /*
104498@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104499 local->in_reconfig = false;
104500 barrier();
104501
104502- if (local->monitors == local->open_count && local->monitors > 0)
104503+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104504 ieee80211_add_virtual_monitor(local);
104505
104506 /*
104507diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104508index b02660f..c0f791c 100644
104509--- a/net/netfilter/Kconfig
104510+++ b/net/netfilter/Kconfig
104511@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
104512
104513 To compile it as a module, choose M here. If unsure, say N.
104514
104515+config NETFILTER_XT_MATCH_GRADM
104516+ tristate '"gradm" match support'
104517+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104518+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104519+ ---help---
104520+	  The gradm match allows matching on whether the grsecurity RBAC
104521+	  system is enabled. It is useful when iptables rules are applied
104522+	  early during boot to block connections to the machine (except
104523+	  from a trusted host) while the RBAC system is still disabled.
104524+
104525 config NETFILTER_XT_MATCH_HASHLIMIT
104526 tristate '"hashlimit" match support'
104527 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
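
Together with the Makefile change and the new net/netfilter/xt_gradm.c below (truncated in this scrape), this Kconfig entry adds an xtables match keyed on RBAC state. Purely as a hypothetical sketch of the shape such a match module takes, not the actual contents of xt_gradm.c, a minimal version would look like:

        #include <linux/module.h>
        #include <linux/skbuff.h>
        #include <linux/netfilter/x_tables.h>

        extern int gr_acl_is_enabled(void);     /* RBAC core hook; assumed */

        static bool gradm_mt(const struct sk_buff *skb,
                             struct xt_action_param *par)
        {
                /* the real match data would also carry an invert flag */
                return gr_acl_is_enabled() != 0;
        }

        static struct xt_match gradm_mt_reg __read_mostly = {
                .name     = "gradm",
                .revision = 0,
                .family   = NFPROTO_UNSPEC,
                .match    = gradm_mt,
                .me       = THIS_MODULE,
        };

        static int __init gradm_mt_init(void)
        {
                return xt_register_match(&gradm_mt_reg);
        }

        static void __exit gradm_mt_exit(void)
        {
                xt_unregister_match(&gradm_mt_reg);
        }

        module_init(gradm_mt_init);
        module_exit(gradm_mt_exit);
        MODULE_LICENSE("GPL");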
104528diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104529index 89f73a9..e4e5bd9 100644
104530--- a/net/netfilter/Makefile
104531+++ b/net/netfilter/Makefile
104532@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104533 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104534 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104535 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104536+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104537 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104538 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104539 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
104540diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104541index d259da3..6a32b2c 100644
104542--- a/net/netfilter/ipset/ip_set_core.c
104543+++ b/net/netfilter/ipset/ip_set_core.c
104544@@ -1952,7 +1952,7 @@ done:
104545 return ret;
104546 }
104547
104548-static struct nf_sockopt_ops so_set __read_mostly = {
104549+static struct nf_sockopt_ops so_set = {
104550 .pf = PF_INET,
104551 .get_optmin = SO_IP_SET,
104552 .get_optmax = SO_IP_SET + 1,
104553diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104554index b0f7b62..0541842 100644
104555--- a/net/netfilter/ipvs/ip_vs_conn.c
104556+++ b/net/netfilter/ipvs/ip_vs_conn.c
104557@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104558 /* Increase the refcnt counter of the dest */
104559 ip_vs_dest_hold(dest);
104560
104561- conn_flags = atomic_read(&dest->conn_flags);
104562+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104563 if (cp->protocol != IPPROTO_UDP)
104564 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104565 flags = cp->flags;
104566@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
104567
104568 cp->control = NULL;
104569 atomic_set(&cp->n_control, 0);
104570- atomic_set(&cp->in_pkts, 0);
104571+ atomic_set_unchecked(&cp->in_pkts, 0);
104572
104573 cp->packet_xmit = NULL;
104574 cp->app = NULL;
104575@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104576
104577 /* Don't drop the entry if its number of incoming packets is not
104578 located in [0, 8] */
104579- i = atomic_read(&cp->in_pkts);
104580+ i = atomic_read_unchecked(&cp->in_pkts);
104581 if (i > 8 || i < 0) return 0;
104582
104583 if (!todrop_rate[i]) return 0;
104584diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104585index b87ca32..76c7799 100644
104586--- a/net/netfilter/ipvs/ip_vs_core.c
104587+++ b/net/netfilter/ipvs/ip_vs_core.c
104588@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104589 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104590 /* do not touch skb anymore */
104591
104592- atomic_inc(&cp->in_pkts);
104593+ atomic_inc_unchecked(&cp->in_pkts);
104594 ip_vs_conn_put(cp);
104595 return ret;
104596 }
104597@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104598 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104599 pkts = sysctl_sync_threshold(ipvs);
104600 else
104601- pkts = atomic_add_return(1, &cp->in_pkts);
104602+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104603
104604 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104605 ip_vs_sync_conn(net, cp, pkts);
104606diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104607index fdcda8b..dbc1979 100644
104608--- a/net/netfilter/ipvs/ip_vs_ctl.c
104609+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104610@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104611 */
104612 ip_vs_rs_hash(ipvs, dest);
104613 }
104614- atomic_set(&dest->conn_flags, conn_flags);
104615+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104616
104617 /* bind the service */
104618 old_svc = rcu_dereference_protected(dest->svc, 1);
104619@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104620 * align with netns init in ip_vs_control_net_init()
104621 */
104622
104623-static struct ctl_table vs_vars[] = {
104624+static ctl_table_no_const vs_vars[] __read_only = {
104625 {
104626 .procname = "amemthresh",
104627 .maxlen = sizeof(int),
104628@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104629 " %-7s %-6d %-10d %-10d\n",
104630 &dest->addr.in6,
104631 ntohs(dest->port),
104632- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104633+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104634 atomic_read(&dest->weight),
104635 atomic_read(&dest->activeconns),
104636 atomic_read(&dest->inactconns));
104637@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104638 "%-7s %-6d %-10d %-10d\n",
104639 ntohl(dest->addr.ip),
104640 ntohs(dest->port),
104641- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104642+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104643 atomic_read(&dest->weight),
104644 atomic_read(&dest->activeconns),
104645 atomic_read(&dest->inactconns));
104646@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104647
104648 entry.addr = dest->addr.ip;
104649 entry.port = dest->port;
104650- entry.conn_flags = atomic_read(&dest->conn_flags);
104651+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104652 entry.weight = atomic_read(&dest->weight);
104653 entry.u_threshold = dest->u_threshold;
104654 entry.l_threshold = dest->l_threshold;
104655@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104656 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104657 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104658 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104659- (atomic_read(&dest->conn_flags) &
104660+ (atomic_read_unchecked(&dest->conn_flags) &
104661 IP_VS_CONN_F_FWD_MASK)) ||
104662 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104663 atomic_read(&dest->weight)) ||
104664@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104665 {
104666 int idx;
104667 struct netns_ipvs *ipvs = net_ipvs(net);
104668- struct ctl_table *tbl;
104669+ ctl_table_no_const *tbl;
104670
104671 atomic_set(&ipvs->dropentry, 0);
104672 spin_lock_init(&ipvs->dropentry_lock);
104673diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104674index 127f140..553d652 100644
104675--- a/net/netfilter/ipvs/ip_vs_lblc.c
104676+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104677@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104678 * IPVS LBLC sysctl table
104679 */
104680 #ifdef CONFIG_SYSCTL
104681-static struct ctl_table vs_vars_table[] = {
104682+static ctl_table_no_const vs_vars_table[] __read_only = {
104683 {
104684 .procname = "lblc_expiration",
104685 .data = NULL,
104686diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
104687index 2229d2d..b32b785 100644
104688--- a/net/netfilter/ipvs/ip_vs_lblcr.c
104689+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
104690@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
104691 * IPVS LBLCR sysctl table
104692 */
104693
104694-static struct ctl_table vs_vars_table[] = {
104695+static ctl_table_no_const vs_vars_table[] __read_only = {
104696 {
104697 .procname = "lblcr_expiration",
104698 .data = NULL,
104699diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
104700index d93ceeb..4556144 100644
104701--- a/net/netfilter/ipvs/ip_vs_sync.c
104702+++ b/net/netfilter/ipvs/ip_vs_sync.c
104703@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
104704 cp = cp->control;
104705 if (cp) {
104706 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104707- pkts = atomic_add_return(1, &cp->in_pkts);
104708+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104709 else
104710 pkts = sysctl_sync_threshold(ipvs);
104711 ip_vs_sync_conn(net, cp->control, pkts);
104712@@ -771,7 +771,7 @@ control:
104713 if (!cp)
104714 return;
104715 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104716- pkts = atomic_add_return(1, &cp->in_pkts);
104717+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104718 else
104719 pkts = sysctl_sync_threshold(ipvs);
104720 goto sloop;
104721@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
104722
104723 if (opt)
104724 memcpy(&cp->in_seq, opt, sizeof(*opt));
104725- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104726+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104727 cp->state = state;
104728 cp->old_state = cp->state;
104729 /*
104730diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
104731index 3aedbda..6a63567 100644
104732--- a/net/netfilter/ipvs/ip_vs_xmit.c
104733+++ b/net/netfilter/ipvs/ip_vs_xmit.c
104734@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
104735 else
104736 rc = NF_ACCEPT;
104737 /* do not touch skb anymore */
104738- atomic_inc(&cp->in_pkts);
104739+ atomic_inc_unchecked(&cp->in_pkts);
104740 goto out;
104741 }
104742
104743@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
104744 else
104745 rc = NF_ACCEPT;
104746 /* do not touch skb anymore */
104747- atomic_inc(&cp->in_pkts);
104748+ atomic_inc_unchecked(&cp->in_pkts);
104749 goto out;
104750 }
104751
104752diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
104753index a4b5e2a..13b1de3 100644
104754--- a/net/netfilter/nf_conntrack_acct.c
104755+++ b/net/netfilter/nf_conntrack_acct.c
104756@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
104757 #ifdef CONFIG_SYSCTL
104758 static int nf_conntrack_acct_init_sysctl(struct net *net)
104759 {
104760- struct ctl_table *table;
104761+ ctl_table_no_const *table;
104762
104763 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
104764 GFP_KERNEL);
104765diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
104766index 46d1b26..b7f3b76 100644
104767--- a/net/netfilter/nf_conntrack_core.c
104768+++ b/net/netfilter/nf_conntrack_core.c
104769@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
104770 #define DYING_NULLS_VAL ((1<<30)+1)
104771 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
104772
104773+#ifdef CONFIG_GRKERNSEC_HIDESYM
104774+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
104775+#endif
104776+
104777 int nf_conntrack_init_net(struct net *net)
104778 {
104779 int ret = -ENOMEM;
104780@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
104781 if (!net->ct.stat)
104782 goto err_pcpu_lists;
104783
104784+#ifdef CONFIG_GRKERNSEC_HIDESYM
104785+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
104786+#else
104787 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
104788+#endif
104789 if (!net->ct.slabname)
104790 goto err_slabname;
104791
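
Under CONFIG_GRKERNSEC_HIDESYM the per-netns conntrack slab may no longer be named after the struct net pointer, because the cache name is visible in /proc/slabinfo and would leak a kernel address. The patch substitutes an opaque counter:

        static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);

        /* before: leaked the net pointer via /proc/slabinfo */
        net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
        /* after: a unique but meaningless id per namespace */
        net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x",
                                     atomic_inc_return_unchecked(&conntrack_cache_id));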
104792diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
104793index 4e78c57..ec8fb74 100644
104794--- a/net/netfilter/nf_conntrack_ecache.c
104795+++ b/net/netfilter/nf_conntrack_ecache.c
104796@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
104797 #ifdef CONFIG_SYSCTL
104798 static int nf_conntrack_event_init_sysctl(struct net *net)
104799 {
104800- struct ctl_table *table;
104801+ ctl_table_no_const *table;
104802
104803 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
104804 GFP_KERNEL);
104805diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
104806index bd9d315..989947e 100644
104807--- a/net/netfilter/nf_conntrack_helper.c
104808+++ b/net/netfilter/nf_conntrack_helper.c
104809@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
104810
104811 static int nf_conntrack_helper_init_sysctl(struct net *net)
104812 {
104813- struct ctl_table *table;
104814+ ctl_table_no_const *table;
104815
104816 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
104817 GFP_KERNEL);
104818diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
104819index b65d586..beec902 100644
104820--- a/net/netfilter/nf_conntrack_proto.c
104821+++ b/net/netfilter/nf_conntrack_proto.c
104822@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
104823
104824 static void
104825 nf_ct_unregister_sysctl(struct ctl_table_header **header,
104826- struct ctl_table **table,
104827+ ctl_table_no_const **table,
104828 unsigned int users)
104829 {
104830 if (users > 0)
104831diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
104832index fc823fa..8311af3 100644
104833--- a/net/netfilter/nf_conntrack_standalone.c
104834+++ b/net/netfilter/nf_conntrack_standalone.c
104835@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
104836
104837 static int nf_conntrack_standalone_init_sysctl(struct net *net)
104838 {
104839- struct ctl_table *table;
104840+ ctl_table_no_const *table;
104841
104842 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
104843 GFP_KERNEL);
104844diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
104845index 7a394df..bd91a8a 100644
104846--- a/net/netfilter/nf_conntrack_timestamp.c
104847+++ b/net/netfilter/nf_conntrack_timestamp.c
104848@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
104849 #ifdef CONFIG_SYSCTL
104850 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
104851 {
104852- struct ctl_table *table;
104853+ ctl_table_no_const *table;
104854
104855 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
104856 GFP_KERNEL);
104857diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
104858index 43c926c..a5731d8 100644
104859--- a/net/netfilter/nf_log.c
104860+++ b/net/netfilter/nf_log.c
104861@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
104862
104863 #ifdef CONFIG_SYSCTL
104864 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
104865-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
104866+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
104867
104868 static int nf_log_proc_dostring(struct ctl_table *table, int write,
104869 void __user *buffer, size_t *lenp, loff_t *ppos)
104870@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
104871 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
104872 mutex_unlock(&nf_log_mutex);
104873 } else {
104874+ ctl_table_no_const nf_log_table = *table;
104875+
104876 mutex_lock(&nf_log_mutex);
104877 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
104878 if (!logger)
104879- table->data = "NONE";
104880+ nf_log_table.data = "NONE";
104881 else
104882- table->data = logger->name;
104883- r = proc_dostring(table, write, buffer, lenp, ppos);
104884+ nf_log_table.data = logger->name;
104885+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
104886 mutex_unlock(&nf_log_mutex);
104887 }
104888
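
nf_log_sysctl_table is now __read_only, so nf_log_proc_dostring() can no longer retarget table->data in place on the read side. The fix copies the entry to the stack, points the copy at the logger name, and passes that to proc_dostring(), which only reads from the table it is handed:

        ctl_table_no_const nf_log_table = *table;       /* writable stack copy */

        mutex_lock(&nf_log_mutex);
        logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
        nf_log_table.data = logger ? (char *)logger->name : "NONE";
        r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
        mutex_unlock(&nf_log_mutex);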
104889diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
104890index c68c1e5..8b5d670 100644
104891--- a/net/netfilter/nf_sockopt.c
104892+++ b/net/netfilter/nf_sockopt.c
104893@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
104894 }
104895 }
104896
104897- list_add(&reg->list, &nf_sockopts);
104898+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
104899 out:
104900 mutex_unlock(&nf_sockopt_mutex);
104901 return ret;
104902@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
104903 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
104904 {
104905 mutex_lock(&nf_sockopt_mutex);
104906- list_del(&reg->list);
104907+ pax_list_del((struct list_head *)&reg->list);
104908 mutex_unlock(&nf_sockopt_mutex);
104909 }
104910 EXPORT_SYMBOL(nf_unregister_sockopt);
104911diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
104912index 11d85b3..7fcc420 100644
104913--- a/net/netfilter/nfnetlink_log.c
104914+++ b/net/netfilter/nfnetlink_log.c
104915@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
104916 struct nfnl_log_net {
104917 spinlock_t instances_lock;
104918 struct hlist_head instance_table[INSTANCE_BUCKETS];
104919- atomic_t global_seq;
104920+ atomic_unchecked_t global_seq;
104921 };
104922
104923 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
104924@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
104925 /* global sequence number */
104926 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
104927 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
104928- htonl(atomic_inc_return(&log->global_seq))))
104929+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
104930 goto nla_put_failure;
104931
104932 if (data_len) {
104933diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
104934new file mode 100644
104935index 0000000..c566332
104936--- /dev/null
104937+++ b/net/netfilter/xt_gradm.c
104938@@ -0,0 +1,51 @@
104939+/*
104940+ * gradm match for netfilter
104941